"""Tests for head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
# pylint: disable=g-bad-todo,g-import-not-at-top
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
import six
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# pylint: enable=g-bad-todo,g-import-not-at-top
def _assert_variables(test_case,
expected_global=None,
expected_model=None,
expected_trainable=None):
test_case.assertItemsEqual(
tuple([] if expected_global is None else expected_global),
tuple([k.name for k in variables.global_variables()]))
test_case.assertItemsEqual(
tuple([] if expected_model is None else expected_model),
tuple([k.name for k in variables.model_variables()]))
test_case.assertItemsEqual(
tuple([] if expected_trainable is None else expected_trainable),
tuple([k.name for k in variables.trainable_variables()]))
def _assert_no_variables(test_case):
_assert_variables(test_case)
# This must be called from within a tf.Session.
def _assert_metrics(test_case, expected_loss, expected_eval_metrics,
model_fn_ops):
test_case.assertAlmostEqual(expected_loss, model_fn_ops.loss.eval(), places=4)
for k in six.iterkeys(expected_eval_metrics):
test_case.assertIn(k, six.iterkeys(model_fn_ops.eval_metric_ops))
variables.initialize_local_variables().run()
for key, expected_value in six.iteritems(expected_eval_metrics):
value_tensor, update_tensor = model_fn_ops.eval_metric_ops[key]
update = update_tensor.eval()
test_case.assertAlmostEqual(
expected_value,
update,
places=4,
msg="%s: update, expected %s, got %s." % (key, expected_value, update))
value = value_tensor.eval()
test_case.assertAlmostEqual(
expected_value,
value,
places=4,
msg="%s: value, expected %s, got %s." % (key, expected_value, value))
# This must be called from within a tf.Session.
def _assert_summary_tags(test_case, expected_tags=None):
actual_tags = []
for summary_op in ops.get_collection(ops.GraphKeys.SUMMARIES):
summ = summary_pb2.Summary()
summ.ParseFromString(summary_op.eval())
actual_tags.append(summ.value[0].tag)
test_case.assertItemsEqual(expected_tags or [], actual_tags)
def _sigmoid(x):
return 1. / (1. + math.exp(-1 * x))
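# Illustrative helper, not part of the original test file: the regression head's
# hard-coded expected losses below are consistent with plain mean squared error
# over the batch. E.g. labels ((0.,), (1.,), (1.,)) vs. logits ((1.,), (1.,), (3.,))
# give squared errors (1., 0., 4.) and a mean of 5. / 3, the value asserted below.
def _mean_squared_error(labels, logits):
  errors = [(p[0] - l[0]) ** 2 for l, p in zip(labels, logits)]
  return sum(errors) / len(errors)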
class RegressionModelHeadTest(test.TestCase):
def _assert_output_alternatives(self, model_fn_ops):
self.assertEquals({
None: constants.ProblemType.LINEAR_REGRESSION
}, {
k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
})
# TODO(zakaria): test multilabel regression.
def testRegressionWithLogits(self):
head = head_lib._regression_head()
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
labels=((0.,), (1.,), (1.,)),
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((1.,), (1.,), (3.,)))
self._assert_output_alternatives(model_fn_ops)
_assert_summary_tags(self, ["loss"])
_assert_no_variables(self)
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
def testRegressionWithInvalidLogits(self):
head = head_lib._regression_head()
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
head.create_model_fn_ops(
{},
labels=((0.,), (1.,), (1.,)),
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((1., 1.), (1., 1.), (3., 1.)))
def testRegressionWithLogitsInput(self):
head = head_lib._regression_head()
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
labels=((0.,), (1.,), (1.,)),
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits_input=((0., 0.), (0., 0.), (0., 0.)))
self._assert_output_alternatives(model_fn_ops)
w = ("regression_head/logits/weights:0",
"regression_head/logits/biases:0")
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss"])
_assert_metrics(self, 2. / 3, {"loss": 2. / 3}, model_fn_ops)
def testRegressionWithLogitsAndLogitsInput(self):
head = head_lib._regression_head()
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(
ValueError, "Both logits and logits_input supplied"):
head.create_model_fn_ops(
{},
labels=((0.,), (1.,), (1.,)),
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits_input=((0., 0.), (0., 0.), (0., 0.)),
logits=((1.,), (1.,), (3.,)))
def testRegressionEvalMode(self):
head = head_lib._regression_head()
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
labels=((1.,), (1.,), (3.,)),
mode=model_fn.ModeKeys.EVAL,
train_op_fn=_noop_train_op,
logits=((0.,), (1.,), (1.,)))
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
def testRegressionWithLabelName(self):
label_name = "my_label"
head = head_lib._regression_head(label_name=label_name)
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
labels={label_name: ((0.,), (1.,), (1.,))},
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((1.,), (1.,), (3.,)))
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
def testRegressionWithWeights(self):
head = head_lib._regression_head(weight_column_name="label_weight")
with ops.Graph().as_default(), session.Session():
weights = ((2.,), (5.,), (0.,))
model_fn_ops = head.create_model_fn_ops(
features={"label_weight": weights},
labels=((0.,), (1.,), (1.,)),
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((1.,), (1.,), (3.,)))
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
_assert_metrics(self, 2. / len(weights), {"loss": 2. / np.sum(weights)},
model_fn_ops)
def testRegressionWithCenteredBias(self):
head = head_lib._regression_head(enable_centered_bias=True)
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
labels=((0.,), (1.,), (1.,)),
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((1.,), (1.,), (3.,)))
self._assert_output_alternatives(model_fn_ops)
_assert_variables(
self,
expected_global=(
"regression_head/centered_bias_weight:0",
"regression_head/regression_head/centered_bias_weight/Adagrad:0",
),
expected_trainable=("regression_head/centered_bias_weight:0",))
variables.global_variables_initializer().run()
_assert_summary_tags(
self, ["loss", "regression_head/centered_bias/bias_0"])
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
def testRegressionErrorInSparseTensorLabels(self):
head = head_lib._regression_head()
with ops.Graph().as_default():
labels = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (2, 0)),
values=(0., 1., 1.),
dense_shape=(3, 1))
with self.assertRaisesRegexp(ValueError,
"Must set num_classes when passing"):
head.create_model_fn_ops(
{},
labels=labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((1.,), (1.,), (3.,)))
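# Illustrative helper, not part of the original test file: the multi-label
# expected_loss constants in the class below (e.g. .89985204) are consistent with
# per-class sigmoid cross-entropy averaged over the classes. For labels (0, 0, 1)
# and logits (1., 0., 0.): 1.31326 + 0.69315 + 0.69315 = 2.69956, mean ~= 0.89985.
def _sigmoid_cross_entropy_mean(labels, logits):
  losses = [-math.log(_sigmoid(x)) if z else -math.log(1. - _sigmoid(x))
            for z, x in zip(labels, logits)]
  return sum(losses) / len(losses)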
class MultiLabelModelHeadTest(test.TestCase):
def _assert_output_alternatives(self, model_fn_ops):
self.assertEquals({
None: constants.ProblemType.CLASSIFICATION
}, {
k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
})
def setUp(self):
self._logits = ((1., 0., 0.),)
self._labels = ((0, 0, 1),)
def _expected_eval_metrics(self, expected_loss):
return {
"accuracy": 1. / 3,
"auc": 1. / 4,
"loss": expected_loss,
"auc/class0": 1.,
"auc/class1": 1.,
"auc/class2": 0.,
"labels/actual_label_mean/class0": self._labels[0][0],
"labels/actual_label_mean/class1": self._labels[0][1],
"labels/actual_label_mean/class2": self._labels[0][2],
"labels/logits_mean/class0": self._logits[0][0],
"labels/logits_mean/class1": self._logits[0][1],
"labels/logits_mean/class2": self._logits[0][2],
"labels/prediction_mean/class0": self._logits[0][0],
"labels/prediction_mean/class1": self._logits[0][1],
"labels/prediction_mean/class2": self._logits[0][2],
"labels/probability_mean/class0": _sigmoid(self._logits[0][0]),
"labels/probability_mean/class1": _sigmoid(self._logits[0][1]),
"labels/probability_mean/class2": _sigmoid(self._logits[0][2]),
}
def testMultiLabelWithLogits(self):
n_classes = 3
head = head_lib._multi_label_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .89985204
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testMultiLabelWithInvalidLogits(self):
head = head_lib._multi_label_head(n_classes=len(self._labels[0]) + 1)
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
logits=self._logits)
def testMultiLabelWithLogitsInput(self):
n_classes = 3
head = head_lib._multi_label_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
logits_input=((0., 0.),))
self._assert_output_alternatives(model_fn_ops)
w = ("multi_class_head/logits/weights:0",
"multi_class_head/logits/biases:0")
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss"])
expected_loss = .69314718
_assert_metrics(self, expected_loss, {
"accuracy": 2. / 3,
"auc": 2. / 4,
"loss": expected_loss,
"auc/class0": 1.,
"auc/class1": 1.,
"auc/class2": 0.,
"labels/actual_label_mean/class0": self._labels[0][0],
"labels/actual_label_mean/class1": self._labels[0][1],
"labels/actual_label_mean/class2": self._labels[0][2],
"labels/logits_mean/class0": 0.,
"labels/logits_mean/class1": 0.,
"labels/logits_mean/class2": 0.,
"labels/prediction_mean/class0": 0.,
"labels/prediction_mean/class1": 0.,
"labels/prediction_mean/class2": 0.,
"labels/probability_mean/class0": .5,
"labels/probability_mean/class1": .5,
"labels/probability_mean/class2": .5,
}, model_fn_ops)
def testMultiLabelWithLogitsAndLogitsInput(self):
n_classes = 3
head = head_lib._multi_label_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(
ValueError, "Both logits and logits_input supplied"):
head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
logits_input=((0., 0.),), logits=self._logits)
def testMultiLabelEvalMode(self):
n_classes = 3
head = head_lib._multi_label_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.EVAL, self._labels, _noop_train_op,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .89985204
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testMultiLabelWithLabelName(self):
n_classes = 3
label_name = "my_label"
head = head_lib._multi_label_head(
n_classes=n_classes,
label_name=label_name,
metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, {label_name: self._labels},
_noop_train_op, logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .89985204
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testMultiLabelWithWeight(self):
n_classes = 3
head = head_lib._multi_label_head(
n_classes=n_classes,
weight_column_name="label_weight",
metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
features={"label_weight": .1},
labels=self._labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
_assert_metrics(self, .089985214,
self._expected_eval_metrics(2.69956), model_fn_ops)
def testMultiLabelWithCenteredBias(self):
n_classes = 3
head = head_lib._multi_label_head(
n_classes=n_classes,
enable_centered_bias=True,
metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_variables(
self,
expected_global=(
"multi_class_head/centered_bias_weight:0",
("multi_class_head/multi_class_head/centered_bias_weight/"
"Adagrad:0"),),
expected_trainable=("multi_class_head/centered_bias_weight:0",))
variables.global_variables_initializer().run()
_assert_summary_tags(self, (
"loss",
"multi_class_head/centered_bias/bias_0",
"multi_class_head/centered_bias/bias_1",
"multi_class_head/centered_bias/bias_2"
))
expected_loss = .89985204
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testMultiLabelSparseTensorLabels(self):
n_classes = 3
head = head_lib._multi_label_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
labels = sparse_tensor.SparseTensorValue(
indices=((0, 0),),
values=(2,),
dense_shape=(1, 1))
model_fn_ops = head.create_model_fn_ops(
features={},
mode=model_fn.ModeKeys.TRAIN,
labels=labels,
train_op_fn=_noop_train_op,
logits=self._logits)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .89985204
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testMultiLabelSparseTensorLabelsTooFewClasses(self):
n_classes = 3
head = head_lib._multi_label_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
# Set _logits_dimension (n_classes) to a lower value; if it's set to 1
# upfront, the class throws an error during initialization.
head._logits_dimension = 1
with ops.Graph().as_default(), session.Session():
labels = sparse_tensor.SparseTensorValue(
indices=((0, 0),),
values=(2,),
dense_shape=(1, 1))
with self.assertRaisesRegexp(ValueError,
"Must set num_classes >= 2 when passing"):
head.create_model_fn_ops(
features={},
labels=labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=[0.])
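# Sketch of where the binary-classification constants below come from (not part of
# the original file): using the logloss formula quoted in the test comments,
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)), labels (1., 0.) with
# logits (1., 1.) give per-example losses 0.31326 and 1.31326, whose mean is the
# asserted .81326175; with zero logits both terms are -log(0.5) = .69314718.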
class BinaryClassificationModelHeadTest(test.TestCase):
def _assert_output_alternatives(self, model_fn_ops):
self.assertEquals({
None: constants.ProblemType.LOGISTIC_REGRESSION
}, {
k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
})
def setUp(self):
self._logits = ((1.,), (1.,))
self._labels = ((1.,), (0.,))
def _expected_eval_metrics(self, expected_loss):
label_mean = np.mean(self._labels)
return {
"accuracy": 1. / 2,
"accuracy/baseline_label_mean": label_mean,
"accuracy/threshold_0.500000_mean": 1. / 2,
"auc": 1. / 2,
"labels/actual_label_mean": label_mean,
"labels/prediction_mean": .731059, # softmax
"loss": expected_loss,
"precision/positive_threshold_0.500000_mean": 1. / 2,
"recall/positive_threshold_0.500000_mean": 1. / 1,
}
def testBinaryClassificationWithLogits(self):
n_classes = 2
head = head_lib._multi_class_head(n_classes=n_classes)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testBinaryClassificationWithInvalidLogits(self):
head = head_lib._multi_class_head(n_classes=len(self._labels) + 1)
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
logits=self._logits)
def testBinaryClassificationWithLogitsInput(self):
n_classes = 2
head = head_lib._multi_class_head(n_classes=n_classes)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
logits_input=((0., 0.), (0., 0.)))
self._assert_output_alternatives(model_fn_ops)
w = ("binary_logistic_head/logits/weights:0",
"binary_logistic_head/logits/biases:0")
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss"])
expected_loss = .69314718
label_mean = np.mean(self._labels)
_assert_metrics(self, expected_loss, {
"accuracy": 1. / 2,
"accuracy/baseline_label_mean": label_mean,
"accuracy/threshold_0.500000_mean": 1. / 2,
"auc": 1. / 2,
"labels/actual_label_mean": label_mean,
"labels/prediction_mean": .5, # softmax
"loss": expected_loss,
"precision/positive_threshold_0.500000_mean": 0. / 2,
"recall/positive_threshold_0.500000_mean": 0. / 1,
}, model_fn_ops)
def testBinaryClassificationWithLogitsAndLogitsInput(self):
head = head_lib._multi_class_head(n_classes=2)
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(
ValueError, "Both logits and logits_input supplied"):
head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
logits_input=((0., 0.), (0., 0.)), logits=self._logits)
def testBinaryClassificationEvalMode(self):
n_classes = 2
head = head_lib._multi_class_head(n_classes=n_classes)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.EVAL, self._labels, _noop_train_op,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testBinaryClassificationInferMode(self):
n_classes = 2
head = head_lib._multi_class_head(n_classes=n_classes)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.INFER, self._labels, _noop_train_op,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
def testErrorInSparseTensorLabels(self):
n_classes = 2
head = head_lib._multi_class_head(n_classes=n_classes)
with ops.Graph().as_default():
labels = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (2, 0)),
values=(0, 1, 1),
dense_shape=(3, 1))
with self.assertRaisesRegexp(ValueError,
"Must set num_classes when passing"):
head.create_model_fn_ops(
{},
model_fn.ModeKeys.TRAIN,
labels,
_noop_train_op,
logits=((1.,), (1.,), (3.,)))
def testBinaryClassificationWithLabelName(self):
label_name = "my_label"
head = head_lib._multi_class_head(n_classes=2, label_name=label_name)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{},
labels={label_name: self._labels},
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testBinaryClassificationWithWeights(self):
n_classes = 2
head = head_lib._multi_class_head(
n_classes=n_classes, weight_column_name="label_weight")
with ops.Graph().as_default(), session.Session():
weights = ((1.,), (0.,))
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
features={"label_weight": weights},
labels=self._labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_total_loss = .31326166
_assert_metrics(
self,
expected_total_loss / len(weights),
{
"accuracy": 1. / 1,
"accuracy/baseline_label_mean": 1. / 1,
"accuracy/threshold_0.500000_mean": 1. / 1,
"auc": 0. / 1,
"labels/actual_label_mean": 1. / 1,
"labels/prediction_mean": .731059, # softmax
# TODO(ptucker): Is this the correct eval loss, sum not average?
"loss": expected_total_loss,
"precision/positive_threshold_0.500000_mean": 1. / 1,
"recall/positive_threshold_0.500000_mean": 1. / 1,
},
model_fn_ops)
def testBinaryClassificationWithCenteredBias(self):
head = head_lib._multi_class_head(n_classes=2, enable_centered_bias=True)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_variables(
self,
expected_global=(
"binary_logistic_head/centered_bias_weight:0",
("binary_logistic_head/binary_logistic_head/centered_bias_weight/"
"Adagrad:0"),),
expected_trainable=("binary_logistic_head/centered_bias_weight:0",))
variables.global_variables_initializer().run()
_assert_summary_tags(
self, ["loss", "binary_logistic_head/centered_bias/bias_0"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
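# Illustrative helper, not part of the original test file: the multi-class expected
# losses below are consistent with softmax cross-entropy. For logits (1., 0., 0.)
# and label 2 the softmax is ~(0.576117, 0.211942, 0.211942), so the loss is
# -log(0.211942) ~= 1.5514446; for all-zero logits it is -log(1. / 3) ~= 1.0986123.
def _softmax_cross_entropy(label, logits):
  exps = [math.exp(x) for x in logits]
  return -math.log(exps[label] / sum(exps))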
class MultiClassModelHeadTest(test.TestCase):
def _assert_output_alternatives(self, model_fn_ops):
self.assertEquals({
None: constants.ProblemType.CLASSIFICATION
}, {
k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
})
def setUp(self):
self._logits = ((1., 0., 0.),)
self._labels = (2,)
def _expected_eval_metrics(self, expected_loss):
return {
"accuracy": 0.,
"auc": 1. / 4,
"loss": expected_loss,
"auc/class0": 1.,
"auc/class1": 1.,
"auc/class2": 0.,
"labels/actual_label_mean/class0": 0. / 1,
"labels/actual_label_mean/class1": 0. / 1,
"labels/actual_label_mean/class2": 1. / 1,
"labels/logits_mean/class0": self._logits[0][0],
"labels/logits_mean/class1": self._logits[0][1],
"labels/logits_mean/class2": self._logits[0][2],
"labels/prediction_mean/class0": self._logits[0][0],
"labels/prediction_mean/class1": self._logits[0][1],
"labels/prediction_mean/class2": self._logits[0][2],
"labels/probability_mean/class0": 0.576117, # softmax
"labels/probability_mean/class1": 0.211942, # softmax
"labels/probability_mean/class2": 0.211942, # softmax
}
def testMultiClassWithLogits(self):
n_classes = 3
head = head_lib._multi_class_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = 1.5514446
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testMultiClassWithInvalidLogits(self):
head = head_lib._multi_class_head(n_classes=len(self._logits[0]) + 1)
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
logits=self._logits)
def testMultiClassWithLogitsInput(self):
n_classes = 3
head = head_lib._multi_class_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
logits_input=((0., 0.),))
self._assert_output_alternatives(model_fn_ops)
w = ("multi_class_head/logits/weights:0",
"multi_class_head/logits/biases:0")
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss"])
expected_loss = 1.0986123
_assert_metrics(self, expected_loss, {
"accuracy": 0.,
"auc": 2. / 4,
"loss": expected_loss,
"auc/class0": 1.,
"auc/class1": 1.,
"auc/class2": 0.,
"labels/actual_label_mean/class0": 0. / 1,
"labels/actual_label_mean/class1": 0. / 1,
"labels/actual_label_mean/class2": 1. / 1,
"labels/logits_mean/class0": 0.,
"labels/logits_mean/class1": 0.,
"labels/logits_mean/class2": 0.,
"labels/prediction_mean/class0": 1.,
"labels/prediction_mean/class1": 0.,
"labels/prediction_mean/class2": 0.,
"labels/probability_mean/class0": 0.333333, # softmax
"labels/probability_mean/class1": 0.333333, # softmax
"labels/probability_mean/class2": 0.333333, # softmax
}, model_fn_ops)
def testMultiClassWithLogitsAndLogitsInput(self):
n_classes = 3
head = head_lib._multi_class_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(
ValueError, "Both logits and logits_input supplied"):
head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
logits_input=((0., 0.),), logits=self._logits)
def testMultiClassEvalMode(self):
n_classes = 3
head = head_lib._multi_class_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.EVAL, self._labels, _noop_train_op,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = 1.5514446
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testMultiClassWithWeight(self):
n_classes = 3
head = head_lib._multi_class_head(
n_classes=n_classes,
weight_column_name="label_weight",
metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
weight = .1
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
features={"label_weight": weight},
labels=self._labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = 1.5514446
_assert_metrics(self, expected_loss * weight,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testInvalidNClasses(self):
for n_classes in (None, -1, 0, 1):
with self.assertRaisesRegexp(ValueError, "n_classes must be > 1"):
head_lib._multi_class_head(n_classes=n_classes)
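# Illustrative helper, not part of the original test file: the SVM head's expected
# losses below are consistent with the hinge loss on {0, 1} labels mapped to
# {-1, +1}. For predictions (-.5, 1.2) and labels (0, 1) this gives
# (max(0, 1 - .5), max(0, 1 - 1.2)) = (.5, 0.), matching _expected_losses in setUp.
def _hinge_loss(label, prediction):
  sign = 1. if label else -1.
  return max(0., 1. - sign * prediction)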
class BinarySvmModelHeadTest(test.TestCase):
def _assert_output_alternatives(self, model_fn_ops):
self.assertEquals({
None: constants.ProblemType.LOGISTIC_REGRESSION
}, {
k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
})
def setUp(self):
# The prediction for the first example is on the correct side of the hyperplane
# (i.e., < 0) but falls within the [-1, 1] margin, so it incurs a 0.5 loss. The
# second prediction is outside the margin, so it incurs no loss at all.
self._predictions = ((-.5,), (1.2,))
self._labels = (0, 1)
self._expected_losses = (.5, 0.)
def testBinarySVMWithLogits(self):
head = head_lib._binary_svm_head()
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
model_fn.ModeKeys.TRAIN,
self._labels,
_noop_train_op,
logits=self._predictions)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = np.average(self._expected_losses)
_assert_metrics(self, expected_loss, {
"accuracy": 1.,
"loss": expected_loss,
}, model_fn_ops)
def testBinarySVMWithInvalidLogits(self):
head = head_lib._binary_svm_head()
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
logits=np.ones((2, 2)))
def testBinarySVMWithLogitsInput(self):
head = head_lib._binary_svm_head()
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
model_fn.ModeKeys.TRAIN,
self._labels,
_noop_train_op,
logits_input=((0., 0.), (0., 0.)))
self._assert_output_alternatives(model_fn_ops)
w = ("binary_logistic_head/logits/weights:0",
"binary_logistic_head/logits/biases:0")
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss"])
expected_loss = 1.
_assert_metrics(self, expected_loss, {
"accuracy": .5,
"loss": expected_loss,
}, model_fn_ops)
def testBinarySVMWithLogitsAndLogitsInput(self):
head = head_lib._binary_svm_head()
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(
ValueError, "Both logits and logits_input supplied"):
head.create_model_fn_ops(
{},
model_fn.ModeKeys.TRAIN,
self._labels,
_noop_train_op,
logits_input=((0., 0.), (0., 0.)),
logits=self._predictions)
def testBinarySVMEvalMode(self):
head = head_lib._binary_svm_head()
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
model_fn.ModeKeys.EVAL,
self._labels,
_noop_train_op,
logits=self._predictions)
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = np.average(self._expected_losses)
_assert_metrics(self, expected_loss, {
"accuracy": 1.,
"loss": expected_loss,
}, model_fn_ops)
def testBinarySVMWithLabelName(self):
label_name = "my_label"
head = head_lib._binary_svm_head(label_name=label_name)
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
model_fn.ModeKeys.TRAIN,
{label_name: self._labels},
_noop_train_op,
logits=self._predictions)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = np.average(self._expected_losses)
_assert_metrics(self, expected_loss, {
"accuracy": 1.,
"loss": expected_loss,
}, model_fn_ops)
def testBinarySVMWithWeights(self):
head = head_lib._binary_svm_head(weight_column_name="weights")
with ops.Graph().as_default(), session.Session():
weights = (7., 11.)
model_fn_ops = head.create_model_fn_ops(
features={"weights": weights},
mode=model_fn.ModeKeys.TRAIN,
labels=self._labels,
train_op_fn=_noop_train_op,
logits=self._predictions)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_weighted_sum = np.sum(
np.multiply(weights, self._expected_losses))
_assert_metrics(self, expected_weighted_sum / len(weights), {
"accuracy": 1.,
"loss": expected_weighted_sum / np.sum(weights),
}, model_fn_ops)
def testBinarySVMWithCenteredBias(self):
head = head_lib._binary_svm_head(enable_centered_bias=True)
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
model_fn.ModeKeys.TRAIN,
self._labels,
_noop_train_op,
logits=self._predictions)
self._assert_output_alternatives(model_fn_ops)
_assert_variables(
self,
expected_global=(
"binary_logistic_head/centered_bias_weight:0",
("binary_logistic_head/binary_logistic_head/centered_bias_weight/"
"Adagrad:0"),
),
expected_trainable=("binary_logistic_head/centered_bias_weight:0",))
variables.global_variables_initializer().run()
_assert_summary_tags(
self, ["loss", "binary_logistic_head/centered_bias/bias_0"])
expected_loss = np.average(self._expected_losses)
_assert_metrics(self, expected_loss, {
"accuracy": 1.,
"loss": expected_loss,
}, model_fn_ops)
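# Sketch of where the multi-head loss constants below come from (not part of the
# original file): the 7 logits are split into 3 for head1 and 4 for head2, each head
# takes softmax cross-entropy, and the per-head losses are summed (optionally
# weighted):
#   head1: logits (-0.7, 0.2, .1), label 1 -> -log(softmax[1]) ~= 0.838
#   head2: logits (.1, .1, .1, .1), label 1 -> -log(1. / 4)    ~= 1.386
#   unweighted sum ~= 2.224; with head weights (1, .5): 0.838 + .5 * 1.386 ~= 1.531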
class MultiHeadTest(test.TestCase):
def testInvalidHeads(self):
named_head = head_lib._multi_class_head(
n_classes=3, label_name="label", head_name="head1")
unnamed_head = head_lib._multi_class_head(
n_classes=4, label_name="label")
with self.assertRaisesRegexp(ValueError, "must have names"):
head_lib._multi_head((named_head, unnamed_head))
with self.assertRaisesRegexp(ValueError, "must be SingleHead"):
head_lib._multi_head((named_head, head_lib._multi_head((named_head,))))
def testTrain_withNoHeadWeights(self):
head1 = head_lib._multi_class_head(
n_classes=3, label_name="label1", head_name="head1")
head2 = head_lib._multi_class_head(
n_classes=4, label_name="label2", head_name="head2")
head = head_lib._multi_head((head1, head2))
labels = {
"label1": (1,),
"label2": (1,)
}
model_fn_ops = head.create_model_fn_ops(
features={"weights": (2.0, 10.0)},
labels=labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
self.assertIsNone(model_fn_ops.predictions)
self.assertIsNotNone(model_fn_ops.loss)
self.assertIsNotNone(model_fn_ops.train_op)
self.assertFalse(model_fn_ops.eval_metric_ops)
self.assertIsNone(model_fn_ops.output_alternatives)
with session.Session() as sess:
self.assertAlmostEqual(2.224, sess.run(model_fn_ops.loss), places=3)
def testTrain_withHeadWeights(self):
head1 = head_lib._multi_class_head(
n_classes=3, label_name="label1", head_name="head1")
head2 = head_lib._multi_class_head(
n_classes=4, label_name="label2", head_name="head2")
head = head_lib._multi_head((head1, head2), (1, .5))
labels = {
"label1": (1,),
"label2": (1,)
}
model_fn_ops = head.create_model_fn_ops(
features={"weights": (2.0, 10.0)},
labels=labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=_noop_train_op,
logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
self.assertIsNone(model_fn_ops.predictions)
self.assertIsNotNone(model_fn_ops.loss)
self.assertIsNotNone(model_fn_ops.train_op)
self.assertFalse(model_fn_ops.eval_metric_ops)
self.assertIsNone(model_fn_ops.output_alternatives)
with session.Session() as sess:
self.assertAlmostEqual(1.531, sess.run(model_fn_ops.loss), places=3)
def testInfer(self):
head1 = head_lib._multi_class_head(
n_classes=3, label_name="label1", head_name="head1")
head2 = head_lib._multi_class_head(
n_classes=4, label_name="label2", head_name="head2")
head = head_lib._multi_head((head1, head2), (1, .5))
labels = {
"label1": (1,),
"label2": (1,)
}
model_fn_ops = head.create_model_fn_ops(
features={"weights": (2.0, 10.0)},
labels=labels,
mode=model_fn.ModeKeys.INFER,
train_op_fn=_noop_train_op,
logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
self.assertIsNotNone(model_fn_ops.predictions)
self.assertIsNone(model_fn_ops.loss)
self.assertIsNone(model_fn_ops.train_op)
self.assertFalse(model_fn_ops.eval_metric_ops)
# Tests predictions keys.
self.assertItemsEqual((
("head1", prediction_key.PredictionKey.LOGITS),
("head1", prediction_key.PredictionKey.PROBABILITIES),
("head1", prediction_key.PredictionKey.CLASSES),
("head2", prediction_key.PredictionKey.LOGITS),
("head2", prediction_key.PredictionKey.PROBABILITIES),
("head2", prediction_key.PredictionKey.CLASSES),
), model_fn_ops.predictions.keys())
# Tests output alternative.
self.assertEquals({
"head1": constants.ProblemType.CLASSIFICATION,
"head2": constants.ProblemType.CLASSIFICATION,
}, {
k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
})
self.assertItemsEqual((
prediction_key.PredictionKey.LOGITS,
prediction_key.PredictionKey.PROBABILITIES,
prediction_key.PredictionKey.CLASSES,
), model_fn_ops.output_alternatives["head1"][1].keys())
self.assertItemsEqual((
prediction_key.PredictionKey.LOGITS,
prediction_key.PredictionKey.PROBABILITIES,
prediction_key.PredictionKey.CLASSES,
), model_fn_ops.output_alternatives["head2"][1].keys())
def testEval(self):
head1 = head_lib._multi_class_head(
n_classes=3, label_name="label1", head_name="head1")
head2 = head_lib._multi_class_head(
n_classes=4, label_name="label2", head_name="head2")
head = head_lib._multi_head((head1, head2), (1, .5))
labels = {
"label1": (1,),
"label2": (1,)
}
model_fn_ops = head.create_model_fn_ops(
features={"weights": (2.0, 10.0)},
labels=labels,
mode=model_fn.ModeKeys.EVAL,
train_op_fn=_noop_train_op,
logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
self.assertIsNotNone(model_fn_ops.predictions)
self.assertIsNotNone(model_fn_ops.loss)
self.assertIsNone(model_fn_ops.train_op)
self.assertIsNotNone(model_fn_ops.eval_metric_ops)
self.assertIsNone(model_fn_ops.output_alternatives)
metric_ops = model_fn_ops.eval_metric_ops
# Tests eval keys.
self.assertIn("accuracy/head1", metric_ops.keys())
self.assertIn("accuracy/head2", metric_ops.keys())
def _noop_train_op(unused_loss):
return control_flow_ops.no_op()
if __name__ == "__main__":
test.main()
| {
"content_hash": "565b9b81fca0e154975f60c872127b4b",
"timestamp": "",
"source": "github",
"line_count": 1156,
"max_line_length": 80,
"avg_line_length": 39.7621107266436,
"alnum_prop": 0.6080931143261177,
"repo_name": "handroissuazo/tensorflow",
"id": "ec8f25c657b4691af32ab82d4b25a882a5c3c700",
"size": "46654",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/estimators/head_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6963"
},
{
"name": "C",
"bytes": "177136"
},
{
"name": "C++",
"bytes": "20579965"
},
{
"name": "CMake",
"bytes": "120039"
},
{
"name": "CSS",
"bytes": "7005"
},
{
"name": "Go",
"bytes": "103991"
},
{
"name": "HTML",
"bytes": "582790"
},
{
"name": "Java",
"bytes": "278667"
},
{
"name": "JavaScript",
"bytes": "21416"
},
{
"name": "Jupyter Notebook",
"bytes": "399586"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "32007"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "187378"
},
{
"name": "Python",
"bytes": "20517015"
},
{
"name": "Shell",
"bytes": "329427"
},
{
"name": "TypeScript",
"bytes": "765149"
}
],
"symlink_target": ""
} |
"""Constants for the generic (IP Camera) integration."""
DOMAIN = "generic"
DEFAULT_NAME = "Generic Camera"
CONF_CONFIRMED_OK = "confirmed_ok"
CONF_CONTENT_TYPE = "content_type"
CONF_LIMIT_REFETCH_TO_URL_CHANGE = "limit_refetch_to_url_change"
CONF_STILL_IMAGE_URL = "still_image_url"
CONF_STREAM_SOURCE = "stream_source"
CONF_FRAMERATE = "framerate"
GET_IMAGE_TIMEOUT = 10
DEFAULT_USERNAME = None
DEFAULT_PASSWORD = None
DEFAULT_IMAGE_URL = None
DEFAULT_STREAM_SOURCE = None
| {
"content_hash": "4a37b93b12f2ed956f05c41e29598aa8",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 64,
"avg_line_length": 29.8125,
"alnum_prop": 0.7484276729559748,
"repo_name": "nkgilley/home-assistant",
"id": "eb3769094222c2c8e3003d704b18716e096e0fd6",
"size": "477",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/generic/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from flask import Blueprint
from ..extensions import ldap
foo = Blueprint('foo', __name__, url_prefix='/foo')
@foo.route('/group')
@ldap.group_required(groups=['Web Developers', 'QA'])
def group():
return 'Group restricted page in foo module'
| {
"content_hash": "0ca6c5994a3f1d218f3f707877556dfe",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 53,
"avg_line_length": 25,
"alnum_prop": 0.696,
"repo_name": "admiralobvious/flask-simpleldap",
"id": "41505187939852ae96c7cecb6fda2386e0d4a61b",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/blueprints/blueprints/foo/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16276"
}
],
"symlink_target": ""
} |
import socket
import threading
from django.core.handlers.wsgi import WSGIHandler
from django.core.servers import basehttp
from django.test.testcases import TransactionTestCase
from django.core.management import call_command
class StoppableWSGIServer(basehttp.WSGIServer):
"""WSGIServer with short timeout, so that server thread can stop this server."""
def server_bind(self):
"""Sets timeout to 1 second."""
basehttp.WSGIServer.server_bind(self)
self.socket.settimeout(1)
def get_request(self):
"""Checks for timeout when getting request."""
try:
sock, address = self.socket.accept()
sock.settimeout(None)
return (sock, address)
except socket.timeout:
raise
class TestServerThread(threading.Thread):
"""Thread for running a http server while tests are running."""
def __init__(self, address, port):
self.address = address
self.port = port
self._stopevent = threading.Event()
self.started = threading.Event()
self.error = None
super(TestServerThread, self).__init__()
def run(self):
"""Sets up test server and database and loops over handling http requests."""
try:
handler = basehttp.AdminMediaHandler(WSGIHandler())
server_address = (self.address, self.port)
httpd = StoppableWSGIServer(server_address, basehttp.WSGIRequestHandler)
httpd.set_app(handler)
self.started.set()
except basehttp.WSGIServerException, e:
self.error = e
self.started.set()
return
# Must do database stuff in this new thread if database in memory.
from django.conf import settings
if settings.DATABASE_ENGINE == 'sqlite3' \
and (not settings.TEST_DATABASE_NAME or settings.TEST_DATABASE_NAME == ':memory:'):
# Import the fixture data into the test database.
if hasattr(self, 'fixtures'):
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures, **{'verbosity': 0})
# Loop until we get a stop event.
while not self._stopevent.isSet():
httpd.handle_request()
def join(self, timeout=None):
"""Stop the thread and wait for it to finish."""
self._stopevent.set()
threading.Thread.join(self, timeout)
class TestServerTestCase(TransactionTestCase):
def start_test_server(self, address='localhost', port=8000):
"""Creates a live test server object (instance of WSGIServer)."""
self.server_thread = TestServerThread(address, port)
self.server_thread.start()
self.server_thread.started.wait()
if self.server_thread.error:
raise self.server_thread.error
def stop_test_server(self):
if self.server_thread:
self.server_thread.join()
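# Illustrative usage, not part of the original module; the class name, port and URL
# below are placeholders. A test case starts the live server in setUp, stops it in
# tearDown, and talks to it over plain HTTP:
class ExampleLiveServerTest(TestServerTestCase):
    def setUp(self):
        self.start_test_server(address='localhost', port=8001)

    def tearDown(self):
        self.stop_test_server()

    def test_server_responds(self):
        import urllib2  # Python 2, matching the module above
        response = urllib2.urlopen('http://localhost:8001/')
        self.assertEqual(response.getcode(), 200)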
| {
"content_hash": "0f0a58d899142373ef68f58529fe37be",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 95,
"avg_line_length": 37.45679012345679,
"alnum_prop": 0.6334871456822676,
"repo_name": "rbraley/django-tastypie",
"id": "8418507e58d8259d71831abb45e4e9b2d0423061",
"size": "3034",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "tests/testcases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "528375"
},
{
"name": "Shell",
"bytes": "842"
}
],
"symlink_target": ""
} |
import datetime
import logging
from sqlalchemy.orm import joinedload
from sqlalchemy.orm.session import Session
import smartdb
import tools
from model import MachineInterface, Machine, MachineDisk, Chassis, ChassisPort
from smartdb import cockroach_transaction
logger = logging.getLogger(__file__)
class DiscoveryRepository:
__name__ = "DiscoveryRepository"
def __init__(self, smart: smartdb.SmartDatabaseClient):
self.smart = smart
@staticmethod
def _lint_discovery_data(discovery_data: dict):
# ignore ignition-journal: NotImplemented
missing_keys = {"boot-info", "disks", "lldp", "interfaces"} - set(discovery_data.keys())
if missing_keys:
err_msg = "missing keys in discovery data: '%s'", ",".join(missing_keys)
logger.error(err_msg)
raise TypeError(err_msg)
if discovery_data["disks"] is None:
discovery_data["disks"] = list()
return discovery_data
@staticmethod
def _delete_all_attached(session: Session, machine: Machine):
"""
Delete all resources attached to a machine
As we don't need performance we can avoid heuristics by dropping and re-creating theses needed resources
The discovery data is the reference of the reality
:param session:
:param machine:
:return:
"""
session.query(MachineDisk) \
.filter(MachineDisk.machine_id == machine.id) \
.delete()
all_mi = session.query(MachineInterface) \
.filter(MachineInterface.machine_id == machine.id)
for i in all_mi:
session.query(ChassisPort) \
.filter(ChassisPort.machine_interface == i.id) \
.delete()
session.delete(i)
session.flush()
@staticmethod
def _insert_network(session: Session, machine: Machine, discovery_data: dict):
machine_interfaces = dict()
for i in discovery_data["interfaces"]:
if i["mac"]:
machine_interface = MachineInterface(
mac=i["mac"],
name=i["name"],
netmask=i["netmask"],
ipv4=i["ipv4"],
cidrv4=i["cidrv4"],
as_boot=i["mac"] == discovery_data["boot-info"]["mac"],
gateway=i["gateway"],
fqdn=tools.get_verified_dns_query(i),
machine_id=machine.id
)
# track machine interfaces so we can look them up later, during the LLDP section
machine_interfaces[machine_interface.name] = machine_interface
session.add(machine_interface)
session.flush()
if discovery_data["lldp"]["is_file"] and discovery_data["lldp"]["data"]["interfaces"]:
for lldp_interface in discovery_data["lldp"]["data"]["interfaces"]:
chassis = session.query(Chassis) \
.filter(Chassis.name == lldp_interface["chassis"]["name"] and
Chassis.name == lldp_interface["chassis"]["id"]) \
.first()
if not chassis:
chassis = Chassis(
name=lldp_interface["chassis"]["name"],
mac=lldp_interface["chassis"]["id"],
)
session.add(chassis)
session.flush()
machine_interface_id = machine_interfaces[lldp_interface["name"]].id
session.add(
ChassisPort(
# TODO: on some vendors it's not a MAC but a string like Ethernet1/22
mac=lldp_interface["port"]["id"],
machine_interface=machine_interface_id,
chassis_id=chassis.id
)
)
def upsert(self, discovery_data: dict):
caller = "%s.%s" % (self.__name__, self.upsert.__name__)
discovery_data = self._lint_discovery_data(discovery_data)
now = datetime.datetime.utcnow()
@cockroach_transaction
def callback(caller=caller):
new = True
with self.smart.new_session() as session:
machine = session.query(Machine) \
.filter(Machine.uuid == discovery_data["boot-info"]["uuid"]) \
.first()
if machine:
new = False
machine.updated_date = now
self._delete_all_attached(session, machine)
else:
machine = Machine(uuid=discovery_data["boot-info"]["uuid"], created_date=now, updated_date=now)
session.add(machine)
session.flush()
for d in discovery_data["disks"]:
session.add(MachineDisk(path=d["path"], size=d["size-bytes"], machine_id=machine.id))
self._insert_network(session, machine, discovery_data)
session.commit()
return new
return callback(caller)
def fetch_all_discovery(self):
"""
Get discovery data of interfaces, disks and the boot-info
:return:
"""
machines = []
with self.smart.new_session() as session:
for m in session.query(Machine) \
.options(joinedload("interfaces")) \
.options(joinedload("disks")) \
.join(MachineInterface):
boot_interface = None
interfaces = []
for i in m.interfaces:
if i.as_boot:
boot_interface = i
interfaces.append({
"as_boot": i.as_boot,
"cidrv4": i.cidrv4,
"fqdn": i.fqdn,
"gateway": i.gateway,
"ipv4": i.ipv4,
"mac": i.mac,
"name": i.name,
"netmask": i.netmask
})
machines.append({
"boot-info": {
"uuid": m.uuid,
"created-date": m.created_date,
"updated-date": m.updated_date,
"mac": boot_interface.mac
},
"interfaces": interfaces,
"disks": [{"size-bytes": d.size, "path": d.path} for d in m.disks]
})
return machines
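# Illustrative only, not part of the original module: a minimal discovery payload of
# the shape accepted by DiscoveryRepository.upsert(), as inferred from
# _lint_discovery_data() and _insert_network(). Every value below is made up.
EXAMPLE_DISCOVERY_DATA = {
    "boot-info": {"uuid": "00000000-0000-0000-0000-000000000000",
                  "mac": "52:54:00:00:00:01"},
    "disks": [{"path": "/dev/sda", "size-bytes": 21474836480}],
    "interfaces": [{"mac": "52:54:00:00:00:01", "name": "eth0", "netmask": 21,
                    "ipv4": "172.20.0.10", "cidrv4": "172.20.0.10/21",
                    "gateway": "172.20.0.1"}],
    "lldp": {"is_file": False, "data": {"interfaces": []}},
}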
| {
"content_hash": "ff0e2c337bb7ab4b71e0d5cc11b7899b",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 115,
"avg_line_length": 38.17341040462428,
"alnum_prop": 0.5036341611144761,
"repo_name": "JulienBalestra/enjoliver",
"id": "54fee4e1690bdf2bbf94310b5ee7eda304ee574b",
"size": "6604",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/repositories/machine_discovery_repo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "65281"
},
{
"name": "HTML",
"bytes": "207"
},
{
"name": "JavaScript",
"bytes": "17741"
},
{
"name": "Makefile",
"bytes": "46002"
},
{
"name": "Python",
"bytes": "510653"
},
{
"name": "Roff",
"bytes": "1036"
},
{
"name": "Shell",
"bytes": "86790"
},
{
"name": "Vue",
"bytes": "8355"
}
],
"symlink_target": ""
} |
AUTHOR = 'Chris Dent'
AUTHOR_EMAIL = '[email protected]'
NAME = 'tiddlywebplugins.jsonp'
DESCRIPTION = 'JSONP serialization for TiddlyWeb'
VERSION = '0.7'
import os
from setuptools import setup, find_packages
setup(
namespace_packages = ['tiddlywebplugins'],
name = NAME,
version = VERSION,
description = DESCRIPTION,
long_description = open(os.path.join(os.path.dirname(__file__), 'README')).read(),
author = AUTHOR,
author_email = AUTHOR_EMAIL,
url = 'http://pypi.python.org/pypi/%s' % NAME,
platforms = 'Posix; MacOS X; Windows',
packages = find_packages(exclude=['test']),
install_requires = ['setuptools', 'tiddlyweb'],
zip_safe = False,
license = 'BSD',
)
| {
"content_hash": "10139008ed1b80567466c5a9d9ea3f5c",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 86,
"avg_line_length": 26.814814814814813,
"alnum_prop": 0.6588397790055248,
"repo_name": "tiddlyweb/tiddlywebplugins.jsonp",
"id": "b9330aa90e2c96a940c63ccacf161c15ec1243d2",
"size": "724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3937"
}
],
"symlink_target": ""
} |
import json
import logging
import ujson
import unicodedata
from os import mkdir
from os.path import join, exists
from typing import List, Optional
import numpy as np
import requests
from bs4 import BeautifulSoup
from docqa.data_processing.text_utils import NltkAndPunctTokenizer, ParagraphWithInverse
from docqa.configurable import Configurable
WIKI_API = "https://en.wikipedia.org/w/api.php"
log = logging.getLogger('wiki')
# TODO figure out how to set this by default
log.propagate = False
if not log.handlers:
formatter = logging.Formatter("%(asctime)s: %(levelname)s: %(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(logging.INFO)
def get_wiki_page_ids(article_titles, per_query=25):
"""
Utility method to get the page_id and resolve re-directs for a set of wikipedia titles
"""
wiki_page_ids = {}
for i in range((len(article_titles) + per_query - 1) // per_query):
start = i * per_query
end = min((i + 1) * per_query, len(article_titles))
original_titles = article_titles[start:end]
r = requests.get(WIKI_API,
params=dict(action="query", format="json",
redirects=True,
titles="|".join(original_titles)))
data = r.json()
query = data["query"]
if "redirects" in query:
redirects = {x["to"]: x["from"] for x in query["redirects"]}
else:
redirects = {}
for page, page_data in query["pages"].items():
page = int(page)
title = page_data["title"]
if page == -1:
raise ValueError()
original_title = redirects.get(title, title)
if original_title not in original_titles:
raise ValueError(title)
wiki_page_ids[original_title] = (title, page)
return [wiki_page_ids[x] for x in article_titles]
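# Illustrative usage, not part of the original module (the title is an arbitrary
# example): get_wiki_page_ids(["Barack Obama"]) returns, in input order, one
# (resolved_title, page_id) tuple per title, with redirects already followed.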
class WikiParagraph(ParagraphWithInverse):
def __init__(self, paragraph_num: int, kind: str, text: List[List[str]],
original_text: Optional[str]=None, span_mapping=None):
super().__init__(text, original_text, span_mapping)
self.paragraph_num = paragraph_num
self.kind = kind
def to_json(self):
if self.spans is not None:
word_ix = 0
compact = []
for sent in self.text:
tokens = []
for word in sent:
s, e = self.spans[word_ix]
s, e = int(s), int(e) # ujson doesn't play nice with numpy
if word == self.original_text[s:e]:
tokens.append((s, e))
else:
tokens.append((word, s, e))
word_ix += 1
compact.append(tokens)
return dict(paragraph_num=self.paragraph_num,
kind=self.kind,
original_text=self.original_text,
spans=compact)
else:
return self.__dict__
@staticmethod
def from_json(data):
if data["spans"] is not None:
original_text = data["original_text"]
spans = []
text = []
for sent in data["spans"]:
sent_tokens = []
for tup in sent:
if len(tup) == 2:
spans.append(tup)
sent_tokens.append(original_text[tup[0]:tup[1]])
else:
spans.append(tup[1:])
sent_tokens.append(tup[0])
text.append(sent_tokens)
return WikiParagraph(data["paragraph_num"], data["kind"], text, original_text,
np.array(spans, dtype=np.int32))
else:
return WikiParagraph(**data)
class WikiArticle(object):
def __init__(self, title: str, page_id: int, paragraphs: List[WikiParagraph]):
self.title = title
self.page_id = page_id
self.paragraphs = paragraphs
@property
def url(self):
return "https://en.wikipedia.org/?curid=" + str(self.page_id)
class WikiCorpus(Configurable):
"""
Class the can download wiki-articles and return them as tokenized text
"""
def __init__(self, cache_dir=None, follow_redirects: bool=True,
keep_inverse_mapping: bool=False,
extract_lists: bool=False, tokenizer=NltkAndPunctTokenizer()):
self.tokenizer = tokenizer
self.extract_lists = extract_lists
self.follow_redirects = follow_redirects
self.cache_dir = cache_dir
self.keep_inverse_mapping = keep_inverse_mapping
if cache_dir is not None and not exists(self.cache_dir):
mkdir(self.cache_dir)
def _get_tokenized_filename(self, title):
title = unicodedata.normalize('NFKC', title).lower()
return join(self.cache_dir, title.replace(" ", "_")
.replace("/", "-") + ".json")
def _text_to_paragraph(self, ix, kind: str, text: str):
text = text.strip()
if not self.keep_inverse_mapping:
text = self.tokenizer.tokenize_paragraph(text)
return WikiParagraph(ix, kind, text)
else:
para = self.tokenizer.tokenize_with_inverse(text)
return WikiParagraph(ix, kind, para.text, para.original_text, para.spans)
def _sent_to_paragraph(self, ix, kind: str, text: List[str]):
if not self.keep_inverse_mapping:
tokenized = [self.tokenizer.tokenize_sentence(s.strip()) for s in text]
return WikiParagraph(ix, kind, tokenized)
else:
para = ParagraphWithInverse.concat(
[self.tokenizer.tokenize_with_inverse(x.strip(), True) for x in text], " ")
return WikiParagraph(ix, kind, para.text, para.original_text, para.spans)
def get_wiki_article(self, wiki_title) -> WikiArticle:
# Note client is responsible for rate limiting as needed
if self.cache_dir is not None:
tokenized_file = self._get_tokenized_filename(wiki_title)
if exists(tokenized_file):
log.info("Load wiki article for \"%s\" from cache", wiki_title)
with open(tokenized_file, "r") as f:
data = json.load(f)
return WikiArticle(data["title"], data["url"], [WikiParagraph.from_json(x) for
x in data["paragraphs"]])
log.info("Load wiki article for \"%s\"", wiki_title)
r = requests.get(WIKI_API, dict(action="parse", page=wiki_title,
redirects=self.follow_redirects, format="json"))
if r.status_code != 200:
raise ValueError()
raw_data = r.json()["parse"]
# Wiki HTML is fairly structured, so this seems to work reasonably well
soup = BeautifulSoup(raw_data["text"]["*"], "lxml")
paragraphs = []
to_find = ["p", "h2", "h3", "h4", "h5", "h6"]
if self.extract_lists:
to_find += ["ul", "ol"]
for element in soup.findAll(to_find):
if element.name[0] == "h":
if element.get_text() == "Contents":
continue
sect_name = element.find(attrs={"class": "mw-headline"}).get_text()
para = self._sent_to_paragraph(len(paragraphs), "section", [sect_name])
if para.n_tokens > 0:
paragraphs.append(para)
elif element.name == "ul" or element.name == "ol":
if dict(element.parent.attrs).get("class") != ["mw-parser-output"]:
# only extract "body" lists
continue
para = self._sent_to_paragraph(len(paragraphs),
"list" if element.name == "ul" else "ordered_list",
[x.get_text() for x in element.findAll("li")])
if para.n_tokens > 0:
paragraphs.append(para)
else:
# remove citations
for citation in element.findAll("sup", {"class": "reference"}):
citation.extract()
# remove citation needed
for sub in element.findAll("sup"):
citations = sub.findAll("a", href=True)
if len(citations) == 1:
citation = citations[0]
href = citation["href"]
if href.startswith("#cite") or href == "/wiki/Wikipedia:Citation_needed":
sub.extract()
text = element.get_text()
para = self._text_to_paragraph(len(paragraphs), "paragraph", text)
if para.n_tokens > 0:
paragraphs.append(para)
article = WikiArticle(wiki_title, raw_data["pageid"], paragraphs)
if self.cache_dir is not None:
with open(tokenized_file, "w") as f:
ujson.dump(dict(title=article.title, url=article.url,
paragraphs=[x.to_json() for x in article.paragraphs]), f)
return article
if __name__ == "__main__":
from data_processing.document_splitter import MergeParagraphs
doc = WikiCorpus(keep_inverse_mapping=True).get_wiki_article("Queen Elizabeth 2")
MergeParagraphs(400).split_inverse(doc.paragraphs)
pass | {
"content_hash": "0bf50fc973245a340309e1820c823c3c",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 98,
"avg_line_length": 39.635245901639344,
"alnum_prop": 0.5395512356529831,
"repo_name": "allenai/document-qa",
"id": "0e97ed5207bab39078633b6c548e70a0767fab2d",
"size": "9671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docqa/data_processing/wiki.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "16595"
},
{
"name": "Python",
"bytes": "647596"
}
],
"symlink_target": ""
} |
import sys
import time
import Quartz
class Mouse():
down = [Quartz.kCGEventLeftMouseDown, Quartz.kCGEventRightMouseDown, Quartz.kCGEventOtherMouseDown]
up = [Quartz.kCGEventLeftMouseUp, Quartz.kCGEventRightMouseUp, Quartz.kCGEventOtherMouseUp]
[LEFT, RIGHT, OTHER] = [0, 1, 2]
def position(self):
point = Quartz.CGEventGetLocation( Quartz.CGEventCreate(None) )
return point.x, point.y
def __mouse_event(self, type, x, y):
mouse_event = Quartz.CGEventCreateMouseEvent(None, type, (x, y), Quartz.kCGMouseButtonLeft)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, mouse_event)
def move(self, x, y):
self.__mouse_event(Quartz.kCGEventMouseMoved, x, y)
Quartz.CGWarpMouseCursorPosition((x, y))
def press(self, x, y, button=0):
event = Quartz.CGEventCreateMouseEvent(None, Mouse.down[button], (x, y), button)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
def release(self, x, y, button=0):
event = Quartz.CGEventCreateMouseEvent(None, Mouse.up[button], (x, y), button)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
def doubleClick(self, x, y, clickCount, button=0):
print("Double click event")
theEvent = Quartz.CGEventCreateMouseEvent(None, Mouse.down[button], (x, y), button)
Quartz.CGEventSetIntegerValueField(theEvent, Quartz.kCGMouseEventClickState, clickCount)
        Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)  # first press
        Quartz.CGEventSetType(theEvent, Quartz.kCGEventLeftMouseUp)
        Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)  # first release
        Quartz.CGEventSetType(theEvent, Quartz.kCGEventLeftMouseDown)
        Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)  # second press
        Quartz.CGEventSetType(theEvent, Quartz.kCGEventLeftMouseUp)
        Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)  # second release
print("Double click event ended")
def click(self, button=0):
x, y = self.position()
self.press(x, y, button)
self.release(x, y, button)
def click_pos(self, x, y, button=0):
self.move(x, y)
self.click(button)
def torelative(self, x, y):
curr_pos = Quartz.CGEventGetLocation( Quartz.CGEventCreate(None) )
        x += curr_pos.x
        y += curr_pos.y
return [x, y]
def move_rel(self, x, y):
[x, y] = self.torelative(x, y)
moveEvent = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventMouseMoved, Quartz.CGPointMake(x, y), 0)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, moveEvent)
def mouseEvent(self, type, posx, posy):
theEvent = Quartz.CGEventCreateMouseEvent(None, type, (posx,posy), Quartz.kCGMouseButtonLeft)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)
def mousedrag(self, posx, posy):
self.mouseEvent(Quartz.kCGEventLeftMouseDragged, posx,posy)
if __name__ == '__main__':
mouse = Mouse()
if sys.platform == "darwin":
print("Current mouse position: %d:%d" % mouse.position())
mouse.move_rel(25, 16)
        print("Clicking the right button...")
mouse.move(25, 26)
time.sleep(0.05)
mouse.move(35, 26)
time.sleep(0.05)
mouse.move(40, 26)
time.sleep(0.05)
mouse.move(44, 26)
time.sleep(0.05)
mouse.move(50, 26)
time.sleep(0.05)
mouse.move(55, 26)
time.sleep(0.05)
mouse.doubleClick(1264, 416, 2, 0)
time.sleep(0.05)
mouse.click_pos(1264, 416, 1)
mouse.doubleClick(1264, 46, 2, 0)
#mouse.doubleClick(25, 26, 2, 0)
elif sys.platform == "win32":
print("Error: Platform not supported!")
| {
"content_hash": "ed148f2026e287eec2a7a389d1a264ff",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 112,
"avg_line_length": 37.82828282828283,
"alnum_prop": 0.6504672897196262,
"repo_name": "mayank408/Mousely",
"id": "1debae11e02fb4c9bbbdcadc1cea2892a409395f",
"size": "3745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mouse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13372"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os, sys
from .pytem import Pytem
def print_err(*args):
print(*args, file=sys.stderr)
subject = Pytem("template",'globalfile')
def tag_test():
result = subject.render_string("title : Title---%title%")
print_err(result)
assert result == "<p>Title</p>"
def md_test():
result = subject.render_string("---#Hello World")
print_err(result)
assert result == "<h1>Hello World</h1>"
def dual_delim_test():
result = subject.render_string("title : Title---%title%---More Content")
print_err(result)
assert result == "<p>Title---More Content</p>"
def site_test():
os.environ["PROMPT"] = "no"
return subject.render_site("in","out")
| {
"content_hash": "21ad4c8a5630a60fd1bef0bf2bba903c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 76,
"avg_line_length": 22.5625,
"alnum_prop": 0.6385041551246537,
"repo_name": "wastevensv/pytem",
"id": "128f0ab5f1688125ffaf8dfff91ac746f2142119",
"size": "722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2880"
},
{
"name": "HTML",
"bytes": "4412"
},
{
"name": "Python",
"bytes": "7937"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class AllinkContactConfig(AppConfig):
name = 'allink_apps.contact'
verbose_name = "Allink Contact"
| {
"content_hash": "8c3abf141bfea2373468409c684275a1",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 37,
"avg_line_length": 23.833333333333332,
"alnum_prop": 0.7482517482517482,
"repo_name": "allink/allink-apps",
"id": "e81fdee99c9571a1b55cc96f07c11a421910c6a2",
"size": "167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contact/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "994"
},
{
"name": "HTML",
"bytes": "47533"
},
{
"name": "Python",
"bytes": "183917"
}
],
"symlink_target": ""
} |
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import datetime
import seaborn as sns
import matplotlib.patches as mpatches
import csv
from matplotlib import rcParams
sns.set_style("ticks")
rcParams['axes.labelsize'] = 22
rcParams['axes.titlesize'] = 22
rcParams['xtick.labelsize'] = 18
rcParams['ytick.labelsize'] = 18
rcParams['legend.fontsize'] = 22
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
rcParams['figure.figsize'] = 6.8, 5.0
### model parameters
startyear = 2014
startmonth = 3
startday = 2
days = 1
### import data from files
periodlength = 365
dataIB = np.genfromtxt('export.csv', delimiter = ',');
###Times array
t_start = datetime.datetime(startyear, startmonth, startday)
t_end = t_start + datetime.timedelta(minutes=(60*24*days))
t = np.array([t_start + datetime.timedelta(minutes=i) for i in xrange(60*24*periodlength)])
### Plot prices in selected time window
#ramp up imbalance volumes
dataAPX = np.genfromtxt('pricesold.txt', delimiter = ' ');
dataAPX = np.repeat(dataAPX,60)
apx = plt.plot(t[60*24*(31+28+3):60*24*(31+28+4)], dataAPX[(60*24*3):60*24*4], lw=2.0)
RDv = plt.plot(t[60*24*(31+28+3):60*24*(31+28+4)], dataIB[(60*24*(31+28+3)):60*24*(31+28+4),4]+dataIB[(60*24*(31+28+3)):60*24*(31+28+4),6], c='r', lw=2.0)
sns.despine()
plt.ylabel('Euro / MWh')
plt.xlabel('Time (Hours:Min:Sec)')
plt.legend(['APX', 'Imbalance'])
# plt.gca().set_xticks(range(0,1440,60))
# a = time_leg[0:97:8]
# a.append('24:00')
# plt.gca().set_xticklabels(a)
plt.setp( plt.gca().xaxis.get_majorticklabels(), rotation=70)
plt.savefig('foo.pdf', bbox_inches='tight')
plt.show()
| {
"content_hash": "f749b6ad556b0bd27d096b42646d70cd",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 154,
"avg_line_length": 30.509090909090908,
"alnum_prop": 0.6960667461263409,
"repo_name": "GiorgosMethe/Load-Profile-Decomposition",
"id": "779582d47e72760d1b3b65a523f3c061cb688bfc",
"size": "1678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prices/Reserve Market Analysis-Copy2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15219"
},
{
"name": "TeX",
"bytes": "549047"
}
],
"symlink_target": ""
} |
"""
Python Lexical Analyser
Actions for use in token specifications
"""
class Action(object):
def perform(self, token_stream, text):
pass # abstract
def __copy__(self):
return self # immutable, no need to copy
def __deepcopy__(self, memo):
return self # immutable, no need to copy
class Return(Action):
"""
Internal Plex action which causes |value| to
be returned as the value of the associated token
"""
def __init__(self, value):
self.value = value
def perform(self, token_stream, text):
return self.value
def __repr__(self):
return "Return(%r)" % self.value
class Call(Action):
"""
Internal Plex action which causes a function to be called.
"""
def __init__(self, function):
self.function = function
def perform(self, token_stream, text):
return self.function(token_stream, text)
def __repr__(self):
return "Call(%s)" % self.function.__name__
class Method(Action):
"""
Plex action that calls a specific method on the token stream,
passing the matched text and any provided constant keyword arguments.
"""
def __init__(self, name, **kwargs):
self.name = name
self.kwargs = kwargs or None
def perform(self, token_stream, text):
method = getattr(token_stream, self.name)
# self.kwargs is almost always unused => avoid call overhead
return method(text, **self.kwargs) if self.kwargs is not None else method(text)
def __repr__(self):
kwargs = (
', '.join(sorted(['%s=%r' % item for item in self.kwargs.items()]))
if self.kwargs is not None else '')
return "Method(%s%s%s)" % (self.name, ', ' if kwargs else '', kwargs)
class Begin(Action):
"""
Begin(state_name) is a Plex action which causes the Scanner to
enter the state |state_name|. See the docstring of Plex.Lexicon
for more information.
"""
def __init__(self, state_name):
self.state_name = state_name
def perform(self, token_stream, text):
token_stream.begin(self.state_name)
def __repr__(self):
return "Begin(%s)" % self.state_name
class Ignore(Action):
"""
IGNORE is a Plex action which causes its associated token
to be ignored. See the docstring of Plex.Lexicon for more
information.
"""
def perform(self, token_stream, text):
return None
def __repr__(self):
return "IGNORE"
IGNORE = Ignore()
class Text(Action):
"""
TEXT is a Plex action which causes the text of a token to
be returned as the value of the token. See the docstring of
Plex.Lexicon for more information.
"""
def perform(self, token_stream, text):
return text
def __repr__(self):
return "TEXT"
TEXT = Text()
| {
"content_hash": "902f8504b8d63104327f3ed3c8dd3bbd",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 87,
"avg_line_length": 24.04201680672269,
"alnum_prop": 0.611674239776302,
"repo_name": "cython/cython",
"id": "725278ddf8bb51786a4cf368e3b80ad4250030ab",
"size": "2919",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Cython/Plex/Actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1429"
},
{
"name": "C",
"bytes": "786161"
},
{
"name": "C++",
"bytes": "32603"
},
{
"name": "Cython",
"bytes": "3391513"
},
{
"name": "Emacs Lisp",
"bytes": "12379"
},
{
"name": "Makefile",
"bytes": "3184"
},
{
"name": "PowerShell",
"bytes": "4022"
},
{
"name": "Python",
"bytes": "4081204"
},
{
"name": "Shell",
"bytes": "6371"
},
{
"name": "Smalltalk",
"bytes": "618"
},
{
"name": "Starlark",
"bytes": "3341"
},
{
"name": "sed",
"bytes": "807"
}
],
"symlink_target": ""
} |
"""
A minimal EWMH-aware OO layer over xpyb. This is NOT intended to be
complete - it only implements the subset of functionalty needed by qtile.
"""
from __future__ import print_function, division
import six
import logging
from xcffib.xproto import CW, WindowClass, EventMask
from xcffib.xfixes import SelectionEventMask
import xcffib
import xcffib.randr
import xcffib.xinerama
import xcffib.xproto
from . import xkeysyms
from .xcursors import Cursors
keysyms = xkeysyms.keysyms
# These should be in xpyb:
ModMasks = {
"shift": 1 << 0,
"lock": 1 << 1,
"control": 1 << 2,
"mod1": 1 << 3,
"mod2": 1 << 4,
"mod3": 1 << 5,
"mod4": 1 << 6,
"mod5": 1 << 7,
}
ModMapOrder = [
"shift",
"lock",
"control",
"mod1",
"mod2",
"mod3",
"mod4",
"mod5"
]
AllButtonsMask = 0b11111 << 8
ButtonMotionMask = 1 << 13
ButtonReleaseMask = 1 << 3
NormalHintsFlags = {
"USPosition": 1, # User-specified x, y
"USSize": 2, # User-specified width, height
"PPosition": 4, # Program-specified position
"PSize": 8, # Program-specified size
"PMinSize": 16, # Program-specified minimum size
"PMaxSize": 32, # Program-specified maximum size
"PResizeInc": 64, # Program-specified resize increments
"PAspect": 128, # Program-specified min and max aspect ratios
"PBaseSize": 256, # Program-specified base size
"PWinGravity": 512, # Program-specified window gravity
}
HintsFlags = {
"InputHint": 1, # input
"StateHint": 2, # initial_state
"IconPixmapHint": 4, # icon_pixmap
"IconWindowHint": 8, # icon_window
"IconPositionHint": 16, # icon_x & icon_y
"IconMaskHint": 32, # icon_mask
"WindowGroupHint": 64, # window_group
"MessageHint": 128, # (this bit is obsolete)
"UrgencyHint": 256, # urgency
}
# http://standards.freedesktop.org/wm-spec/latest/ar01s05.html#idm139870830002400
WindowTypes = {
'_NET_WM_WINDOW_TYPE_DESKTOP': "desktop",
'_NET_WM_WINDOW_TYPE_DOCK': "dock",
'_NET_WM_WINDOW_TYPE_TOOLBAR': "toolbar",
'_NET_WM_WINDOW_TYPE_MENU': "menu",
'_NET_WM_WINDOW_TYPE_UTILITY': "utility",
'_NET_WM_WINDOW_TYPE_SPLASH': "splash",
'_NET_WM_WINDOW_TYPE_DIALOG': "dialog",
'_NET_WM_WINDOW_TYPE_DROPDOWN_MENU': "dropdown",
'_NET_WM_WINDOW_TYPE_POPUP_MENU': "menu",
'_NET_WM_WINDOW_TYPE_TOOLTIP': "tooltip",
'_NET_WM_WINDOW_TYPE_NOTIFICATION': "notification",
'_NET_WM_WINDOW_TYPE_COMBO': "combo",
'_NET_WM_WINDOW_TYPE_DND': "dnd",
'_NET_WM_WINDOW_TYPE_NORMAL': "normal",
}
# http://standards.freedesktop.org/wm-spec/latest/ar01s05.html#idm139870829988448
WindowStates = {
None: 'normal',
'_NET_WM_STATE_FULLSCREEN': 'fullscreen',
'_NET_WM_STATE_DEMANDS_ATTENTION': 'urgent'
}
# Maps property names to types and formats.
PropertyMap = {
# ewmh properties
"_NET_DESKTOP_GEOMETRY": ("CARDINAL", 32),
"_NET_SUPPORTED": ("ATOM", 32),
"_NET_SUPPORTING_WM_CHECK": ("WINDOW", 32),
"_NET_WM_NAME": ("UTF8_STRING", 8),
"_NET_WM_PID": ("CARDINAL", 32),
"_NET_CLIENT_LIST": ("WINDOW", 32),
"_NET_CLIENT_LIST_STACKING": ("WINDOW", 32),
"_NET_NUMBER_OF_DESKTOPS": ("CARDINAL", 32),
"_NET_CURRENT_DESKTOP": ("CARDINAL", 32),
"_NET_DESKTOP_NAMES": ("UTF8_STRING", 8),
"_NET_WORKAREA": ("CARDINAL", 32),
"_NET_ACTIVE_WINDOW": ("WINDOW", 32),
"_NET_WM_DESKTOP": ("CARDINAL", 32),
"_NET_WM_STRUT": ("CARDINAL", 32),
"_NET_WM_STRUT_PARTIAL": ("CARDINAL", 32),
"_NET_WM_WINDOW_OPACITY": ("CARDINAL", 32),
"_NET_WM_WINDOW_TYPE": ("CARDINAL", 32),
# Net State
"_NET_WM_STATE": ("ATOM", 32),
"_NET_WM_STATE_STICKY": ("ATOM", 32),
"_NET_WM_STATE_SKIP_TASKBAR": ("ATOM", 32),
"_NET_WM_STATE_FULLSCREEN": ("ATOM", 32),
"_NET_WM_STATE_MAXIMIZED_HORZ": ("ATOM", 32),
"_NET_WM_STATE_MAXIMIZED_VERT": ("ATOM", 32),
"_NET_WM_STATE_ABOVE": ("ATOM", 32),
"_NET_WM_STATE_BELOW": ("ATOM", 32),
"_NET_WM_STATE_MODAL": ("ATOM", 32),
"_NET_WM_STATE_HIDDEN": ("ATOM", 32),
"_NET_WM_STATE_DEMANDS_ATTENTION": ("ATOM", 32),
# Xembed
"_XEMBED_INFO": ("_XEMBED_INFO", 32),
# ICCCM
"WM_STATE": ("WM_STATE", 32),
# Qtile-specific properties
"QTILE_INTERNAL": ("CARDINAL", 32)
}
# TODO add everything required here:
# http://standards.freedesktop.org/wm-spec/latest/ar01s03.html
SUPPORTED_ATOMS = [
# From http://standards.freedesktop.org/wm-spec/latest/ar01s03.html
'_NET_SUPPORTED',
'_NET_CLIENT_LIST',
'_NET_CLIENT_LIST_STACKING',
'_NET_CURRENT_DESKTOP',
'_NET_ACTIVE_WINDOW',
# '_NET_WORKAREA',
'_NET_SUPPORTING_WM_CHECK',
# From http://standards.freedesktop.org/wm-spec/latest/ar01s05.html
'_NET_WM_NAME',
'_NET_WM_VISIBLE_NAME',
'_NET_WM_ICON_NAME',
'_NET_WM_DESKTOP',
'_NET_WM_WINDOW_TYPE',
'_NET_WM_STATE',
'_NET_WM_STRUT',
'_NET_WM_STRUT_PARTIAL',
'_NET_WM_PID',
]
SUPPORTED_ATOMS.extend(WindowTypes.keys())
SUPPORTED_ATOMS.extend(key for key in WindowStates.keys() if key)
XCB_CONN_ERRORS = {
1: 'XCB_CONN_ERROR',
2: 'XCB_CONN_CLOSED_EXT_NOTSUPPORTED',
3: 'XCB_CONN_CLOSED_MEM_INSUFFICIENT',
4: 'XCB_CONN_CLOSED_REQ_LEN_EXCEED',
5: 'XCB_CONN_CLOSED_PARSE_ERR',
6: 'XCB_CONN_CLOSED_INVALID_SCREEN',
7: 'XCB_CONN_CLOSED_FDPASSING_FAILED',
}
class MaskMap(object):
"""
A general utility class that encapsulates the way the mask/value idiom
works in xpyb. It understands a special attribute _maskvalue on
objects, which will be used instead of the object value if present.
    This lets us pass in a Font object, rather than Font.fid, for example.
"""
def __init__(self, obj):
self.mmap = []
for i in dir(obj):
if not i.startswith("_"):
self.mmap.append((getattr(obj, i), i.lower()))
self.mmap.sort()
def __call__(self, **kwargs):
"""
kwargs: keys should be in the mmap name set
Returns a (mask, values) tuple.
"""
mask = 0
values = []
for m, s in self.mmap:
if s in kwargs:
val = kwargs.get(s)
if val is not None:
mask |= m
values.append(getattr(val, "_maskvalue", val))
del kwargs[s]
if kwargs:
raise ValueError("Unknown mask names: %s" % list(kwargs.keys()))
return mask, values
ConfigureMasks = MaskMap(xcffib.xproto.ConfigWindow)
AttributeMasks = MaskMap(CW)
GCMasks = MaskMap(xcffib.xproto.GC)
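# For illustration: a call such as AttributeMasks(backpixel=0, eventmask=EventMask.Exposure)
# returns a (mask, values) pair whose mask bits and value list are in a consistent order,
# ready to be passed to ChangeWindowAttributes.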
class AtomCache(object):
def __init__(self, conn):
self.conn = conn
self.atoms = {}
self.reverse = {}
# We can change the pre-loads not to wait for a return
for name in WindowTypes.keys():
self.insert(name=name)
for i in dir(xcffib.xproto.Atom):
if not i.startswith("_"):
self.insert(name=i, atom=getattr(xcffib.xproto.Atom, i))
def insert(self, name=None, atom=None):
assert name or atom
if atom is None:
c = self.conn.conn.core.InternAtom(False, len(name), name)
atom = c.reply().atom
if name is None:
c = self.conn.conn.core.GetAtomName(atom)
name = c.reply().name.to_string()
self.atoms[name] = atom
self.reverse[atom] = name
def get_name(self, atom):
if atom not in self.reverse:
self.insert(atom=atom)
return self.reverse[atom]
def __getitem__(self, key):
if key not in self.atoms:
self.insert(name=key)
return self.atoms[key]
class _Wrapper(object):
def __init__(self, wrapped):
self.wrapped = wrapped
def __getattr__(self, x):
return getattr(self.wrapped, x)
class Screen(_Wrapper):
"""
This represents an actual X screen.
"""
def __init__(self, conn, screen):
_Wrapper.__init__(self, screen)
self.default_colormap = Colormap(conn, screen.default_colormap)
self.root = Window(conn, self.root)
class PseudoScreen(object):
"""
This may be a Xinerama screen or a RandR CRTC, both of which are
    rectangular sections of an actual Screen.
"""
def __init__(self, conn, x, y, width, height):
self.conn = conn
self.x = x
self.y = y
self.width = width
self.height = height
class Colormap(object):
def __init__(self, conn, cid):
self.conn = conn
self.cid = cid
def alloc_color(self, color):
"""
Flexible color allocation.
"""
try:
return self.conn.conn.core.AllocNamedColor(
self.cid, len(color), color
).reply()
except xcffib.xproto.NameError:
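            # Fall back to parsing a hex "#rrggbb"-style string, scaling each 8-bit channel
            # up to the 16-bit range that AllocColor expects.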
def x8to16(i):
return 0xffff * (i & 0xff) // 0xff
r = x8to16(int(color[-6] + color[-5], 16))
g = x8to16(int(color[-4] + color[-3], 16))
b = x8to16(int(color[-2] + color[-1], 16))
return self.conn.conn.core.AllocColor(self.cid, r, g, b).reply()
class Xinerama(object):
def __init__(self, conn):
self.ext = conn.conn(xcffib.xinerama.key)
def query_screens(self):
r = self.ext.QueryScreens().reply()
return r.screen_info
class RandR(object):
def __init__(self, conn):
self.ext = conn.conn(xcffib.randr.key)
self.ext.SelectInput(
conn.default_screen.root.wid,
xcffib.randr.NotifyMask.ScreenChange
)
def query_crtcs(self, root):
l = []
for i in self.ext.GetScreenResources(root).reply().crtcs:
info = self.ext.GetCrtcInfo(i, xcffib.CurrentTime).reply()
d = dict(
x=info.x,
y=info.y,
width=info.width,
height=info.height
)
l.append(d)
return l
class XFixes(object):
selection_mask = SelectionEventMask.SetSelectionOwner | \
SelectionEventMask.SelectionClientClose | \
SelectionEventMask.SelectionWindowDestroy
def __init__(self, conn):
self.conn = conn
self.ext = conn.conn(xcffib.xfixes.key)
self.ext.QueryVersion(xcffib.xfixes.MAJOR_VERSION,
xcffib.xfixes.MINOR_VERSION)
def select_selection_input(self, window, selection="PRIMARY"):
SELECTION = self.conn.atoms[selection]
self.conn.xfixes.ext.SelectSelectionInput(window.wid,
SELECTION,
self.selection_mask)
class GC(object):
def __init__(self, conn, gid):
self.conn = conn
self.gid = gid
def change(self, **kwargs):
mask, values = GCMasks(**kwargs)
self.conn.conn.core.ChangeGC(self.gid, mask, values)
class Window(object):
def __init__(self, conn, wid):
self.conn = conn
self.wid = wid
def _propertyString(self, r):
"""
Extract a string from a window property reply message.
"""
return r.value.to_string()
def _propertyUTF8(self, r):
return r.value.to_utf8()
def send_event(self, synthevent, mask=EventMask.NoEvent):
self.conn.conn.core.SendEvent(False, self.wid, mask, synthevent.pack())
def kill_client(self):
self.conn.conn.core.KillClient(self.wid)
def set_input_focus(self):
self.conn.conn.core.SetInputFocus(
xcffib.xproto.InputFocus.PointerRoot,
self.wid,
xcffib.xproto.Time.CurrentTime
)
def warp_pointer(self, x, y):
"""
Warps the pointer to the location `x`, `y` on the window
"""
self.conn.conn.core.WarpPointer(
0, self.wid, # src_window, dst_window
0, 0, # src_x, src_y
0, 0, # src_width, src_height
x, y # dest_x, dest_y
)
def get_name(self):
"""
Tries to retrieve a canonical window name. We test the following
properties in order of preference:
- _NET_WM_VISIBLE_NAME
- _NET_WM_NAME
- WM_NAME.
"""
r = self.get_property("_NET_WM_VISIBLE_NAME", "UTF8_STRING")
if r:
return self._propertyUTF8(r)
r = self.get_property("_NET_WM_NAME", "UTF8_STRING")
if r:
return self._propertyUTF8(r)
r = self.get_property(
xcffib.xproto.Atom.WM_NAME,
xcffib.xproto.GetPropertyType.Any
)
if r:
return self._propertyString(r)
def get_wm_hints(self):
r = self.get_property("WM_HINTS", xcffib.xproto.GetPropertyType.Any)
if r:
l = r.value.to_atoms()
flags = set(k for k, v in HintsFlags.items() if l[0] & v)
return dict(
flags=flags,
input=l[1],
initial_state=l[2],
icon_pixmap=l[3],
icon_window=l[4],
icon_x=l[5],
icon_y=l[6],
icon_mask=l[7],
window_group=l[8]
)
def get_wm_normal_hints(self):
r = self.get_property(
"WM_NORMAL_HINTS",
xcffib.xproto.GetPropertyType.Any
)
if r:
l = r.value.to_atoms()
flags = set(k for k, v in NormalHintsFlags.items() if l[0] & v)
return dict(
flags=flags,
min_width=l[1 + 4],
min_height=l[2 + 4],
max_width=l[3 + 4],
max_height=l[4 + 4],
width_inc=l[5 + 4],
height_inc=l[6 + 4],
min_aspect=l[7 + 4],
max_aspect=l[8 + 4],
base_width=l[9 + 4],
base_height=l[9 + 4],
win_gravity=l[9 + 4],
)
def get_wm_protocols(self):
l = self.get_property("WM_PROTOCOLS", "ATOM", unpack=int)
return set(self.conn.atoms.get_name(i) for i in l)
def get_wm_state(self):
return self.get_property("WM_STATE", xcffib.xproto.GetPropertyType.Any, unpack=int)
def get_wm_class(self):
"""
Return an (instance, class) tuple if WM_CLASS exists, or None.
"""
r = self.get_property("WM_CLASS", "STRING")
if r:
s = self._propertyString(r)
return tuple(s.strip("\0").split("\0"))
def get_wm_window_role(self):
r = self.get_property("WM_WINDOW_ROLE", "STRING")
if r:
return self._propertyString(r)
def get_wm_transient_for(self):
r = self.get_property("WM_TRANSIENT_FOR", "WINDOW", unpack=int)
if r:
return r[0]
def get_wm_icon_name(self):
r = self.get_property("_NET_WM_ICON_NAME", "UTF8_STRING")
if r:
return self._propertyUTF8(r)
r = self.get_property("WM_ICON_NAME", "STRING")
if r:
return self._propertyUTF8(r)
def get_wm_client_machine(self):
r = self.get_property("WM_CLIENT_MACHINE", "STRING")
if r:
return self._propertyUTF8(r)
def get_geometry(self):
q = self.conn.conn.core.GetGeometry(self.wid)
return q.reply()
def get_wm_desktop(self):
r = self.get_property("_NET_WM_DESKTOP", "CARDINAL", unpack=int)
if r:
return r[0]
def get_wm_type(self):
"""
http://standards.freedesktop.org/wm-spec/wm-spec-latest.html#id2551529
"""
r = self.get_property('_NET_WM_WINDOW_TYPE', "ATOM", unpack=int)
if r:
name = self.conn.atoms.get_name(r[0])
return WindowTypes.get(name, name)
def get_net_wm_state(self):
r = self.get_property('_NET_WM_STATE', "ATOM", unpack=int)
if r:
names = [self.conn.atoms.get_name(p) for p in r]
return [WindowStates.get(n, n) for n in names]
return []
def get_net_wm_pid(self):
r = self.get_property("_NET_WM_PID", unpack=int)
if r:
return r[0]
def configure(self, **kwargs):
"""
Arguments can be: x, y, width, height, border, sibling, stackmode
"""
mask, values = ConfigureMasks(**kwargs)
# hack for negative numbers
values = [i & 0xffffffff for i in values]
return self.conn.conn.core.ConfigureWindow(self.wid, mask, values)
def set_attribute(self, **kwargs):
mask, values = AttributeMasks(**kwargs)
self.conn.conn.core.ChangeWindowAttributesChecked(
self.wid, mask, values
)
def set_cursor(self, name):
cursorId = self.conn.cursors[name]
mask, values = AttributeMasks(cursor=cursorId)
self.conn.conn.core.ChangeWindowAttributesChecked(
self.wid, mask, values
)
def set_property(self, name, value, type=None, format=None):
"""
name: String Atom name
type: String Atom name
format: 8, 16, 32
"""
if name in PropertyMap:
if type or format:
raise ValueError(
"Over-riding default type or format for property."
)
type, format = PropertyMap[name]
else:
if None in (type, format):
raise ValueError(
"Must specify type and format for unknown property."
)
try:
if isinstance(value, six.string_types):
# xcffib will pack the bytes, but we should encode them properly
if six.PY3:
value = value.encode()
elif not isinstance(value, str):
# This will only run for Python 2 unicode strings, can't
# use 'isinstance(value, unicode)' because Py 3 does not
# have unicode and pyflakes complains
value = value.encode('utf-8')
else:
# if this runs without error, the value is already a list, don't wrap it
six.next(iter(value))
except StopIteration:
# The value was an iterable, just empty
value = []
except TypeError:
            # the value wasn't an iterable and wasn't a string, so let's
# wrap it.
value = [value]
self.conn.conn.core.ChangePropertyChecked(
xcffib.xproto.PropMode.Replace,
self.wid,
self.conn.atoms[name],
self.conn.atoms[type],
format, # Format - 8, 16, 32
len(value),
value
).check()
def get_property(self, prop, type=None, unpack=None):
"""
Return the contents of a property as a GetPropertyReply. If unpack
is specified, a tuple of values is returned. The type to unpack,
either `str` or `int` must be specified.
"""
if type is None:
if prop not in PropertyMap:
raise ValueError(
"Must specify type for unknown property."
)
else:
type, _ = PropertyMap[prop]
try:
r = self.conn.conn.core.GetProperty(
False, self.wid,
self.conn.atoms[prop]
if isinstance(prop, six.string_types)
else prop,
self.conn.atoms[type]
if isinstance(type, six.string_types)
else type,
0, (2 ** 32) - 1
).reply()
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
logging.getLogger('qtile').warning(
'X error in GetProperty (wid=%r, prop=%r), ignoring',
self.wid, prop)
return None
if not r.value_len:
if unpack:
return []
return None
elif unpack:
# Should we allow more options for unpacking?
if unpack is int:
return r.value.to_atoms()
elif unpack is str:
return r.value.to_string()
else:
return r
def list_properties(self):
r = self.conn.conn.core.ListProperties(self.wid).reply()
return [self.conn.atoms.get_name(i) for i in r.atoms]
def map(self):
self.conn.conn.core.MapWindow(self.wid)
def unmap(self):
self.conn.conn.core.UnmapWindowChecked(self.wid).check()
def get_attributes(self):
return self.conn.conn.core.GetWindowAttributes(self.wid).reply()
def create_gc(self, **kwargs):
gid = self.conn.conn.generate_id()
mask, values = GCMasks(**kwargs)
self.conn.conn.core.CreateGC(gid, self.wid, mask, values)
return GC(self.conn, gid)
def ungrab_key(self, key, modifiers):
"""
Passing None means any key, or any modifier.
"""
if key is None:
key = xcffib.xproto.Atom.Any
if modifiers is None:
modifiers = xcffib.xproto.ModMask.Any
self.conn.conn.core.UngrabKey(key, self.wid, modifiers)
def grab_key(self, key, modifiers, owner_events,
pointer_mode, keyboard_mode):
self.conn.conn.core.GrabKey(
owner_events,
self.wid,
modifiers,
key,
pointer_mode,
keyboard_mode
)
def ungrab_button(self, button, modifiers):
"""
Passing None means any key, or any modifier.
"""
if button is None:
button = xcffib.xproto.Atom.Any
if modifiers is None:
modifiers = xcffib.xproto.ModMask.Any
self.conn.conn.core.UngrabButton(button, self.wid, modifiers)
def grab_button(self, button, modifiers, owner_events,
event_mask, pointer_mode, keyboard_mode):
self.conn.conn.core.GrabButton(
owner_events,
self.wid,
event_mask,
pointer_mode,
keyboard_mode,
xcffib.xproto.Atom._None,
xcffib.xproto.Atom._None,
button,
modifiers,
)
def grab_pointer(self, owner_events, event_mask, pointer_mode,
keyboard_mode, cursor=None):
self.conn.conn.core.GrabPointer(
owner_events,
self.wid,
event_mask,
pointer_mode,
keyboard_mode,
xcffib.xproto.Atom._None,
cursor or xcffib.xproto.Atom._None,
xcffib.xproto.Atom._None,
)
def ungrab_pointer(self):
self.conn.conn.core.UngrabPointer(xcffib.xproto.Atom._None)
def query_tree(self):
q = self.conn.conn.core.QueryTree(self.wid).reply()
root = None
parent = None
if q.root:
root = Window(self.conn, q.root)
if q.parent:
            parent = Window(self.conn, q.parent)
return root, parent, [Window(self.conn, i) for i in q.children]
class Font(object):
def __init__(self, conn, fid):
self.conn = conn
self.fid = fid
@property
def _maskvalue(self):
return self.fid
def text_extents(self, s):
s = s + "aaa"
x = self.conn.conn.core.QueryTextExtents(self.fid, len(s), s).reply()
return x
class Connection(object):
_extmap = {
"xinerama": Xinerama,
"randr": RandR,
"xfixes": XFixes,
}
def __init__(self, display):
self.conn = xcffib.connect(display=display)
self._connected = True
self.cursors = Cursors(self)
self.setup = self.conn.get_setup()
extensions = self.extensions()
self.screens = [Screen(self, i) for i in self.setup.roots]
self.default_screen = self.screens[self.conn.pref_screen]
for i in extensions:
if i in self._extmap:
setattr(self, i, self._extmap[i](self))
self.pseudoscreens = []
if "xinerama" in extensions:
for i, s in enumerate(self.xinerama.query_screens()):
scr = PseudoScreen(
self,
s.x_org,
s.y_org,
s.width,
s.height,
)
self.pseudoscreens.append(scr)
elif "randr" in extensions:
for i in self.randr.query_crtcs(self.screens[0].root.wid):
scr = PseudoScreen(
self,
i["x"],
i["y"],
i["width"],
i["height"],
)
self.pseudoscreens.append(scr)
self.atoms = AtomCache(self)
self.code_to_syms = {}
self.first_sym_to_code = None
self.refresh_keymap()
self.modmap = None
self.refresh_modmap()
def finalize(self):
self.cursors.finalize()
self.disconnect()
def refresh_keymap(self, first=None, count=None):
if first is None:
first = self.setup.min_keycode
count = self.setup.max_keycode - self.setup.min_keycode + 1
q = self.conn.core.GetKeyboardMapping(first, count).reply()
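        # The reply is a flat list with keysyms_per_keycode entries per keycode; slice it
        # into one keysym list per keycode.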
assert len(q.keysyms) % q.keysyms_per_keycode == 0
for i in range(len(q.keysyms) // q.keysyms_per_keycode):
self.code_to_syms[first + i] = \
q.keysyms[i * q.keysyms_per_keycode:(i + 1) * q.keysyms_per_keycode]
first_sym_to_code = {}
for k, s in self.code_to_syms.items():
if s[0] and not s[0] in first_sym_to_code:
first_sym_to_code[s[0]] = k
self.first_sym_to_code = first_sym_to_code
def refresh_modmap(self):
q = self.conn.core.GetModifierMapping().reply()
modmap = {}
for i, k in enumerate(q.keycodes):
l = modmap.setdefault(ModMapOrder[i // q.keycodes_per_modifier], [])
l.append(k)
self.modmap = modmap
def get_modifier(self, keycode):
"""
Return the modifier matching keycode.
"""
for n, l in self.modmap.items():
if keycode in l:
return n
return None
def keysym_to_keycode(self, keysym):
return self.first_sym_to_code.get(keysym, 0)
def keycode_to_keysym(self, keycode, modifier):
if keycode >= len(self.code_to_syms) or \
modifier >= len(self.code_to_syms[keycode]):
return 0
return self.code_to_syms[keycode][modifier]
def create_window(self, x, y, width, height):
wid = self.conn.generate_id()
self.conn.core.CreateWindow(
self.default_screen.root_depth,
wid,
self.default_screen.root.wid,
x, y, width, height, 0,
WindowClass.InputOutput,
self.default_screen.root_visual,
CW.BackPixel | CW.EventMask,
[
self.default_screen.black_pixel,
EventMask.StructureNotify | EventMask.Exposure
]
)
return Window(self, wid)
def disconnect(self):
self.conn.disconnect()
self._connected = False
def flush(self):
if self._connected:
return self.conn.flush()
def xsync(self):
# The idea here is that pushing an innocuous request through the queue
# and waiting for a response "syncs" the connection, since requests are
# serviced in order.
self.conn.core.GetInputFocus().reply()
def grab_server(self):
return self.conn.core.GrabServer()
def get_setup(self):
return self.conn.get_setup()
def open_font(self, name):
fid = self.conn.generate_id()
self.conn.core.OpenFont(fid, len(name), name)
return Font(self, fid)
def extensions(self):
return set(
i.name.to_string().lower()
for i in self.conn.core.ListExtensions().reply().names
)
| {
"content_hash": "240a195faaf1c19901f2c365b64527b9",
"timestamp": "",
"source": "github",
"line_count": 900,
"max_line_length": 91,
"avg_line_length": 31.525555555555556,
"alnum_prop": 0.5463645014626581,
"repo_name": "StephenBarnes/qtile",
"id": "fb39da0c4a5ed36087ae34bbee1e60fdd4709ce4",
"size": "29890",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "libqtile/xcbq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "3590"
},
{
"name": "Makefile",
"bytes": "1032"
},
{
"name": "Python",
"bytes": "915512"
},
{
"name": "Shell",
"bytes": "2833"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from builtins import object
from mock import Mock
from pants.cache.cache_setup import (CacheFactory, CacheSetup, CacheSpec, CacheSpecFormatError,
EmptyCacheSpecError, InvalidCacheSpecError,
LocalCacheSpecRequiredError, RemoteCacheSpecRequiredError,
TooManyCacheSpecsError)
from pants.cache.local_artifact_cache import LocalArtifactCache
from pants.cache.resolver import Resolver
from pants.cache.restful_artifact_cache import RESTfulArtifactCache
from pants.subsystem.subsystem import Subsystem
from pants.task.task import Task
from pants.util.contextutil import temporary_dir
from pants_test.option.util.fakes import create_options
from pants_test.test_base import TestBase
from pants_test.testutils.mock_logger import MockLogger
class DummyTask(Task):
options_scope = 'dummy'
_stable_name = 'test'
@classmethod
def subsystem_dependencies(cls):
return super(DummyTask, cls).subsystem_dependencies() + (CacheSetup, )
def execute(self): pass
class MockPinger(object):
def __init__(self, hosts_to_times):
self._hosts_to_times = hosts_to_times
# Returns a fake ping time such that the last host is always the 'fastest'.
def pings(self, hosts):
return [(host, self._hosts_to_times.get(host, 9999)) for host in hosts]
class TestCacheSetup(TestBase):
TEST_RESOLVED_FROM = 'http://test-resolver'
LOCAL_URI = '/a/local/path'
INVALID_LOCAL_URI = '../not_a_valid_local_cache'
REMOTE_URI_1 = 'http://host1'
REMOTE_URI_2 = 'https://host2:666'
REMOTE_URI_3 = 'http://host3'
EMPTY_URI = 'http://localhost:9999'
CACHE_SPEC_LOCAL_ONLY = CacheSpec(local=LOCAL_URI, remote=None)
CACHE_SPEC_REMOTE_ONLY = CacheSpec(local=None, remote=REMOTE_URI_1)
CACHE_SPEC_LOCAL_REMOTE = CacheSpec(local=LOCAL_URI, remote=REMOTE_URI_1)
CACHE_SPEC_RESOLVE_ONLY = CacheSpec(local=None, remote=TEST_RESOLVED_FROM)
CACHE_SPEC_LOCAL_RESOLVE = CacheSpec(local=LOCAL_URI, remote=TEST_RESOLVED_FROM)
def create_task(self):
return DummyTask(self.context(for_task_types=[DummyTask]),
self.pants_workdir)
def setUp(self):
super(TestCacheSetup, self).setUp()
self.resolver = Mock(spec=Resolver)
self.resolver.resolve = Mock(return_value=[self.REMOTE_URI_1, self.REMOTE_URI_2])
self.log = MockLogger()
self.pinger = MockPinger({'host1': 5, 'host2:666': 3, 'host3': 7})
def cache_factory(self, **options):
cache_options = {
'pinger_timeout': .5,
'pinger_tries': 2,
'ignore': False,
'read': False,
'read_from': [self.EMPTY_URI],
'write_to': [self.EMPTY_URI],
'write': False,
'compression_level': 1,
'max_entries_per_target': 1,
'write_permissions': None,
'dereference_symlinks': True,
# Usually read from global scope.
'pants_workdir': self.pants_workdir
}
cache_options.update(**options)
return CacheFactory(create_options(options={'test': cache_options}).for_scope('test'),
MockLogger(),
self.create_task(),
resolver=self.resolver)
def test_sanitize_cache_spec(self):
cache_factory = self.cache_factory()
self.assertEqual(self.CACHE_SPEC_LOCAL_ONLY,
cache_factory._sanitize_cache_spec([self.LOCAL_URI]))
self.assertEqual(self.CACHE_SPEC_REMOTE_ONLY,
cache_factory._sanitize_cache_spec([self.REMOTE_URI_1]))
# (local, remote) and (remote, local) are equivalent as long as they are valid
self.assertEqual(self.CACHE_SPEC_LOCAL_REMOTE,
cache_factory._sanitize_cache_spec([self.LOCAL_URI, self.REMOTE_URI_1]))
self.assertEqual(self.CACHE_SPEC_LOCAL_REMOTE,
cache_factory._sanitize_cache_spec([self.REMOTE_URI_1, self.LOCAL_URI]))
with self.assertRaises(InvalidCacheSpecError):
cache_factory._sanitize_cache_spec('not a list')
with self.assertRaises(EmptyCacheSpecError):
cache_factory._sanitize_cache_spec([])
with self.assertRaises(CacheSpecFormatError):
cache_factory._sanitize_cache_spec([self.INVALID_LOCAL_URI])
with self.assertRaises(CacheSpecFormatError):
cache_factory._sanitize_cache_spec(['ftp://not_a_valid_remote_cache'])
with self.assertRaises(LocalCacheSpecRequiredError):
cache_factory._sanitize_cache_spec([self.INVALID_LOCAL_URI, self.REMOTE_URI_1])
with self.assertRaises(LocalCacheSpecRequiredError):
cache_factory._sanitize_cache_spec([self.REMOTE_URI_1, self.REMOTE_URI_2])
with self.assertRaises(RemoteCacheSpecRequiredError):
cache_factory._sanitize_cache_spec([self.LOCAL_URI, self.INVALID_LOCAL_URI])
with self.assertRaises(TooManyCacheSpecsError):
cache_factory._sanitize_cache_spec([self.LOCAL_URI,
self.REMOTE_URI_1, self.REMOTE_URI_2])
def test_resolve(self):
cache_factory = self.cache_factory()
self.assertEqual(CacheSpec(local=None,
remote='{}|{}'.format(self.REMOTE_URI_1, self.REMOTE_URI_2)),
cache_factory._resolve(self.CACHE_SPEC_RESOLVE_ONLY))
self.assertEqual(CacheSpec(local=self.LOCAL_URI,
remote='{}|{}'.format(self.REMOTE_URI_1, self.REMOTE_URI_2)),
cache_factory._resolve(self.CACHE_SPEC_LOCAL_RESOLVE))
self.resolver.resolve.side_effect = Resolver.ResolverError()
# still have local cache if resolver fails
self.assertEqual(CacheSpec(local=self.LOCAL_URI, remote=None),
cache_factory._resolve(self.CACHE_SPEC_LOCAL_RESOLVE))
# no cache created if resolver fails and no local cache
self.assertFalse(cache_factory._resolve(self.CACHE_SPEC_RESOLVE_ONLY))
def test_noop_resolve(self):
self.resolver.resolve = Mock(return_value=[])
cache_factory = self.cache_factory()
self.assertEqual(self.CACHE_SPEC_LOCAL_ONLY,
cache_factory._resolve(self.CACHE_SPEC_LOCAL_ONLY))
self.assertEqual(self.CACHE_SPEC_RESOLVE_ONLY,
cache_factory._resolve(self.CACHE_SPEC_RESOLVE_ONLY))
self.assertEqual(self.CACHE_SPEC_LOCAL_RESOLVE,
cache_factory._resolve(self.CACHE_SPEC_LOCAL_RESOLVE))
def test_cache_spec_parsing(self):
def mk_cache(spec, resolver=None):
Subsystem.reset()
self.set_options_for_scope(CacheSetup.subscope(DummyTask.options_scope),
read_from=spec, compression=1)
self.context(for_task_types=[DummyTask]) # Force option initialization.
cache_factory = CacheSetup.create_cache_factory_for_task(
self.create_task(),
pinger=self.pinger,
resolver=resolver)
return cache_factory.get_read_cache()
def check(expected_type, spec, resolver=None):
cache = mk_cache(spec, resolver=resolver)
self.assertIsInstance(cache, expected_type)
self.assertEqual(cache.artifact_root, self.pants_workdir)
with temporary_dir() as tmpdir:
cachedir = os.path.join(tmpdir, 'cachedir') # Must be a real path, so we can safe_mkdir it.
check(LocalArtifactCache, [cachedir])
check(RESTfulArtifactCache, ['http://localhost/bar'])
check(RESTfulArtifactCache, ['https://localhost/bar'])
check(RESTfulArtifactCache, [cachedir, 'http://localhost/bar'])
check(RESTfulArtifactCache, [cachedir, 'http://localhost/bar'], resolver=self.resolver)
with self.assertRaises(CacheSpecFormatError):
mk_cache(['foo'])
with self.assertRaises(CacheSpecFormatError):
mk_cache(['../foo'])
with self.assertRaises(LocalCacheSpecRequiredError):
mk_cache(['https://localhost/foo', 'http://localhost/bar'])
with self.assertRaises(RemoteCacheSpecRequiredError):
mk_cache([tmpdir, '/bar'])
with self.assertRaises(TooManyCacheSpecsError):
mk_cache([tmpdir, self.REMOTE_URI_1, self.REMOTE_URI_2])
def test_read_cache_available(self):
self.assertFalse(self.cache_factory(ignore=True, read=True, read_from=[self.EMPTY_URI])
.read_cache_available())
self.assertFalse(self.cache_factory(ignore=False, read=False, read_from=[self.EMPTY_URI])
.read_cache_available())
self.assertFalse(self.cache_factory(ignore=False, read=True, read_from=[])
.read_cache_available())
    self.assertTrue(self.cache_factory(ignore=False, read=True, read_from=[self.EMPTY_URI])
                    .read_cache_available())
def test_write_cache_available(self):
self.assertFalse(self.cache_factory(ignore=True, write=True, write_to=[self.EMPTY_URI])
.write_cache_available())
self.assertFalse(self.cache_factory(ignore=False, write=False, write_to=[self.EMPTY_URI])
.write_cache_available())
self.assertFalse(self.cache_factory(ignore=False, write=True, write_to=[])
.write_cache_available())
    self.assertTrue(self.cache_factory(ignore=False, write=True, write_to=[self.EMPTY_URI])
                    .write_cache_available())
| {
"content_hash": "52a6706d87d08f8a56003a16b8a7b911",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 98,
"avg_line_length": 42.74770642201835,
"alnum_prop": 0.667024358836785,
"repo_name": "twitter/pants",
"id": "434ad49b60f5de36a2de32beac77f6342475164f",
"size": "9466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/cache/test_cache_setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5639"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "85294"
},
{
"name": "Java",
"bytes": "498956"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "6700799"
},
{
"name": "Rust",
"bytes": "765598"
},
{
"name": "Scala",
"bytes": "89346"
},
{
"name": "Shell",
"bytes": "94395"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
"""
Created on Sun May 28 17:37:50 2017
@author: azkei
"""
# Creating a DataFrame and writing it to JSON
import numpy as np
import pandas as pd
# Generate DataFrame
frame = pd.DataFrame(np.arange(16).reshape(4,4),
index=['white','black','red','blue'],
columns=['up','down','right','left'])
# Convert it to a json file
frame.to_json('frame.json')
# Reading in JSON files
pd.read_json('frame.json')
# Generally speaking, JSON won't be in tabular form like we have here.
# In that case we have to convert the structured dict into tabular form.
# This process is called normalization.
# Pandas provides a function called json_normalize() that converts a dict/list
# into a table.
# First we will need to import it
from pandas.io.json import json_normalize
import json
file = open('books.json','r')
text = file.read()
text = json.loads(text)
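# For illustration only: books.json is assumed to hold nested records roughly like
# [{"writer": "...", "nationality": "...",
#   "books": [{"title": "...", "price": ...}, ...]}, ...]
# (keys inferred from the json_normalize() calls below; the real file may differ).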
# We might want to see the contents of all the books with books as the key.
json_normalize(text,'books')
# The above function only gives information on an internal level.
# To get more information, we put more keys into the function
json_normalize(text,'books',['writer','nationality'])
# As a result we get a DataFrame from a starting tree structure. | {
"content_hash": "121a90a4709116471ec84ecda7a9a09e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 79,
"avg_line_length": 35.205882352941174,
"alnum_prop": 0.7101086048454469,
"repo_name": "jjsalomon/python-analytics",
"id": "fb061e4e8260a09be4f750502ac20de3d14f83b7",
"size": "1221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas2 - Reading & Writing Data/pandas5 - JSON Data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "835"
},
{
"name": "Python",
"bytes": "75839"
}
],
"symlink_target": ""
} |
import threading
import socket
import logging
import os
import collections
import time
import json
import gconlib.commands as commands
import serial
SOCKET_FILE = "/var/run/gcon.socket"
LOCAL_SOCKET_FILE = "gcon.socket"
class SockerRunner(threading.Thread):
def __init__(self, path, gcon):
threading.Thread.__init__(self)
self.setDaemon(True)
self.logger = logging.getLogger("%s.%s" % (__name__,self.__class__.__name__))
self.socket = None
self.path = path
self.gcon = gcon
self.commandexecutors = dict()
self.commandexecutors["listSerial"] = commands.CommandListSerial()
self.commandexecutors["connectSerial"] = commands.CommandConnectSerial()
self.commandexecutors["requestStatus"] = commands.CommandRequestStatus()
self.commandexecutors["runGcode"] = commands.CommandRunGcode()
self.commandexecutors["runGcodeResult"] = commands.CommandRunGcodeResult()
def run(self):
self.logger.info("Creating socket on %s", SOCKET_FILE)
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
if os.path.exists(SOCKET_FILE):
os.unlink(SOCKET_FILE)
self.socket.bind(SOCKET_FILE)
except:
localfile = os.path.join(self.path, LOCAL_SOCKET_FILE)
            self.logger.error("failed to bind on %s; trying local bind on %s instead", SOCKET_FILE, localfile)
if os.path.exists(localfile):
os.unlink(localfile)
self.socket.bind(localfile)
self.logger.info("Begin looping")
try:
while True:
data, addr = self.socket.recvfrom(4096)
self.logger.debug("received message from %s: %s", addr, data)
try:
requestjson = json.loads(data.decode("utf-8"))
command = requestjson["command"]
if command in self.commandexecutors:
if "data" in requestjson:
datajson = requestjson["data"]
else:
datajson = {}
replydata = self.commandexecutors[command].execute(self.gcon, datajson)
reply = dict()
reply["reply"] = command + "Reply"
reply["data"] = replydata
jsonreply = json.dumps(reply)
self.socket.sendto(jsonreply.encode('utf8'), addr)
except Exception as e:
                    self.logger.error("Error occurred during data handling: %s", str(e))
self.logger.exception(e)
finally:
self.socket.close()
def shutdown(self):
self.logger.info("Shutting down SocketRunner")
if self.socket != None:
self.socket.close()
class SerialRunner(threading.Thread):
def __init__(self, path, gcon):
threading.Thread.__init__(self)
self.setDaemon(True)
self.logger = logging.getLogger("%s.%s" % (__name__,self.__class__.__name__))
self.path = path
self.gcon = gcon
self.serial = None
self.queue = collections.deque()
self.waitcon = threading.Condition()
self.halted = False
def run(self):
while True:
if self.serial == None:
with self.waitcon:
self.waitcon.wait(5)
continue
if len(self.queue) > 0:
line = self.queue.popleft()
self.sendline(line)
else:
with self.waitcon:
self.waitcon.wait(5)
def sendline(self, line):
if self.serial:
self.logger.debug("Write to serial : %s", line)
self.serial.write(bytes((line + "\n").encode("UTF-8")))
response = self.serial.readline().decode("UTF-8")
self.logger.debug("Read from serial: %s", response)
return response
def readline(self):
if self.serial:
return self.serial.readline()
def canAddLine(self):
        return len(self.queue) < 32
def forceAddLine(self, line):
self.queue.append(line)
with self.waitcon:
self.waitcon.notify_all()
def addline(self, line):
if self.canAddLine():
self.queue.append(line)
with self.waitcon:
self.waitcon.notify_all()
return True
else:
return False
def isConnected(self):
if self.serial != None and self.serial.isOpen():
return True
return False
def connect(self, serialdevice):
if self.isConnected():
return False
self.serial = serial.Serial()
self.serial.port = serialdevice
self.serial.baudrate = 250000
self.serial.timeout = 30
self.serial.open()
self.logger.debug("Opened serial port")
time.sleep(1)
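        # M105 ("report temperatures") doubles as a handshake: keep reading until the
        # firmware answers with an "ok ... T:" line, i.e. the printer is responsive.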
self.serial.write(b"M105\n")
ret = None
while ret == None or not ( ret.startswith("ok") and "T:" in ret):
ret = self.serial.readline().decode("UTF-8").strip()
self.logger.debug("received %s", ret)
with self.waitcon:
self.waitcon.notify_all()
return self.serial.isOpen()
def isHalted(self):
return self.halted
def shutdown(self):
self.logger.info("Shutting down SerialRunner")
if self.serial != None:
self.serial.close() | {
"content_hash": "eb15c84ddcf97f69eabe85a727e6645d",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 109,
"avg_line_length": 29.77659574468085,
"alnum_prop": 0.5521614862450875,
"repo_name": "laubed/gcon",
"id": "efd7e8f7f9caa5d0cad05a583d7c41b8f96e9376",
"size": "5622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gconlib/runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9728"
}
],
"symlink_target": ""
} |
from infra_libs.ts_mon.config import add_argparse_options
from infra_libs.ts_mon.config import process_argparse_options
from infra_libs.ts_mon.common.distribution import Distribution
from infra_libs.ts_mon.common.distribution import FixedWidthBucketer
from infra_libs.ts_mon.common.distribution import GeometricBucketer
from infra_libs.ts_mon.common.errors import MonitoringError
from infra_libs.ts_mon.common.errors import MonitoringDecreasingValueError
from infra_libs.ts_mon.common.errors import MonitoringDuplicateRegistrationError
from infra_libs.ts_mon.common.errors import MonitoringIncrementUnsetValueError
from infra_libs.ts_mon.common.errors import MonitoringInvalidFieldTypeError
from infra_libs.ts_mon.common.errors import MonitoringInvalidValueTypeError
from infra_libs.ts_mon.common.errors import MonitoringTooManyFieldsError
from infra_libs.ts_mon.common.errors import MonitoringNoConfiguredMonitorError
from infra_libs.ts_mon.common.errors import MonitoringNoConfiguredTargetError
from infra_libs.ts_mon.common.helpers import ScopedIncrementCounter
from infra_libs.ts_mon.common.helpers import ScopedMeasureTime
from infra_libs.ts_mon.common.interface import close
from infra_libs.ts_mon.common.interface import flush
from infra_libs.ts_mon.common.interface import register_global_metrics
from infra_libs.ts_mon.common.interface import register_global_metrics_callback
from infra_libs.ts_mon.common.interface import reset_for_unittest
from infra_libs.ts_mon.common.interface import target_context
from infra_libs.ts_mon.common.metrics import BooleanField
from infra_libs.ts_mon.common.metrics import IntegerField
from infra_libs.ts_mon.common.metrics import StringField
from infra_libs.ts_mon.common.metrics import BooleanMetric
from infra_libs.ts_mon.common.metrics import CounterMetric
from infra_libs.ts_mon.common.metrics import CumulativeDistributionMetric
from infra_libs.ts_mon.common.metrics import CumulativeMetric
from infra_libs.ts_mon.common.metrics import FloatMetric
from infra_libs.ts_mon.common.metrics import GaugeMetric
from infra_libs.ts_mon.common.metrics import MetricsDataUnits
from infra_libs.ts_mon.common.metrics import NonCumulativeDistributionMetric
from infra_libs.ts_mon.common.metrics import StringMetric
from infra_libs.ts_mon.common.targets import TaskTarget
from infra_libs.ts_mon.common.targets import DeviceTarget
| {
"content_hash": "0914a9654c05c12ed16fc85d2febffc0",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 80,
"avg_line_length": 55.16279069767442,
"alnum_prop": 0.856239460370995,
"repo_name": "luci/luci-py",
"id": "4e7c883376501300540b89e38cf31867485a781e",
"size": "2535",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "client/third_party/infra_libs/ts_mon/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5576"
},
{
"name": "HTML",
"bytes": "1900972"
},
{
"name": "JavaScript",
"bytes": "113046"
},
{
"name": "Makefile",
"bytes": "11718"
},
{
"name": "Python",
"bytes": "5885612"
},
{
"name": "Shell",
"bytes": "5183"
}
],
"symlink_target": ""
} |
import abc
from WebMirror.rules import load_rules
import WebMirror.TimedTriggers.TriggerBase
import WebRequest
import settings
class UrlTrigger(WebMirror.TimedTriggers.TriggerBase.TriggerBaseClass):
@abc.abstractmethod
def get_urls(self):
pass
class RssTriggerBase(UrlTrigger):
pluginName = "Rss Trigger"
loggerPath = 'RssTrigger'
def get_urls(self):
self.rules = load_rules()
feeds = []
for item in self.rules:
feeds += item['feedurls']
return feeds
def retriggerRssFeeds(self, feedurls):
self.log.info("Retriggering RSS feed URLs")
self.retriggerUrlList(feedurls, ignoreignore=True, retrigger_complete=True)
def go(self):
feeds = self.get_urls()
self.log.info("Found %s feeds in rule files.", len(feeds))
self.retriggerRssFeeds(feeds)
class PageTriggerBase(UrlTrigger):
pluginName = "Page Triggers"
loggerPath = 'PageTriggers'
@abc.abstractproperty
def pages(self):
pass
def get_urls(self):
# (hacky) explicit copy
return [tmp for tmp in self.pages]
def retriggerPages(self):
self.retriggerUrlList(self.pages, ignoreignore=True, retrigger_complete=True)
# for x in range(len(self.pages)):
# url = self.pages[x]
# if x % 50 == 0:
# self.log.info("Retriggering step %s", x)
# self.retriggerUrl(url)
self.log.info("Pages retrigger complete.")
def go(self):
self.log.info("Retriggering %s pages.", len(self.pages))
self.retriggerPages()
class ScribbleHubPageTrigger(PageTriggerBase):
pages = [
# ScribbleHub
'https://www.scribblehub.com/latest-series/',
'https://www.scribblehub.com/latest-series/?pg=2',
'https://www.scribblehub.com/series-ranking/?sort=3&order=1',
]
class HourlyPageTrigger(PageTriggerBase):
pages = [
# RoyalRoadL
'https://www.royalroad.com/fictions/new-releases',
# 'https://www.royalroad.com/fictions/weekly-popular',
'https://www.royalroad.com/fictions/latest-updates',
# 'https://www.royalroad.com/fictions/active-popular',
# 'https://www.royalroad.com/fictions/best-rated/',
'https://www.foxteller.com/releases',
'https://www.scribblehub.com/latest-series/',
# Japtem bits
'http://japtem.com/fanfic.php?action=last_updated',
'http://japtem.com/fanfic.php',
# Twitter feeds for annoying sites without better release mechanisms.
'https://twitter.com/Baka_Tsuki',
'https://twitter.com/Nano_Desu_Yo',
# Fetch the new NovelUpdates stuff.
# 'https://www.novelupdates.com',
]
class EverySixHoursPageTrigger(PageTriggerBase):
pages = [
# NovelUpdates
# 'https://www.novelupdates.com',
'https://www.webnovel.com/feed/',
]
class EveryOtherDayPageTrigger(PageTriggerBase):
pages = []
def _rrlExtractSeriesReleases(self, soup):
containers = soup.find_all('div', class_='fiction-list-item')
# print(soup)
# print("container: ", containers)
if not containers:
return []
urls = []
for item in containers:
div = item.find('h2', class_='fiction-title')
a = div.find("a")
if a:
sections = a['href'].split("/")
try:
sec_int = int(sections[2])
urls.append(sec_int)
except ValueError:
self.log.error("Section isn't an int: '%s'?", sections)
except IndexError:
self.log.error("Series URL doesn't appear to be in the expected format: '%s'?", a['href'])
else:
self.log.error("No series in container: %s", item)
return set(urls)
def get_pages(self):
wg = WebRequest.WebGetRobust()
soup = wg.getSoup('https://www.royalroad.com/fictions/new-releases')
rrl_max = self._rrlExtractSeriesReleases(soup)
rrl_pages = ['https://www.royalroad.com/fiction/%s' % x for x in range(max(rrl_max))]
japtem_pages = ['http://japtem.com/fanfic.php?novel=%s' % x for x in range(800)]
return rrl_pages + japtem_pages
def go(self):
self.pages = self.get_pages()
self.log.info("Retriggering %s pages.", len(self.pages))
self.retriggerPages()
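# Editor's sketch (not part of the original module): a static trigger only needs
# to supply a `pages` list; everything else (logging, retriggering) comes from
# PageTriggerBase and the TriggerBaseClass it inherits from. The URL below is
# hypothetical:
#
#     class ExampleDailyPageTrigger(PageTriggerBase):
#         pages = [
#             'https://example.com/latest-releases',
#         ]
#
# EveryOtherDayPageTrigger instead builds its page list at run time:
# _rrlExtractSeriesReleases() pulls the numeric series IDs out of hrefs shaped
# like '/fiction/<id>/<slug>' (sections[2] is the ID), and get_pages() then
# queues 'https://www.royalroad.com/fiction/<x>' for every x in
# range(max_observed_id), plus a fixed block of Japtem fanfic URLs.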
if __name__ == "__main__":
import logSetup
logSetup.initLogging(1)
run1 = RssTriggerBase()
run1._go()
# run2 = HourlyPageTrigger()
# run2._go()
# run3 = EveryOtherDayPageTrigger()
# run3._go()
| {
"content_hash": "456794aa5c8bb99868ac968257110b4c",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 95,
"avg_line_length": 22.96067415730337,
"alnum_prop": 0.6904820161487644,
"repo_name": "fake-name/ReadableWebProxy",
"id": "54435de378ce148aa825a7e386a75ffcfe4163e7",
"size": "4089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/TimedTriggers/UrlTriggers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('userAverange', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='MAP',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.FloatField()),
('at', models.IntegerField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('life', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='userAverange.UserAverage_Life')),
],
),
]
| {
"content_hash": "0b8ea75c3a5b03ee115c3218d0cef7a1",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 125,
"avg_line_length": 30.153846153846153,
"alnum_prop": 0.5841836734693877,
"repo_name": "DiegoCorrea/ouvidoMusical",
"id": "da6530e1da9cdb22771081a2de5ecf106a00c7d9",
"size": "857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/evaluators/MAP/algorithm/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182332"
},
{
"name": "Shell",
"bytes": "51486"
}
],
"symlink_target": ""
} |
"""Defines models for reading that save out to checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from samyro import integerize
import prettytensor as pt
import tensorflow as tf
class Model(object):
"""A model shape.
TODO(jkahn): convert to collections.namedtuple?
"""
def __init__(self, name, embedding=16, lstms=(128, 256)):
self.name = name
self.embedding = embedding
self.lstms = lstms
def create(self, input_placeholder, phase):
"""Creates a 2 layer LSTM model with dropout.
Args:
input_placeholder: placeholder of timesteps x sequences
phase: Phase controls whether or not dropout is active. In
training mode we want to perform dropout, but in test we
want to disable it.
Returns: The logits layer.
"""
timesteps = input_placeholder.get_shape()[1].value
text_in = integerize.reshape_cleavable(input_placeholder)
with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
# The embedding lookup must be placed on a cpu.
with tf.device('/cpu:0'):
embedded = text_in.embedding_lookup(integerize.CHARS,
[self.embedding])
# Because the sequence LSTM expects each timestep to be its
# own Tensor, we need to cleave the sequence. Below we can
# build a stacked 2 layer LSTM by just chaining them together.
# You can stack as many layers as you want.
lstm = embedded.cleave_sequence(timesteps)
assert len(self.lstms)
for lstm_size in self.lstms:
lstm = lstm.sequence_lstm(lstm_size)
# The classifier is much more efficient if it runs across the entire
# dataset at once, so we want to squash (i.e. uncleave).
# Note: if phase is test, dropout is a noop.
return (lstm.squash_sequence()
.dropout(keep_prob=0.8, phase=phase)
.fully_connected(integerize.CHARS, activation_fn=None))
def train_op_loss(self, input_placeholder, labels, reuse=None):
# Training and eval graph
with tf.variable_scope(self.name, reuse=reuse):
# Core train graph
result = self.create(input_placeholder,
pt.Phase.train).softmax(labels)
train_op = pt.apply_optimizer(tf.train.AdagradOptimizer(0.5),
losses=[result.loss])
return train_op, result.loss
def eval_accuracy(self, input_placeholder, labels, reuse=True):
with tf.variable_scope(self.name, reuse=reuse):
# Eval graph
eval_result = self.create(input_placeholder,
pt.Phase.test).softmax(labels)
# Accuracy creates variables, so do it outside of scope
return eval_result.softmax.evaluate_classifier(labels,
phase=pt.Phase.test)
def inference_io(self, reuse):
with tf.variable_scope(self.name, reuse=reuse), pt.defaults_scope(
summary_collections=['INFERENCE_SUMMARIES']):
inf_input = tf.placeholder(tf.int32, [])
inf_logits = self.create(pt.wrap(inf_input).reshape([1, 1]),
pt.Phase.infer)
return inf_input, inf_logits
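# Editor's sketch (not from the original repo) of how this class is meant to be
# wired together. The placeholder shapes are assumptions; the real batch and
# timestep sizes come from samyro.integerize, which is not shown here:
#
#     model = Model('demo', embedding=16, lstms=(128, 256))
#     input_ph = tf.placeholder(tf.int32, [BATCH_SIZE, TIMESTEPS])
#     labels_ph = tf.placeholder(tf.float32,
#                                [BATCH_SIZE * TIMESTEPS, integerize.CHARS])
#     train_op, loss = model.train_op_loss(input_ph, labels_ph)
#     accuracy = model.eval_accuracy(input_ph, labels_ph, reuse=True)
#     inf_input, inf_logits = model.inference_io(reuse=True)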
| {
"content_hash": "529b6aefbbd8a1a511afe8b9c3cc53c6",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 76,
"avg_line_length": 41.15294117647059,
"alnum_prop": 0.5957690108633504,
"repo_name": "jkahn/samyro",
"id": "6be1e77fadd2cf88b4d0af2a018e526d3326bc18",
"size": "3498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samyro/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38917"
},
{
"name": "Shell",
"bytes": "137"
}
],
"symlink_target": ""
} |
import time
from oslo.config import cfg
import requests
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.extensions import portbindings
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log
from neutron.plugins.common import constants
from neutron.plugins.ml2 import driver_api as api
LOG = log.getLogger(__name__)
ODL_NETWORK = 'network'
ODL_NETWORKS = 'networks'
ODL_SUBNET = 'subnet'
ODL_SUBNETS = 'subnets'
ODL_PORT = 'port'
ODL_PORTS = 'ports'
odl_opts = [
cfg.StrOpt('url',
help=_("HTTP URL of OpenDaylight REST interface.")),
cfg.StrOpt('username',
help=_("HTTP username for authentication")),
cfg.StrOpt('password', secret=True,
help=_("HTTP password for authentication")),
cfg.IntOpt('timeout', default=10,
help=_("HTTP timeout in seconds.")),
cfg.IntOpt('session_timeout', default=30,
help=_("Tomcat session timeout in minutes.")),
]
cfg.CONF.register_opts(odl_opts, "ml2_odl")
def try_del(d, keys):
"""Ignore key errors when deleting from a dictionary."""
for key in keys:
try:
del d[key]
except KeyError:
pass
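# Example of try_del's intended use (hypothetical data): keys that are absent
# are simply skipped instead of raising KeyError.
#
#     net = {'id': 'abc', 'status': 'ACTIVE', 'name': 'demo'}
#     try_del(net, ['status', 'subnets'])  # 'subnets' is missing; no error
#     # net is now {'id': 'abc', 'name': 'demo'}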
class OpendaylightAuthError(n_exc.NeutronException):
message = '%(msg)s'
class JsessionId(requests.auth.AuthBase):
"""Attaches the JSESSIONID and JSESSIONIDSSO cookies to an HTTP Request.
If the cookies are not available or the session has expired, a new
set of cookies is obtained.
"""
def __init__(self, url, username, password):
"""Initialization function for JsessionId."""
# NOTE(kmestery) The 'limit' parameter is intended to limit how much
# data is returned from ODL. This is not implemented in the Hydrogen
# release of OpenDaylight, but will be implemented in the Helium
# timeframe. Hydrogen will silently ignore this value.
self.url = str(url) + '/' + ODL_NETWORKS + '?limit=1'
self.username = username
self.password = password
self.auth_cookies = None
self.last_request = None
self.expired = None
self.session_timeout = cfg.CONF.ml2_odl.session_timeout * 60
self.session_deadline = 0
def obtain_auth_cookies(self):
"""Make a REST call to obtain cookies for ODL authenticiation."""
try:
r = requests.get(self.url, auth=(self.username, self.password))
r.raise_for_status()
except requests.exceptions.HTTPError as e:
raise OpendaylightAuthError(msg=_("Failed to authenticate with "
"OpenDaylight: %s") % e)
except requests.exceptions.Timeout as e:
raise OpendaylightAuthError(msg=_("Authentication Timed"
" Out: %s") % e)
jsessionid = r.cookies.get('JSESSIONID')
jsessionidsso = r.cookies.get('JSESSIONIDSSO')
if jsessionid and jsessionidsso:
self.auth_cookies = dict(JSESSIONID=jsessionid,
JSESSIONIDSSO=jsessionidsso)
def __call__(self, r):
"""Verify timestamp for Tomcat session timeout."""
if time.time() > self.session_deadline:
self.obtain_auth_cookies()
self.session_deadline = time.time() + self.session_timeout
r.prepare_cookies(self.auth_cookies)
return r
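# Editor's sketch (not part of the driver): JsessionId plugs into the standard
# `auth=` hook of python-requests, so any call made through it transparently
# reuses or refreshes the Tomcat session cookies. The URL and credentials below
# are hypothetical:
#
#     base = 'http://odl.example.com:8080/controller/nb/v2/neutron'
#     auth = JsessionId(base, 'admin', 'admin')
#     r = requests.get(base + '/' + ODL_NETWORKS, auth=auth, timeout=10)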
class OpenDaylightMechanismDriver(api.MechanismDriver):
"""Mechanism Driver for OpenDaylight.
This driver was a port from the Tail-F NCS MechanismDriver. The API
exposed by ODL is slightly different from the API exposed by NCS,
but the general concepts are the same.
"""
auth = None
out_of_sync = True
def initialize(self):
self.url = cfg.CONF.ml2_odl.url
self.timeout = cfg.CONF.ml2_odl.timeout
self.username = cfg.CONF.ml2_odl.username
self.password = cfg.CONF.ml2_odl.password
required_opts = ('url', 'username', 'password')
for opt in required_opts:
if not getattr(self, opt):
raise cfg.RequiredOptError(opt, 'ml2_odl')
self.auth = JsessionId(self.url, self.username, self.password)
self.vif_type = portbindings.VIF_TYPE_OVS
self.vif_details = {portbindings.CAP_PORT_FILTER: True}
# Postcommit hooks are used to trigger synchronization.
def create_network_postcommit(self, context):
self.synchronize('create', ODL_NETWORKS, context)
def update_network_postcommit(self, context):
self.synchronize('update', ODL_NETWORKS, context)
def delete_network_postcommit(self, context):
self.synchronize('delete', ODL_NETWORKS, context)
def create_subnet_postcommit(self, context):
self.synchronize('create', ODL_SUBNETS, context)
def update_subnet_postcommit(self, context):
self.synchronize('update', ODL_SUBNETS, context)
def delete_subnet_postcommit(self, context):
self.synchronize('delete', ODL_SUBNETS, context)
def create_port_postcommit(self, context):
self.synchronize('create', ODL_PORTS, context)
def update_port_postcommit(self, context):
self.synchronize('update', ODL_PORTS, context)
def delete_port_postcommit(self, context):
self.synchronize('delete', ODL_PORTS, context)
def synchronize(self, operation, object_type, context):
"""Synchronize ODL with Neutron following a configuration change."""
if self.out_of_sync:
self.sync_full(context)
else:
self.sync_single_resource(operation, object_type, context)
def filter_create_network_attributes(self, network, context, dbcontext):
"""Filter out network attributes not required for a create."""
try_del(network, ['status', 'subnets'])
def filter_create_subnet_attributes(self, subnet, context, dbcontext):
"""Filter out subnet attributes not required for a create."""
pass
def filter_create_port_attributes(self, port, context, dbcontext):
"""Filter out port attributes not required for a create."""
self.add_security_groups(context, dbcontext, port)
# TODO(kmestery): Converting to uppercase due to ODL bug
# https://bugs.opendaylight.org/show_bug.cgi?id=477
port['mac_address'] = port['mac_address'].upper()
try_del(port, ['status'])
def sync_resources(self, resource_name, collection_name, resources,
context, dbcontext, attr_filter):
"""Sync objects from Neutron over to OpenDaylight.
This will handle syncing networks, subnets, and ports from Neutron to
OpenDaylight. It also filters out the requisite items which are not
valid for create API operations.
"""
to_be_synced = []
for resource in resources:
try:
urlpath = collection_name + '/' + resource['id']
self.sendjson('get', urlpath, None)
except requests.exceptions.HTTPError as e:
with excutils.save_and_reraise_exception() as ctx:
if e.response.status_code == requests.codes.not_found:
attr_filter(resource, context, dbcontext)
to_be_synced.append(resource)
ctx.reraise = False
key = resource_name if len(to_be_synced) == 1 else collection_name
# 400 errors are returned if an object exists, which we ignore.
self.sendjson('post', collection_name, {key: to_be_synced},
[requests.codes.bad_request])
@utils.synchronized('odl-sync-full')
def sync_full(self, context):
"""Resync the entire database to ODL.
Transition to the in-sync state on success.
Note: we only allow a single thread in here at a time.
"""
if not self.out_of_sync:
return
dbcontext = context._plugin_context
networks = context._plugin.get_networks(dbcontext)
subnets = context._plugin.get_subnets(dbcontext)
ports = context._plugin.get_ports(dbcontext)
self.sync_resources(ODL_NETWORK, ODL_NETWORKS, networks,
context, dbcontext,
self.filter_create_network_attributes)
self.sync_resources(ODL_SUBNET, ODL_SUBNETS, subnets,
context, dbcontext,
self.filter_create_subnet_attributes)
self.sync_resources(ODL_PORT, ODL_PORTS, ports,
context, dbcontext,
self.filter_create_port_attributes)
self.out_of_sync = False
def filter_update_network_attributes(self, network, context, dbcontext):
"""Filter out network attributes for an update operation."""
try_del(network, ['id', 'status', 'subnets', 'tenant_id'])
def filter_update_subnet_attributes(self, subnet, context, dbcontext):
"""Filter out subnet attributes for an update operation."""
try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr',
'allocation_pools', 'tenant_id'])
def filter_update_port_attributes(self, port, context, dbcontext):
"""Filter out port attributes for an update operation."""
self.add_security_groups(context, dbcontext, port)
try_del(port, ['network_id', 'id', 'status', 'mac_address',
'tenant_id', 'fixed_ips'])
create_object_map = {ODL_NETWORKS: filter_create_network_attributes,
ODL_SUBNETS: filter_create_subnet_attributes,
ODL_PORTS: filter_create_port_attributes}
update_object_map = {ODL_NETWORKS: filter_update_network_attributes,
ODL_SUBNETS: filter_update_subnet_attributes,
ODL_PORTS: filter_update_port_attributes}
def sync_single_resource(self, operation, object_type, context):
"""Sync over a single resource from Neutron to OpenDaylight.
Handle syncing a single operation over to OpenDaylight, and correctly
filter attributes out which are not required for the requisite
operation (create or update) being handled.
"""
try:
obj_id = context.current['id']
if operation == 'delete':
self.sendjson('delete', object_type + '/' + obj_id, None)
else:
if operation == 'create':
urlpath = object_type
method = 'post'
attr_filter = self.create_object_map[object_type]
elif operation == 'update':
urlpath = object_type + '/' + obj_id
method = 'put'
attr_filter = self.update_object_map[object_type]
resource = context.current.copy()
attr_filter(self, resource, context, context._plugin_context)
# 400 errors are returned if an object exists, which we ignore.
self.sendjson(method, urlpath, {object_type[:-1]: resource},
[requests.codes.bad_request])
except Exception:
with excutils.save_and_reraise_exception():
self.out_of_sync = True
def add_security_groups(self, context, dbcontext, port):
"""Populate the 'security_groups' field with entire records."""
groups = [context._plugin.get_security_group(dbcontext, sg)
for sg in port['security_groups']]
port['security_groups'] = groups
def sendjson(self, method, urlpath, obj, ignorecodes=[]):
"""Send json to the OpenDaylight controller."""
headers = {'Content-Type': 'application/json'}
data = jsonutils.dumps(obj, indent=2) if obj else None
url = '/'.join([self.url, urlpath])
LOG.debug(_('ODL-----> sending URL (%s) <-----ODL') % url)
LOG.debug(_('ODL-----> sending JSON (%s) <-----ODL') % obj)
r = requests.request(method, url=url,
headers=headers, data=data,
auth=self.auth, timeout=self.timeout)
# ignorecodes contains a list of HTTP error codes to ignore.
if r.status_code in ignorecodes:
return
r.raise_for_status()
def bind_port(self, context):
LOG.debug(_("Attempting to bind port %(port)s on "
"network %(network)s"),
{'port': context.current['id'],
'network': context.network.current['id']})
for segment in context.network.network_segments:
if self.check_segment(segment):
context.set_binding(segment[api.ID],
self.vif_type,
self.vif_details,
status=n_const.PORT_STATUS_ACTIVE)
LOG.debug(_("Bound using segment: %s"), segment)
return
else:
LOG.debug(_("Refusing to bind port for segment ID %(id)s, "
"segment %(seg)s, phys net %(physnet)s, and "
"network type %(nettype)s"),
{'id': segment[api.ID],
'seg': segment[api.SEGMENTATION_ID],
'physnet': segment[api.PHYSICAL_NETWORK],
'nettype': segment[api.NETWORK_TYPE]})
def check_segment(self, segment):
"""Verify a segment is valid for the OpenDaylight MechanismDriver.
Verify the requested segment is supported by ODL and return True or
False to indicate this to callers.
"""
network_type = segment[api.NETWORK_TYPE]
return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE,
constants.TYPE_VXLAN]
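# Editor's note (not part of the driver): for a single out-of-band change,
# sync_single_resource() above boils down to one of the following sendjson()
# calls; the resource body shown is hypothetical:
#
#     create port -> POST   <url>/ports       {"port": {<filtered attributes>}}
#     update port -> PUT    <url>/ports/<id>  {"port": {<filtered attributes>}}
#     delete port -> DELETE <url>/ports/<id>  (no body)
#
# A 400 response on create/update is treated as "already exists" and ignored;
# any other failure sets out_of_sync, so the next postcommit hook performs a
# full resync via sync_full().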
| {
"content_hash": "aa818860c7ebd706ecaffd028f931761",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 79,
"avg_line_length": 41.584070796460175,
"alnum_prop": 0.5994183159537491,
"repo_name": "sajuptpm/neutron-ipam",
"id": "a2a94874a681c1a90cb286430f8be5889d80795b",
"size": "14852",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/icehouse",
"path": "neutron/plugins/ml2/drivers/mechanism_odl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Makefile",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "9102565"
},
{
"name": "Shell",
"bytes": "9603"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
import os
import six
import struct
import sys
import unittest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import pfp
import pfp.errors
from pfp.fields import *
import pfp.utils
from pfp.bitwrap import BitwrappedStream
import utils
class TestBitwrap(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_bytes_read(self):
stream = six.BytesIO(pfp.utils.binary("abcd"))
bitwrapped = BitwrappedStream(stream)
res = bitwrapped.read(4)
self.assertEqual(pfp.utils.binary("abcd"), res)
def test_bits_read1(self):
stream = six.BytesIO(pfp.utils.binary(chr(int("01010101", 2))))
bitwrapped = BitwrappedStream(stream)
res = bitwrapped.read_bits(8)
self.assertEqual([0,1,0,1,0,1,0,1], res)
def test_bits_read2_padded1(self):
stream = six.BytesIO(pfp.utils.binary(chr(int("11110000",2)) + chr(int("10101010", 2))))
bitwrapped = BitwrappedStream(stream)
bitwrapped.padded = True
res = bitwrapped.read_bits(4)
self.assertEqual([1,1,1,1], res)
res = bitwrapped.read_bits(3)
self.assertEqual([0,0,0], res)
res = bitwrapped.read_bits(4)
self.assertEqual([0,1,0,1], res)
res = bitwrapped.read_bits(5)
self.assertEqual([0,1,0,1,0], res)
def test_bits_read2_padded2(self):
stream = six.BytesIO(pfp.utils.binary(chr(int("11110000",2)) + chr(int("10101010", 2))))
bitwrapped = BitwrappedStream(stream)
bitwrapped.padded = True
res = bitwrapped.read_bits(4)
self.assertEqual([1,1,1,1], res)
next_byte = bitwrapped.read(1)
self.assertEqual(pfp.utils.binary(chr(int("10101010", 2))), next_byte)
def test_bits_read_unpadded(self):
stream = six.BytesIO(pfp.utils.binary(chr(int("11110000",2)) + chr(int("10101010", 2))))
bitwrapped = BitwrappedStream(stream)
bitwrapped.padded = False
res = bitwrapped.read_bits(4)
self.assertEqual([1,1,1,1], res)
res = bitwrapped.read(1)
self.assertEqual(pfp.utils.binary(chr(int("00001010", 2))), res)
res = bitwrapped.read_bits(4)
self.assertEqual([1,0,1,0], res)
def test_bits_write_padded(self):
stream = six.BytesIO()
bitwrapped = BitwrappedStream(stream)
bitwrapped.padded = True
bitwrapped.write_bits([1,1,0,1])
# should go to a new byte now, zero padded after the
# 1101 bits
bitwrapped.write(pfp.utils.binary("hello"))
self.assertEqual(stream.getvalue(), pfp.utils.binary(chr(int("11010000", 2)) + "hello"))
def test_unconsumed_ranges1(self):
stream = six.BytesIO(pfp.utils.binary("A" * 100))
bitwrapped = BitwrappedStream(stream)
bitwrapped.read(10)
bitwrapped.seek(bitwrapped.tell()+10)
bitwrapped.read(10)
bitwrapped.seek(bitwrapped.tell()+10)
bitwrapped.read(10)
uranges = bitwrapped.unconsumed_ranges()
# positions 11-19 should lie in an unconsumed range; 10 and 20 should not
self.assertEqual(len(uranges[11]), 1)
self.assertEqual(len(uranges[10]), 0)
self.assertEqual(len(uranges[19]), 1)
self.assertEqual(len(uranges[20]), 0)
# positions 31-39 should lie in an unconsumed range; 30 and 40 should not
self.assertEqual(len(uranges[31]), 1)
self.assertEqual(len(uranges[30]), 0)
self.assertEqual(len(uranges[39]), 1)
self.assertEqual(len(uranges[40]), 0)
def test_unconsumed_ranges2(self):
stream = six.BytesIO(pfp.utils.binary("A" * 100))
bitwrapped = BitwrappedStream(stream)
bitwrapped.read(10)
bitwrapped.seek(bitwrapped.tell()+10)
# it should not need a second read to add the
# unconsumed range
uranges = bitwrapped.unconsumed_ranges()
self.assertEqual(len(uranges), 1)
# positions 11-19 should lie in an unconsumed range; 10 and 20 should not
self.assertEqual(len(uranges[11]), 1)
self.assertEqual(len(uranges[10]), 0)
self.assertEqual(len(uranges[19]), 1)
self.assertEqual(len(uranges[20]), 0)
def test_unconsumed_ranges3(self):
stream = six.BytesIO(pfp.utils.binary("A" * 100))
bitwrapped = BitwrappedStream(stream)
bitwrapped.read(10)
# it should not need a second read to add the
# unconsumed range
uranges = bitwrapped.unconsumed_ranges()
self.assertEqual(len(uranges), 0)
if __name__ == "__main__":
unittest.main()
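# Editor's summary of the padded/unpadded behaviour exercised above (not part
# of the original test suite): with padded = True, a byte-level read() that
# follows a partial read_bits() realigns to the next byte boundary, discarding
# the leftover bits of the current byte; with padded = False, read() continues
# from the current bit position, so the returned byte can straddle two input
# bytes. A minimal sketch of the unpadded case, mirroring the tests:
#
#     bw = BitwrappedStream(six.BytesIO(b'\xf0\xaa'))  # 11110000 10101010
#     bw.padded = False
#     bw.read_bits(4)  # -> [1, 1, 1, 1]
#     bw.read(1)       # -> b'\x0a' (0000 from byte 1 + 1010 from byte 2)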
| {
"content_hash": "4d1b64ee51eceef5a6e7b734dfa3210d",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 90,
"avg_line_length": 27.28125,
"alnum_prop": 0.6991981672394043,
"repo_name": "AmesianX/pfp",
"id": "fc0c66b65860707c5ec359c9ff176cc5efb6c1da",
"size": "4406",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_bitwrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "305903"
},
{
"name": "Shell",
"bytes": "386"
}
],
"symlink_target": ""
} |
"""Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
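# A minimal sketch of how the two helpers above are used by the tests below
# (the fold and sample counts are arbitrary): every sample index must appear in
# exactly one test fold, and train/test must never overlap.
#
#     cv = cval.KFold(10, 5)
#     check_cv_coverage(cv, expected_n_iter=5, n_samples=10)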
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 3]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Check that errors are raised if all n_labels for individual
# classes are less than n_folds.
y = [3, 3, -1, -1, 2]
assert_raises(ValueError, cval.StratifiedKFold, y, 3)
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
error_string = ("k-fold cross validation requires at least one"
" train / test split")
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 0)
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 1)
# When n is not an integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not an integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Check that the folds keep the class proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test that StratifiedShuffleSplit draws indices with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
splits = cval.StratifiedShuffleSplit(labels, n_iter=1,
test_size=0.5, random_state=0)
train, test = next(iter(splits))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((mask_train, mask_test))  # pass the boolean masks, not the index arrays
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (classes are balanced so f1_score should be equal to the
# zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check that cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
from collections.abc import Iterable
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.random import sample_without_replacement
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions.
"""
if dimensions > 30:
return np.hstack([rng.randint(2, size=(samples, dimensions - 30)),
_generate_hypercube(samples, 30, rng)])
out = sample_without_replacement(2 ** dimensions, samples,
random_state=rng).astype(dtype='>u4',
copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, *, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of an ``n_informative``-dimensional hypercube with sides of
length ``2*class_sep`` and assigns an equal number of clusters to each
class. It introduces interdependence between these features and adds
various types of further noise to the data.
Without shuffling, ``X`` horizontally stacks features in the following
order: the primary ``n_informative`` features, followed by ``n_redundant``
linear combinations of the informative features, followed by ``n_repeated``
duplicates, drawn randomly with replacement from the informative and
redundant features. The remaining features are filled with random noise.
Thus, without shuffling, all useful features are contained in the columns
``X[:, :n_informative + n_redundant + n_repeated]``.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=20
The total number of features. These comprise ``n_informative``
informative features, ``n_redundant`` redundant features,
``n_repeated`` duplicated features and
``n_features-n_informative-n_redundant-n_repeated`` useless features
drawn at random.
n_informative : int, default=2
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension ``n_informative``. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, default=2
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, default=0
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, default=2
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, default=2
The number of clusters per class.
weights : array-like of shape (n_classes,) or (n_classes - 1,),\
default=None
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if ``len(weights) == n_classes - 1``,
then the last class weight is automatically inferred.
More than ``n_samples`` samples may be returned if the sum of
``weights`` exceeds 1. Note that the actual class proportions will
not exactly match ``weights`` when ``flip_y`` isn't 0.
flip_y : float, default=0.01
The fraction of samples whose class is assigned randomly. Larger
values introduce noise in the labels and make the classification
        task harder. Note that the default setting ``flip_y > 0`` might lead
        to fewer than ``n_classes`` distinct classes in ``y`` in some cases.
class_sep : float, default=1.0
The factor multiplying the hypercube size. Larger values spread
out the clusters/classes and make the classification task easier.
hypercube : bool, default=True
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, ndarray of shape (n_features,) or None, default=0.0
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, ndarray of shape (n_features,) or None, default=1.0
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : bool, default=True
Shuffle the samples and the features.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See Also
--------
make_blobs : Simplified variant.
make_multilabel_classification : Unrelated generator for multilabel tasks.
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
# Use log2 to avoid overflow errors
if n_informative < np.log2(n_classes * n_clusters_per_class):
msg = "n_classes({}) * n_clusters_per_class({}) must be"
msg += " smaller or equal 2**n_informative({})={}"
raise ValueError(msg.format(n_classes, n_clusters_per_class,
n_informative, 2**n_informative))
if weights is not None:
if len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
if len(weights) == n_classes - 1:
if isinstance(weights, list):
weights = weights + [1.0 - sum(weights)]
else:
weights = np.resize(weights, n_classes)
weights[-1] = 1.0 - sum(weights[:-1])
else:
weights = [1.0 / n_classes] * n_classes
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
# Distribute samples among clusters by weight
n_samples_per_cluster = [
int(n_samples * weights[k % n_classes] / n_clusters_per_class)
for k in range(n_clusters)]
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float, copy=False)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
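# --- Illustrative usage sketch (editor's addition; `_demo_make_classification` is a
# hypothetical helper, not part of the original module). With ``shuffle=False`` the
# column layout described in the docstring can be checked directly: the redundant
# block is an exact linear combination of the informative block.
def _demo_make_classification():
    X, y = make_classification(n_samples=200, n_features=10, n_informative=3,
                               n_redundant=2, n_repeated=1, n_classes=2,
                               shuffle=False, random_state=0)
    informative, redundant = X[:, :3], X[:, 3:5]
    # The redundant columns lie in the column space of the informative ones, so a
    # least-squares fit reconstructs them up to numerical precision.
    coefs, *_ = np.linalg.lstsq(informative, redundant, rcond=None)
    assert np.allclose(informative @ coefs, redundant)
    # Labels stay within the requested number of classes.
    assert set(np.unique(y)) <= {0, 1}
    return X, y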
def make_multilabel_classification(n_samples=100, n_features=20, *,
n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=20
The total number of features.
n_classes : int, default=5
The number of classes of the classification problem.
n_labels : int, default=2
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, default=50
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, default=True
If ``True``, some instances might not belong to any class.
sparse : bool, default=False
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : {'dense', 'sparse'} or False, default='dense'
If ``'dense'`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, default=False
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
The label sets. Sparse matrix should be of CSR format.
p_c : ndarray of shape (n_classes,)
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : ndarray of shape (n_features, n_classes)
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
if n_classes < 1:
raise ValueError(
"'n_classes' should be an integer greater than 0. Got {} instead."
.format(n_classes)
)
if length < 1:
raise ValueError(
"'length' should be an integer greater than 0. Got {} instead."
.format(length)
)
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
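# --- Illustrative usage sketch (editor's addition; `_demo_make_multilabel` is a
# hypothetical helper). It shows the default dense indicator output and the
# bag-of-words nature of X: entries are non-negative word counts and Y is a
# binary indicator matrix over ``n_classes`` labels.
def _demo_make_multilabel():
    X, Y = make_multilabel_classification(n_samples=50, n_features=20, n_classes=5,
                                          n_labels=2, random_state=0)
    assert X.shape == (50, 20) and Y.shape == (50, 5)
    assert (X >= 0).all()
    assert set(np.unique(Y)) <= {0, 1}
    return X, Y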
def make_hastie_10_2(n_samples=12000, *, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=12000
The number of samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 10)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See Also
--------
make_gaussian_quantiles : A generalization of this dataset approach.
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64, copy=False)
y[y == 0.0] = -1.0
return X, y
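# --- Illustrative sketch (editor's addition; `_demo_make_hastie_10_2` is a
# hypothetical helper). It verifies the labelling rule quoted in the docstring:
# y is +1 where the squared norm of a row exceeds 9.34 and -1 otherwise.
def _demo_make_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=1000, random_state=0)
    expected = np.where((X ** 2).sum(axis=1) > 9.34, 1.0, -1.0)
    assert np.array_equal(y, expected)
    return X, y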
def make_regression(n_samples=100, n_features=100, *, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=100
The number of features.
n_informative : int, default=10
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, default=1
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, default=0.0
The bias term in the underlying linear model.
effective_rank : int, default=None
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float, default=0.5
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None. When a float, it should be
between 0 and 1.
noise : float, default=0.0
The standard deviation of the gaussian noise applied to the output.
shuffle : bool, default=True
Shuffle the samples and the features.
coef : bool, default=False
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The input samples.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
The output values.
coef : ndarray of shape (n_features,) or (n_features, n_targets)
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
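# --- Illustrative sketch (editor's addition; `_demo_make_regression` is a
# hypothetical helper). With ``coef=True`` and ``noise=0`` the targets are exactly
# the linear model ``X @ coef + bias``, so the returned ground-truth coefficients
# can be sanity-checked directly.
def _demo_make_regression():
    X, y, coef = make_regression(n_samples=50, n_features=8, n_informative=3,
                                 bias=2.0, noise=0.0, coef=True, random_state=0)
    assert np.allclose(y, X @ coef + 2.0)
    # Only ``n_informative`` coefficients are non-zero.
    assert np.count_nonzero(coef) <= 3
    return X, y, coef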
def make_circles(n_samples=100, *, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int or tuple of shape (2,), dtype=int, default=100
If int, it is the total number of points generated.
For odd numbers, the inner circle will have one point more than the
outer circle.
If two-element tuple, number of points in outer circle and inner
circle.
.. versionchanged:: 0.23
Added two-element tuple.
shuffle : bool, default=True
Whether to shuffle the samples.
noise : float, default=None
Standard deviation of Gaussian noise added to the data.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling and noise.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
factor : float, default=.8
Scale factor between inner and outer circle in the range `(0, 1)`.
Returns
-------
X : ndarray of shape (n_samples, 2)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels (0 or 1) for class membership of each sample.
"""
if factor >= 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
if isinstance(n_samples, numbers.Integral):
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
else:
try:
n_samples_out, n_samples_in = n_samples
except ValueError as e:
raise ValueError('`n_samples` can be either an int or '
'a two-element tuple.') from e
generator = check_random_state(random_state)
# so as not to have the first point = last point, we set endpoint=False
linspace_out = np.linspace(0, 2 * np.pi, n_samples_out, endpoint=False)
linspace_in = np.linspace(0, 2 * np.pi, n_samples_in, endpoint=False)
outer_circ_x = np.cos(linspace_out)
outer_circ_y = np.sin(linspace_out)
inner_circ_x = np.cos(linspace_in) * factor
inner_circ_y = np.sin(linspace_in) * factor
X = np.vstack([np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y)]).T
y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
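# --- Illustrative sketch (editor's addition; `_demo_make_circles` is a hypothetical
# helper). Without noise, class 0 sits on the unit circle and class 1 on a circle of
# radius ``factor``, which the point norms confirm.
def _demo_make_circles():
    X, y = make_circles(n_samples=100, noise=None, factor=0.5, random_state=0)
    radii = np.hypot(X[:, 0], X[:, 1])
    assert np.allclose(radii[y == 0], 1.0)
    assert np.allclose(radii[y == 1], 0.5)
    return X, y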
def make_moons(n_samples=100, *, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles.
A simple toy dataset to visualize clustering and classification
algorithms. Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int or tuple of shape (2,), dtype=int, default=100
If int, the total number of points generated.
If two-element tuple, number of points in each of two moons.
.. versionchanged:: 0.23
Added two-element tuple.
shuffle : bool, default=True
Whether to shuffle the samples.
noise : float, default=None
Standard deviation of Gaussian noise added to the data.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling and noise.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 2)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels (0 or 1) for class membership of each sample.
"""
if isinstance(n_samples, numbers.Integral):
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
else:
try:
n_samples_out, n_samples_in = n_samples
except ValueError as e:
raise ValueError('`n_samples` can be either an int or '
'a two-element tuple.') from e
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack([np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y)]).T
y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
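# --- Illustrative sketch (editor's addition; `_demo_make_moons` is a hypothetical
# helper). A two-element tuple controls the number of points per moon; label 0 marks
# the outer arc and label 1 the inner arc.
def _demo_make_moons():
    X, y = make_moons(n_samples=(30, 70), noise=0.05, random_state=0)
    assert X.shape == (100, 2)
    assert np.sum(y == 0) == 30 and np.sum(y == 1) == 70
    return X, y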
def make_blobs(n_samples=100, n_features=2, *, centers=None, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None,
return_centers=False):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int or array-like, default=100
If int, it is the total number of points equally divided among
clusters.
If array-like, each element of the sequence indicates
the number of samples per cluster.
.. versionchanged:: v0.20
one can now pass an array-like to the ``n_samples`` parameter
n_features : int, default=2
The number of features for each sample.
centers : int or ndarray of shape (n_centers, n_features), default=None
The number of centers to generate, or the fixed center locations.
If n_samples is an int and centers is None, 3 centers are generated.
If n_samples is array-like, centers must be
either None or an array of length equal to the length of n_samples.
cluster_std : float or array-like of float, default=1.0
The standard deviation of the clusters.
center_box : tuple of float (min, max), default=(-10.0, 10.0)
The bounding box for each cluster center when centers are
generated at random.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
return_centers : bool, default=False
If True, then return the centers of each cluster
.. versionadded:: 0.23
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels for cluster membership of each sample.
centers : ndarray of shape (n_centers, n_features)
The centers of each cluster. Only returned if
``return_centers=True``.
Examples
--------
>>> from sklearn.datasets import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
>>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0])
See Also
--------
make_classification : A more intricate variant.
"""
generator = check_random_state(random_state)
if isinstance(n_samples, numbers.Integral):
# Set n_centers by looking at centers arg
if centers is None:
centers = 3
if isinstance(centers, numbers.Integral):
n_centers = centers
centers = generator.uniform(center_box[0], center_box[1],
size=(n_centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
n_centers = centers.shape[0]
else:
# Set n_centers by looking at [n_samples] arg
n_centers = len(n_samples)
if centers is None:
centers = generator.uniform(center_box[0], center_box[1],
size=(n_centers, n_features))
try:
assert len(centers) == n_centers
except TypeError as e:
raise ValueError("Parameter `centers` must be array-like. "
"Got {!r} instead".format(centers)) from e
except AssertionError as e:
raise ValueError(
f"Length of `n_samples` not consistent with number of "
f"centers. Got n_samples = {n_samples} and centers = {centers}"
) from e
else:
centers = check_array(centers)
n_features = centers.shape[1]
# stds: if cluster_std is given as list, it must be consistent
# with the n_centers
if (hasattr(cluster_std, "__len__") and len(cluster_std) != n_centers):
raise ValueError("Length of `clusters_std` not consistent with "
"number of centers. Got centers = {} "
"and cluster_std = {}".format(centers, cluster_std))
if isinstance(cluster_std, numbers.Real):
cluster_std = np.full(len(centers), cluster_std)
X = []
y = []
if isinstance(n_samples, Iterable):
n_samples_per_center = n_samples
else:
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(generator.normal(loc=centers[i], scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
total_n_samples = np.sum(n_samples)
indices = np.arange(total_n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
if return_centers:
return X, y, centers
else:
return X, y
def make_friedman1(n_samples=100, n_features=10, *, noise=0.0,
random_state=None):
"""Generate the "Friedman #1" regression problem.
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=10
The number of features. Should be at least 5.
noise : float, default=0.0
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset noise. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
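# --- Illustrative sketch (editor's addition; `_demo_make_friedman1` is a hypothetical
# helper). With ``noise=0`` the targets follow the closed-form Friedman #1 formula
# exactly, and only the first five features enter it.
def _demo_make_friedman1():
    X, y = make_friedman1(n_samples=20, n_features=10, noise=0.0, random_state=0)
    expected = (10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2
                + 10 * X[:, 3] + 5 * X[:, 4])
    assert np.allclose(y, expected)
    return X, y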
def make_friedman2(n_samples=100, *, noise=0.0, random_state=None):
"""Generate the "Friedman #2" regression problem.
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
noise : float, default=0.0
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset noise. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 4)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, *, noise=0.0, random_state=None):
"""Generate the "Friedman #3" regression problem.
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
noise : float, default=0.0
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset noise. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 4)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, *, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values.
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=100
The number of features.
effective_rank : int, default=10
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float, default=0.5
The relative importance of the fat noisy tail of the singular values
profile. The value should be between 0 and 1.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic',
check_finite=False)
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic',
check_finite=False)
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
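# --- Illustrative sketch (editor's addition; `_demo_make_low_rank_matrix` is a
# hypothetical helper). Because the matrix is built as U * diag(s) * V.T with
# orthonormal U and V, its singular values follow the bell-shaped profile above and
# most of the spectral energy sits in roughly the first ``effective_rank`` components.
def _demo_make_low_rank_matrix():
    X = make_low_rank_matrix(n_samples=100, n_features=50, effective_rank=5,
                             tail_strength=0.1, random_state=0)
    s = np.linalg.svd(X, compute_uv=False)
    energy = np.cumsum(s ** 2) / np.sum(s ** 2)
    # The first ~2 * effective_rank singular values carry the bulk of the energy.
    assert energy[10] > 0.9
    return X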
def make_sparse_coded_signal(n_samples, *, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
Number of samples to generate
n_components : int
Number of components in the dictionary
n_features : int
Number of features of the dataset to generate
n_nonzero_coefs : int
Number of active (non-zero) coefficients in each sample
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
data : ndarray of shape (n_features, n_samples)
The encoded signal (Y).
dictionary : ndarray of shape (n_features, n_components)
The dictionary with normalized components (D).
code : ndarray of shape (n_components, n_samples)
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
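# --- Illustrative sketch (editor's addition; `_demo_make_sparse_coded_signal` is a
# hypothetical helper). It checks the defining properties: Y == D @ X, the dictionary
# atoms have unit norm, and every column of the code has exactly ``n_nonzero_coefs``
# non-zero entries.
def _demo_make_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=15, n_components=30, n_features=20,
                                       n_nonzero_coefs=4, random_state=0)
    assert np.allclose(Y, D @ X)
    assert np.allclose(np.linalg.norm(D, axis=0), 1.0)
    assert (np.count_nonzero(X, axis=0) == 4).all()
    return Y, D, X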
def make_sparse_uncorrelated(n_samples=100, n_features=10, *,
random_state=None):
"""Generate a random regression problem with sparse uncorrelated design.
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=10
The number of features.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, *, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_dim, n_dim)
The random symmetric, positive-definite matrix.
See Also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, _, Vt = linalg.svd(np.dot(A.T, A), check_finite=False)
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), Vt)
return X
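# --- Illustrative sketch (editor's addition; `_demo_make_spd_matrix` is a hypothetical
# helper). A symmetric positive-definite matrix must equal its transpose and have
# strictly positive eigenvalues, both of which are cheap to verify.
def _demo_make_spd_matrix():
    M = make_spd_matrix(5, random_state=0)
    assert np.allclose(M, M.T)
    assert (np.linalg.eigvalsh(M) > 0).all()
    return M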
def make_sparse_spd_matrix(dim=1, *, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim : int, default=1
The size of the random matrix to generate.
alpha : float, default=0.95
The probability that a coefficient is zero (see notes). Larger values
        enforce more sparsity. The value should be in the range [0, 1].
norm_diag : bool, default=False
Whether to normalize the output matrix to make the leading diagonal
        elements all 1.
smallest_coef : float, default=0.1
The value of the smallest coefficient between 0 and 1.
largest_coef : float, default=0.9
The value of the largest coefficient between 0 and 1.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See Also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
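# --- Illustrative sketch (editor's addition; `_demo_make_sparse_spd_matrix` is a
# hypothetical helper). The matrix stays symmetric positive-definite while ``alpha``
# controls how many entries of the underlying Cholesky factor are zeroed, which only
# indirectly determines the sparsity of the result.
def _demo_make_sparse_spd_matrix():
    prec = make_sparse_spd_matrix(dim=10, alpha=0.95, random_state=0)
    assert np.allclose(prec, prec.T)
    assert (np.linalg.eigvalsh(prec) > 0).all()
    # Fraction of exactly-zero entries (grows with alpha, but is seed dependent).
    sparsity = np.mean(prec == 0)
    return prec, sparsity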
def make_swiss_roll(n_samples=100, *, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
        The number of sample points on the Swiss Roll.
noise : float, default=0.0
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 3)
The points.
t : ndarray of shape (n_samples,)
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, *, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of sample points on the S curve.
noise : float, default=0.0
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 3)
The points.
t : ndarray of shape (n_samples,)
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(*, mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
r"""Generate isotropic Gaussian and label samples by quantile.
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : ndarray of shape (n_features,), default=None
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, default=1.0
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, default=100
The total number of points equally divided among classes.
n_features : int, default=2
The number of features for each sample.
n_classes : int, default=3
        The number of classes.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
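# --- Illustrative sketch (editor's addition; `_demo_make_gaussian_quantiles` is a
# hypothetical helper). Classes are quantile shells of the squared distance from the
# mean, so class sizes are equal and class 0 holds the points closest to the mean.
def _demo_make_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=300, n_features=2, n_classes=3,
                                   random_state=0)
    assert np.array_equal(np.bincount(y), [100, 100, 100])
    d = np.sum(X ** 2, axis=1)  # squared distance from the default mean (origin)
    assert d[y == 0].max() <= d[y == 2].min()
    return X, y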
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, *, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable of shape (n_rows, n_cols)
The shape of the result.
n_clusters : int
The number of biclusters.
noise : float, default=0.0
The standard deviation of the gaussian noise.
minval : int, default=10
Minimum value of a bicluster.
maxval : int, default=100
Maximum value of a bicluster.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape `shape`
The generated array.
rows : ndarray of shape (n_clusters, X.shape[0])
The indicators for cluster membership of each row.
cols : ndarray of shape (n_clusters, X.shape[1])
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See Also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack([row_labels == c for c in range(n_clusters)])
cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
def make_checkerboard(shape, n_clusters, *, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : tuple of shape (n_rows, n_cols)
The shape of the result.
    n_clusters : int or array-like of shape (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, default=0.0
The standard deviation of the gaussian noise.
minval : int, default=10
Minimum value of a bicluster.
maxval : int, default=100
Maximum value of a bicluster.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape `shape`
The generated array.
rows : ndarray of shape (n_clusters, X.shape[0])
The indicators for cluster membership of each row.
cols : ndarray of shape (n_clusters, X.shape[1])
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See Also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack([row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters)])
cols = np.vstack([col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters)])
return result, rows, cols
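# Illustrative usage sketch (added for clarity; not part of the original
# scikit-learn module). With n_clusters=(4, 3) the checkerboard contains
# 4 * 3 = 12 blocks, so both indicator arrays below have 12 rows. The helper
# name is an arbitrary placeholder.
def _demo_make_checkerboard():
    X, rows, cols = make_checkerboard(shape=(200, 100), n_clusters=(4, 3),
                                      noise=1.0, shuffle=False, random_state=42)
    assert X.shape == (200, 100)
    assert rows.shape == (12, 200)  # one indicator per (row cluster, column cluster) pair
    assert cols.shape == (12, 100)
    return X, rows, cols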
| {
"content_hash": "ced82d8bf9d7d541205e75042836f677",
"timestamp": "",
"source": "github",
"line_count": 1762,
"max_line_length": 79,
"avg_line_length": 34.4228149829739,
"alnum_prop": 0.6096153529091718,
"repo_name": "kevin-intel/scikit-learn",
"id": "3a9e1812cb1e771c31b13be690b205ffb00b2f99",
"size": "60653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/datasets/_samples_generator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1579"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6394128"
},
{
"name": "Shell",
"bytes": "9250"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/tatooine/shared_planter_window_style_01.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| {
"content_hash": "8263858185737c3ebe5c9af2ed75b3a4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 88,
"avg_line_length": 24.384615384615383,
"alnum_prop": 0.7003154574132492,
"repo_name": "obi-two/Rebelion",
"id": "1efc7affedd46d21964a2eb3aedff3deaa728f96",
"size": "462",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/static/structure/tatooine/shared_planter_window_style_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(name="mcp", version="5.2",
packages=find_packages(exclude=('mcp_test', 'mcp_test.*')))
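# Usage note (added; a sketch of the usual packaging workflow, not taken from
# this repository):
#   python setup.py sdist   # build a source tarball under dist/
#   pip install .           # install the package into the current environment
# The exclude=('mcp_test', 'mcp_test.*') argument keeps the test packages out
# of the installed distribution.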
| {
"content_hash": "ff98b0c1077e4fcbc4350ae19b367595",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 67,
"avg_line_length": 48.333333333333336,
"alnum_prop": 0.6827586206896552,
"repo_name": "sassoftware/mcp",
"id": "5f0bb0f6a55ea92daac614d354b8c199d952e0bb",
"size": "732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30192"
},
{
"name": "Shell",
"bytes": "3882"
}
],
"symlink_target": ""
} |
import Queue
import StringIO
import codecs
import json
import logging
import os
import platform
import re
import sys
import thread
import time
import threading
import unittest
from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.port import Port
from webkitpy.layout_tests.port import test
from webkitpy.tool import grammar
from webkitpy.tool.mocktool import MockOptions
def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
extra_args = extra_args or []
args = []
    if '--platform' not in extra_args:
args.extend(['--platform', 'test'])
if not new_results:
args.append('--no-new-test-results')
    if '--child-processes' not in extra_args:
args.extend(['--child-processes', 1])
args.extend(extra_args)
if not tests_included:
# We use the glob to test that globbing works.
args.extend(['passes',
'http/tests',
'websocket/tests',
'failures/expected/*'])
return run_webkit_tests.parse_args(args)
def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
options, parsed_args = parse_args(extra_args, tests_included)
if not port_obj:
host = host or MockHost()
port_obj = host.port_factory.get(port_name=options.platform, options=options)
if shared_port:
port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
logging_stream = StringIO.StringIO()
run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
return run_details.exit_code == 0
def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
options, parsed_args = parse_args(extra_args=extra_args,
tests_included=tests_included,
print_nothing=False, new_results=new_results)
host = host or MockHost()
if not port_obj:
port_obj = host.port_factory.get(port_name=options.platform, options=options)
run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
return (run_details, output, host.user)
def run_and_capture(port_obj, options, parsed_args, shared_port=True):
if shared_port:
port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
oc = outputcapture.OutputCapture()
try:
oc.capture_output()
logging_stream = StringIO.StringIO()
run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
finally:
oc.restore_output()
return (run_details, logging_stream)
def get_tests_run(args, host=None, port_obj=None):
results = get_test_results(args, host=host, port_obj=port_obj)
return [result.test_name for result in results]
def get_test_batches(args, host=None):
results = get_test_results(args, host)
batches = []
batch = []
current_pid = None
for result in results:
if batch and result.pid != current_pid:
batches.append(batch)
batch = []
batch.append(result.test_name)
if batch:
batches.append(batch)
return batches
def get_test_results(args, host=None, port_obj=None):
options, parsed_args = parse_args(args, tests_included=True)
host = host or MockHost()
port_obj = port_obj or host.port_factory.get(port_name=options.platform, options=options)
oc = outputcapture.OutputCapture()
oc.capture_output()
logging_stream = StringIO.StringIO()
try:
run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
finally:
oc.restore_output()
all_results = []
if run_details.initial_results:
all_results.extend(run_details.initial_results.all_results)
if run_details.retry_results:
all_results.extend(run_details.retry_results.all_results)
return all_results
def parse_full_results(full_results_text):
json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
compressed_results = json.loads(json_to_eval)
return compressed_results
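# Illustrative sketch (added): the full-results file is written as a JSONP-style
# payload, so parse_full_results() strips the ADD_RESULTS(...) wrapper before
# decoding. The literal below is a made-up example of that shape, not real data.
def _demo_parse_full_results():
    wrapped = 'ADD_RESULTS({"version": 4, "num_regressions": 0});'
    results = parse_full_results(wrapped)
    assert results == {"version": 4, "num_regressions": 0}
    return results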
class StreamTestingMixin(object):
def assertContains(self, stream, string):
self.assertTrue(string in stream.getvalue())
def assertEmpty(self, stream):
self.assertFalse(stream.getvalue())
def assertNotEmpty(self, stream):
self.assertTrue(stream.getvalue())
class RunTest(unittest.TestCase, StreamTestingMixin):
def setUp(self):
# A real PlatformInfo object is used here instead of a
# MockPlatformInfo because we need to actually check for
# Windows and Mac to skip some tests.
self._platform = SystemHost().platform
# FIXME: Remove this when we fix test-webkitpy to work
# properly on cygwin (bug 63846).
self.should_test_processes = not self._platform.is_win()
def test_basic(self):
options, args = parse_args(tests_included=True)
logging_stream = StringIO.StringIO()
host = MockHost()
port_obj = host.port_factory.get(options.platform, options)
details = run_webkit_tests.run(port_obj, options, args, logging_stream)
# These numbers will need to be updated whenever we add new tests.
self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
self.assertEqual(details.retry_results.total, test.UNEXPECTED_FAILURES)
expected_tests = details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name)
expected_summary_str = ''
if details.initial_results.expected_failures > 0:
expected_summary_str = " (%d passed, %d didn't)" % (expected_tests - details.initial_results.expected_failures, details.initial_results.expected_failures)
one_line_summary = "%d tests ran as expected%s, %d didn't:\n" % (
expected_tests,
expected_summary_str,
len(details.initial_results.unexpected_results_by_name))
self.assertTrue(one_line_summary in logging_stream.buflist)
# Ensure the results were summarized properly.
self.assertEqual(details.summarized_failing_results['num_regressions'], details.exit_code)
# Ensure the results were written out and displayed.
failing_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
json_to_eval = failing_results_text.replace("ADD_RESULTS(", "").replace(");", "")
self.assertEqual(json.loads(json_to_eval), details.summarized_failing_results)
full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
self.assertEqual(json.loads(full_results_text), details.summarized_full_results)
self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
def test_batch_size(self):
batch_tests_run = get_test_batches(['--batch-size', '2'])
for batch in batch_tests_run:
self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))
def test_max_locked_shards(self):
# Tests for the default of using one locked shard even in the case of more than one child process.
if not self.should_test_processes:
return
save_env_webkit_test_max_locked_shards = None
if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
_, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
try:
self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
finally:
if save_env_webkit_test_max_locked_shards:
os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards
def test_child_processes_2(self):
if self.should_test_processes:
_, regular_output, _ = logging_run(
['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))
def test_child_processes_min(self):
if self.should_test_processes:
_, regular_output, _ = logging_run(
['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/virtual_passes', 'passes'],
tests_included=True, shared_port=False)
self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))
def test_dryrun(self):
tests_run = get_tests_run(['--dry-run'])
self.assertEqual(tests_run, [])
tests_run = get_tests_run(['-n'])
self.assertEqual(tests_run, [])
def test_enable_sanitizer(self):
self.assertTrue(passing_run(['--enable-sanitizer', 'failures/expected/text.html']))
def test_exception_raised(self):
# Exceptions raised by a worker are treated differently depending on
        # whether they are raised in-process or out of process. Inline exceptions
        # work as normal, which allows us to get the full stack trace and traceback from the
# worker. The downside to this is that it could be any error, but this
# is actually useful in testing.
#
# Exceptions raised in a separate process are re-packaged into
# WorkerExceptions (a subclass of BaseException), which have a string capture of the stack which can
# be printed, but don't display properly in the unit test exception handlers.
self.assertRaises(BaseException, logging_run,
['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)
if self.should_test_processes:
self.assertRaises(BaseException, logging_run,
['--child-processes', '2', '--skipped=ignore', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)
def test_device_failure(self):
# Test that we handle a device going offline during a test properly.
details, regular_output, _ = logging_run(['failures/expected/device_failure.html'], tests_included=True)
self.assertEqual(details.exit_code, 0)
self.assertTrue('worker/0 has failed' in regular_output.getvalue())
def test_full_results_html(self):
host = MockHost()
details, _, _ = logging_run(['--full-results-html'], host=host)
self.assertEqual(details.exit_code, 0)
self.assertEqual(len(host.user.opened_urls), 1)
def test_keyboard_interrupt(self):
# Note that this also tests running a test marked as SKIP if
# you specify it explicitly.
details, _, _ = logging_run(['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)
self.assertEqual(details.exit_code, test_run_results.INTERRUPTED_EXIT_STATUS)
if self.should_test_processes:
_, regular_output, _ = logging_run(['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--skipped=ignore'], tests_included=True, shared_port=False)
self.assertTrue(any(['Interrupted, exiting' in line for line in regular_output.buflist]))
def test_no_tests_found(self):
details, err, _ = logging_run(['resources'], tests_included=True)
self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
self.assertContains(err, 'No tests to run.\n')
def test_no_tests_found_2(self):
details, err, _ = logging_run(['foo'], tests_included=True)
self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
self.assertContains(err, 'No tests to run.\n')
def test_no_tests_found_3(self):
details, err, _ = logging_run(['--run-chunk', '5:400', 'foo/bar.html'], tests_included=True)
self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
self.assertContains(err, 'No tests to run.\n')
def test_natural_order(self):
tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
tests_run = get_tests_run(['--order=natural'] + tests_to_run)
self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)
def test_natural_order_test_specified_multiple_times(self):
tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
tests_run = get_tests_run(['--order=natural'] + tests_to_run)
self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)
def test_random_order(self):
tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
tests_run = get_tests_run(['--order=random'] + tests_to_run)
self.assertEqual(sorted(tests_to_run), sorted(tests_run))
def test_random_daily_seed_order(self):
tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
tests_run = get_tests_run(['--order=random-seeded'] + tests_to_run)
self.assertEqual(sorted(tests_to_run), sorted(tests_run))
def test_random_order_test_specified_multiple_times(self):
tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
tests_run = get_tests_run(['--order=random'] + tests_to_run)
self.assertEqual(tests_run.count('passes/audio.html'), 2)
self.assertEqual(tests_run.count('passes/args.html'), 2)
def test_no_order(self):
tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
tests_run = get_tests_run(['--order=none'] + tests_to_run)
self.assertEqual(tests_to_run, tests_run)
def test_no_order_test_specified_multiple_times(self):
tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
tests_run = get_tests_run(['--order=none'] + tests_to_run)
self.assertEqual(tests_to_run, tests_run)
def test_no_order_with_directory_entries_in_natural_order(self):
tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
tests_run = get_tests_run(['--order=none'] + tests_to_run)
self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])
def test_repeat_each(self):
tests_to_run = ['passes/image.html', 'passes/text.html']
tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])
def test_ignore_flag(self):
# Note that passes/image.html is expected to be run since we specified it directly.
tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
self.assertFalse('passes/text.html' in tests_run)
self.assertTrue('passes/image.html' in tests_run)
def test_skipped_flag(self):
tests_run = get_tests_run(['passes'])
self.assertFalse('passes/skipped/skip.html' in tests_run)
num_tests_run_by_default = len(tests_run)
# Check that nothing changes when we specify skipped=default.
self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
num_tests_run_by_default)
# Now check that we run one more test (the skipped one).
tests_run = get_tests_run(['--skipped=ignore', 'passes'])
self.assertTrue('passes/skipped/skip.html' in tests_run)
self.assertEqual(len(tests_run), num_tests_run_by_default + 1)
# Now check that we only run the skipped test.
self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])
# Now check that we don't run anything.
self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])
def test_iterations(self):
tests_to_run = ['passes/image.html', 'passes/text.html']
tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])
def test_repeat_each_iterations_num_tests(self):
# The total number of tests should be: number_of_tests *
# repeat_each * iterations
host = MockHost()
_, err, _ = logging_run(
['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
tests_included=True, host=host)
self.assertContains(err, "All 16 tests ran as expected (8 passed, 8 didn't).\n")
def test_run_chunk(self):
# Test that we actually select the right chunk
all_tests_run = get_tests_run(['passes', 'failures'])
chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
self.assertEqual(all_tests_run[4:8], chunk_tests_run)
# Test that we wrap around if the number of tests is not evenly divisible by the chunk size
tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)
def test_run_part(self):
# Test that we actually select the right part
tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)
# Test that we wrap around if the number of tests is not evenly divisible by the chunk size
# (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
# last part repeats the first two tests).
chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)
def test_run_singly(self):
batch_tests_run = get_test_batches(['--run-singly'])
for batch in batch_tests_run:
self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))
def test_skip_failing_tests(self):
# This tests that we skip both known failing and known flaky tests. Because there are
# no known flaky tests in the default test_expectations, we add additional expectations.
host = MockHost()
host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')
batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
has_passes_text = False
for batch in batches:
self.assertFalse('failures/expected/text.html' in batch)
self.assertFalse('passes/image.html' in batch)
has_passes_text = has_passes_text or ('passes/text.html' in batch)
self.assertTrue(has_passes_text)
def test_single_file(self):
tests_run = get_tests_run(['passes/text.html'])
self.assertEqual(tests_run, ['passes/text.html'])
def test_single_file_with_prefix(self):
tests_run = get_tests_run(['LayoutTests/passes/text.html'])
self.assertEqual(['passes/text.html'], tests_run)
def test_single_skipped_file(self):
tests_run = get_tests_run(['failures/expected/keybaord.html'])
self.assertEqual([], tests_run)
def test_stderr_is_saved(self):
host = MockHost()
self.assertTrue(passing_run(host=host))
self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
'stuff going to stderr')
def test_test_list(self):
host = MockHost()
filename = '/tmp/foo.txt'
host.filesystem.write_text_file(filename, 'passes/text.html')
tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
self.assertEqual(['passes/text.html'], tests_run)
host.filesystem.remove(filename)
details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
self.assertNotEmpty(err)
def test_test_list_with_prefix(self):
host = MockHost()
filename = '/tmp/foo.txt'
host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
self.assertEqual(['passes/text.html'], tests_run)
def test_smoke_test(self):
host = MockHost()
smoke_test_filename = test.LAYOUT_TEST_DIR + '/SmokeTests'
host.filesystem.write_text_file(smoke_test_filename, 'passes/text.html\n')
# Test the default smoke testing.
tests_run = get_tests_run(['--smoke'], host=host)
self.assertEqual(['passes/text.html'], tests_run)
# Test running the smoke tests plus some manually-specified tests.
tests_run = get_tests_run(['--smoke', 'passes/image.html'], host=host)
self.assertEqual(['passes/image.html', 'passes/text.html'], tests_run)
# Test running the smoke tests plus some manually-specified tests.
tests_run = get_tests_run(['--no-smoke', 'passes/image.html'], host=host)
self.assertEqual(['passes/image.html'], tests_run)
# Test that we don't run just the smoke tests by default on a normal test port.
tests_run = get_tests_run([], host=host)
self.assertNotEqual(['passes/text.html'], tests_run)
# Create a port that does run only the smoke tests by default, and verify that works as expected.
port_obj = host.port_factory.get('test')
port_obj.default_smoke_test_only = lambda: True
tests_run = get_tests_run([], host=host, port_obj=port_obj)
self.assertEqual(['passes/text.html'], tests_run)
# Verify that --no-smoke continues to work on a smoke-by-default port.
tests_run = get_tests_run(['--no-smoke'], host=host, port_obj=port_obj)
self.assertNotEqual(['passes/text.html'], tests_run)
def test_missing_and_unexpected_results(self):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
details, err, _ = logging_run(['--no-show-results', '--retry-failures',
'failures/expected/missing_image.html',
'failures/unexpected/missing_text.html',
'failures/unexpected/text-image-checksum.html'],
tests_included=True, host=host)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 2)
json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
self.assertTrue(json_string.find('"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","is_unexpected":true') != -1)
self.assertTrue(json_string.find('"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_unexpected":true') != -1)
self.assertTrue(json_string.find('"num_regressions":2') != -1)
self.assertTrue(json_string.find('"num_flaky":0') != -1)
def test_different_failure_on_retry(self):
# This tests that if a test fails two different ways -- both unexpected
# -- we treat it as a failure rather than a flaky result. We use the
# initial failure for simplicity and consistency w/ the flakiness
# dashboard, even if the second failure is worse.
details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/text_then_crash.html'], tests_included=True)
self.assertEqual(details.exit_code, 1)
self.assertEqual(details.summarized_failing_results['tests']['failures']['unexpected']['text_then_crash.html']['actual'],
'TEXT CRASH')
# If we get a test that fails two different ways -- but the second one is expected --
# we should treat it as a flaky result and report the initial unexpected failure type
# to the dashboard. However, the test should be considered passing.
details, err, _ = logging_run(['--retry-failures', 'failures/expected/crash_then_text.html'], tests_included=True)
self.assertEqual(details.exit_code, 0)
self.assertEqual(details.summarized_failing_results['tests']['failures']['expected']['crash_then_text.html']['actual'],
'CRASH FAIL')
    def test_pixel_test_directories(self):
        # Both tests have a failing checksum. We include only the first directory
        # in pixel tests, so only that test should fail.
        host = MockHost()
args = ['--pixel-tests', '--retry-failures', '--pixel-test-directory', 'failures/unexpected/pixeldir',
'failures/unexpected/pixeldir/image_in_pixeldir.html',
'failures/unexpected/image_not_in_pixeldir.html']
details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)
self.assertEqual(details.exit_code, 1)
expected_token = '"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE","is_unexpected":true'
json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
self.assertTrue(json_string.find(expected_token) != -1)
def test_crash_with_stderr(self):
host = MockHost()
_, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true,"is_unexpected":true') != -1)
def test_no_image_failure_with_image_diff(self):
host = MockHost()
_, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)
def test_exit_after_n_failures_upload(self):
host = MockHost()
details, regular_output, user = logging_run(
['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
tests_included=True, host=host)
        # The incremental results file must no longer exist: it was generated and then deleted.
self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))
self.assertEqual(details.exit_code, test_run_results.EARLY_EXIT_STATUS)
# This checks that passes/text.html is considered SKIPped.
self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
# This checks that we told the user we bailed out.
self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())
# This checks that neither test ran as expected.
# FIXME: This log message is confusing; tests that were skipped should be called out separately.
self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())
def test_exit_after_n_failures(self):
# Unexpected failures should result in tests stopping.
tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)
# But we'll keep going for expected ones.
tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)
def test_exit_after_n_crashes(self):
# Unexpected crashes should result in tests stopping.
tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
self.assertEqual(['failures/unexpected/crash.html'], tests_run)
# Same with timeouts.
tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
self.assertEqual(['failures/unexpected/timeout.html'], tests_run)
# But we'll keep going for expected ones.
tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)
def test_results_directory_absolute(self):
# We run a configuration that should fail, to generate output, then
# look for what the output results url was.
host = MockHost()
with host.filesystem.mkdtemp() as tmpdir:
_, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])
def test_results_directory_default(self):
# We run a configuration that should fail, to generate output, then
# look for what the output results url was.
# This is the default location.
_, _, user = logging_run(tests_included=True)
self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
def test_results_directory_relative(self):
# We run a configuration that should fail, to generate output, then
# look for what the output results url was.
host = MockHost()
host.filesystem.maybe_make_directory('/tmp/cwd')
host.filesystem.chdir('/tmp/cwd')
_, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])
def test_retrying_default_value(self):
host = MockHost()
details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
self.assertEqual(details.exit_code, 1)
self.assertFalse('Retrying' in err.getvalue())
host = MockHost()
details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7)  # FIXME: This should be a constant in test.py.
self.assertTrue('Retrying' in err.getvalue())
def test_retrying_default_value_test_list(self):
host = MockHost()
filename = '/tmp/foo.txt'
host.filesystem.write_text_file(filename, 'failures/unexpected/text-image-checksum.html\nfailures/unexpected/crash.html')
details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
self.assertEqual(details.exit_code, 2)
self.assertFalse('Retrying' in err.getvalue())
host = MockHost()
filename = '/tmp/foo.txt'
host.filesystem.write_text_file(filename, 'failures')
details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7)
self.assertTrue('Retrying' in err.getvalue())
def test_retrying_and_flaky_tests(self):
host = MockHost()
details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/flaky'], tests_included=True, host=host)
self.assertEqual(details.exit_code, 0)
self.assertTrue('Retrying' in err.getvalue())
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))
self.assertEqual(len(host.user.opened_urls), 0)
# Now we test that --clobber-old-results does remove the old entries and the old retries,
# and that we don't retry again.
host = MockHost()
details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
self.assertEqual(details.exit_code, 1)
self.assertTrue('Clobbering old results' in err.getvalue())
self.assertTrue('flaky/text.html' in err.getvalue())
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
self.assertFalse(host.filesystem.exists('retries'))
self.assertEqual(len(host.user.opened_urls), 1)
def test_retrying_crashed_tests(self):
host = MockHost()
details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/crash.html'], tests_included=True, host=host)
self.assertEqual(details.exit_code, 1)
self.assertTrue('Retrying' in err.getvalue())
def test_retrying_leak_tests(self):
host = MockHost()
details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/leak.html'], tests_included=True, host=host)
self.assertEqual(details.exit_code, 1)
self.assertTrue('Retrying' in err.getvalue())
def test_retrying_force_pixel_tests(self):
host = MockHost()
details, err, _ = logging_run(['--no-pixel-tests', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
self.assertEqual(details.exit_code, 1)
self.assertTrue('Retrying' in err.getvalue())
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
json = parse_full_results(json_string)
self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
{"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "is_unexpected": True})
self.assertFalse(json["pixel_tests_enabled"])
self.assertEqual(details.enabled_pixel_tests_in_retry, True)
def test_retrying_uses_retries_directory(self):
host = MockHost()
details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
self.assertEqual(details.exit_code, 1)
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
def test_run_order__inline(self):
# These next tests test that we run the tests in ascending alphabetical
# order per directory. HTTP tests are sharded separately from other tests,
# so we have to test both.
tests_run = get_tests_run(['-i', 'passes/virtual_passes', 'passes'])
self.assertEqual(tests_run, sorted(tests_run))
tests_run = get_tests_run(['http/tests/passes'])
self.assertEqual(tests_run, sorted(tests_run))
def test_virtual(self):
self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
'virtual/passes/text.html', 'virtual/passes/args.html']))
def test_reftest_run(self):
tests_run = get_tests_run(['passes/reftest.html'])
self.assertEqual(['passes/reftest.html'], tests_run)
def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
self.assertEqual(['passes/reftest.html'], tests_run)
def test_reftest_expected_html_should_be_ignored(self):
tests_run = get_tests_run(['passes/reftest-expected.html'])
self.assertEqual([], tests_run)
def test_reftest_driver_should_run_expected_html(self):
tests_run = get_test_results(['passes/reftest.html'])
self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])
def test_reftest_driver_should_run_expected_mismatch_html(self):
tests_run = get_test_results(['passes/mismatch.html'])
self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])
def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
host = MockHost()
_, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
results = parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
        self.assertEqual(results["tests"]["reftests"]["foo"]["unlistedtest.html"]["actual"], "MISSING")
self.assertEqual(results["num_regressions"], 5)
self.assertEqual(results["num_flaky"], 0)
def test_reftest_crash(self):
test_results = get_test_results(['failures/unexpected/crash-reftest.html'])
# The list of references should be empty since the test crashed and we didn't run any references.
self.assertEqual(test_results[0].references, [])
def test_reftest_with_virtual_reference(self):
_, err, _ = logging_run(['--details', 'virtual/virtual_passes/passes/reftest.html'], tests_included=True)
self.assertTrue('ref: virtual/virtual_passes/passes/reftest-expected.html' in err.getvalue())
def test_additional_platform_directory(self):
self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))
def test_additional_expectations(self):
host = MockHost()
host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
tests_included=True, host=host))
@staticmethod
def has_test_of_type(tests, type):
return [test for test in tests if type in test]
def test_platform_directories_ignored_when_searching_for_tests(self):
tests_run = get_tests_run(['--platform', 'test-mac-leopard'])
self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)
def test_platform_directories_not_searched_for_additional_tests(self):
tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)
def test_output_diffs(self):
# Test to ensure that we don't generate -wdiff.html or -pretty.html if wdiff and PrettyPatch
# aren't available.
host = MockHost()
_, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
written_files = host.filesystem.written_files
self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys()))
self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))
full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
self.assertEqual(full_results['has_wdiff'], False)
self.assertEqual(full_results['has_pretty_patch'], False)
def test_unsupported_platform(self):
stdout = StringIO.StringIO()
stderr = StringIO.StringIO()
res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)
self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
self.assertEqual(stdout.getvalue(), '')
self.assertTrue('unsupported platform' in stderr.getvalue())
def test_build_check(self):
# By using a port_name for a different platform than the one we're running on, the build check should always fail.
if sys.platform == 'darwin':
port_name = 'linux-x86'
else:
port_name = 'mac-lion'
out = StringIO.StringIO()
err = StringIO.StringIO()
self.assertEqual(run_webkit_tests.main(['--platform', port_name, 'fast/harness/results.html'], out, err), test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
def test_verbose_in_child_processes(self):
# When we actually run multiple processes, we may have to reconfigure logging in the
# child process (e.g., on win32) and we need to make sure that works and we still
# see the verbose log output. However, we can't use logging_run() because using
        # outputcapture to capture stdout and stderr later results in a nonpicklable host.
# Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
if not self.should_test_processes:
return
options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
host = MockHost()
port_obj = host.port_factory.get(port_name=options.platform, options=options)
logging_stream = StringIO.StringIO()
run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
self.assertTrue('text.html passed' in logging_stream.getvalue())
self.assertTrue('image.html passed' in logging_stream.getvalue())
def disabled_test_driver_logging(self):
# FIXME: Figure out how to either use a mock-test port to
        # get output or make mock ports work again.
host = Host()
_, err, _ = logging_run(['--platform', 'mock-win', '--driver-logging', 'fast/harness/results.html'],
tests_included=True, host=host)
self.assertTrue('OUT:' in err.getvalue())
def test_write_full_results_to(self):
host = MockHost()
details, _, _ = logging_run(['--write-full-results-to', '/tmp/full_results.json'], host=host)
self.assertEqual(details.exit_code, 0)
self.assertTrue(host.filesystem.exists('/tmp/full_results.json'))
class EndToEndTest(unittest.TestCase):
def test_reftest_with_two_notrefs(self):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
_, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
file_list = host.filesystem.written_files.keys()
json_string = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
json = parse_full_results(json_string)
self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])
self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
{"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "is_unexpected": True})
self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
{"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "is_unexpected": True})
self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
{"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "is_unexpected": True})
class RebaselineTest(unittest.TestCase, StreamTestingMixin):
def assertBaselines(self, file_list, file, extensions, err):
"assert that the file_list contains the baselines."""
for ext in extensions:
baseline = file + "-expected" + ext
baseline_msg = 'Writing new expected result "%s"\n' % baseline
self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
self.assertContains(err, baseline_msg)
# FIXME: Add tests to ensure that we're *not* writing baselines when we're not
# supposed to be.
def test_reset_results(self):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
details, err, _ = logging_run(
['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
tests_included=True, host=host, new_results=True)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 0)
self.assertEqual(len(file_list), 8)
self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)
def test_missing_results(self):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
details, err, _ = logging_run(['--no-show-results',
'failures/unexpected/missing_text.html',
'failures/unexpected/missing_image.html',
'failures/unexpected/missing_render_tree_dump.html'],
tests_included=True, host=host, new_results=True)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 3)
self.assertEqual(len(file_list), 10)
self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)
def test_missing_results_not_added_if_expected_missing(self):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
options, parsed_args = run_webkit_tests.parse_args([])
port = test.TestPort(host, options=options)
host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
Bug(foo) failures/unexpected/missing_text.html [ Missing ]
Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
""")
details, err, _ = logging_run(['--no-show-results',
'failures/unexpected/missing_text.html',
'failures/unexpected/missing_image.html',
'failures/unexpected/missing_audio.html',
'failures/unexpected/missing_render_tree_dump.html'],
tests_included=True, host=host, new_results=True, port_obj=port)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 0)
self.assertEqual(len(file_list), 7)
self.assertFalse(any('failures/unexpected/missing_text-expected' in file for file in file_list))
self.assertFalse(any('failures/unexpected/missing_image-expected' in file for file in file_list))
self.assertFalse(any('failures/unexpected/missing_render_tree_dump-expected' in file for file in file_list))
def test_missing_results_not_added_if_expected_missing_and_reset_results(self):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--reset-results'])
port = test.TestPort(host, options=options)
host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
Bug(foo) failures/unexpected/missing_text.html [ Missing ]
Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
""")
details, err, _ = logging_run(['--pixel-tests', '--reset-results',
'failures/unexpected/missing_text.html',
'failures/unexpected/missing_image.html',
'failures/unexpected/missing_audio.html',
'failures/unexpected/missing_render_tree_dump.html'],
tests_included=True, host=host, new_results=True, port_obj=port)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 0)
self.assertEqual(len(file_list), 11)
self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
self.assertBaselines(file_list, "failures/unexpected/missing_image", [".png"], err)
self.assertBaselines(file_list, "failures/unexpected/missing_render_tree_dump", [".txt"], err)
def test_new_baseline(self):
# Test that we update the platform expectations in the version-specific directories
# for both existing and new baselines.
host = MockHost()
details, err, _ = logging_run(
['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
tests_included=True, host=host, new_results=True)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 0)
self.assertEqual(len(file_list), 8)
self.assertBaselines(file_list,
"platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
self.assertBaselines(file_list,
"platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)
class PortTest(unittest.TestCase):
def assert_mock_port_works(self, port_name, args=[]):
self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))
def disabled_test_mac_lion(self):
self.assert_mock_port_works('mac-lion')
class MainTest(unittest.TestCase):
def test_exception_handling(self):
orig_run_fn = run_webkit_tests.run
# unused args pylint: disable=W0613
def interrupting_run(port, options, args, stderr):
raise KeyboardInterrupt
def successful_run(port, options, args, stderr):
class FakeRunDetails(object):
exit_code = test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
return FakeRunDetails()
def exception_raising_run(port, options, args, stderr):
assert False
stdout = StringIO.StringIO()
stderr = StringIO.StringIO()
try:
run_webkit_tests.run = interrupting_run
res = run_webkit_tests.main([], stdout, stderr)
self.assertEqual(res, test_run_results.INTERRUPTED_EXIT_STATUS)
run_webkit_tests.run = successful_run
res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
run_webkit_tests.run = exception_raising_run
res = run_webkit_tests.main([], stdout, stderr)
self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
finally:
run_webkit_tests.run = orig_run_fn
def test_buildbot_results_are_printed_on_early_exit(self):
# unused args pylint: disable=W0613
stdout = StringIO.StringIO()
stderr = StringIO.StringIO()
res = run_webkit_tests.main(['--platform', 'test', '--exit-after-n-failures', '1',
'failures/unexpected/missing_text.html',
'failures/unexpected/missing_image.html'],
stdout, stderr)
self.assertEqual(res, test_run_results.EARLY_EXIT_STATUS)
self.assertEqual(stdout.getvalue(),
('\n'
'Regressions: Unexpected missing results (1)\n'
' failures/unexpected/missing_image.html [ Missing ]\n\n'))
| {
"content_hash": "041b83fee64998197457f48344ba61a9",
"timestamp": "",
"source": "github",
"line_count": 1030,
"max_line_length": 216,
"avg_line_length": 53.34077669902913,
"alnum_prop": 0.6586884112047469,
"repo_name": "jtg-gg/blink",
"id": "7ac88d3e8789ed23aeb9df2ea38b3ad2ea372855",
"size": "56608",
"binary": false,
"copies": "12",
"ref": "refs/heads/dev12-m41",
"path": "Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "28126"
},
{
"name": "Assembly",
"bytes": "12983"
},
{
"name": "Bison",
"bytes": "64327"
},
{
"name": "C",
"bytes": "68435"
},
{
"name": "C++",
"bytes": "41623716"
},
{
"name": "CSS",
"bytes": "536676"
},
{
"name": "GLSL",
"bytes": "11578"
},
{
"name": "Groff",
"bytes": "28067"
},
{
"name": "HTML",
"bytes": "53137251"
},
{
"name": "Java",
"bytes": "66510"
},
{
"name": "JavaScript",
"bytes": "26485747"
},
{
"name": "Makefile",
"bytes": "677"
},
{
"name": "Objective-C",
"bytes": "46814"
},
{
"name": "Objective-C++",
"bytes": "378647"
},
{
"name": "PHP",
"bytes": "166434"
},
{
"name": "Perl",
"bytes": "585757"
},
{
"name": "Python",
"bytes": "3997910"
},
{
"name": "Ruby",
"bytes": "141818"
},
{
"name": "Shell",
"bytes": "8806"
},
{
"name": "XSLT",
"bytes": "49099"
}
],
"symlink_target": ""
} |
"""
The Arsenal Image Caching Service
"""
import sys
from oslo_config import cfg
from oslo_service import service
from arsenal.common import service as arsenal_service
CONF = cfg.CONF
def main():
# Parse config file and command line options, then start logging
arsenal_service.prepare_service(sys.argv)
scheduling_service = arsenal_service.ArsenalService()
launcher = service.launch(CONF, scheduling_service)
launcher.wait()
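# Note (added): how main() is invoked is not shown in this file. With this
# oslo.service launch pattern it is commonly wired up as a console_scripts
# entry point, e.g. "arsenal-director = arsenal.cmd.director:main" in setup.cfg;
# that exact entry-point name is an assumption, not taken from this repository.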
| {
"content_hash": "ec7bac78cc35776db824e506b84c6dec",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 68,
"avg_line_length": 21.523809523809526,
"alnum_prop": 0.75,
"repo_name": "lekhajee/arsenal",
"id": "f4978bf4361f3759dcd92736a18689c6f97b1df3",
"size": "1105",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "arsenal/cmd/director.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "122118"
},
{
"name": "Shell",
"bytes": "8033"
}
],
"symlink_target": ""
} |
"""This example adds a campaign ad extension to a given campaign.
To get campaigns, run get_campaigns.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: GeoLocationService.get, CampaignAdExtensionService.mutate
Api: AdWordsOnly
"""
__author__ = ('[email protected] (Kevin Winter), '
              'Joseph DiLallo')
from googleads import adwords
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
def main(client, campaign_id):
# Initialize appropriate service.
geo_location_service = client.GetService(
'GeoLocationService', version='v201406')
campaign_ad_extension_service = client.GetService(
'CampaignAdExtensionService', version='v201406')
# Construct selector and get geo location info for given addresses.
selector = {
'addresses': [
{
'streetAddress': '1600 Amphitheatre Parkway',
'cityName': 'Mountain View',
'provinceCode': 'US-CA',
'provinceName': 'California',
'postalCode': '94043',
'countryCode': 'US'
},
{
'streetAddress': u'38 avenue de l\'Opéra',
'cityName': 'Paris',
'postalCode': '75002',
'countryCode': 'FR'
}
]
}
geo_locations = geo_location_service.get(selector)
# Construct operations and add campaign ad extension.
operations = [
{
'operator': 'ADD',
'operand': {
'xsi_type': 'CampaignAdExtension',
'campaignId': campaign_id,
'adExtension': {
'xsi_type': 'LocationExtension',
'address': geo_locations[0]['address'],
'geoPoint': geo_locations[0]['geoPoint'],
'encodedLocation': geo_locations[0]['encodedLocation'],
'source': 'ADWORDS_FRONTEND',
# Optional fields.
'companyName': 'ACME Inc.',
'phoneNumber': '(650) 253-0000'
}
}
},
{
'operator': 'ADD',
'operand': {
'xsi_type': 'CampaignAdExtension',
'campaignId': campaign_id,
'adExtension': {
'xsi_type': 'LocationExtension',
'address': geo_locations[1]['address'],
'geoPoint': geo_locations[1]['geoPoint'],
'encodedLocation': geo_locations[1]['encodedLocation'],
'source': 'ADWORDS_FRONTEND'
}
}
}
]
ad_extensions = campaign_ad_extension_service.mutate(operations)
# Display results.
for ad_extension in ad_extensions['value']:
print ('Campaign ad extension with id \'%s\' and status \'%s\' was added.'
% (ad_extension['adExtension']['id'], ad_extension['status']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, CAMPAIGN_ID)
| {
"content_hash": "211bab9b7604c896c959a563de06bac7",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 78,
"avg_line_length": 32.74226804123711,
"alnum_prop": 0.5680100755667506,
"repo_name": "dietrichc/streamline-ppc-reports",
"id": "96b2a4df4bfb09f99b80abbde6229e6562b97f28",
"size": "3819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/adwords/v201406/campaign_management/add_location_extension.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2235969"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from mezzanine.conf import settings
from apps.las_shop.views import index_shop
admin.autodiscover()
# Add the urlpatterns for any custom Django applications here.
# You can also change the ``home`` view to add your own functionality
# to the project's homepage.
urlpatterns = i18n_patterns(
"",
# Change the admin prefix here to use an alternate URL for the
# admin interface, which would be marginally more secure.
("^admin/", include(admin.site.urls)),
)
if settings.USE_MODELTRANSLATION:
urlpatterns += patterns(
'',
url('^i18n/$', 'django.views.i18n.set_language', name='set_language'),
)
urlpatterns += patterns(
'',
# Cartridge URLs.
("^shop/", include("cartridge.shop.urls")),
url(r'^robokassa/', include('robokassa.urls')),
url("^account/orders/$", "cartridge.shop.views.order_history",
name="shop_order_history"),
url('', include('social.apps.django_app.urls', namespace='social')),
(r'^newsletter/', include('newsletter.urls')),
("^", include("lacesandstuff.apps.las_shop.urls")),
# We don't want to presume how your homepage works, so here are a
# few patterns you can use to set it up.
# HOMEPAGE AS STATIC TEMPLATE
# ---------------------------
# This pattern simply loads the index.html template. It isn't
# commented out like the others, so it's the default. You only need
# one homepage pattern, so if you use a different one, comment this
# one out.
url("^$", index_shop, name="home"),
# HOMEPAGE AS AN EDITABLE PAGE IN THE PAGE TREE
# ---------------------------------------------
# This pattern gives us a normal ``Page`` object, so that your
# homepage can be managed via the page tree in the admin. If you
# use this pattern, you'll need to create a page in the page tree,
# and specify its URL (in the Meta Data section) as "/", which
# is the value used below in the ``{"slug": "/"}`` part. Make
# sure to uncheck all templates for the "show in menus" field
# when you create the page, since the link to the homepage is
# always hard-coded into all the page menus that display navigation
# on the site. Also note that the normal rule of adding a custom
# template per page with the template name using the page's slug
# doesn't apply here, since we can't have a template called
# "/.html" - so for this case, the template "pages/index.html" can
# be used.
# url("^$", "mezzanine.pages.views.page", {"slug": "/"}, name="home"),
# HOMEPAGE FOR A BLOG-ONLY SITE
# -----------------------------
# This pattern points the homepage to the blog post listing page,
# and is useful for sites that are primarily blogs. If you use this
# pattern, you'll also need to set BLOG_SLUG = "" in your
# ``settings.py`` module, and delete the blog page object from the
# page tree in the admin if it was installed.
# url("^$", "mezzanine.blog.views.blog_post_list", name="home"),
# MEZZANINE'S URLS
# ----------------
# ADD YOUR OWN URLPATTERNS *ABOVE* THE LINE BELOW.
# ``mezzanine.urls`` INCLUDES A *CATCH ALL* PATTERN
# FOR PAGES, SO URLPATTERNS ADDED BELOW ``mezzanine.urls``
# WILL NEVER BE MATCHED!
# If you'd like more granular control over the patterns in
# ``mezzanine.urls``, go right ahead and take the parts you want
# from it, and use them directly below instead of using
# ``mezzanine.urls``.
("^", include("mezzanine.urls")),
# MOUNTING MEZZANINE UNDER A PREFIX
# ---------------------------------
# You can also mount all of Mezzanine's urlpatterns under a
# URL prefix if desired. When doing this, you need to define the
# ``SITE_PREFIX`` setting, which will contain the prefix. Eg:
# SITE_PREFIX = "my/site/prefix"
# For convenience, and to avoid repeating the prefix, use the
# commented out pattern below (commenting out the one above of course)
# which will make use of the ``SITE_PREFIX`` setting. Make sure to
# add the import ``from django.conf import settings`` to the top
# of this file as well.
# Note that for any of the various homepage patterns above, you'll
# need to use the ``SITE_PREFIX`` setting as well.
# ("^%s/" % settings.SITE_PREFIX, include("mezzanine.urls"))
)
# Adds ``STATIC_URL`` to the context of error pages, so that error
# pages can use JS, CSS and images.
handler404 = "mezzanine.core.views.page_not_found"
handler500 = "mezzanine.core.views.server_error"
| {
"content_hash": "f6cfeb24e92e2278817178b0c0d01370",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 78,
"avg_line_length": 40.66379310344828,
"alnum_prop": 0.6525333898664405,
"repo_name": "dfirst/romasshop",
"id": "8065c0c43b7fd43950361d8079a571bc2f4c5662",
"size": "4717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lacesandstuff/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "328015"
},
{
"name": "HTML",
"bytes": "140420"
},
{
"name": "JavaScript",
"bytes": "85104"
},
{
"name": "Python",
"bytes": "40917"
}
],
"symlink_target": ""
} |
class LetterStrings:
def sum(self, s):
total = 0
for string in s:
for letter in string:
if letter != '-':
total += 1
return total
| {
"content_hash": "e8d3ddee5fd565ecab771d97d7483615",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 34,
"avg_line_length": 23.11111111111111,
"alnum_prop": 0.42788461538461536,
"repo_name": "mikefeneley/topcoder",
"id": "875a450f99a11ecf84753b4f9ec7a5c0e5f61f7b",
"size": "209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/SRM-202/letter_strings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53468"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
class AnalysisResults(OrderedDict):
def __init__(self, result):
super(AnalysisResults, self).__init__()
if result:
try:
self["request_id"] = result.get("requestId", None)
self["metadata"] = Metadata(result.get("metadata", {}))
self["image_type"] = ImageType(result.get("imageType", {}))
self["color"] = Color(result.get("color", {}))
self["adult"] = Adult(result.get("adult", {}))
self["categories"] = Categories(result.get("categories", {}))
self["faces"] = Faces(result.get("faces", {}))
self["tags"] = Tags(result.get("tags", {}))
self["description"] = Description(result.get("description", {}))
except KeyError:
raise KeyError("requestId missing in dict" + str(result))
@property
def request_id(self):
return self["request_id"]
@request_id.setter
def request_id(self, value):
self["request_id"] = value
@property
def metadata(self):
return self["metadata"]
@metadata.setter
def metadata(self, value):
self["metadata"] = value
@property
def image_type(self):
return self["image_type"]
@image_type.setter
def image_type(self, value):
self["image_type"] = value
@property
def color(self):
return self["color"]
@color.setter
def color(self, value):
self["color"] = value
@property
def adult(self):
return self["adult"]
@adult.setter
def adult(self, value):
self["adult"] = value
@property
def categories(self):
return self["categories"]
@categories.setter
def categories(self, value):
self["categories"] = value
@property
def faces(self):
return self["faces"]
@faces.setter
def faces(self, value):
self["faces"] = value
@property
def tags(self):
return self["tags"]
@tags.setter
def tags(self, value):
self["tags"] = value
@property
def description(self):
return self["description"]
@description.setter
def description(self, value):
self["description"] = value
class Metadata(OrderedDict):
def __init__(self, metadata={}):
super(Metadata, self).__init__()
self["width"] = metadata.get("width", None)
self["height"] = metadata.get("height", None)
self["format"] = metadata.get("format", None)
@property
def width(self):
return self["width"]
@width.setter
def width(self, value):
self["width"] = value
@property
def height(self):
return self["height"]
@height.setter
def height(self, value):
self["height"] = value
@property
def format(self):
return self["format"]
@format.setter
def format(self, value):
self["format"] = value
class ImageType(OrderedDict):
def __init__(self, image_type={}):
super(ImageType, self).__init__()
self["clip_art_type"] = image_type.get("clipArtType", None)
self["line_drawing_type"] = image_type.get("lineDrawingType", None)
@property
def clip_art_type(self):
return self["clip_art_type"]
@clip_art_type.setter
def clip_art_type(self, value):
self["clip_art_type"] = value
@property
def line_drawing_type(self):
return self["line_drawing_type"]
@line_drawing_type.setter
def line_drawing_type(self, value):
self["line_drawing_type"] = value
class Color(OrderedDict):
def __init__(self, color={}):
super(Color, self).__init__()
self["accent_color"] = color.get("accentColor", None)
self["dominant_color_foreground"] = color.get("dominantColorForeground", None)
self["dominant_color_background"] = color.get("dominantColorBackground", None)
self["dominant_colors"] = color.get("dominantColors", None)
self["is_bw_img"] = color.get("isBWImg", None)
@property
def accent_color(self):
return self["accent_color"]
@accent_color.setter
def accent_color(self, value):
self["accent_color"] = value
@property
def dominant_color_foreground(self):
return self["dominant_color_foreground"]
@dominant_color_foreground.setter
def dominant_color_foreground(self, value):
self["dominant_color_foreground"] = value
@property
def dominant_color_background(self):
return self["dominant_color_background"]
@dominant_color_background.setter
def dominant_color_background(self, value):
self["dominant_color_background"] = value
@property
def dominant_colors(self):
return self["dominant_colors"]
@dominant_colors.setter
def dominant_colors(self, value):
self["dominant_colors"] = value
@property
def is_bw_img(self):
return self["is_bw_img"]
@is_bw_img.setter
def is_bw_img(self, value):
self["is_bw_img"] = value
class Adult(OrderedDict):
def __init__(self, adult={}):
super(Adult, self).__init__()
self["is_adult_content"] = adult.get("isAdultContent", None)
self["is_racy_content"] = adult.get("isRacyContent", None)
self["adult_score"] = adult.get("adultScore", None)
self["racy_score"] = adult.get("racyScore", None)
@property
def is_adult_content(self):
return self["is_adult_content"]
@is_adult_content.setter
def is_adult_content(self, value):
self["is_adult_content"] = value
@property
def is_racy_content(self):
return self["is_racy_content"]
@is_racy_content.setter
def is_racy_content(self, value):
self["is_racy_content"] = value
@property
def adult_score(self):
return self["adult_score"]
@adult_score.setter
def adult_score(self, value):
self["adult_score"] = value
@property
def racy_score(self):
return self["racy_score"]
@racy_score.setter
def racy_score(self, value):
self["racy_score"] = value
class Rectangle(OrderedDict):
def __init__(self, rectangle={}):
super(Rectangle, self).__init__()
self["width"] = rectangle.get("width", None)
self["height"] = rectangle.get("height", None)
self["left"] = rectangle.get("left", None)
self["top"] = rectangle.get("top", None)
@property
def width(self):
return self["width"]
@width.setter
def width(self, value):
self["width"] = value
@property
def height(self):
return self["height"]
@height.setter
def height(self, value):
self["height"] = value
@property
def left(self):
return self["left"]
@left.setter
def left(self, value):
self["left"] = value
@property
def top(self):
return self["top"]
@top.setter
def top(self, value):
self["top"] = value
class Face(OrderedDict):
def __init__(self, face={}):
super(Face, self).__init__()
self["age"] = face.get("age", None)
self["gender"] = face.get("gender", None)
self["face_rectangle"] = Rectangle(face['faceRectangle'])
@property
def age(self):
return self["age"]
@age.setter
def age(self, value):
self["age"] = value
@property
def gender(self):
return self["gender"]
@gender.setter
def gender(self, value):
self["gender"] = value
@property
def face_rectangle(self):
return self["face_rectangle"]
@face_rectangle.setter
def face_rectangle(self, value):
self["face_rectangle"] = value
class Faces(list):
def __init__(self, faces=[]):
for face in faces:
self.append(face)
class Tag(OrderedDict):
def __init__(self, tag={}):
super(Tag, self).__init__()
self["name"] = tag.get("name", None)
self["confidence"] = tag.get("confidence", None)
self["hint"] = tag.get("hint", None)
@property
def name(self):
return self["name"]
@name.setter
def name(self, value):
self["name"] = value
@property
def confidence(self):
return self["confidence"]
@confidence.setter
def confidence(self, value):
self["confidence"] = value
@property
def hint(self):
return self["hint"]
@hint.setter
def hint(self, value):
self["hint"] = value
class Tags(list):
def __init__(self, tags=[]):
for tag in tags:
self.append(tag)
class Captions(list):
def __init__(self, captions=[]):
for caption in captions:
self.append(caption)
class Caption(OrderedDict):
def __init__(self, description={}):
super(Caption, self).__init__()
self["text"] = description.get(description.get("text", []), None)
self["confidence"] = description.get(description.get("confidence", []), None)
@property
def text(self):
return self["text"]
@text.setter
def text(self, value):
self["text"] = value
@property
def confidence(self):
return self["confidence"]
@confidence.setter
def confidence(self, value):
self["confidence"] = value
class Description(OrderedDict):
def __init__(self, description={}):
super(Description, self).__init__()
self["tags"] = Tags(description.get("tags", []))
self["captions"] = Captions(description.get("captions", []))
@property
def tags(self):
return self["tags"]
@tags.setter
def tags(self, value):
self["tags"] = value
@property
def captions(self):
return self["captions"]
@captions.setter
def captions(self, value):
self["captions"] = value
class NameScorePair(OrderedDict):
def __init__(self, name_score_pair={}):
super(NameScorePair, self).__init__()
self["name"] = name_score_pair.get("name", None)
self["score"] = name_score_pair.get("score", None)
@property
def name(self):
return self["name"]
@name.setter
def name(self, value):
self["name"] = value
@property
def score(self):
return self["score"]
@score.setter
def score(self, value):
self["score"] = value
class Category(NameScorePair):
def __init__(self, description={}):
super(Category, self).__init__(description)
self["detail"] = description.get("detail", None)
@property
def text(self):
return self["text"]
@text.setter
def text(self, value):
self["text"] = value
# TODO: the list wrappers below store raw dicts; they could wrap each item in its typed class (Face, Tag, Caption, Category).
class Categories(list):
def __init__(self, categories=[]):
for category in categories:
self.append(category)
| {
"content_hash": "7306405218ef8b6fb9b25743e08c7fd9",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 86,
"avg_line_length": 23.66017316017316,
"alnum_prop": 0.577989205013265,
"repo_name": "chidochipotle/oxford",
"id": "90a0017ff902fd94e0b0f4e84f652074e397a2fb",
"size": "10999",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "oxford/visionmodel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94101"
}
],
"symlink_target": ""
} |
import sys
import glob
import serial
def serial_ports():
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
#print ports
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
if __name__ == '__main__':
print(serial_ports())
| {
"content_hash": "4333552fefe99a5f307324fb881b7aa6",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 79,
"avg_line_length": 26.783783783783782,
"alnum_prop": 0.5832492431886983,
"repo_name": "mox17/teensy-car-sensors",
"id": "32c8cb3f135a6281a145b2e718231ca1cc90b84f",
"size": "1013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RPi/findserialport.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "7490"
},
{
"name": "C",
"bytes": "5739"
},
{
"name": "C++",
"bytes": "46861"
},
{
"name": "Python",
"bytes": "17214"
},
{
"name": "Shell",
"bytes": "256"
}
],
"symlink_target": ""
} |
import os
from django import VERSION as DJANGO_VERSION
from django.utils.translation import ugettext_lazy as _
import dj_database_url
from prettyconf import config
# If True, the django-modeltranslation will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False
########################
# MAIN DJANGO SETTINGS #
########################
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = config("FANFICAST_ALLOWED_HOSTS", cast=config.list)
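# For illustration only, a hypothetical environment value split by prettyconf's
# ``config.list`` cast:
#   FANFICAST_ALLOWED_HOSTS=example.com,www.example.com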
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "pt-br"
# Supported languages
LANGUAGES = (
('pt-br', _(u'Português')),
)
DEBUG = config("FANFICAST_DEBUG", cast=config.boolean)
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASE_URL = config("FANFICAST_DATABASE_URL")
DATABASES = {
'default': dj_database_url.parse(DATABASE_URL, conn_max_age=600),
}
#########
# PATHS #
#########
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(PROJECT_ROOT, "templates")
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.static",
"django.template.context_processors.media",
"django.template.context_processors.request",
"django.template.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
],
"builtins": [
"mezzanine.template.loader_tags",
],
},
},
]
if DJANGO_VERSION < (1, 9):
del TEMPLATES[0]["OPTIONS"]["builtins"]
SECRET_KEY = config("FANFICAST_SECRET_KEY")
if DEBUG:
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"), 'vendor')
else:
STATIC_ROOT = os.path.join(os.path.dirname(PROJECT_ROOT), 'public', 'static')
MEDIA_ROOT = os.path.join(STATIC_ROOT, 'media')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.pages",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.galleries",
"mezzanine.twitter",
"django_extensions",
'podcast',
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
# Uncomment if using internationalisation or localisation
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future,
# since at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
# Instead of doing "from .local_settings import *", we use exec so that
# local_settings has full access to everything defined in this module.
# Also force into sys.modules so it's visible to Django's autoreload.
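# For illustration, a hypothetical local_settings.py could simply override a
# few values, e.g.:
#   DEBUG = True
#   ALLOWED_HOSTS = ["localhost", "127.0.0.1"]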
f = os.path.join(PROJECT_APP_PATH, "local_settings.py")
if os.path.exists(f):
import sys
import imp
module_name = "%s.local_settings" % PROJECT_APP
module = imp.new_module(module_name)
module.__file__ = f
sys.modules[module_name] = module
exec(open(f, "rb").read())
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
| {
"content_hash": "3bd003210350cee40a8f238f87bb6be1",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 81,
"avg_line_length": 32.26953125,
"alnum_prop": 0.6910785619174434,
"repo_name": "lamenezes/fanficast",
"id": "fd08425dc2e5076ac6cc4f695566a82bb96da1cb",
"size": "8279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fanficast/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "100483"
},
{
"name": "HTML",
"bytes": "54807"
},
{
"name": "JavaScript",
"bytes": "133458"
},
{
"name": "Makefile",
"bytes": "435"
},
{
"name": "Python",
"bytes": "21167"
}
],
"symlink_target": ""
} |
import random
import requests
import string
import time
from nose.tools import eq_ as eq
def assert_raises(excClass, callableObj, *args, **kwargs):
"""
Like unittest.TestCase.assertRaises, but returns the exception.
"""
try:
callableObj(*args, **kwargs)
except excClass as e:
return e
else:
if hasattr(excClass, '__name__'):
excName = excClass.__name__
else:
excName = str(excClass)
raise AssertionError("%s not raised" % excName)
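# Illustrative (hypothetical) use, showing why returning the exception is handy:
#   e = assert_raises(ValueError, int, 'not-a-number')
#   eq(type(e), ValueError)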
def generate_random(size, part_size=5*1024*1024):
"""
Generate the specified number of bytes of random data.
(actually each MB is a repetition of the first KB)
"""
chunk = 1024
allowed = string.ascii_letters
for x in range(0, size, part_size):
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in xrange(chunk)])
s = ''
left = size - x
this_part_size = min(left, part_size)
for y in range(this_part_size / chunk):
s = s + strpart
s = s + strpart[:(this_part_size % chunk)]
yield s
if (x == size):
return
# syncs all the regions except for the one passed in
def region_sync_meta(targets, region):
for (k, r) in targets.iteritems():
if r == region:
continue
conf = r.conf
if conf.sync_agent_addr:
ret = requests.post('http://{addr}:{port}/metadata/incremental'.format(addr = conf.sync_agent_addr, port = conf.sync_agent_port))
eq(ret.status_code, 200)
if conf.sync_meta_wait:
time.sleep(conf.sync_meta_wait)
def get_grantee(policy, permission):
'''
Given an object/bucket policy, extract the grantee with the required permission
'''
for g in policy.acl.grants:
if g.permission == permission:
return g.id
| {
"content_hash": "1a8032841210a99f20d774ef35d41dcd",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 141,
"avg_line_length": 29.73015873015873,
"alnum_prop": 0.594233849439402,
"repo_name": "kalantal/s3-tests",
"id": "24f7d87d5c9c12b1137c16d2a2bc3195b778a9a9",
"size": "1873",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "s3-tests/functional/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "8971"
},
{
"name": "Python",
"bytes": "1614045"
},
{
"name": "Shell",
"bytes": "104032"
}
],
"symlink_target": ""
} |
import logging
from collections import defaultdict
from errno import ENOENT
from stat import S_IFDIR, S_IFLNK, S_IFREG, S_ISDIR, S_ISREG
from sys import argv, exit
from time import time, sleep
from fuse import FUSE, FuseOSError, Operations, LoggingMixIn
import socket
import sys
import select
import threading
import queue
import os
import datetime
import pickle
import string
import subprocess
import codecs
import zlib
import math
import re
'''
Amfora is a shared in-memory file system. Amfora is POSIX compatible.
NOTE: Methods that have not been implemented are removexattr, setxattr
'''
class Logger():
def __init__(self, logfile):
self.fd = open(logfile, "w")
def log(self, info, function, message):
self.fd.write("%s: %s %s %s\n" % (str(datetime.datetime.now()), info, function, message))
self.fd.flush()
#print("%s: %s %s %s" % (str(datetime.datetime.now()), info, function, message))
if not hasattr(__builtins__, 'bytes'):
bytes = str
class Amfora(LoggingMixIn, Operations):
def __init__(self):
'''
Amfora data is organized as follows:
self.meta stores persistent metadata
self.data stores persistent file data
self.cmeta stores transient local metadata
self.cdata stores transient local file data
self.cdata is replicated in self.data once the write in self.cdata is completed (released).
'''
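#Illustrative (hypothetical) entries for a file '/a.txt' cached on this node:
#  self.meta['/a.txt'] -> dict(st_mode=..., st_size=..., location=<owner ip>, key=misc.hash('/a.txt'))
#  self.data['/a.txt'] -> bytearray(b'...file contents...')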
self.meta = {}
self.data = defaultdict(bytes)
self.cmeta = {}
self.cdata = defaultdict(bytes)
self.fd = 0
#initializing the root directory
now = time()
self.meta['/'] = dict(st_mode=(S_IFDIR | 0o755), st_ctime=now,
st_mtime=now, st_atime=now, st_nlink=2, location=[], key=None)
'''
below are collective interface
'''
def multicast(self, path, algo):
global logger
global slist
global mountpoint
global misc
tcpclient = TCPClient()
apath = path[len(mountpoint):]
logger.log("INFO", "MULTICAST", "multicast "+path+" "+apath)
if path[:len(mountpoint)] != mountpoint:
logger.log("ERROR", "MULTICAST", path[:len(mountpoint)]+" is not the mountpoint")
return 1
#if the meta data is not local, copy it to local meta data
if apath not in self.meta:
self.meta[apath] = self.getattr(apath, None)
#if the file data is not local, copy it to local storage first
if apath not in self.data:
ip = self.meta[apath]['location']
logger.log("INFO", "READ", "read sent to remote server "+apath+" "+ip)
packet = Packet(apath, "READ", {}, {}, 0, [ip], [0,0])
rpacket = tcpclient.sendpacket(packet)
if not rpacket.data:
logger.log("ERROR", "READ", "remote read on "+path+" failed on "+ip)
else:
self.data[apath] = rpacket.data[apath]
#assembe the multicast packet
ddict = dict()
ddict[apath] = self.data[apath]
mdict = dict()
mdict[apath] = self.meta[apath]
packet=Packet(apath, "MULTICAST", mdict, ddict, 0, slist, None)
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "MULTICAST", "multicasting file: "+apath+" failed")
return rpacket
def allgather(self, path, algo):
global logger
global slist
global mountpoint
global misc
#allgather is a two step procedure
#1, gather the data to one node
#2, multicast the data to all nodes
tcpclient = TCPClient()
apath = path[len(mountpoint):]
logger.log("INFO", "ALLGATHER", "allgather "+path+" "+apath)
ret, data, meta = self.gather(path, algo)
packet=Packet(apath, "MULTICAST", meta, data, 0, slist, None)
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "ALLGATHER", "allgathering path: "+apath+" failed")
return rpacket
else:
logger.log("INFO", "ALLGATHER", "allgathering path: "+apath+" finished")
return rpacket
def gather(self, path, algo):
global logger
global slist
global mountpoint
global misc
tcpclient = TCPClient()
apath = path[len(mountpoint):]
logger.log("INFO", "GATHER", "gather "+path+" "+apath)
if path[:len(mountpoint)] != mountpoint:
logger.log("ERROR", "GATHER", path[:len(mountpoint)]+" is not the mountpint")
return 1, None
if path == mountpoint:
apath = '/'
if not S_ISDIR(self.meta[apath]['st_mode']):
logger.log("ERROR", "GATHER", apath+" is not a directory")
return 1, None
#readdir to get the metadata
packet = Packet(apath, "READDIR", {}, {}, 0, slist, 0)
rpacket = tcpclient.sendallpacket(packet)
nmeta = dict(rpacket.meta)
gdict = dict()
for m in rpacket.meta:
if rpacket.meta[m]['location'] not in gdict:
gdict[rpacket.meta[m]['location']] = []
gdict[rpacket.meta[m]['location']].append(m)
self.meta.update(rpacket.meta)
packet = Packet(apath, "GATHER", {}, {}, 0, slist, gdict)
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "GATHER", "gather "+path+" failed")
return rpacket, None, None
else:
self.data.update(rpacket.data)
logger.log("INFO", "GATHER", "gather "+path+" finished")
return rpacket, rpacket.data, nmeta
def scatter(self, path, algo):
global logger
global slist
global mountpoint
global misc
#scatter is a two step procedure
#1, gather the data to one node
#2, scatter the data to all nodes
tcpclient = TCPClient()
apath = path[len(mountpoint):]
logger.log("INFO", "SCATTER", "scatter "+path+" "+apath)
if apath not in self.meta:
return Packet(apath, "SCATTER", None, None, 1, None, None)
ret, data, meta = self.gather(path, algo)
klist = list(sorted(meta.keys()))
klist.reverse()
#keep part of the data local
num_files = math.ceil(len(meta)/len(slist))
logger.log("INFO", "SCATTER", "This node keeps "+str(num_files)+" files")
logger.log("INFO", "SCATTER", str(klist))
for i in range(num_files):
k = klist.pop()
v = meta.pop(k)
data.pop(k)
logger.log("INFO", "SCATTER", "this node keeps "+k)
packet=Packet(apath, "SCATTER", meta, data, 0, slist, klist)
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "SCATTER", "allgathering path: "+apath+" failed")
return rpacket
else:
self.meta.update(rpacket.meta)
logger.log("INFO", "SCATTER", "allgathering path: "+apath+" finished")
return rpacket
def shuffle(self, path, algo, dst):
global logger
global slist
global mountpoint
global misc
tcpclient = TCPClient()
apath = path[len(mountpoint):]
dpath = dst[len(mountpoint):]
logger.log("INFO", "SHUFFLE", "shuffle from "+apath+" to "+dpath)
if path[:len(mountpoint)] != mountpoint or dst[:len(mountpoint)] != mountpoint:
logger.log("ERROR", "MULTICAST", path[:len(mountpoint)]+" or "+dst[:len(mountpoint)]+" is not the mountpoint")
return 1
if path == mountpoint:
apath = '/'
if not S_ISDIR(self.meta[apath]['st_mode']) or not S_ISDIR(self.meta[dpath]['st_mode']) :
logger.log("ERROR", "GATHER", apath+" is not a directory")
return 1
#readdir to get the metadata
packet = Packet(apath, "READDIR", {}, {}, 0, slist, 0)
rpacket = tcpclient.sendallpacket(packet)
nmeta = dict(rpacket.meta)
#assemble the ip:hvalue hashmap
ndict = dict()
for m in nmeta:
if nmeta[m]['location'] not in ndict:
ndict[nmeta[m]['location']] = []
ndict[nmeta[m]['location']].append(m)
#start shuffle server
logger.log("INFO", "SHUFFLE_START", str(slist))
packet=Packet(apath, "SHUFFLE_START", {}, {}, 0, slist, [ndict, dpath])
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "SHUFFLE_START", "shuffling from "+apath+" to "+dpath+" failed")
#assemble the shuffle packet
logger.log("INFO", "SHUFFLE", str(ndict))
packet=Packet(apath, "SHUFFLE", {}, {}, 0, slist, dpath)
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "SHUFFLE", "shuffling from "+apath+" to "+dpath+" failed")
return rpacket
def load(self, src, dst):
global logger
global slist
global mountpoint
global misc
logger.log("INFO", "LOAD", "loading "+src+" to "+dst+" started")
apath = dst[len(mountpoint):]
if dst == mountpoint:
apath = '/'
if dst[:len(mountpoint)] != mountpoint:
logger.log("ERROR", "LOAD", dst[:len(mountpoint)]+" is not the mountpint")
return 1
if apath not in self.meta:
logger.log("ERROR", "LOAD", "direcotry "+apath+" does not exist")
return 1
if not os.path.exists(src):
logger.log("ERROR", "LOAD", "directory: "+src+" does not exist")
return 1
tcpclient = TCPClient()
#mkdir
basename = os.path.basename(src)
dirname = os.path.join(apath, basename)
logger.log("INFO", "LOAD", "creating dir: "+dirname)
if dirname not in self.meta:
self.mkdir(dirname, self.meta['/']['st_mode'])
#read file names in the src dir
tlist = os.listdir(src)
flist = []
for f in tlist:
#print(os.path.join(src, f))
if os.path.isdir(os.path.join(src, f)):
logger.log("INFO", "LOAD", "recursively load "+os.path.join(src, f)+" to "+os.path.join(mountpoint, basename))
self.load(os.path.join(src, f), os.path.join(mountpoint, basename))
else:
flist.append(os.path.join(src, f))
flist.sort()
logger.log("INFO", "LOAD", "loading the following files "+str(flist)+" from "+src+" to "+dirname)
packet=Packet(dirname, "LOAD", {}, {}, 0, slist, flist)
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "LOAD", "loading "+src+" to "+dirname+" failed")
return rpacket
else:
logger.log("INFO", "LOAD", "loading "+src+" to "+dirname+" finished")
return rpacket
def dump(self, src, dst):
global logger
global slist
global mountpoint
global misc
logger.log("INFO", "DUMP", "dumping "+src+" to "+dst+" started")
apath = src[len(mountpoint):]
if src == mountpoint:
apath = '/'
if src[:len(mountpoint)] != mountpoint:
logger.log("ERROR", "DUMP", src[:len(mountpoint)]+" is not the mountpint")
return 1
if apath not in self.meta:
logger.log("ERROR", "DUMP", "direcotry "+apath+" does not exist")
return 1
if not os.path.exists(dst):
logger.log("ERROR", "DUMP", "directory: "+dst+" does not exist")
return 1
tcpclient = TCPClient()
#mkdir for dst
basename = os.path.basename(src)
dirname = os.path.join(dst, basename)
if os.path.exists(dirname):
logger.log("ERROR", "DUMP", "directory: "+dirname+" exists")
#return 1
else:
os.mkdir(dirname)
#recursively dumping sub-directory
for k in self.meta:
if S_ISDIR(self.meta[k]['st_mode']) and os.path.dirname(k) == apath:
logger.log("INFO", "DUMP", "recursively dumping "+os.path.join(mountpoint, k[1:])+" "+dirname)
self.dump(os.path.join(mountpoint, k[1:]), dirname)
#read metadata of the files in this dir
packet = Packet(apath, "READDIR", {}, {}, 0, slist, 0)
rpacket = tcpclient.sendallpacket(packet)
meta = dict(rpacket.meta)
fdict = dict() #key-ip, value-list of hvalue
for k in meta:
if meta[k]['location'] not in fdict:
fdict[meta[k]['location']] = []
fdict[meta[k]['location']].append([meta[k]['key'], os.path.join(dirname, k[len(apath)+1:])])
logger.log("INFO", "DUMP", "dumping the following files "+str(fdict)+" from "+src+" to "+dirname)
packet=Packet(src, "DUMP", {}, {}, 0, slist, fdict)
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "DUMP", "dumping "+src+" to "+dirname+" failed")
return rpacket
else:
logger.log("INFO", "DUMP", "dumping "+src+" to "+dirname+" finished")
return rpacket
def execute(self):
global logger
global slist
global mountpoint
global misc
logger.log("INFO", "EXECUTE", "execute all tasks")
tcpclient = TCPClient()
taskl = misc.readtask()
packet=Packet("", "EXECUTE", {}, {}, 0, slist, taskl)
rpacket = tcpclient.sendallpacket(packet)
if sum(rpacket.meta.values()) != 0:
logger.log("ERROR", "EXECUTE", "execution failed "+str(rpacket.meta)+"\n"+str(rpacket.data))
return rpacket
else:
logger.log("INFO", "EXECUTE", "execution finished "+str(rpacket.meta))
return rpacket
'''
below are POSIX interface
'''
def chmod(self, path, mode):
global logger
global misc
logger.log("INFO", "chmod", path+", "+str(mode))
if path in self.meta:
self.meta[path]['st_mode'] &= 0o770000
self.meta[path]['st_mode'] |= mode
else:
logger.log("INFO", "CHMOD", "chmod sent to remote server "+path+" "+str(mode))
#send a chmod message to remote server
ip = misc.findserver(path)
tcpclient = TCPClient()
packet = Packet(path, "CHMOD", {}, {}, 0, [ip], mode)
ret=tcpclient.sendpacket(packet)
if ret.ret != 0:
logger.log("ERROR", "chmod", path+" with "+str(mode)+" failed on "+ip)
def chown(self, path, uid, gid):
global logger
logger.log("INFO", "chown", path+", "+str(uid)+", "+str(gid))
def create(self, path, mode):
global logger
global misc
global localip
logger.log("INFO", "CREATE", path+", "+str(mode))
self.cmeta[path] = dict(st_mode=(S_IFREG | mode), st_nlink=1,
st_size=0, st_ctime=time(), st_mtime=time(),
st_atime=time(), location=localip, key=misc.hash(path))
#self.cdata[path]=b''
self.cdata[path]=bytearray()
self.fd += 1
return self.fd
def getattr(self, path, fh=None):
global logger
global misc
global localip
logger.log("INFO", "getattr", path)
ip = misc.findserver(path)
logger.log("INFO", "getattr", "metadata of "+path+" is at "+ip)
if path in self.meta:
logger.log("INFO", "getattr", "metadata of "+path+" is self.meta ")
return self.meta[path]
elif path in self.cmeta:
logger.log("INFO", "getattr", "metadata of "+path+" is self.cmeta ")
return self.cmeta[path]
if ip == localip:
raise OSError(ENOENT, '')
else:
logger.log("INFO", "GETATTR", "getattr sent to remote server: "+path)
ip = misc.findserver(path)
tcpclient = TCPClient()
packet = Packet(path, "GETATTR", None, None, None, [ip], None)
ret = tcpclient.sendpacket(packet)
if not ret.meta:
raise OSError(ENOENT, '')
else:
self.meta[path]=ret.meta[path]
return self.meta[path]
def getxattr(self, path, name, position=0):
global logger
logger.log("INFO", "getxattr", path+", "+name)
#if empty return b''
try:
if path in self.cmeta:
return self.cmeta[path][name]
elif path in self.meta:
return self.meta[path][name]
else:
global misc
ip = misc.findserver(path)
packet = Packet(path, "GETATTR", None, None, None, [ip], None)
tcpclient = TCPClient()
ret = tcpclient.sendpacket(packet)
if not ret.meta:
return b''
else:
return ret.meta[path][name]
except KeyError:
return b''
def listxattr(self, path):
global logger
logger.log("INFO", "listxattr", path)
if path in self.cmeta:
return self.cmeta[path].keys()
elif path in self.meta:
return self.meta[path].keys()
else:
global misc
ip = misc.findserver(path)
packet = Packet(path, "GETATTR", None, None, None, [ip], None)
tcpclient = TCPClient()
ret = tcpclient.sendpacket(packet)
if not ret.meta:
raise OSError(ENOENT, '')
else:
return ret.meta[path].keys()
def mkdir(self, path, mode):
global logger
global slist
logger.log("INFO", "MKDIR", path+", "+str(mode))
parent = os.path.dirname(path)
if parent not in self.meta:
logger.log("ERROR", "MKDIR", parent+" does not exist")
raise FuseOSError(ENOENT)
else:
packet = Packet(path, "MKDIR", {}, {}, 0, slist, mode)
tcpclient = TCPClient()
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "MKDIR", "creating dir: "+path+" failed")
raise FuseOSError(ENOENT)
else:
self.local_mkdir(path, mode)
def open(self, path, flags):
global logger
logger.log("INFO", "open", path+", "+str(flags))
self.fd += 1
return self.fd
def read(self, path, size, offset, fh):
global logger
global misc
logger.log("INFO", "READ", path+", "+str(size)+", "+str(offset))
if path in self.data:
return bytes(self.data[path][offset:offset + size])
elif path in self.cdata:
return bytes(self.cdata[path][offset:offset+size])
else:
ip = self.meta[path]['location']
logger.log("INFO", "READ", "read sent to remote server "+path+" "+ip)
packet = Packet(path, "READ", {}, {}, 0, [ip], [size, offset])
tcpclient = TCPClient()
rpacket = tcpclient.sendpacket(packet)
if not rpacket.data:
logger.log("ERROR", "READ", "remote read on "+path+" failed on "+ip)
return None
else:
self.data[path] = rpacket.data[path]
return bytes(self.data[path][offset:offset + size])
def readdir(self, path, fh):
global logger
global slist
logger.log("INFO", "readdir", path)
if path not in self.meta:
logger.log("ERROR", "READDIR", path+" does not exist")
raise FuseOSError(ENOENT)
else:
packet = Packet(path, "READDIR", {}, {}, 0, slist, fh)
tcpclient = TCPClient()
rpacket = tcpclient.sendallpacket(packet)
self.meta.update(rpacket.meta)
#filtering local metadata in the parent dir of path
rlist = ['.', '..']
for m in self.meta:
if m != '/' and path == os.path.dirname(m):
b = os.path.basename(m)
rlist.append(b)
return rlist
def readlink(self, path):
global logger
logger.log("INFO", "readlink", path)
pass
def removexattr(self, path, name):
#not implemented yet
attrs = self.files[path].get('attrs', {})
try:
del attrs[name]
except KeyError:
pass # Should return ENOATTR
def rename(self, old, new):
global logger
global misc
global localip
logger.log("INFO", "rename", "old: "+old+", new: "+new)
oldhash = misc.hash(old)
newhash = misc.hash(new)
if old in self.meta:
self.cmeta[new] = self.meta[old]
else:
self.cmeta[new] = self.getattr(old, None)
if old in self.cdata:
self.cdata[new] = self.cdata[old]
self.cdata.pop(old)
else:
ip = self.cmeta[new]['location']
logger.log("INFO", "READ", "read sent to remote server "+old+" "+ip)
packet = Packet(old, "READ", {}, {}, 0, [ip], [0, 0])
tcpclient = TCPClient()
rpacket = tcpclient.sendpacket(packet)
if not rpacket.data:
logger.log("ERROR", "READ", "remote read on "+path+" failed on "+ip)
return None
else:
self.cdata[new] = rpacket.data[old]
self.cmeta[new]['key'] = misc.hash(new)
self.cmeta[new]['location'] = localip
if old in self.meta:
self.meta.pop(old)
ip = misc.findserver(old)
if ip != localip:
tcpclient = TCPClient()
packet = Packet(old, "RMMETA", None, None, None, [ip], None)
ret = tcpclient.sendpacket(packet)
self.release(new, 0)
def rmdir(self, path):
#rmdir is a two step procedure
#Step 1, remove the dir path and return the file meta within
#Step 2, remove all file data on all nodes
global logger
logger.log("INFO", "rmdir", path)
if path not in self.meta:
logger.log("ERROR", "RMDIR", path+" does not exist")
raise FuseOSError(ENOENT)
else:
for m in list(self.meta.keys()):
if os.path.dirname(m) == path and S_ISDIR(self.meta[m]['st_mode']):
self.rmdir(m)
packet = Packet(path, "RMDIR", {}, {}, 0, slist, [])
tcpclient = TCPClient()
rpacket = tcpclient.sendallpacket(packet)
logger.log("INFO", "RMDIR", "removed "+str(rpacket.misc))
return 0
def setxattr(self, path, name, value, options, position=0):
# not implemented yet
# Ignore options
attrs = self.files[path].setdefault('attrs', {})
attrs[name] = value
def statfs(self, path):
return dict(f_bsize=512, f_blocks=4096, f_bavail=2048)
def symlink(self, target, source):
global logger
logger.log("INFO", "symlink", "target: "+target+", source:"+source)
pass
def truncate(self, path, length, fh=None):
global logger
global misc
logger.log("INFO", "truncate", path+", "+str(length))
if path in self.cdata:
self.cdata[path] = self.cdata[path][:length]
self.cmeta[path]['st_size'] = length
else:
print("truncate sent to remote server")
#ip = misc.findserver(path)
#packet = Packet(path, "truncate", None, None, None, ip, length)
#tcpclient = TCPClient()
#ret = tcpclient.sendpacket(packet)
#if ret != 0:
# logger.log("ERROR", "truncate", "failed on "+path+" with length: "+str(length))
def unlink(self, path):
#Unlink is not functioning well at the moment
global logger
global localip
global misc
logger.log("INFO", "UNLINK", path)
#unlink is a two-step procedure: first, find the metadata of this file and remove it;
#second, clear the actual data
tcpclient = TCPClient()
dst = None
ip = misc.findserver(path)
if ip == localip:
dst = self.meta[path]['location']
self.meta.pop(path)
else:
packet = Packet(path, "UNLINK", {}, {}, 0, [ip], None)
ret = tcpclient.sendpacket(packet)
if not ret.meta:
logger.log("ERROR", "UNLINK", "unlink "+path+" failed")
raise FuseOSError(ENOENT)
else:
dst = ret.meta[path]['location']
if path in self.meta:
self.meta.pop(path)
if not dst:
logger.log("ERROR", "UNLINK", "unlink "+path+" failed")
raise FuseOSError(ENOENT)
else:
if dst == localip:
self.data.pop(path)
else:
packet = Packet(path, "REMOVE", {}, {}, 0, [dst], None)
ret = tcpclient.sendpacket(packet)
if ret.ret != 0:
logger.log("ERROR", "UNLINK", "remove "+path+" failed")
if path in self.data:
self.data.pop(path)
def utimens(self, path, times=None):
global logger
logger.log("INFO", "utimens", path)
pass
def write(self, path, data, offset, fh):
global logger
global misc
logger.log("INFO", "write", path+", length: "+str(len(data))+", offset: "+str(offset))
#write to the right place
if path in self.cdata:
if offset == len(self.cdata[path]):
self.cdata[path].extend(data)
else:
self.cdata[path] = self.cdata[path][:offset]+data
if path in self.data:
if offset == len(self.data[path]):
self.data[path].extend(data)
else:
self.data[path] = self.data[path][:offset]+data
if path not in self.cdata:
print("write sent to remote server")
#ip = misc.findserver(path)
#packet = Packet(path, "locate", None, None, None, ip, None)
#tcpclient = TCPClient()
#ret = tcpclient.sendpacket(packet)
#packet = packet(path, "write", None, None, None, ret, [data, offset])
#ret = tcpclient.sendpacket(packet)
#update the metadata
if path in self.cmeta:
self.cmeta[path]['st_size'] = len(self.cdata[path])
#self.cmeta[path]['st_size'] = self.cmeta[path]['st_size']+len(data)
if path in self.meta:
self.meta[path] = self.cmeta[path]
else:
print("write+update+meta sent to remote server")
#ip = misc.findserver(path)
#packet = Packet(path, "updatesize", None, None, None, ip, data)
#tcpclient = TCPClient()
#ret = tcpclient.sendpacket(packet)
return len(data)
def release(self, path, fh):
global logger
global misc
global localip
logger.log("INFO", "RELEASE", path)
ip = misc.findserver(path)
if path in self.cmeta:
self.data[path] = self.cdata[path]
if ip == localip:
self.meta[path] = self.cmeta[path]
return 0
elif path in self.meta:
self.local_release(path, fh)
return 0
else:
logger.log("INFO", "RELEASE", "release sent to remote server: "+path+" "+ip)
tempdict = dict()
tempdict[path] = self.cmeta[path]
packet = Packet(path, "RELEASE", tempdict, None, None, [ip], None)
tcpclient = TCPClient()
rpacket = tcpclient.sendpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "RELEASE", path+" failed")
return rpacket.ret
#elif path in self.data:
# self.data.pop(path)
else:
return 0
def type(self, src, typedef):
#This type function can transform the format of a given file
#Supported transformations are:
#File-to-row_table, file-to-dir
#File-to-column_table, file-to-dir
#File-to-row_matrix, file-to-dir
#File-to-column_matrix, file-to-dir
#File-to-tile_matrix, file-to-dir
#In the current implementation, we assume the file is local to the login node
global logger
global slist
global mountpoint
global misc
global ddict
ddict = defaultdict(bytes)
tcpclient = TCPClient()
apath = src[len(mountpoint):]
logger.log("INFO", "TYPE", "Transfer "+apath +" to type "+typedef)
if src[:len(mountpoint)] != mountpoint:
logger.log("ERROR", "TYPE", src[:len(mountpoint)]+" is not the mountpoint")
return 1
if apath not in self.data:
logger.log("ERROR", "TYPE", apath+" does not exist")
return 1
elif typedef == "column_table":
ddict = misc.to_column_table(self.data[apath])
elif typedef == "row_table":
ddict = misc.to_row_table(self.data[apath])
elif typedef == "column_matrix":
ddict = misc.to_column_matrix(self.data[apath])
elif typedef == "row_matrix":
ddict = misc.to_row_matrix(self.data[apath])
elif typedef == "tile_matrix":
ddict = misc.to_tile_matrix(self.data[apath])
else:
logger.log("ERROR", "transformation "+typedef+" is not supported")
basename = os.path.basename(apath)
dirname = os.path.dirname(apath)
if dirname == '/':
dirname = ''
dirname = dirname+'/.'+basename+"_"+typedef
if dirname not in self.meta:
self.mkdir(dirname, self.meta['/']['st_mode'])
klist = list(sorted(ddict.keys()))
for k in klist:
fname = os.path.join(dirname, k)
self.create(fname, 33188)
self.cdata[fname] = ddict.pop(k)
self.cmeta[fname]['st_size'] = len(self.cdata[fname])
self.release(fname, 0)
return 0
def local_chmod(self, path, mode):
global logger
logger.log("INFO", "local_chmod", path+", "+str(mode))
pass
def local_chown(self, path, uid, gid):
global logger
logger.log("INFO", "local_chown", path+", "+str(uid)+", "+str(gid))
pass
def local_create(self, path, mode, ip):
global logger
logger.log("INFO", "local_create", path+", "+str(mode)+", "+str(ip))
pass
def local_getxattr(self, path, name, position=0):
global logger
logger.log("INFO", "local_getxattr", path+", "+str(name))
pass
def local_listxattr(self, path):
global logger
logger.log("INFO", "local_listxattr", path)
pass
def local_mkdir(self, path, mode):
global logger
logger.log("INFO", "local_mkdir", path+", "+str(mode))
parent = os.path.dirname(path)
if parent not in self.meta:
logger.log("ERROR", "local_mkdir", parent+" does not exist")
return 1
else:
nlink = self.meta[parent]['st_nlink']
self.meta[path] = dict(st_mode=(S_IFDIR | mode), st_nlink=nlink+1, st_size=0, st_ctime=time(), st_mtime=time(), st_atime=time(), location=[], key=None)
self.meta[parent]['st_nlink'] += 1
return 0
def local_readdir(self, path, fh):
global logger
logger.log("INFO", "local_readdir", path)
rdict = dict()
for m in self.meta:
if path == os.path.dirname(m) and not S_ISDIR(self.meta[m]['st_mode']):
rdict[m] = self.meta[m]
return rdict
def local_readlink(self, path):
global logger
logger.log("INFO", "local_readlink", path)
pass
def local_removexattr(self, path, name):
global logger
logger.log("INFO", "local_removeattr", path+", "+name)
pass
def local_read(self, path, size, offset):
global logger
logger.log("INFO", "local_read", path+", "+str(offset)+", "+str(size))
if path in self.data:
tempdict = defaultdict(bytes)
tempdict[path] = self.data[path]
return tempdict
else:
return None
def local_rename(self, old, new):
global logger
logger.log("INFO", "local_rename", "old: "+old+" new: "+new)
pass
def local_insert(self, path, meta):
global logger
logger.log("INFO", "local_insert", path)
pass
def local_rmdir(self, path):
global logger
logger.log("INFO", "local_rmdir", path)
rlist = []
for m in list(self.meta.keys()):
if os.path.dirname(m) == path or m == path:
self.meta.pop(m)
if m not in rlist:
rlist.append(m)
for m in list(self.cmeta.keys()):
if os.path.dirname(m) == path or m == path:
self.cmeta.pop(m)
if m not in rlist:
rlist.append(m)
for m in list(self.data.keys()):
if os.path.dirname(m) == path:
self.data.pop(m)
if m not in rlist:
rlist.append(m)
for m in list(self.cdata.keys()):
if os.path.dirname(m) == path:
self.cdata.pop(m)
if m not in rlist:
rlist.append(m)
return rlist
def local_setxattr(self, path, name, value, options, position=0):
# Ignore options
attrs = self.files[path].setdefault('attrs', {})
attrs[name] = value
def local_symlink(self, target, source, ip):
global logger
logger.log("INFO", "local_symlink", "target: "+target+" source: "+source)
def local_truncate(self, path, length, fh=None):
global logger
logger.log("INFO", "local_truncate", path+", "+str(length))
def local_unlink(self, path):
global logger
logger.log("INFO", "local_unlink", path)
if path not in self.meta:
return None
else:
rdict = dict()
rdict[path] = self.meta[path]
self.meta.pop(path)
return rdict
def local_remove(self, path):
global logger
global misc
logger.log("INFO", "local_remove", path)
if path not in self.data:
return 1
else:
self.data.pop(path)
return 0
def local_rmmeta(self, path):
global logger
global misc
logger.log("INFO", "local_rmmeta", path)
if path in self.cmeta:
self.cmeta.pop(path)
if path in self.meta:
self.meta.pop(path)
return 0
def local_utimens(self, path, times=None):
global logger
logger.log("INFO", "local_utimens", path)
def local_append(self, path, offset, data):
global logger
logger.log("INFO", "local_append", path+", "+str(offset)+", "+str(len(data)))
def local_getattr(self, path):
global logger
logger.log("INFO", "local_getattr", path)
if path in self.meta:
tempdict = dict()
tempdict[path] = self.meta[path]
return tempdict
else:
return None
def local_release(self, path, meta):
global logger
logger.log("INFO", "local_release", path)
if path not in self.meta:
self.meta[path] = meta[path]
def local_updatelocation(self, path, meta):
global logger
logger.log("INFO", "local_updatelocation", path+" location: "+meta['location'])
def local_load(self, dst, filel):
global logger
logger.log("INFO", "local_load", "loading "+str(filel)+" to "+dst)
for f in filel:
basename = os.path.basename(f)
dstf = os.path.join(dst, basename)
self.create(dstf, 33188)
fd = open(f, 'rb')
self.cdata[dstf] = fd.read()
fd.close()
self.cmeta[dstf]['st_size'] = len(self.cdata[dstf])
self.release(dstf, 0)
logger.log("INFO", "local_load", "finished loading "+str(filel)+" to "+dst)
def local_dump(self, pairl):
global logger
logger.log("INFO", "local_dump", "dumping"+str(pairl))
for p in pairl:
hvalue, fname = p
fd = open(fname, 'wb')
fd.write(self.data[fname])
fd.close()
logger.log("INFO", "local_dump", "finished dumping")
class TCPClient():
def __init__(self):
self.bufsize = 1048576
self.psize = 16
def init_port(self, ip, port):
global logger
logger.log("INFO", "TCPclient_init_port", "connecting to "+ip+":"+str(port))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connected = 0
while connected == 0:
try:
sock.connect((ip, port))
except socket.error:
logger.log("ERROR", "TCPclient_init_port", "connect "+ip+" failed, try again")
sleep(1)
continue
else:
connected = 1
logger.log("INFO", "TCPclient_init_port", "connected to "+ip+":"+str(port))
return sock
def init_server(self, host, port):
global logger
server = None
while server == None:
try:
logger.log("INFO", "TCPclient_init_server", "starting server TCP socket")
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind((host, port))
server.listen(5)
except socket.error as msg:
logger.log("ERROR", "TCPclient_init_server", msg)
server = None
sleep(1)
else:
logger.log("INFO", "TCPclient_init_server", "server TCP socket started")
return server
def sendpacket(self, packet):
global logger
global localip
global amfora
logger.log("INFO", "TCPclient_sendpacket()", packet.path+" "+packet.op)
#Packet sent to a single host
if len(packet.tlist) > 0:
try:
#initialize the socket
s = self.init_port(packet.tlist[0], 55000)
#dump packet into binary format
bpacket = pickle.dumps(packet)
#get the packet size
length = len(bpacket)
logger.log("INFO", "TCPclient.sendpacket()", "ready to send "+str(length)+" bytes")
#padding the length of the packet to a 16-byte number
slength = str(length)
while len(slength) < self.psize:
slength = slength + '\0'
#send the length, and wait for an ack
s.send(bytes(slength, 'utf-8'))
s.recv(1)
#send the bpacket data
sent = 0
while sent < length:
if length - sent > self.bufsize:
sent_iter = s.send(bpacket[sent:sent+self.bufsize])
else:
sent_iter = s.send(bpacket[sent:])
sent = sent + sent_iter
logger.log("INFO", "TCPclient.sendpacket()", "send "+str(sent_iter)+" bytes")
logger.log("INFO", "TCPclient.sendpacket()", "totally send "+str(sent)+" bytes")
#receive the size of the returned packet
data = s.recv(self.psize)
length = int(data.decode('utf8').strip('\0'))
s.send(bytes('0', 'utf8'))
data = b''
rect = 0
while rect < length:
if length - rect > self.bufsize:
temp = s.recv(self.bufsize)
else:
temp = s.recv(length-rect)
rect = rect + len(temp)
data = data + temp
logger.log("INFO", "TCPclient.sendpacket()", "receive "+str(len(temp))+" bytes")
logger.log("INFO", "TCPclient.sendpacket()", "totally receive "+str(len(data))+" bytes")
s.close()
packet = pickle.loads(data)
except socket.error as msg:
logger.log("ERROR", "TCPclient_sendpacket()", "Socket Exception: "+str(msg))
except Exception as msg:
logger.log("ERROR", "TCPclient_sendpacket()", "Other Exception: "+str(msg))
finally:
return packet
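#Note added for clarity (illustrative summary, not an original spec): every
#packet exchange in this class uses the same framing. The pickled packet
#length is rendered as ASCII digits and padded with NUL bytes to self.psize
#(16) bytes, e.g. a 97473-byte payload is announced as b'97473' followed by
#11 b'\x00' bytes. The receiver acks the header with a single byte before the
#payload is streamed in self.bufsize (1 MiB) chunks.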
def one_sided_sendpacket(self, packet, port):
global logger
global localip
global amfora
logger.log("INFO", "TCPclient_one_sided_sendpacket()", packet.path+" "+packet.op)
#Packet sent to a single host
if len(packet.tlist) > 0:
try:
#initialize the socket
s = self.init_port(packet.tlist[0], port)
#dump packet into binary format
bpacket = pickle.dumps(packet)
#get the packet size
length = len(bpacket)
logger.log("INFO", "TCPclient.one_sided_sendpacket()", "ready to send "+str(length)+" bytes")
#padding the length of the packet to a 16-byte number
slength = str(length)
while len(slength) < self.psize:
slength = slength + '\0'
#send the length, and wait for an ack
s.send(bytes(slength, 'utf-8'))
s.recv(1)
#send the bpacket data
sent = 0
while sent < length:
if length - sent > self.bufsize:
sent_iter = s.send(bpacket[sent:sent+self.bufsize])
else:
sent_iter = s.send(bpacket[sent:])
sent = sent + sent_iter
logger.log("INFO", "TCPclient.one_sided_sendpacket()", "send "+str(sent_iter)+" bytes")
logger.log("INFO", "TCPclient.one_sided_sendpacket()", "totally send "+str(sent)+" bytes")
except socket.error as msg:
logger.log("ERROR", "TCPclient.one_sided_sendpacket()", "Socket Exception: "+str(msg))
except Exception as msg:
logger.log("ERROR", "TCPclient.one_sided_sendpacket()", "Other Exception: "+str(msg))
def sendallpacket(self, packet):
global logger
global misc
global sdict
global localip
global shuffleserver
global shufflethread
logger.log("INFO", "TCPclient_sendallpacket", packet.op+" "+packet.path+" "+str(packet.misc))
#partition the target list in the packet to reorganize the nodes into an MST
#The current implementation of partition_list() is MST
#SEQ algorithm can be implemented by modifying the code in partition_list()
olist = misc.partition_list(packet.tlist)
#start an asynchronous server to receive acknowledgements of collective operations
logger.log("INFO", "TCPClient_sendallpacket", "num_targets: "+str(olist))
if len(olist) > 0:
server = self.init_server('', 55001)
else:
server=None
#rdict tracks the immediate children of this node
#initialize the status of each node: 0 means not returned,
#1 means returned
rdict = dict()
for ol in olist:
rdict[ol[0]] = 0
colthread = CollectiveThread(server, rdict, packet)
colthread.start()
meta = dict(packet.meta)
data = dict(packet.data)
total_tasks = 0
total_files = 0
total_target = 0
if packet.op == "EXECUTE":
total_tasks = len(packet.misc)
total_target = len(packet.tlist)
elif packet.op == "SCATTER":
total_files = len(meta)
elif packet.op == "LOAD":
total_files = len(packet.misc)
total_target = len(packet.tlist)
for ol in olist:
if packet.op == "SCATTER":
mdict = {}
ddict = {}
klist = list(sorted(packet.misc))
num_files = math.ceil(len(ol)*total_files/(len(packet.tlist)-1))
#print(str(num_files)+" "+str(len(ol))+" "+str(total_files)+" "+str(len(packet.tlist)))
oklist = []
for i in range(num_files):
if len(klist) > 0:
k = klist.pop()
packet.misc.remove(k)
mdict[k] = meta.pop(k)
ddict[k] = data.pop(k)
oklist.append(k)
op = Packet(packet.path, packet.op, mdict, ddict, packet.ret, ol, sorted(oklist))
elif packet.op == "EXECUTE":
taskl = []
#num_tasks = math.ceil(len(packet.misc)/2)
num_tasks = math.ceil(len(ol)*total_tasks/total_target)
total_tasks = total_tasks - num_tasks
total_target = total_target - len(ol)
for i in range(num_tasks):
taskl.append(packet.misc.pop())
op = Packet(packet.path, packet.op, packet.meta, packet.data, packet.ret, ol, taskl)
elif packet.op == "LOAD":
filel = []
#num_files = math.ceil(len(packet.misc)/2)
num_files = math.ceil(len(ol)*total_files/total_target)
total_files = total_files-num_files
total_target = total_target - len(ol)
#print(str(num_files)+" "+str(len(ol))+" "+str(total_files)+" "+str(len(packet.tlist)))
for i in range(num_files):
filel.append(packet.misc.pop())
op = Packet(packet.path, packet.op, packet.meta, packet.data, packet.ret, ol, filel)
else:
op = Packet(packet.path, packet.op, packet.meta, packet.data, packet.ret, ol, packet.misc)
self.one_sided_sendpacket(op, 55000)
#start shuffleserver as a thread and shuffleclient
if packet.op == "SHUFFLE_START":
logger.log("INFO", "TCPclient_sendallpacket()", "ready to start shuffle")
global amfora
global localip
#global slist
#global sdict
#global shuffleserver
retdict = defaultdict(bytes)
for ip in slist:
retdict[ip] = b''
nextip = misc.nextip()
logger.log("INFO", "TCPclient_sendallpacket()", "Shuffle_start: "+str(packet.misc))
if localip in packet.misc[0]:
sdict = misc.shuffle(packet.misc[0][localip])
else:
sdict = misc.shuffle(dict())
#logger.log("INFO", "TCPclient_sendallpacket()", "Shuffle_start sdict: "+str(sdict))
retdict[localip] = sdict.pop(localip)
shuffleserver.reset(retdict, packet)
while not shufflethread.is_alive():
shufflethread.start()
#s_server = self.init_server('', 55003)
#shuffleserver = ShuffleServer(s_server, retdict, packet)
#shuffleserver.start()
elif packet.op =="SHUFFLE":
logger.log("INFO", "TCP_sendallpacket()", "ready to shuffle")
#nextip = misc.nextip()
iplist = misc.reorderip()
logger.log("INFO", "TCP_sendallpacket()", "target ip order: "+str(iplist))
for ip in iplist:
tempdict = dict()
tempdict[ip] = sdict.pop(ip)
p = Packet(packet.path, "SHUFFLETHREAD", {}, tempdict, 0, [ip], [localip, packet.misc])
shufflethread.push(p)
while shuffleserver.status == 1:
sleep(0.1)
elif packet.op == "EXECUTE":
logger.log("INFO", "TCPclient_sendallpacket()", "ready to execute: "+str(len(packet.misc))+" tasks")
executor = Executor(packet.misc)
executor.run()
packet.meta.update(executor.smap)
packet.data.update(executor.emap)
logger.log("INFO", "TCPclient_sendallpacket()", "finished executing: "+str(len(packet.misc))+" tasks")
elif packet.op == "LOAD":
global amfora
logger.log("INFO", "TCPclient_sendallpacket()", "read to load: "+str(packet.misc))
amfora.local_load(packet.path, packet.misc)
logger.log("INFO", "TCPclient_sendallpacket()", "finished loading: "+str(packet.misc))
elif packet.op == "DUMP":
#global amfora
#global localip
logger.log("INFO", "TCPclient_sendallpacket()", "read to dump: "+str(packet.misc))
if localip in packet.misc:
amfora.local_dump(packet.misc[localip])
logger.log("INFO", "TCPclient_sendallpacket()", "finished dumping: "+str(packet.misc))
else:
pass
#while colthread.is_alive():
# sleep(0.1)
# pass
#logger.log("INFO", "TCPclient_sendallpacket()", "waiting for colthread to finish")
colthread.join()
if server:
server.shutdown(socket.SHUT_RDWR)
server.close()
return packet
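#Summary note added for clarity: sendallpacket() drives the collective
#operations. It splits packet.tlist into a broadcast tree via
#misc.partition_list(), starts a CollectiveThread on port 55001 to collect the
#children's replies, forwards one sub-packet per subtree over port 55000,
#performs the local part of the operation (EXECUTE/LOAD/DUMP/SHUFFLE, etc.),
#and returns the merged packet once every child has reported back.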
class ShuffleServer(threading.Thread):
def __init__(self, name, port):
threading.Thread.__init__(self)
self.id = name
self.host = ''
self.port = port
self.server = None
self.bufsize = 1048576
self.psize = 16
self.retdict = None
self.packet = None
self.dst = None
self.status = 0 #0 means stall, 1 means running
def reset(self, retdict, packet):
self.retdict = retdict
self.packet = packet
self.dst = packet.misc[1]
self.status = 1
def open_socket(self):
global logger
try:
logger.log("INFO", "ShuffleServer_opensocket", "Open server socket")
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind((self.host, self.port))
self.server.listen(5)
except socket.error as msg:
logger.log("ERROR", "ShuffleServer_opensocket", msg)
self.server = None
def run(self):
global logger
global localip
global amfora
global slist
global shufflethread
logger.log("INFO", "ShuffleServer_run()", "thread started")
self.open_socket()
counter = 0
tcpclient = TCPClient()
nextip = misc.nextip()
while True:
#return once this server has received len(slist)-1 packets
if counter == len(slist)-1 and len(slist) > 1:
logger.log("INFO", "ShufflerServer_run()", "received "+str(counter)+" packets, now terminates")
counter = 0
index = slist.index(localip)
if self.dst == '/':
fname = '/'+str(index)
else:
fname = self.dst+'/'+str(index)
amfora.create(fname, 33188)
temp = bytearray()
#logger.log("INFO", "ShuffleServer_run()", "retdict: "+str(self.retdict))
for k in self.retdict:
temp.extend(self.retdict[k])
logger.log("INFO", "ShuffleServer_run()", "processing record from "+k)
amfora.cdata[fname] = bytes(temp)
amfora.cmeta[fname]['st_size'] = len(amfora.cdata[fname])
amfora.release(fname, 0)
logger.log("INFO", "ShuffleServer_run()", "shuffle finished")
self.status = 0
del self.retdict
#break
else:
conn, addr = self.server.accept()
try:
peer = conn.getpeername()[0]
data = conn.recv(self.psize)
length = int(data.decode('utf8').strip('\0'))
logger.log("INFO", "ShuffleServer_run()", "ready to receive "+str(length)+" bytes")
conn.send(bytes('0', 'utf8'))
data = b''
rect = 0
while rect < length:
if length - rect > self.bufsize:
temp = conn.recv(self.bufsize)
else:
temp = conn.recv(length-rect)
rect = rect + len(temp)
data = data + temp
logger.log("INFO", "ShuffleServer_run()", "receive "+str(len(temp))+" bytes")
logger.log("INFO", "ShufflerServer_run()", "totally receive "+str(len(data))+" bytes")
#keep local data
tp = pickle.loads(data)
self.retdict[tp.misc[0]]=tp.data.pop(localip)
#logger.log("INFO", "ShufflerServer_run()", "remained dict: "+str(tp.data))
'''
if len(tp.data.keys()) > 0:
packet = Packet(tp.path, tp.op, tp.meta, tp.data, tp.ret, [nextip], tp.misc)
shufflethread.push(packet)
'''
except socket.error as msg:
logger.log("ERROR", "ShuffleServer_run()", "Socket Exception: "+str(msg))
except Exception as msg:
logger.log("ERROR", "ShuffleServer_run()", "Other Exception: "+str(msg))
finally:
#conn.close()
counter = counter + 1
logger.log("INFO", "ShuffleServer_run()", "received "+str(counter)+" packets")
#now the shuffle server has all shuffled data
'''
logger.log("INFO", "ShuffleServer_run()", "now server has "+str(len(self.retdict.keys()))+" records")
index = slist.index(localip)
if self.dst == '/':
fname = '/'+str(index)
else:
fname = self.dst+'/'+str(index)
amfora.create(fname, 33188)
temp = bytearray()
logger.log("INFO", "ShuffleServer_run()", "retdict: "+str(self.retdict))
for k in self.retdict:
temp.extend(self.retdict[k])
logger.log("INFO", "ShuffleServer_run()", "processing record from "+k)
amfora.cdata[fname] = bytes(temp)
amfora.cmeta[fname]['st_size'] = len(amfora.cdata[fname])
amfora.release(fname, 0)
logger.log("INFO", "ShuffleServer_run()", "shuffle finished")
self.server.shutdown(socket.SHUT_RDWR)
self.server.close()
self.server=None
'''
class ShuffleThread(threading.Thread):
def __init__(self):
global misc
threading.Thread.__init__(self)
self.queue = queue.Queue()
self.nextip = misc.nextip()
self.sock = None
self.psize = 16
self.bufsize = 1048576
def init_port(self, ip, port):
global logger
logger.log("INFO", "ShuffleThread_init_port", "connecting to "+ip+":"+str(port))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connected = 0
while connected == 0:
try:
sock.connect((ip, port))
except socket.error:
logger.log("ERROR", "ShuffleThread_init_port", "connect "+ip+" failed, try again")
sleep(1)
continue
else:
connected = 1
logger.log("INFO", "ShuffleThread_init_port", "connected to "+ip+":"+str(port))
return sock
def one_sided_sendpacket(self, packet):
global logger
global localip
global amfora
logger.log("INFO", "ShuffleThread_one_sided_sendpacket()", packet.path+" "+packet.op)
try:
#dump packet into binary format
bpacket = pickle.dumps(packet)
#get the packet size
length = len(bpacket)
logger.log("INFO", "ShuffleThread.one_sided_sendpacket()", "ready to send "+str(length)+" bytes")
#padding the length of the packet to a 16-byte number
slength = str(length)
while len(slength) < self.psize:
slength = slength + '\0'
#send the length, and wait for an ack
self.sock.send(bytes(slength, 'utf-8'))
self.sock.recv(1)
#send the bpacket data
sent = 0
while sent < length:
if length - sent > self.bufsize:
sent_iter = self.sock.send(bpacket[sent:sent+self.bufsize])
else:
sent_iter = self.sock.send(bpacket[sent:])
sent = sent + sent_iter
logger.log("INFO", "ShuffleThread.one_sided_sendpacket()", "send "+str(sent_iter)+" bytes")
logger.log("INFO", "ShuffleThread.one_sided_sendpacket()", "totally send "+str(sent)+" bytes")
except socket.error as msg:
logger.log("ERROR", "ShuffleThread.one_sided_sendpacket()", "Socket Exception: "+str(msg))
except Exception as msg:
logger.log("ERROR", "ShuffleThread.one_sided_sendpacket()", "Other Exception: "+str(msg))
def push(self, packet):
self.queue.put(packet, True, None)
def run(self):
global logger
#self.sock = self.init_port(self.nextip, 55003)
while True:
packet = self.queue.get(True, None)
logger.log("INFO", "ShuffleThread", "Sending pakcet to "+self.nextip)
tcpclient = TCPClient()
tcpclient.one_sided_sendpacket(packet, 55003)
#self.one_sided_sendpacket(packet)
#del packet
class TCPserver(threading.Thread):
def __init__(self, workerid, port):
threading.Thread.__init__(self)
self.id = workerid
self.host = ''
self.port = port
self.psize = 16
self.bufsize = 1048576
self.server = None
def open_socket(self):
global logger
try:
logger.log("INFO", "TCPserver_opensocket", "Open server socket")
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind((self.host, self.port))
self.server.listen(5)
except socket.error as msg:
logger.log("ERROR", "TCPserver_opensocket", msg)
self.server = None
def run(self):
global logger
global amfora
global tcpqueue
global localip
self.open_socket()
while True:
conn, addr = self.server.accept()
try:
data = conn.recv(self.psize)
length = int(data.decode('utf8').strip('\0'))
logger.log("INFO", "TCPServer.run()", "ready to receive "+str(length)+" bytes")
conn.send(bytes('0', 'utf-8'))
rect = 0
bpacket = b''
while rect < length:
if length - rect > self.bufsize:
temp = conn.recv(self.bufsize)
else:
temp = conn.recv(length-rect)
rect = rect + len(temp)
bpacket = bpacket+temp
logger.log("INFO", "TCPServer.run()", "receive "+str(len(temp))+" bytes")
logger.log("INFO", "TCPServer.run()", "totally receive "+str(len(bpacket))+" bytes")
except socket.error:
logger.log("ERROR", "TCPserver_run", "socket exception when receiving message "+str(socket.error))
break
packet = pickle.loads(bpacket)
logger.log("INFO", "TCPserver_run", "received: "+packet.op+" "+packet.path+" "+str(packet.tlist))
tcpqueue.put([conn, packet], True, None)
class TCPworker(threading.Thread):
def __init__(self, workerid):
threading.Thread.__init__(self)
self.id = workerid
self.psize = 16
self.bufsize = 1048576
def sendpacket(self, sock, packet):
logger.log("INFO", "TCPWorker.sendpacket()", "sending packet to "+str(packet.tlist))
try:
#dump packet into binary format
bpacket = pickle.dumps(packet)
#get the packet size
length = len(bpacket)
logger.log("INFO", "TCPworker.sendpacket()", "ready to send "+str(length)+" bytes")
#padding the length of the packet to a 16-byte number
slength = str(length)
while len(slength) < self.psize:
slength = slength + '\0'
#send the length, and wait for an ack
sock.send(bytes(slength, 'utf-8'))
sock.recv(1)
#send the bpacket data
sent = 0
while sent < length:
if length - sent > self.bufsize:
sent_iter = sock.send(bpacket[sent:sent+self.bufsize])
else:
sent_iter = sock.send(bpacket[sent:])
sent = sent + sent_iter
logger.log("INFO", "TCPworker.sendpacket()", "send "+str(sent_iter)+" bytes")
logger.log("INFO", "TCPworker.sendpacket()", "totally send "+str(sent)+" bytes")
except socket.error as msg:
logger.log("ERROR", "TCPworker.sendpacket()", "Socket Exception: "+str(msg))
except Exception as msg:
logger.log("ERROR", "TCPworker.sendpacket()", "Other Exception: "+str(msg))
finally:
return sent
def run(self):
global logger
global tcpqueue
global localip
global amfora
while True:
conn, packet = tcpqueue.get(True, None)
if packet.op == 'CREATE':
filename = packet.path
mode = packet.misc
remoteip, remoteport = conn.getpeername()
ret = amfora.local_create(filename, mode, remoteip)
p = Packet(packet.path, packet.op, None, None, 0, [remoteip], None)
self.sendpacket(conn, p)
conn.close()
elif packet.op == 'RELEASE':
filename = packet.path
ret = amfora.local_release(filename, packet.meta)
remoteip, remoteport = conn.getpeername()
p = Packet(packet.path, packet.op, None, None, 0, [remoteip], None)
self.sendpacket(conn, p)
conn.close()
elif packet.op == 'READ':
filename = packet.path
remoteip, remoteport = conn.getpeername()
ret = amfora.local_read(filename, packet.misc[0], packet.misc[1])
p = Packet(packet.path, packet.op, None, ret, 0, [remoteip], None)
self.sendpacket(conn, p)
conn.close()
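#Note added for clarity: several branches below (COPY, GETXATTR, CHOWN,
#TRUNCATE, SYMLINK, READLINK, RENAME, INSERTMETA, APPENDDATA, UPDATE) still
#appear to reference names from an earlier line-based protocol (el, ramdisk);
#if one of those ops reaches this worker, a NameError is likely.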
elif packet.op == 'COPY':
key = el[0]
ret = ramdisk.data[key]
data = pickle.dumps(ret)
length = len(data)
dsize = str(length)
while len(dsize) < 10:
dsize = dsize + '\0'
conn.send(bytes(dsize, 'utf8'))
conn.recv(1)
tcp_big = TCP_big()
tcp_big.send(conn, data, length)
conn.recv(1)
conn.close()
elif packet.op == 'GETATTR':
filename = packet.path
remoteip, remoteport = conn.getpeername()
ret = amfora.local_getattr(filename)
p = Packet(packet.path, packet.op, ret, None, 0, [remoteip], None)
self.sendpacket(conn, p)
conn.close()
elif packet.op == 'GETXATTR':
filename = el[0]
ret = None
if filename in ramdisk.files:
ret = ramdisk.files[filename].get('attrs', {})
conn.send(pickle.dumps(ret))
elif packet.op == 'CHMOD':
filename = packet.path
mode = packet.misc
ret = amfora.local_chmod(filename, mode)
remoteip, remoteport = conn.getpeername()
p = Packet(packet.path, packet.op, ret, None, 0, [remoteip], None)
self.sendpacket(conn, p)
conn.close()
elif packet.op == 'CHOWN':
filename = el[0]
uid = int(el[2])
gid = int(el[3])
ret = ramdisk.local_chown(filename, uid, gid)
conn.send(bytes(str(ret), "utf8"))
conn.close()
elif packet.op == 'TRUNCATE':
filename = el[0]
length = int(el[2])
ramdisk.local_truncate(filename, length)
conn.send(bytes(str(0), "utf8"))
conn.close()
elif packet.op == 'READDIR':
path = packet.path
remoteip, remoteport = conn.getpeername()
tcpclient = TCPClient()
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "READDIR", "reading dir: "+path+" failed")
rpacket.tlist = [remoteip]
tcpclient.one_sided_sendpacket(rpacket, 55001)
conn.close()
elif packet.op == 'RMDIR':
path = packet.path
remoteip, remoteport = conn.getpeername()
tcpclient = TCPClient()
rpacket = tcpclient.sendallpacket(packet)
rpacket.tlist = [remoteip]
tcpclient.one_sided_sendpacket(rpacket, 55001)
conn.close()
elif packet.op == 'MKDIR':
path = packet.path
mode = packet.misc
remoteip, remoteport = conn.getpeername()
tcpclient = TCPClient()
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "MKDIR", "creating dir: "+path+" failed")
rpacket.tlist = [remoteip]
tcpclient.one_sided_sendpacket(rpacket, 55001)
conn.close()
elif packet.op == 'UNLINK':
path = packet.path
remoteip, remoteport = conn.getpeername()
ret = amfora.local_unlink(path)
if not ret:
logger.log("ERROR", "UNLINK", "unlinking "+path+" failed")
p = Packet(packet.path, packet.op, ret, {}, 0, [remoteip], None)
self.sendpacket(conn, p)
conn.close()
elif packet.op == 'REMOVE':
path = packet.path
remoteip, remoteport = conn.getpeername()
ret = amfora.local_remove(path)
if ret != 0:
logger.log("ERROR", "REMOVE", "removing "+path+" failed")
p = Packet(packet.path, packet.op, {}, {}, ret, [remoteip], None)
self.sendpacket(conn, p)
conn.close()
elif packet.op == 'RMMETA':
path = packet.path
remoteip, remoteport = conn.getpeername()
ret = amfora.local_rmmeta(path)
if ret != 0:
logger.log("ERROR", "REMOVE", "removing "+path+" failed")
p = Packet(packet.path, packet.op, {}, {}, ret, [remoteip], None)
self.sendpacket(conn, p)
conn.close()
elif packet.op == 'SYMLINK':
path = el[0]
source = el[2]
remoteip, remoteport = conn.getpeername()
ramdisk.local_symlink(path, source, remoteip)
conn.send(bytes(str(0), "utf8"))
conn.close()
elif packet.op == 'READLINK':
path = el[0]
data = ramdisk.local_readlink(path)
conn.send(bytes(data, "utf8"))
conn.close()
elif packet.op == 'RENAME':
old = el[0]
new = el[2]
data = ramdisk.local_rename(old, new)
conn.send(bytes(str(data), "utf8"))
conn.close()
elif packet.op == 'INSERTMETA':
path = el[0]
msize = int(el[2])
#print("INSERTMETA: size: "+str(msize))
conn.send(bytes('0', 'utf8'))
data = conn.recv(msize)
meta = pickle.loads(data)
#print("INSERTMETA: meta: "+str(meta))
data = ramdisk.local_insert(path, meta)
conn.send(bytes(str(data), "utf8"))
conn.close()
elif packet.op == 'APPENDDATA':
path = el[0]
msize = int(el[2])
offset = int(el[3])
data = conn.recv(msize)
content = pickle.loads(data)
data = ramdisk.local_append(path, offset, content)
conn.send(bytes(str(0), "utf8"))
conn.close()
elif packet.op == 'UPDATE':
path = el[0]
msize = int(el[2])
conn.send(bytes('0', 'utf8'))
temp = b''
while len(temp) < msize:
data = conn.recv(msize-len(temp))
temp = temp + data
meta = pickle.loads(temp)
ret = ramdisk.local_updatelocation(path, meta)
conn.send(bytes(str(ret), "utf8"))
conn.close()
elif packet.op == 'MULTICAST':
path = packet.path
remoteip, remoteport = conn.getpeername()
tcpclient = TCPClient()
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "MULTICAST", "multicasting file: "+path+" failed")
rpacket.tlist = [remoteip]
tcpclient.one_sided_sendpacket(rpacket, 55001)
conn.close()
elif packet.op == 'GATHER':
path = packet.path
remoteip, remoteport = conn.getpeername()
tcpclient = TCPClient()
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "GATHER", "gathering dir: "+path+" failed")
rpacket.tlist = [remoteip]
tcpclient.one_sided_sendpacket(rpacket, 55001)
conn.close()
elif packet.op == 'SCATTER':
path = packet.path
remoteip, remoteport = conn.getpeername()
#keep part of the data local
num_files = math.ceil(len(packet.meta)/len(packet.tlist))
logger.log("INFO", "SCATTER", "This node keeps "+str(num_files)+" files")
rmeta = dict()
for i in range(num_files):
k = packet.misc.pop()
v = packet.meta.pop(k)
amfora.meta[k] = v
amfora.meta[k]['location'] = localip
amfora.data[k] = packet.data.pop(k)
amfora.cdata[k] = amfora.data[k]
rmeta[k] = amfora.meta[k]
logger.log("INFO", "SCATTER", "This node keeps "+k)
tcpclient = TCPClient()
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "SCATTER", "scattering dir: "+path+" failed")
rpacket.tlist = [remoteip]
rpacket.meta.update(rmeta)
tcpclient.one_sided_sendpacket(rpacket, 55001)
conn.close()
elif packet.op == 'SHUFFLE_START':
path = packet.path
dst = packet.misc[1]
remoteip, remoteport = conn.getpeername()
tcpclient = TCPClient()
logger.log("INFO", "TCPserver_shuffle_start", str(packet.misc))
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "SHUFFLE_SHUFFLE_START", "start shuffling from "+path+" to "+dst+" failed")
rpacket.tlist = [remoteip]
tcpclient.one_sided_sendpacket(rpacket, 55001)
conn.close()
elif packet.op == 'SHUFFLE':
path = packet.path
dst = packet.misc
remoteip, remoteport = conn.getpeername()
tcpclient = TCPClient()
logger.log("INFO", "TCPserver_shuffle", str(packet.misc))
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "SHUFFLE", "shuffling from "+path+" to "+dst+" failed")
rpacket.tlist = [remoteip]
tcpclient.one_sided_sendpacket(rpacket, 55001)
conn.close()
elif packet.op == 'EXECUTE':
path = packet.path
remoteip, remoteport = conn.getpeername()
tcpclient = TCPClient()
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "EXECUTE", "Executing failed")
rpacket.tlist = [remoteip]
tcpclient.one_sided_sendpacket(rpacket, 55001)
conn.close()
elif packet.op == 'LOAD':
path = packet.path
remoteip, remoteport = conn.getpeername()
tcpclient = TCPClient()
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "LOAD", "Loading failed")
rpacket.tlist = [remoteip]
tcpclient.one_sided_sendpacket(rpacket, 55001)
conn.close()
elif packet.op == 'DUMP':
path = packet.path
remoteip, remoteport = conn.getpeername()
tcpclient = TCPClient()
rpacket = tcpclient.sendallpacket(packet)
if rpacket.ret != 0:
logger.log("ERROR", "DUMP", "Dumping failed")
rpacket.tlist = [remoteip]
tcpclient.one_sided_sendpacket(rpacket, 55001)
conn.close()
else:
logger.log("ERROR", "TCPserver.run()", "Invalid op "+packet.op)
class Interfaceserver(threading.Thread):
def __init__(self, workerid, port):
threading.Thread.__init__(self)
self.id = workerid
self.host = ''
self.port = port
self.psize = 16
self.bufsize = 1048576
self.server = None
def open_socket(self):
global logger
try:
logger.log("INFO", "Interfaceserver_opensocket", "Open server socket")
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind((self.host, self.port))
self.server.listen(5)
except socket.error as msg:
logger.log("ERROR", "Interfaceserver_opensocket", msg)
self.server = None
def sendpacket(self, sock, packet):
logger.log("INFO", "Interfaceserver_sendpacket()", "sending packet to "+str(packet.tlist))
try:
#dump packet into binary format
bpacket = pickle.dumps(packet)
#get the packet size
length = len(bpacket)
logger.log("INFO", "Interfaceserver_sendpacket()", "ready to send "+str(length)+" bytes")
#padding the length of the packet to a 16-byte number
slength = str(length)
while len(slength) < self.psize:
slength = slength + '\0'
#send the length, and wait for an ack
sock.send(bytes(slength, 'utf-8'))
sock.recv(1)
#send the bpacket data
sent = 0
while sent < length:
if length - sent > self.bufsize:
sent_iter = sock.send(bpacket[sent:sent+self.bufsize])
else:
sent_iter = sock.send(bpacket[sent:])
sent = sent + sent_iter
logger.log("INFO", "Interfaceserver_sendpacket()", "send "+str(sent_iter)+" bytes")
logger.log("INFO", "Interfaceserver_sendpacket()", "totally send "+str(sent)+" bytes")
except socket.error as msg:
logger.log("ERROR", "Interfaceserver__sendpacket()", "Socket Exception: "+str(msg))
except Exception as msg:
logger.log("ERROR", "Interfaceserver_sendpacket()", "Other Exception: "+str(msg))
finally:
return sent
def run(self):
global logger
global amfora
#global executor
self.open_socket()
while True:
conn, addr = self.server.accept()
try:
data = conn.recv(self.bufsize)
except socket.error:
logger.log("ERROR", "Interfaceserver_run", "socket exception when receiving message "+str(socket.error))
break
msg = data.decode("utf8").strip()
logger.log("INFO", "Interfaceserver_run", "received: "+str(msg))
el = msg.split('#')
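#Illustrative examples (hypothetical paths and algorithm token): the interface
#protocol is a '#'-separated string "<path>#<OP>#<args...>", e.g.
#"/data#MULTI#mst", "/data#GATHER#mst" or "/data#SHUFFLE#mst#/shuffled".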
if el[1] == 'MULTI':
path = el[0]
algo = el[2]
ret = amfora.multicast(path, algo)
self.sendpacket(conn, ret)
conn.close()
elif el[1] == 'GATHER':
path = el[0]
algo = el[2]
ret, ddict, mdict = amfora.gather(path, algo)
self.sendpacket(conn, ret)
conn.close()
elif el[1] == 'ALLGATHER':
path = el[0]
algo = el[2]
ret = amfora.allgather(path, algo)
self.sendpacket(conn, ret)
conn.close()
elif el[1] == 'SCATTER':
path = el[0]
algo = el[2]
ret = amfora.scatter(path, algo)
self.sendpacket(conn, ret)
conn.close()
elif el[1] == 'SHUFFLE':
path = el[0]
algo = el[2]
dst = el[3]
ret = amfora.shuffle(path, algo, dst)
self.sendpacket(conn, ret)
conn.close()
elif el[1] == 'EXECUTE':
ret = amfora.execute()
self.sendpacket(conn, ret)
conn.close()
elif el[1] == 'STATE':
ret = executor.state()
self.sendpacket(conn, ret)
conn.close()
elif el[1] == 'LOAD':
src = el[0]
dst = el[2]
ret = amfora.load(src, dst)
self.sendpacket(conn, ret)
conn.close()
elif el[1] == 'DUMP':
src = el[0]
dst = el[2]
ret = amfora.dump(src, dst)
self.sendpacket(conn, ret)
conn.close()
elif el[1] == 'TYPE':
src = el[0]
typedef = el[2]
ret = amfora.type(src, typedef)
rpacket = Packet(src, typedef, None, None, ret, None, None)
self.sendpacket(conn, rpacket)
conn.close()
class Misc():
def __init__(self):
pass
def findserver(self, fname):
global slist
value = zlib.adler32(bytes(fname, 'utf8')) & 0xffffffff
return slist[value%(len(slist))]
def hash(self, fname):
return zlib.adler32(bytes(fname, 'utf8')) & 0xffffffff
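#Illustrative example (hypothetical file name): findserver() implements static
#hash placement. With four servers, findserver('/data/part-00001') returns
#slist[(zlib.adler32(b'/data/part-00001') & 0xffffffff) % 4], so a given file
#name always maps to the same metadata server.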
def partition_list(self, slist):
tlist = []
global localip
vlist = list(slist)
vlist.remove(localip)
while len(vlist) > 0:
temp = []
for i in range(math.ceil(len(vlist)/2)):
ip = vlist.pop()
temp.append(ip)
tlist.append(temp)
return tlist
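#Illustrative example (assuming 8 servers with localip at the end of slist):
#partition_list() repeatedly halves the remaining servers, yielding something
#like [[ip6, ip5, ip4, ip3], [ip2, ip1], [ip0]]. Each sublist is one subtree
#of the broadcast tree; its first element is the immediate child that forwards
#the packet to the rest of its sublist.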
def nextip(self):
global localip
global slist
index = slist.index(localip)
if index+1 == len(slist):
nextindex = 0
else:
nextindex = index + 1
nextip = slist[nextindex]
return nextip
def reorderip(self):
global localip
global slist
rlist = []
index = slist.index(localip)
for i in range(len(slist)-1):
newindex = index+i+1
if newindex >= len(slist):
newindex = newindex - len(slist)
rlist.append(slist[newindex])
return rlist
def shuffle(self, hlist):
#hlist is a list containing the hash values in local storage
global logger
global amfora
global slist
hdict = defaultdict(bytes)
for ip in slist:
hdict[ip] = bytearray()
for h in hlist:
if h in amfora.data and h in amfora.cdata:
bdata = amfora.data[h]
lines = bdata.split(b'\n')
logger.log("INFO", "MISC_shuffle()", "lines: "+str(len(lines)))
for line in lines[:len(lines)-1]:
#change to regular expression in next release
#m = re.match(b"(?P<key>\w+)\s+(?P<value>\w+)", line)
#k = m.group('key')
#v = m.group('value')
k, v = line.split()
#logger.log("INFO", "MISC_shuffle()", "key: "+k.decode('utf8')+" values: "+v.decode('utf8'))
value = zlib.adler32(k) & 0xffffffff
ip = slist[value%len(slist)]
hdict[ip].extend(line+b'\n')
else:
logger.log("ERROR", "MISC_shuffle()", "hash value "+str(h)+" is not in local storage")
return hdict
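#Illustrative example (hypothetical record): shuffle() acts like the shuffle
#phase of MapReduce over "<key> <value>" lines. A line such as b"apple 3" is
#routed to slist[(zlib.adler32(b"apple") & 0xffffffff) % len(slist)], so all
#records sharing a key end up on the same node.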
def readtask(self):
fd = open("/tmp/amfora-task.txt", 'r')
lines = fd.readlines()
taskl = []
for l in lines:
task = Task(l.strip('\n'))
taskl.append(task)
return taskl
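#Illustrative example (hypothetical contents): /tmp/amfora-task.txt holds one
#shell command per line, each becoming a Task run by the Executor, e.g.:
# grep -c amfora /tmp/amfora/data/part-0 > /tmp/amfora/out/part-0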
def to_column_table(self, data):
ddict = defaultdict(bytes)
rows = data.split(b'\n')
num_rows = len(rows)-1
if len(rows) == 0:
return ddict
columns = len(rows[0].split(b'\t'))
for i in range(columns):
ddict[str(i)] = bytearray()
row_tag = 0
for row in rows[:len(rows)-1]:
row_tag = row_tag+1
elist = row.split(b'\t')
for i in range(len(elist)):
ddict[str(i)].extend(elist[i]+b'\n')
#if row_tag < num_rows:
# ddict[str(i)].extend(elist[i]+b'\t')
#else:
# ddict[str(i)].extend(elist[i]+b'\n')
return ddict
def to_row_table(self, data):
global slist
ddict = defaultdict(bytes)
rows = data.split(b'\n')
if len(rows) == 0:
return ddict
num_rows = math.ceil((len(rows)-1)/len(slist))
for i in range(len(slist)):
ddict[str(i)] = bytearray()
for i in range(len(rows)-1):
key = str(int(i/num_rows))
ddict[key].extend(rows[i]+b'\n')
return ddict
def to_column_matrix(self, data):
global slist
ddict = defaultdict(bytes)
rows = data.split(b'\n')
if len(rows) == 0:
return ddict
columns = len(rows[0].split(b'\t'))
num_col = math.ceil(columns/len(slist))
for i in range(len(slist)):
ddict[str(i)] = bytearray()
for row in rows[:len(rows)-1]:
elist = row.strip(b'\n').split(b'\t')
row_tag = 0
for i in range(len(elist)):
key = str(int(i/num_col))
ddict[key].extend(elist[i])
row_tag = row_tag+1
if row_tag < num_col:
ddict[key].extend(b'\t')
else:
ddict[key].extend(b'\n')
row_tag = 0
return ddict
def to_row_matrix(self, data):
global slist
ddict = defaultdict(bytes)
rows = data.split(b'\n')
if len(rows) == 0:
return ddict
num_rows = math.ceil((len(rows)-1)/len(slist))
for i in range(len(slist)):
ddict[str(i)] = bytearray()
for i in range(len(rows)-1):
key = str(int(i/num_rows))
ddict[key].extend(rows[i]+b'\n')
return ddict
def to_tile_matrix(self, data):
global slist
ddict = defaultdict(bytes)
rows = data.split(b'\n')
if len(rows) == 0:
return ddict
sqrt_num_files = int(math.sqrt(len(slist)))
for i in range(sqrt_num_files):
for j in range(sqrt_num_files):
ddict[str(i)+str(j)] = bytearray()
num_rows = int(len(rows)/sqrt_num_files)
columns = rows[0].split(b'\t')
num_columns = int(len(columns)/sqrt_num_files)
for i in range(len(rows)-1):
elist = rows[i].split(b'\t')
column_tag = 0
for j in range(len(elist)):
key = str(int(i/num_rows))+str(int(j/num_columns))
ddict[key].extend(elist[j])
column_tag = column_tag+1
if column_tag < num_columns:
ddict[key].extend(b'\t')
else:
ddict[key].extend(b'\n')
column_tag = 0
return ddict
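#Illustrative example (assuming 4 servers): to_tile_matrix() splits a
#tab-separated matrix into sqrt(len(slist)) x sqrt(len(slist)) tiles, so a 4x4
#matrix becomes four 2x2 tiles keyed '00', '01', '10' and '11', where the
#first digit is the row-block index and the second the column-block index.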
class CollectiveThread(threading.Thread):
def __init__(self, server, rdict, packet):
threading.Thread.__init__(self)
self.server = server
self.rdict = rdict
self.packet = packet
self.bufsize = 1048576
self.psize = 16
def run(self):
global logger
global localip
global amfora
logger.log("INFO", "CollThread_run()", "thread started")
while True:
#return once all immediate children have returned
summ = sum(self.rdict.values())
if summ == len(self.rdict):
break
#if this is the leaf node
#the above code can check leaf too
#if len(self.packet.tlist)==1 and self.packet.tlist[0]==localip:
# pass
else:
try:
conn, addr = self.server.accept()
peer = conn.getpeername()[0]
data = conn.recv(self.psize)
length = int(data.decode('utf8').strip('\0'))
logger.log("INFO", "CollThread_run()", "ready to receive "+str(length)+" bytes")
conn.send(bytes('0', 'utf8'))
data = b''
rect = 0
while rect < length:
if length - rect > self.bufsize:
temp = conn.recv(self.bufsize)
else:
temp = conn.recv(length-rect)
rect = rect + len(temp)
data = data + temp
logger.log("INFO", "CollThread_run()", "receive "+str(len(temp))+" bytes")
logger.log("INFO", "CollThread_run()", "totally receive "+str(len(data))+" bytes")
conn.close()
tp = pickle.loads(data)
self.packet.meta.update(tp.meta)
self.packet.data.update(tp.data)
self.packet.ret = self.packet.ret | tp.ret
except socket.error as msg:
logger.log("ERROR", "CollThread_run()", "Socket Exception: "+str(msg))
except Exception as msg:
logger.log("ERROR", "CollThread_run()", "Other Exception: "+str(msg))
finally:
conn.close()
self.rdict[peer] = 1
if self.packet.op == "MKDIR":
ret = amfora.local_mkdir(self.packet.path, self.packet.misc)
#mkdir raises FuseOSError(ENOENT) if parent dir does not exist
self.packet.ret = self.packet.ret | ret
logger.log("INFO", "CollThread_run()", self.packet.op+" "+self.packet.path+" finished")
elif self.packet.op == "READDIR":
ret = amfora.local_readdir(self.packet.path, self.packet.misc)
self.packet.meta.update(ret)
self.packet.ret = self.packet.ret | 0
logger.log("INFO", "CollThread_run()", self.packet.op+" "+self.packet.path+" finished")
elif self.packet.op == "RMDIR":
ret = amfora.local_rmdir(self.packet.path)
self.packet.misc.extend(ret)
self.packet.ret = self.packet.ret | 0
logger.log("INFO", "CollThread_run()", self.packet.op+" "+self.packet.path+" finished")
elif self.packet.op == "MULTICAST":
amfora.meta.update(self.packet.meta)
amfora.data.update(self.packet.data)
self.packet.ret = self.packet.ret | 0
logger.log("INFO", "CollThread_run()", self.packet.op+" "+self.packet.path+" finished")
elif self.packet.op == "GATHER":
if localip in self.packet.misc:
for k in self.packet.misc[localip]:
self.packet.data[k] = amfora.data[k]
self.packet.ret = self.packet.ret | 0
logger.log("INFO", "CollThread_run()", self.packet.op+" "+self.packet.path+" finished")
elif self.packet.op == "SCATTER":
self.packet.ret = self.packet.ret | 0
#self.packet.meta = {}
self.packet.data = {}
logger.log("INFO", "CollThread_run()", self.packet.op+" "+self.packet.path+" finished")
elif self.packet.op == "SHUFFLE_START":
self.packet.ret = self.packet.ret | 0
self.packet.meta = {}
self.packet.data = {}
logger.log("INFO", "CollThread_run()", self.packet.op+" "+self.packet.path+" finished")
elif self.packet.op == "SHUFFLE":
self.packet.ret = self.packet.ret | 0
self.packet.meta = {}
self.packet.data = {}
logger.log("INFO", "CollThread_run()", self.packet.op+" "+self.packet.path+" finished")
elif self.packet.op == "EXECUTE":
self.packet.ret = self.packet.ret | 0
logger.log("INFO", "CollThread_run()", self.packet.op+" "+self.packet.path+" finished")
elif self.packet.op == "DUMP":
self.packet.ret = self.packet.ret | 0
logger.log("INFO", "CollThread_run()", self.packet.op+" "+self.packet.path+" finished")
elif self.packet.op == "LOAD":
self.packet.ret = self.packet.ret | 0
self.packet.misc = None
logger.log("INFO", "CollThread_run()", self.packet.op+" "+self.packet.path+" finished")
else:
logger.log("ERROR", "CollThread_run()", "operation: "+self.packet.op+" not supported")
class Packet():
def __init__(self, path, op, meta, data, ret, tlist, misc):
'''
The packet class defines the packet format used for inter-node communication.
self.path [string] specifies the file name that is being operated on
self.op [string] specifies the operation
self.meta [dict] specifies the metadata that needs to be transferred for this operation
self.data [defaultdict(bytes)] specifies the file data that needs to be transferred for this operation
self.ret [int] specifies the return value of the operation
self.tlist [string[]] specifies the targets that this packet is being routed to
self.misc [dynamic] specifies the operation-specific parameters
'''
self.path = path
self.op = op
self.meta = meta
self.data = data
self.ret = ret
self.tlist = tlist
self.misc = misc
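#Illustrative example (hypothetical values): a GETATTR request for
#'/data/part-0' sent to a single node could be built as
#Packet('/data/part-0', 'GETATTR', None, None, 0, ['192.168.0.2'], None);
#the worker replies with a packet whose meta carries the attribute dict and
#whose tlist points back at the requester.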
class Task():
def __init__(self, desc):
self.queuetime = time()
self.starttime = None
self.endtime = None
self.desc = desc
self.ret = None
tempkey = self.desc+str(self.queuetime)
self.key = zlib.adler32(bytes(tempkey, 'utf8')) & 0xffffffff
class Executor():
def __init__(self, tlist):
self.queue = []
self.fqueue = []
self.smap = {}
self.emap = {}
self.readyqueue = queue.Queue()
self.fmap = {} #key-file, value-task
self.tlist = tlist
for t in tlist:
self.readyqueue.put(t, True, None)
def run(self):
global logger
global mountpoint
logger.log('INFO', 'Executor_run', 'executor started')
while True:
if self.readyqueue.empty():
break
task = self.readyqueue.get(True, None)
logger.log('INFO', 'Executor_run', 'running task: '+task.desc)
task.starttime = time()
p = subprocess.Popen(task.desc, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
task.endtime = time()
task.ret = p.returncode
self.smap[task.desc+" "+str(task.key)] = task.ret
if task.ret != 0:
self.emap[task.desc+" "+str(task.key)] = stderr
#self.fqueue.append(task)
logger.log('INFO', 'Executor_run', 'finishing task: '+task.desc)
logger.log('INFO', 'Executor_run', 'all tasks finished')
if __name__ == '__main__':
if len(argv) != 4:
print(('usage: %s <mountpoint> <amfs.conf> <localip>' % argv[0]))
exit(1)
global logger
logger = Logger("/tmp/amfora-fuse.log")
global mountpoint
mountpoint = argv[1]
global localip
localip = argv[3]
global parentip
parentip = ''
global misc
misc = Misc()
global sdict
sdict = dict()
global slist
slist = []
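#Note (assumed format, inferred from the loop below): argv[2] (amfs.conf)
#lists one server IP per line; the line order defines slist and therefore the
#hash placement of files across servers.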
fd = open(argv[2], 'r')
while True:
line = fd.readline()
if not line:
break
ip = line.strip('\n').strip()
slist.append(ip)
logger.log("INFO", "main", "Metadata Server List: "+str(slist))
global amfora
amfora=Amfora()
global tcpqueue
tcpqueue = queue.Queue()
global shuffleserver
shuffleserver = ShuffleServer('Shuffleserver', 55003)
while not shuffleserver.is_alive():
shuffleserver.start()
global shufflethread
shufflethread = ShuffleThread()
tcpserver = TCPserver('TCPserver', 55000)
while not tcpserver.is_alive():
tcpserver.start()
#start two worker threads to avoid the deadlock generated by concurrent collective operations and POSIX operations
workerl = []
for i in range(2):
tcpworker = TCPworker('TCPworker'+str(i))
while not tcpworker.is_alive():
tcpworker.start()
workerl.append(tcpworker)
interfaceserver = Interfaceserver('Interfaceserver', 55002)
while not interfaceserver.is_alive():
interfaceserver.start()
fuse = FUSE(amfora, mountpoint, foreground=True, big_writes=True, direct_io=True)
| {
"content_hash": "89be3e0f712872e9631505a7d4063397",
"timestamp": "",
"source": "github",
"line_count": 2484,
"max_line_length": 163,
"avg_line_length": 39.231078904991946,
"alnum_prop": 0.5149615187275526,
"repo_name": "zhaozhang/amfora",
"id": "fa928ee69c7b040984d93722962959fae7d4132d",
"size": "97473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/amfora.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "269100"
},
{
"name": "Shell",
"bytes": "10107"
}
],
"symlink_target": ""
} |
"""
Talon.One API
Use the Talon.One API to integrate with your application and to manage applications and campaigns: - Use the operations in the [Integration API section](#integration-api) to integrate with our platform - Use the operations in the [Management API section](#management-api) to manage applications and campaigns. ## Determining the base URL of the endpoints The API is available at the same hostname as your Campaign Manager deployment. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerSession](https://docs.talon.one/integration-api/#operation/updateCustomerSessionV2) endpoint is `https://mycompany.talon.one/v2/customer_sessions/{Id}` # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class InlineResponse2006(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'total_result_size': 'int',
'data': 'list[Coupon]'
}
attribute_map = {
'total_result_size': 'totalResultSize',
'data': 'data'
}
def __init__(self, total_result_size=None, data=None, local_vars_configuration=None): # noqa: E501
"""InlineResponse2006 - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._total_result_size = None
self._data = None
self.discriminator = None
self.total_result_size = total_result_size
self.data = data
@property
def total_result_size(self):
"""Gets the total_result_size of this InlineResponse2006. # noqa: E501
:return: The total_result_size of this InlineResponse2006. # noqa: E501
:rtype: int
"""
return self._total_result_size
@total_result_size.setter
def total_result_size(self, total_result_size):
"""Sets the total_result_size of this InlineResponse2006.
:param total_result_size: The total_result_size of this InlineResponse2006. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and total_result_size is None: # noqa: E501
raise ValueError("Invalid value for `total_result_size`, must not be `None`") # noqa: E501
self._total_result_size = total_result_size
@property
def data(self):
"""Gets the data of this InlineResponse2006. # noqa: E501
:return: The data of this InlineResponse2006. # noqa: E501
:rtype: list[Coupon]
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this InlineResponse2006.
:param data: The data of this InlineResponse2006. # noqa: E501
:type: list[Coupon]
"""
if self.local_vars_configuration.client_side_validation and data is None: # noqa: E501
raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse2006):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, InlineResponse2006):
return True
return self.to_dict() != other.to_dict()
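# Minimal usage sketch (added for clarity; assumes the generated Coupon model
# from this package):
# page = InlineResponse2006(total_result_size=1, data=[some_coupon])
# page.to_dict() # -> {'total_result_size': 1, 'data': [...]}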
| {
"content_hash": "8f3ec1fcce90e85fca7356d31d067ecc",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 732,
"avg_line_length": 34.945205479452056,
"alnum_prop": 0.6072128577028616,
"repo_name": "talon-one/talon_one.py",
"id": "a498656a94b2acc545ede2acc624a3b9b0a6e573",
"size": "5119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "talon_one/models/inline_response2006.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "5139586"
},
{
"name": "Shell",
"bytes": "1826"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from tests import views
urlpatterns = patterns(
'',
url('^$', views.PostViewSet.as_view({'get': 'list'}), name='post-list')
)
| {
"content_hash": "e2a67dab660fda1b1376a9ea69b4aece",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 75,
"avg_line_length": 22.375,
"alnum_prop": 0.659217877094972,
"repo_name": "kevin-brown/drf-json-api",
"id": "c11a0852e935acaea9c6a960d8d870f7861133f5",
"size": "179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/namespace_urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1390"
},
{
"name": "Python",
"bytes": "68454"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| {
"content_hash": "43d0b32cdf299518348d9e722b6bfb9d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 77,
"avg_line_length": 35.55,
"alnum_prop": 0.6075949367088608,
"repo_name": "GrantHeaslip/ghweb",
"id": "1cf432333bf59c6ccc2447753d4da8318c7c39c9",
"size": "733",
"binary": false,
"copies": "4",
"ref": "refs/heads/development",
"path": "manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1114"
},
{
"name": "HTML",
"bytes": "4603"
},
{
"name": "Python",
"bytes": "8841"
},
{
"name": "Shell",
"bytes": "160"
}
],
"symlink_target": ""
} |
from functools import partial
from inspect import getmembers
import numpy as np
from .utils import check_indices
from ..fixes import _get_args
from ..parallel import parallel_func
from ..source_estimate import _BaseSourceEstimate
from ..epochs import BaseEpochs
from ..time_frequency.multitaper import (_mt_spectra, _compute_mt_params,
_psd_from_mt, _csd_from_mt,
_psd_from_mt_adaptive)
from ..time_frequency.tfr import morlet, cwt
from ..utils import logger, verbose, _time_mask, warn
from ..externals.six import string_types
########################################################################
# Various connectivity estimators
class _AbstractConEstBase(object):
"""ABC for connectivity estimators."""
def start_epoch(self):
raise NotImplementedError('start_epoch method not implemented')
def accumulate(self, con_idx, csd_xy):
raise NotImplementedError('accumulate method not implemented')
def combine(self, other):
raise NotImplementedError('combine method not implemented')
def compute_con(self, con_idx, n_epochs):
raise NotImplementedError('compute_con method not implemented')
class _EpochMeanConEstBase(_AbstractConEstBase):
"""Base class for methods that estimate connectivity as mean epoch-wise."""
def __init__(self, n_cons, n_freqs, n_times):
self.n_cons = n_cons
self.n_freqs = n_freqs
self.n_times = n_times
if n_times == 0:
self.csd_shape = (n_cons, n_freqs)
else:
self.csd_shape = (n_cons, n_freqs, n_times)
self.con_scores = None
def start_epoch(self): # noqa: D401
"""Called at the start of each epoch."""
pass # for this type of con. method we don't do anything
def combine(self, other):
"""Include con. accumated for some epochs in this estimate."""
self._acc += other._acc
class _CohEstBase(_EpochMeanConEstBase):
"""Base Estimator for Coherence, Coherency, Imag. Coherence."""
def __init__(self, n_cons, n_freqs, n_times):
super(_CohEstBase, self).__init__(n_cons, n_freqs, n_times)
# allocate space for accumulation of CSD
self._acc = np.zeros(self.csd_shape, dtype=np.complex128)
def accumulate(self, con_idx, csd_xy):
"""Accumulate CSD for some connections."""
self._acc[con_idx] += csd_xy
class _CohEst(_CohEstBase):
"""Coherence Estimator."""
name = 'Coherence'
def compute_con(self, con_idx, n_epochs, psd_xx, psd_yy):
"""Compute final con. score for some connections."""
if self.con_scores is None:
self.con_scores = np.zeros(self.csd_shape)
csd_mean = self._acc[con_idx] / n_epochs
self.con_scores[con_idx] = np.abs(csd_mean) / np.sqrt(psd_xx * psd_yy)
class _CohyEst(_CohEstBase):
"""Coherency Estimator."""
name = 'Coherency'
def compute_con(self, con_idx, n_epochs, psd_xx, psd_yy):
"""Compute final con. score for some connections."""
if self.con_scores is None:
self.con_scores = np.zeros(self.csd_shape,
dtype=np.complex128)
csd_mean = self._acc[con_idx] / n_epochs
self.con_scores[con_idx] = csd_mean / np.sqrt(psd_xx * psd_yy)
class _ImCohEst(_CohEstBase):
"""Imaginary Coherence Estimator."""
name = 'Imaginary Coherence'
def compute_con(self, con_idx, n_epochs, psd_xx, psd_yy):
"""Compute final con. score for some connections."""
if self.con_scores is None:
self.con_scores = np.zeros(self.csd_shape)
csd_mean = self._acc[con_idx] / n_epochs
self.con_scores[con_idx] = np.imag(csd_mean) / np.sqrt(psd_xx * psd_yy)
class _PLVEst(_EpochMeanConEstBase):
"""PLV Estimator."""
name = 'PLV'
def __init__(self, n_cons, n_freqs, n_times):
super(_PLVEst, self).__init__(n_cons, n_freqs, n_times)
# allocate accumulator
self._acc = np.zeros(self.csd_shape, dtype=np.complex128)
def accumulate(self, con_idx, csd_xy):
"""Accumulate some connections."""
self._acc[con_idx] += csd_xy / np.abs(csd_xy)
def compute_con(self, con_idx, n_epochs):
"""Compute final con. score for some connections."""
if self.con_scores is None:
self.con_scores = np.zeros(self.csd_shape)
plv = np.abs(self._acc / n_epochs)
self.con_scores[con_idx] = plv
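# Added note: this is the standard phase locking value,
# PLV = |(1/N) * sum_n Sxy_n / |Sxy_n||, i.e. the magnitude of the
# epoch-averaged unit-normalized cross-spectrum.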
class _PLIEst(_EpochMeanConEstBase):
"""PLI Estimator."""
name = 'PLI'
def __init__(self, n_cons, n_freqs, n_times):
super(_PLIEst, self).__init__(n_cons, n_freqs, n_times)
# allocate accumulator
self._acc = np.zeros(self.csd_shape)
def accumulate(self, con_idx, csd_xy):
"""Accumulate some connections."""
self._acc[con_idx] += np.sign(np.imag(csd_xy))
def compute_con(self, con_idx, n_epochs):
"""Compute final con. score for some connections."""
if self.con_scores is None:
self.con_scores = np.zeros(self.csd_shape)
pli_mean = self._acc[con_idx] / n_epochs
self.con_scores[con_idx] = np.abs(pli_mean)
class _PLIUnbiasedEst(_PLIEst):
"""Unbiased PLI Square Estimator."""
name = 'Unbiased PLI Square'
def compute_con(self, con_idx, n_epochs):
"""Compute final con. score for some connections."""
if self.con_scores is None:
self.con_scores = np.zeros(self.csd_shape)
pli_mean = self._acc[con_idx] / n_epochs
# See Vinck paper Eq. (30)
con = (n_epochs * pli_mean ** 2 - 1) / (n_epochs - 1)
self.con_scores[con_idx] = con
class _WPLIEst(_EpochMeanConEstBase):
"""WPLI Estimator."""
name = 'WPLI'
def __init__(self, n_cons, n_freqs, n_times):
super(_WPLIEst, self).__init__(n_cons, n_freqs, n_times)
# store both imag(csd) and abs(imag(csd))
acc_shape = (2,) + self.csd_shape
self._acc = np.zeros(acc_shape)
def accumulate(self, con_idx, csd_xy):
"""Accumulate some connections."""
im_csd = np.imag(csd_xy)
self._acc[0, con_idx] += im_csd
self._acc[1, con_idx] += np.abs(im_csd)
def compute_con(self, con_idx, n_epochs):
"""Compute final con. score for some connections."""
if self.con_scores is None:
self.con_scores = np.zeros(self.csd_shape)
num = np.abs(self._acc[0, con_idx])
denom = self._acc[1, con_idx]
# handle zeros in denominator
z_denom = np.where(denom == 0.)
denom[z_denom] = 1.
con = num / denom
# where we had zeros in denominator, we set con to zero
con[z_denom] = 0.
self.con_scores[con_idx] = con
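# Added note: this matches the weighted PLI of Vinck et al. (2011),
# WPLI = |E[Im(Sxy)]| / E[|Im(Sxy)|], computed from the running sums of
# Im(csd) and |Im(csd)| accumulated across epochs.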
class _WPLIDebiasedEst(_EpochMeanConEstBase):
"""Debiased WPLI Square Estimator."""
name = 'Debiased WPLI Square'
def __init__(self, n_cons, n_freqs, n_times):
super(_WPLIDebiasedEst, self).__init__(n_cons, n_freqs, n_times)
# store imag(csd), abs(imag(csd)), imag(csd)^2
acc_shape = (3,) + self.csd_shape
self._acc = np.zeros(acc_shape)
def accumulate(self, con_idx, csd_xy):
"""Accumulate some connections."""
im_csd = np.imag(csd_xy)
self._acc[0, con_idx] += im_csd
self._acc[1, con_idx] += np.abs(im_csd)
self._acc[2, con_idx] += im_csd ** 2
def compute_con(self, con_idx, n_epochs):
"""Compute final con. score for some connections."""
if self.con_scores is None:
self.con_scores = np.zeros(self.csd_shape)
# note: we use the trick from fieldtrip to compute the
# estimate over all pairwise epoch combinations
sum_im_csd = self._acc[0, con_idx]
sum_abs_im_csd = self._acc[1, con_idx]
sum_sq_im_csd = self._acc[2, con_idx]
denom = sum_abs_im_csd ** 2 - sum_sq_im_csd
# handle zeros in denominator
z_denom = np.where(denom == 0.)
denom[z_denom] = 1.
con = (sum_im_csd ** 2 - sum_sq_im_csd) / denom
# where we had zeros in denominator, we set con to zero
con[z_denom] = 0.
self.con_scores[con_idx] = con
class _PPCEst(_EpochMeanConEstBase):
"""Pairwise Phase Consistency (PPC) Estimator."""
name = 'PPC'
def __init__(self, n_cons, n_freqs, n_times):
super(_PPCEst, self).__init__(n_cons, n_freqs, n_times)
# store csd / abs(csd)
self._acc = np.zeros(self.csd_shape, dtype=np.complex128)
def accumulate(self, con_idx, csd_xy):
"""Accumulate some connections."""
denom = np.abs(csd_xy)
z_denom = np.where(denom == 0.)
denom[z_denom] = 1.
this_acc = csd_xy / denom
this_acc[z_denom] = 0. # handle division by zero
self._acc[con_idx] += this_acc
def compute_con(self, con_idx, n_epochs):
"""Compute final con. score for some connections."""
if self.con_scores is None:
self.con_scores = np.zeros(self.csd_shape)
# note: we use the trick from fieldtrip to compute the
# estimate over all pairwise epoch combinations
con = ((self._acc[con_idx] * np.conj(self._acc[con_idx]) - n_epochs) /
(n_epochs * (n_epochs - 1.)))
self.con_scores[con_idx] = np.real(con)
###############################################################################
def _epoch_spectral_connectivity(data, sig_idx, tmin_idx, tmax_idx, sfreq,
mode, window_fun, eigvals, wavelets,
freq_mask, mt_adaptive, idx_map, block_size,
psd, accumulate_psd, con_method_types,
con_methods, n_signals, n_times,
accumulate_inplace=True):
"""Estimate connectivity for one epoch (see spectral_connectivity)."""
n_cons = len(idx_map[0])
if wavelets is not None:
n_times_spectrum = n_times
n_freqs = len(wavelets)
else:
n_times_spectrum = 0
n_freqs = np.sum(freq_mask)
if not accumulate_inplace:
# instantiate methods only for this epoch (used in parallel mode)
con_methods = [mtype(n_cons, n_freqs, n_times_spectrum)
for mtype in con_method_types]
if len(sig_idx) == n_signals:
# we use all signals: use a slice for faster indexing
sig_idx = slice(None, None)
# compute tapered spectra
if mode in ('multitaper', 'fourier'):
x_mt = list()
this_psd = list()
sig_pos_start = 0
for this_data in data:
this_n_sig = this_data.shape[0]
sig_pos_end = sig_pos_start + this_n_sig
if not isinstance(sig_idx, slice):
this_sig_idx = sig_idx[(sig_idx >= sig_pos_start) &
(sig_idx < sig_pos_end)] - sig_pos_start
else:
this_sig_idx = sig_idx
if isinstance(this_data, _BaseSourceEstimate):
_mt_spectra_partial = partial(_mt_spectra, dpss=window_fun,
sfreq=sfreq)
this_x_mt = this_data.transform_data(
_mt_spectra_partial, idx=this_sig_idx, tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
else:
this_x_mt, _ = _mt_spectra(this_data[this_sig_idx,
tmin_idx:tmax_idx],
window_fun, sfreq)
if mt_adaptive:
# compute PSD and adaptive weights
_this_psd, weights = _psd_from_mt_adaptive(
this_x_mt, eigvals, freq_mask, return_weights=True)
# only keep freqs of interest
this_x_mt = this_x_mt[:, :, freq_mask]
else:
# do not use adaptive weights
this_x_mt = this_x_mt[:, :, freq_mask]
if mode == 'multitaper':
weights = np.sqrt(eigvals)[np.newaxis, :, np.newaxis]
else:
# hack so we can sum over axis=-2
weights = np.array([1.])[:, None, None]
if accumulate_psd:
_this_psd = _psd_from_mt(this_x_mt, weights)
x_mt.append(this_x_mt)
if accumulate_psd:
this_psd.append(_this_psd)
x_mt = np.concatenate(x_mt, axis=0)
if accumulate_psd:
this_psd = np.concatenate(this_psd, axis=0)
# advance position
sig_pos_start = sig_pos_end
elif mode == 'cwt_morlet':
# estimate spectra using CWT
x_cwt = list()
this_psd = list()
sig_pos_start = 0
for this_data in data:
this_n_sig = this_data.shape[0]
sig_pos_end = sig_pos_start + this_n_sig
if not isinstance(sig_idx, slice):
this_sig_idx = sig_idx[(sig_idx >= sig_pos_start) &
(sig_idx < sig_pos_end)] - sig_pos_start
else:
this_sig_idx = sig_idx
if isinstance(this_data, _BaseSourceEstimate):
cwt_partial = partial(cwt, Ws=wavelets, use_fft=True,
mode='same')
this_x_cwt = this_data.transform_data(
cwt_partial, idx=this_sig_idx, tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
else:
this_x_cwt = cwt(this_data[this_sig_idx, tmin_idx:tmax_idx],
wavelets, use_fft=True, mode='same')
if accumulate_psd:
this_psd.append((this_x_cwt * this_x_cwt.conj()).real)
x_cwt.append(this_x_cwt)
# advance position
sig_pos_start = sig_pos_end
x_cwt = np.concatenate(x_cwt, axis=0)
if accumulate_psd:
this_psd = np.concatenate(this_psd, axis=0)
else:
raise RuntimeError('invalid mode')
# accumulate or return psd
if accumulate_psd:
if accumulate_inplace:
psd += this_psd
else:
psd = this_psd
else:
psd = None
# tell the methods that a new epoch starts
for method in con_methods:
method.start_epoch()
# accumulate connectivity scores
if mode in ['multitaper', 'fourier']:
for i in range(0, n_cons, block_size):
con_idx = slice(i, i + block_size)
if mt_adaptive:
csd = _csd_from_mt(x_mt[idx_map[0][con_idx]],
x_mt[idx_map[1][con_idx]],
weights[idx_map[0][con_idx]],
weights[idx_map[1][con_idx]])
else:
csd = _csd_from_mt(x_mt[idx_map[0][con_idx]],
x_mt[idx_map[1][con_idx]],
weights, weights)
for method in con_methods:
method.accumulate(con_idx, csd)
elif mode in ('cwt_morlet',): # reminder to add alternative TFR methods
for i_block, i in enumerate(range(0, n_cons, block_size)):
con_idx = slice(i, i + block_size)
# this code can be very slow
csd = (x_cwt[idx_map[0][con_idx]] *
x_cwt[idx_map[1][con_idx]].conjugate())
for method in con_methods:
method.accumulate(con_idx, csd)
# future estimator types need to be explicitly handled here
else:
raise RuntimeError('This should never happen')
return con_methods, psd
def _get_n_epochs(epochs, n):
"""Generate lists with at most n epochs."""
epochs_out = list()
for epoch in epochs:
if not isinstance(epoch, (list, tuple)):
epoch = (epoch,)
epochs_out.append(epoch)
if len(epochs_out) >= n:
yield epochs_out
epochs_out = list()
if 0 < len(epochs_out) < n:
yield epochs_out
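# Example (illustrative): list(_get_n_epochs(range(5), 2)) yields the blocks
# [[(0,), (1,)], [(2,), (3,)], [(4,)]], i.e. at most two epochs per block
# with each epoch wrapped in a tuple.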
def _check_method(method):
"""Test if a method implements the required interface."""
interface_members = [m[0] for m in getmembers(_AbstractConEstBase)
if not m[0].startswith('_')]
method_members = [m[0] for m in getmembers(method)
if not m[0].startswith('_')]
for member in interface_members:
if member not in method_members:
return False, member
return True, None
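# Illustrative sketch (not part of the original module): a custom estimator
# passed to spectral_connectivity() only needs the public interface checked
# above. Mirroring the estimators defined earlier, a minimal "mean imaginary
# CSD" estimator could look like this; the class name is made up and it is
# not registered in _CON_METHOD_MAP.
class _ExampleImCSDEst(_EpochMeanConEstBase):
    """Example estimator: mean imaginary part of the CSD over epochs."""
    name = 'Example-ImCSD'
    def __init__(self, n_cons, n_freqs, n_times):
        super(_ExampleImCSDEst, self).__init__(n_cons, n_freqs, n_times)
        self._acc = np.zeros(self.csd_shape)
    def accumulate(self, con_idx, csd_xy):
        """Accumulate some connections."""
        self._acc[con_idx] += np.imag(csd_xy)
    def compute_con(self, con_idx, n_epochs):
        """Compute final con. score for some connections."""
        if self.con_scores is None:
            self.con_scores = np.zeros(self.csd_shape)
        self.con_scores[con_idx] = self._acc[con_idx] / n_epochs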
def _get_and_verify_data_sizes(data, n_signals=None, n_times=None, times=None):
"""Get and/or verify the data sizes and time scales."""
if not isinstance(data, (list, tuple)):
raise ValueError('data has to be a list or tuple')
n_signals_tot = 0
for this_data in data:
this_n_signals, this_n_times = this_data.shape
if n_times is not None:
if this_n_times != n_times:
raise ValueError('all input time series must have the same '
'number of time points')
else:
n_times = this_n_times
n_signals_tot += this_n_signals
if hasattr(this_data, 'times'):
this_times = this_data.times
if times is not None:
if np.any(times != this_times):
warn('time scales of input time series do not match')
else:
times = this_times
if n_signals is not None:
if n_signals != n_signals_tot:
raise ValueError('the number of time series has to be the same in '
'each epoch')
n_signals = n_signals_tot
return n_signals, n_times, times
# map names to estimator types
_CON_METHOD_MAP = {'coh': _CohEst, 'cohy': _CohyEst, 'imcoh': _ImCohEst,
'plv': _PLVEst, 'ppc': _PPCEst, 'pli': _PLIEst,
'pli2_unbiased': _PLIUnbiasedEst, 'wpli': _WPLIEst,
'wpli2_debiased': _WPLIDebiasedEst}
def _check_estimators(method, mode):
"""Check construction of connectivity estimators."""
n_methods = len(method)
con_method_types = list()
for this_method in method:
if this_method in _CON_METHOD_MAP:
con_method_types.append(_CON_METHOD_MAP[this_method])
elif isinstance(this_method, string_types):
raise ValueError('%s is not a valid connectivity method' %
this_method)
else:
# support for custom class
method_valid, msg = _check_method(this_method)
if not method_valid:
raise ValueError('The supplied connectivity method does '
'not have the method %s' % msg)
con_method_types.append(this_method)
# determine how many arguments the compute_con function needs
n_comp_args = [len(_get_args(mtype.compute_con))
for mtype in con_method_types]
# we currently only support compute_con with 3 or 5 arguments
if any(n not in (3, 5) for n in n_comp_args):
raise ValueError('The .compute_con method needs to have either '
'3 or 5 arguments')
# if none of the compute_con functions needs the PSD, we don't estimate it
accumulate_psd = any(n == 5 for n in n_comp_args)
return con_method_types, n_methods, accumulate_psd, n_comp_args
@verbose
def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
mode='multitaper', fmin=None, fmax=np.inf,
fskip=0, faverage=False, tmin=None, tmax=None,
mt_bandwidth=None, mt_adaptive=False,
mt_low_bias=True, cwt_freqs=None,
cwt_n_cycles=7, block_size=1000, n_jobs=1,
verbose=None):
"""Compute frequency- and time-frequency-domain connectivity measures.
The connectivity method(s) are specified using the "method" parameter.
All methods are based on estimates of the cross- and power spectral
densities (CSD/PSD) Sxy and Sxx, Syy.
The spectral densities can be estimated using a multitaper method with
digital prolate spheroidal sequence (DPSS) windows, a discrete Fourier
transform with Hanning windows, or a continuous wavelet transform using
Morlet wavelets. The spectral estimation mode is specified using the
"mode" parameter.
By default, the connectivity between all signals is computed (only
connections corresponding to the lower-triangular part of the
connectivity matrix). If one is only interested in the connectivity
between some signals, the "indices" parameter can be used. For example,
to compute the connectivity between the signal with index 0 and signals
"2, 3, 4" (a total of 3 connections) one can use the following::
indices = (np.array([0, 0, 0]), # row indices
np.array([2, 3, 4])) # col indices
con_flat = spectral_connectivity(data, method='coh',
indices=indices, ...)
In this case con_flat.shape = (3, n_freqs). The connectivity scores are
in the same order as the defined indices.
**Supported Connectivity Measures**
The connectivity method(s) is specified using the "method" parameter. The
following methods are supported (note: ``E[]`` denotes average over
epochs). Multiple measures can be computed at once by using a list/tuple,
e.g., ``['coh', 'pli']`` to compute coherence and PLI.
'coh' : Coherence given by::
| E[Sxy] |
C = ---------------------
sqrt(E[Sxx] * E[Syy])
'cohy' : Coherency given by::
E[Sxy]
C = ---------------------
sqrt(E[Sxx] * E[Syy])
'imcoh' : Imaginary coherence [1]_ given by::
Im(E[Sxy])
C = ----------------------
sqrt(E[Sxx] * E[Syy])
'plv' : Phase-Locking Value (PLV) [2]_ given by::
PLV = |E[Sxy/|Sxy|]|
'ppc' : Pairwise Phase Consistency (PPC), an unbiased estimator
of squared PLV [3]_.
'pli' : Phase Lag Index (PLI) [4]_ given by::
PLI = |E[sign(Im(Sxy))]|
'pli2_unbiased' : Unbiased estimator of squared PLI [5]_.
'wpli' : Weighted Phase Lag Index (WPLI) [5]_ given by::
|E[Im(Sxy)]|
WPLI = ------------------
E[|Im(Sxy)|]
'wpli2_debiased' : Debiased estimator of squared WPLI [5]_.
Parameters
----------
data : array-like, shape=(n_epochs, n_signals, n_times) | Epochs
The data from which to compute connectivity. Note that it is also
possible to combine multiple signals by providing a list of tuples,
e.g., data = [(arr_0, stc_0), (arr_1, stc_1), (arr_2, stc_2)],
corresponds to 3 epochs, and arr_* could be an array with the same
number of time points as stc_*. The array-like object can also
be a list/generator of array, shape=(n_signals, n_times),
or a list/generator of SourceEstimate or VolSourceEstimate objects.
method : string | list of string
Connectivity measure(s) to compute.
indices : tuple of arrays | None
Two arrays with indices of connections for which to compute
connectivity. If None, all connections are computed.
sfreq : float
The sampling frequency.
mode : str
Spectrum estimation mode can be either: 'multitaper', 'fourier', or
'cwt_morlet'.
fmin : float | tuple of floats
The lower frequency of interest. Multiple bands are defined using
a tuple, e.g., (8., 20.) for two bands with 8Hz and 20Hz lower freq.
If None the frequency corresponding to an epoch length of 5 cycles
is used.
fmax : float | tuple of floats
The upper frequency of interest. Multiple bands are defined using
a tuple, e.g., (13., 30.) for two bands with 13Hz and 30Hz upper freq.
fskip : int
Omit every "(fskip + 1)-th" frequency bin to decimate in frequency
domain.
faverage : boolean
Average connectivity scores for each frequency band. If True,
the output freqs will be a list with arrays of the frequencies
that were averaged.
tmin : float | None
Time to start connectivity estimation. Note: when "data" is an array,
the first sample is assumed to be at time 0. For other types
(Epochs, etc.), the time information contained in the object is used
to compute the time indices.
tmax : float | None
Time to end connectivity estimation. Note: when "data" is an array,
the first sample is assumed to be at time 0. For other types
(Epochs, etc.), the time information contained in the object is used
to compute the time indices.
mt_bandwidth : float | None
The bandwidth of the multitaper windowing function in Hz.
Only used in 'multitaper' mode.
mt_adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD.
Only used in 'multitaper' mode.
mt_low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth. Only used in 'multitaper' mode.
cwt_freqs : array
Array of frequencies of interest. Only used in 'cwt_morlet' mode.
cwt_n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency. Only used in
'cwt_morlet' mode.
block_size : int
How many connections to compute at once (higher numbers are faster
but require more memory).
n_jobs : int
How many epochs to process in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
con : array | list of arrays
Computed connectivity measure(s). The shape of each array is either
(n_signals, n_signals, n_freqs) for mode 'multitaper' or 'fourier', or
(n_signals, n_signals, n_freqs, n_times) for mode 'cwt_morlet'
when "indices" is None; or
(n_con, n_freqs) for mode 'multitaper' or 'fourier', or
(n_con, n_freqs, n_times) for mode 'cwt_morlet'
when "indices" is specified and "n_con = len(indices[0])".
freqs : array
Frequency points at which the connectivity was computed.
times : array
Time points for which the connectivity was computed.
n_epochs : int
Number of epochs used for computation.
n_tapers : int
The number of DPSS tapers used. Only defined in 'multitaper' mode.
Otherwise None is returned.
References
----------
.. [1] Nolte et al. "Identifying true brain interaction from EEG data using
the imaginary part of coherency" Clinical neurophysiology, vol. 115,
no. 10, pp. 2292-2307, Oct. 2004.
.. [2] Lachaux et al. "Measuring phase synchrony in brain signals" Human
brain mapping, vol. 8, no. 4, pp. 194-208, Jan. 1999.
.. [3] Vinck et al. "The pairwise phase consistency: a bias-free measure of
rhythmic neuronal synchronization" NeuroImage, vol. 51, no. 1,
pp. 112-122, May 2010.
.. [4] Stam et al. "Phase lag index: assessment of functional connectivity
from multi channel EEG and MEG with diminished bias from common
sources" Human brain mapping, vol. 28, no. 11, pp. 1178-1193,
Nov. 2007.
.. [5] Vinck et al. "An improved index of phase-synchronization for
electro-physiological data in the presence of volume-conduction,
noise and sample-size bias" NeuroImage, vol. 55, no. 4,
pp. 1548-1565, Apr. 2011.
"""
if n_jobs != 1:
parallel, my_epoch_spectral_connectivity, _ = \
parallel_func(_epoch_spectral_connectivity, n_jobs,
verbose=verbose)
# format fmin and fmax and check inputs
if fmin is None:
fmin = -np.inf # set it to -inf, so we can adjust it later
fmin = np.array((fmin,), dtype=float).ravel()
fmax = np.array((fmax,), dtype=float).ravel()
if len(fmin) != len(fmax):
raise ValueError('fmin and fmax must have the same length')
if np.any(fmin > fmax):
raise ValueError('fmax must be larger than fmin')
n_bands = len(fmin)
# assign names to connectivity methods
if not isinstance(method, (list, tuple)):
method = [method] # make it a list so we can iterate over it
# handle connectivity estimators
(con_method_types, n_methods, accumulate_psd,
n_comp_args) = _check_estimators(method=method, mode=mode)
if isinstance(data, BaseEpochs):
times_in = data.times # input times for Epochs input type
sfreq = data.info['sfreq']
# loop over data; it could be a generator that returns
# (n_signals x n_times) arrays or SourceEstimates
epoch_idx = 0
logger.info('Connectivity computation...')
for epoch_block in _get_n_epochs(data, n_jobs):
if epoch_idx == 0:
# initialize everything: times and frequencies
(n_cons, times, n_times, times_in, n_times_in, tmin_idx,
tmax_idx, n_freqs, freq_mask, freqs, freqs_bands, freq_idx_bands,
n_signals, indices_use) = _prepare_connectivity(
epoch_block=epoch_block, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, sfreq=sfreq, indices=indices, mode=mode,
fskip=fskip, n_bands=n_bands,
cwt_freqs=cwt_freqs, faverage=faverage)
# get the window function, wavelets, etc for different modes
(spectral_params, mt_adaptive, n_times_spectrum,
n_tapers) = _assemble_spectral_params(
mode=mode, n_times=n_times, mt_adaptive=mt_adaptive,
mt_bandwidth=mt_bandwidth, sfreq=sfreq,
mt_low_bias=mt_low_bias, cwt_n_cycles=cwt_n_cycles,
cwt_freqs=cwt_freqs, freqs=freqs, freq_mask=freq_mask)
# unique signals for which we actually need to compute PSD etc.
sig_idx = np.unique(np.r_[indices_use[0], indices_use[1]])
# map indices to unique indices
idx_map = [np.searchsorted(sig_idx, ind) for ind in indices_use]
# allocate space to accumulate PSD
if accumulate_psd:
if n_times_spectrum == 0:
psd_shape = (len(sig_idx), n_freqs)
else:
psd_shape = (len(sig_idx), n_freqs, n_times_spectrum)
psd = np.zeros(psd_shape)
else:
psd = None
# create instances of the connectivity estimators
con_methods = [mtype(n_cons, n_freqs, n_times_spectrum)
for mtype in con_method_types]
sep = ', '
metrics_str = sep.join([meth.name for meth in con_methods])
logger.info(' the following metrics will be computed: %s'
% metrics_str)
# check dimensions and time scale
for this_epoch in epoch_block:
_get_and_verify_data_sizes(this_epoch, n_signals, n_times_in,
times_in)
call_params = dict(
sig_idx=sig_idx, tmin_idx=tmin_idx,
tmax_idx=tmax_idx, sfreq=sfreq, mode=mode,
freq_mask=freq_mask, idx_map=idx_map, block_size=block_size,
psd=psd, accumulate_psd=accumulate_psd,
mt_adaptive=mt_adaptive,
con_method_types=con_method_types,
con_methods=con_methods if n_jobs == 1 else None,
n_signals=n_signals, n_times=n_times,
accumulate_inplace=True if n_jobs == 1 else False)
call_params.update(**spectral_params)
if n_jobs == 1:
# no parallel processing
for this_epoch in epoch_block:
logger.info(' computing connectivity for epoch %d'
% (epoch_idx + 1))
# con methods and psd are updated inplace
_epoch_spectral_connectivity(data=this_epoch, **call_params)
epoch_idx += 1
else:
# process epochs in parallel
logger.info(' computing connectivity for epochs %d..%d'
% (epoch_idx + 1, epoch_idx + len(epoch_block)))
out = parallel(my_epoch_spectral_connectivity(
data=this_epoch, **call_params)
for this_epoch in epoch_block)
# do the accumulation
for this_out in out:
for method, parallel_method in zip(con_methods, this_out[0]):
method.combine(parallel_method)
if accumulate_psd:
psd += this_out[1]
epoch_idx += len(epoch_block)
# normalize
n_epochs = epoch_idx
if accumulate_psd:
psd /= n_epochs
# compute final connectivity scores
con = list()
for method, n_args in zip(con_methods, n_comp_args):
# future estimators will need to be handled here
if n_args == 3:
# compute all scores at once
method.compute_con(slice(0, n_cons), n_epochs)
elif n_args == 5:
# compute scores block-wise to save memory
for i in range(0, n_cons, block_size):
con_idx = slice(i, i + block_size)
psd_xx = psd[idx_map[0][con_idx]]
psd_yy = psd[idx_map[1][con_idx]]
method.compute_con(con_idx, n_epochs, psd_xx, psd_yy)
else:
raise RuntimeError('This should never happen.')
# get the connectivity scores
this_con = method.con_scores
if this_con.shape[0] != n_cons:
raise ValueError('First dimension of connectivity scores must be '
'the same as the number of connections')
if faverage:
if this_con.shape[1] != n_freqs:
raise ValueError('2nd dimension of connectivity scores must '
'be the same as the number of frequencies')
con_shape = (n_cons, n_bands) + this_con.shape[2:]
this_con_bands = np.empty(con_shape, dtype=this_con.dtype)
for band_idx in range(n_bands):
this_con_bands[:, band_idx] =\
np.mean(this_con[:, freq_idx_bands[band_idx]], axis=1)
this_con = this_con_bands
con.append(this_con)
if indices is None:
# return all-to-all connectivity matrices
logger.info(' assembling connectivity matrix '
'(filling the lower triangular region of the matrix)')
con_flat = con
con = list()
for this_con_flat in con_flat:
this_con = np.zeros((n_signals, n_signals) +
this_con_flat.shape[1:],
dtype=this_con_flat.dtype)
this_con[indices_use] = this_con_flat
con.append(this_con)
logger.info('[Connectivity computation done]')
if n_methods == 1:
# for a single method return connectivity directly
con = con[0]
if faverage:
# for each band we return the frequencies that were averaged
freqs = freqs_bands
return con, freqs, times, n_epochs, n_tapers
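# Illustrative usage sketch (not part of the original module); the data and
# all parameter values below are made up for demonstration.
def _example_spectral_connectivity_usage():
    """Compute coherence between signal 0 and signals 2-4 on random data."""
    rng = np.random.RandomState(0)
    data = rng.randn(5, 8, 256)  # (n_epochs, n_signals, n_times)
    indices = (np.array([0, 0, 0]),  # row indices
               np.array([2, 3, 4]))  # col indices
    con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
        data, method='coh', indices=indices, sfreq=128., fmin=8., fmax=13.,
        faverage=True)
    return con  # shape (3, 1): one score per connection and frequency band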
def _prepare_connectivity(epoch_block, tmin, tmax, fmin, fmax, sfreq, indices,
mode, fskip, n_bands,
cwt_freqs, faverage):
"""Check and precompute dimensions of results data."""
first_epoch = epoch_block[0]
# get the data size and time scale
n_signals, n_times_in, times_in = _get_and_verify_data_sizes(first_epoch)
if times_in is None:
# we are not using Epochs or SourceEstimate(s) as input
times_in = np.linspace(0.0, n_times_in / sfreq, n_times_in,
endpoint=False)
n_times_in = len(times_in)
mask = _time_mask(times_in, tmin, tmax, sfreq=sfreq)
tmin_idx, tmax_idx = np.where(mask)[0][[0, -1]]
tmax_idx += 1
tmin_true = times_in[tmin_idx]
tmax_true = times_in[tmax_idx - 1] # time of last point used
times = times_in[tmin_idx:tmax_idx]
n_times = len(times)
if indices is None:
logger.info('only using indices for lower-triangular matrix')
# only compute r for lower-triangular region
indices_use = np.tril_indices(n_signals, -1)
else:
indices_use = check_indices(indices)
# number of connectivities to compute
n_cons = len(indices_use[0])
logger.info(' computing connectivity for %d connections'
% n_cons)
logger.info(' using t=%0.3fs..%0.3fs for estimation (%d points)'
% (tmin_true, tmax_true, n_times))
# get frequencies of interest for the different modes
if mode in ('multitaper', 'fourier'):
# fmin fmax etc is only supported for these modes
# decide which frequencies to keep
freqs_all = np.fft.rfftfreq(n_times, 1. / sfreq)
elif mode == 'cwt_morlet':
# cwt_morlet mode
if cwt_freqs is None:
raise ValueError('define frequencies of interest using '
'cwt_freqs')
else:
cwt_freqs = cwt_freqs.astype(np.float)
if any(cwt_freqs > (sfreq / 2.)):
raise ValueError('entries in cwt_freqs cannot be '
'larger than Nyquist (sfreq / 2)')
freqs_all = cwt_freqs
else:
raise ValueError('mode has an invalid value')
# check that fmin corresponds to at least 5 cycles
dur = float(n_times) / sfreq
five_cycle_freq = 5. / dur
if len(fmin) == 1 and fmin[0] == -np.inf:
# we use the 5 cycle freq. as default
fmin = np.array([five_cycle_freq])
else:
if np.any(fmin < five_cycle_freq):
warn('fmin=%0.3f Hz corresponds to %0.3f < 5 cycles '
'based on the epoch length %0.3f sec, need at least %0.3f '
'sec epochs or fmin=%0.3f. Spectrum estimate will be '
'unreliable.' % (np.min(fmin), dur * np.min(fmin), dur,
5. / np.min(fmin), five_cycle_freq))
# create a frequency mask for all bands
freq_mask = np.zeros(len(freqs_all), dtype=np.bool)
for f_lower, f_upper in zip(fmin, fmax):
freq_mask |= ((freqs_all >= f_lower) & (freqs_all <= f_upper))
# possibly skip frequency points
for pos in range(fskip):
freq_mask[pos + 1::fskip + 1] = False
# the frequency points where we compute connectivity
freqs = freqs_all[freq_mask]
n_freqs = len(freqs)
# get the freq. indices and points for each band
freq_idx_bands = [np.where((freqs >= fl) & (freqs <= fu))[0]
for fl, fu in zip(fmin, fmax)]
freqs_bands = [freqs[freq_idx] for freq_idx in freq_idx_bands]
# make sure we don't have empty bands
for i, n_f_band in enumerate([len(f) for f in freqs_bands]):
if n_f_band == 0:
raise ValueError('There are no frequency points between '
'%0.1fHz and %0.1fHz. Change the band '
'specification (fmin, fmax) or the '
'frequency resolution.'
% (fmin[i], fmax[i]))
if n_bands == 1:
logger.info(' frequencies: %0.1fHz..%0.1fHz (%d points)'
% (freqs_bands[0][0], freqs_bands[0][-1],
n_freqs))
else:
logger.info(' computing connectivity for the bands:')
for i, bfreqs in enumerate(freqs_bands):
logger.info(' band %d: %0.1fHz..%0.1fHz '
'(%d points)' % (i + 1, bfreqs[0],
bfreqs[-1], len(bfreqs)))
if faverage:
logger.info(' connectivity scores will be averaged for '
'each band')
return (n_cons, times, n_times, times_in, n_times_in, tmin_idx,
tmax_idx, n_freqs, freq_mask, freqs, freqs_bands, freq_idx_bands,
n_signals, indices_use)
def _assemble_spectral_params(mode, n_times, mt_adaptive, mt_bandwidth, sfreq,
mt_low_bias, cwt_n_cycles, cwt_freqs,
freqs, freq_mask):
"""Prepare time-frequency decomposition."""
spectral_params = dict(
eigvals=None, window_fun=None, wavelets=None)
n_tapers = None
n_times_spectrum = 0
if mode == 'multitaper':
window_fun, eigvals, mt_adaptive = _compute_mt_params(
n_times, sfreq, mt_bandwidth, mt_low_bias, mt_adaptive)
spectral_params.update(window_fun=window_fun, eigvals=eigvals)
elif mode == 'fourier':
logger.info(' using FFT with a Hanning window to estimate '
'spectra')
spectral_params.update(window_fun=np.hanning(n_times), eigvals=1.)
elif mode == 'cwt_morlet':
logger.info(' using CWT with Morlet wavelets to estimate '
'spectra')
# reformat cwt_n_cycles if we have removed some frequencies
# using fmin, fmax, fskip
cwt_n_cycles = np.array((cwt_n_cycles,), dtype=float).ravel()
if len(cwt_n_cycles) > 1:
if len(cwt_n_cycles) != len(cwt_freqs):
raise ValueError('cwt_n_cycles must be float or an '
'array with the same size as cwt_freqs')
cwt_n_cycles = cwt_n_cycles[freq_mask]
# get the Morlet wavelets
spectral_params.update(
wavelets=morlet(sfreq, freqs,
n_cycles=cwt_n_cycles, zero_mean=True))
n_times_spectrum = n_times
else:
raise ValueError('mode has an invalid value')
return spectral_params, mt_adaptive, n_times_spectrum, n_tapers
| {
"content_hash": "32324cea08c47cc0f31144512d6229a1",
"timestamp": "",
"source": "github",
"line_count": 1083,
"max_line_length": 79,
"avg_line_length": 39.091412742382275,
"alnum_prop": 0.5655706727135299,
"repo_name": "teonlamont/mne-python",
"id": "d6b2f3a892040e83a0c5647e9c706f3ac15fc877",
"size": "42476",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mne/connectivity/spectral.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3117"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4354605"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from flask import Flask, render_template
app = Flask('slow_splinter_demo')
@app.route('/')
def index():
return render_template('index.html')
| {
"content_hash": "f8027871edd9532a8d281e7f5b363841",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 40,
"avg_line_length": 18.9,
"alnum_prop": 0.708994708994709,
"repo_name": "ErinCall/slow_splinter_demo",
"id": "7a6ebc3e6df598bed49b1f85fb3b1c41a85caa3d",
"size": "189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "20"
},
{
"name": "Python",
"bytes": "811"
}
],
"symlink_target": ""
} |
import fixtures
from oslo_serialization import jsonutils as json
from tempest.tests import base
from tempest.tests.lib import fake_http
class BaseServiceTest(base.TestCase):
def create_response(self, body, to_utf=False, status=200, headers=None):
json_body = {}
if body:
json_body = json.dumps(body)
if to_utf:
json_body = json_body.encode('utf-8')
resp = fake_http.fake_http_response(headers, status=status), json_body
return resp
def check_service_client_function(self, function, function2mock,
body, to_utf=False, status=200,
headers=None, mock_args=None,
resp_as_string=False,
**kwargs):
"""Mock a service client function for unit testing.
:param function: The service client function to call.
:param function2mock: The REST call to mock inside the service client
function.
:param body: Expected response body returned by the service client
function.
:param to_utf: Whether to use UTF-8 encoding for response.
:param status: Expected response status returned by the service client
function.
:param headers: Expected headers returned by the service client
function.
:param mock_args: List/dict/value of expected args/kwargs called by
function2mock. For example:
* If mock_args=['foo'] then ``assert_called_once_with('foo')``
is called.
* If mock_args={'foo': 'bar'} then
``assert_called_once_with(foo='bar')`` is called.
* If mock_args='foo' then ``assert_called_once_with('foo')``
is called.
:param resp_as_string: Whether response body is returned as string.
This is for service client methods which return ResponseBodyData
object.
:param kwargs: kwargs that are passed to function.
"""
mocked_response = self.create_response(body, to_utf, status, headers)
fixture = self.useFixture(fixtures.MockPatch(
function2mock, return_value=mocked_response))
if kwargs:
resp = function(**kwargs)
else:
resp = function()
if resp_as_string:
resp = resp.data
self.assertEqual(body, resp)
if isinstance(mock_args, list):
fixture.mock.assert_called_once_with(*mock_args)
elif isinstance(mock_args, dict):
fixture.mock.assert_called_once_with(**mock_args)
elif mock_args is not None:
fixture.mock.assert_called_once_with(mock_args)
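# Illustrative sketch only: ``widgets_client`` and the mocked ``get`` path are
# hypothetical stand-ins for a real service client under test, showing how a
# unit test typically wires up check_service_client_function.
def _example_check_call(test_case, widgets_client):
    test_case.check_service_client_function(
        widgets_client.show_widget,  # service client function under test
        'tempest.lib.common.rest_client.RestClient.get',  # REST call to mock
        {'widget': {'id': '1234'}},  # expected response body
        status=200,
        widget_id='1234')  # kwargs forwarded to show_widget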
| {
"content_hash": "54c35e424b2ec320bb4c9858608592ef",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 79,
"avg_line_length": 44.492063492063494,
"alnum_prop": 0.5829468426685694,
"repo_name": "Juniper/tempest",
"id": "924f9f20624b7a9ba4c754b75dc45a3a8b54c890",
"size": "3438",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tempest/tests/lib/services/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4194970"
},
{
"name": "Shell",
"bytes": "19343"
}
],
"symlink_target": ""
} |
from tvm.relay.frontend.common import StrAttrsDict
def test_key_is_present():
attrs = StrAttrsDict({"a": 1})
assert attrs.has_attr("a")
def test_key_is_not_present():
attrs = StrAttrsDict({"a": 1})
assert not attrs.has_attr("b")
if __name__ == "__main__":
test_key_is_present()
test_key_is_not_present()
| {
"content_hash": "96266b221a3e9c4dd69d528eebb6fdb5",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 50,
"avg_line_length": 20.625,
"alnum_prop": 0.6242424242424243,
"repo_name": "sxjscience/tvm",
"id": "e706f2af304acae2e98144ad92ae89a9ed1366ec",
"size": "1115",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/python/frontend/test_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6056"
},
{
"name": "C",
"bytes": "95567"
},
{
"name": "C++",
"bytes": "5565032"
},
{
"name": "CMake",
"bytes": "67305"
},
{
"name": "Go",
"bytes": "112376"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "173219"
},
{
"name": "JavaScript",
"bytes": "49801"
},
{
"name": "Makefile",
"bytes": "50818"
},
{
"name": "Objective-C",
"bytes": "15264"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "6763729"
},
{
"name": "Rust",
"bytes": "182027"
},
{
"name": "Scala",
"bytes": "184105"
},
{
"name": "Shell",
"bytes": "96967"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
} |
import datetime
import pytest
import google.cloud._helpers
KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1"
@pytest.fixture
def target_class():
from google.cloud.bigquery import Model
return Model
@pytest.fixture
def object_under_test(target_class):
return target_class("project-id.dataset_id.model_id")
def test_ctor(target_class):
from google.cloud.bigquery import ModelReference
ref = ModelReference.from_string("my-proj.my_dset.my_model")
got = target_class(ref)
assert got.reference == ref
def test_ctor_string(target_class):
from google.cloud.bigquery import ModelReference
model_id = "my-proj.my_dset.my_model"
ref = ModelReference.from_string(model_id)
got = target_class(model_id)
assert got.reference == ref
def test_from_api_repr(target_class):
from google.cloud.bigquery import ModelReference
creation_time = datetime.datetime(
2010, 5, 19, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
)
modified_time = datetime.datetime(
2011, 10, 1, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
)
expiration_time = datetime.datetime(
2012, 12, 21, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
)
resource = {
"modelReference": {
"projectId": "my-project",
"datasetId": "my_dataset",
"modelId": "my_model",
},
"location": "US",
"etag": "abcdefg",
"creationTime": str(google.cloud._helpers._millis(creation_time)),
"lastModifiedTime": str(google.cloud._helpers._millis(modified_time)),
"expirationTime": str(google.cloud._helpers._millis(expiration_time)),
"description": "A friendly description.",
"friendlyName": "A friendly name.",
"modelType": "LOGISTIC_REGRESSION",
"labels": {"greeting": "こんにちは"},
"trainingRuns": [
{
"trainingOptions": {"initialLearnRate": 1.0},
"startTime": str(
google.cloud._helpers._datetime_to_rfc3339(creation_time)
),
},
{
"trainingOptions": {"initialLearnRate": 0.5},
"startTime": str(
google.cloud._helpers._datetime_to_rfc3339(modified_time)
),
},
{
"trainingOptions": {"initialLearnRate": 0.25},
"startTime": str(
google.cloud._helpers._datetime_to_rfc3339(expiration_time)
),
},
],
"bestTrialId": "123",
"featureColumns": [],
"encryptionConfiguration": {"kmsKeyName": KMS_KEY_NAME},
}
got = target_class.from_api_repr(resource)
assert got.project == "my-project"
assert got.dataset_id == "my_dataset"
assert got.model_id == "my_model"
assert got.reference == ModelReference.from_string("my-project.my_dataset.my_model")
assert got.path == "/projects/my-project/datasets/my_dataset/models/my_model"
assert got.location == "US"
assert got.etag == "abcdefg"
assert got.created == creation_time
assert got.modified == modified_time
assert got.expires == expiration_time
assert got.description == "A friendly description."
assert got.friendly_name == "A friendly name."
assert got.model_type == "LOGISTIC_REGRESSION"
assert got.labels == {"greeting": "こんにちは"}
assert got.encryption_configuration.kms_key_name == KMS_KEY_NAME
assert got.best_trial_id == 123
assert got.training_runs[0]["trainingOptions"]["initialLearnRate"] == 1.0
assert (
google.cloud._helpers._rfc3339_to_datetime(got.training_runs[0]["startTime"])
== creation_time
)
assert got.training_runs[1]["trainingOptions"]["initialLearnRate"] == 0.5
assert (
google.cloud._helpers._rfc3339_to_datetime(got.training_runs[1]["startTime"])
== modified_time
)
assert got.training_runs[2]["trainingOptions"]["initialLearnRate"] == 0.25
assert (
google.cloud._helpers._rfc3339_to_datetime(got.training_runs[2]["startTime"])
== expiration_time
)
def test_from_api_repr_w_minimal_resource(target_class):
from google.cloud.bigquery import ModelReference
resource = {
"modelReference": {
"projectId": "my-project",
"datasetId": "my_dataset",
"modelId": "my_model",
}
}
got = target_class.from_api_repr(resource)
assert got.reference == ModelReference.from_string("my-project.my_dataset.my_model")
assert got.location is None
assert got.etag is None
assert got.created is None
assert got.modified is None
assert got.expires is None
assert got.description is None
assert got.friendly_name is None
assert got.model_type == "MODEL_TYPE_UNSPECIFIED"
assert got.labels == {}
assert got.encryption_configuration is None
assert len(got.training_runs) == 0
assert len(got.feature_columns) == 0
assert len(got.label_columns) == 0
assert got.best_trial_id is None
def test_from_api_repr_w_unknown_fields(target_class):
from google.cloud.bigquery import ModelReference
resource = {
"modelReference": {
"projectId": "my-project",
"datasetId": "my_dataset",
"modelId": "my_model",
},
"thisFieldIsNotInTheProto": "just ignore me",
}
got = target_class.from_api_repr(resource)
assert got.reference == ModelReference.from_string("my-project.my_dataset.my_model")
assert got._properties == resource
def test_from_api_repr_w_unknown_type(target_class):
from google.cloud.bigquery import ModelReference
resource = {
"modelReference": {
"projectId": "my-project",
"datasetId": "my_dataset",
"modelId": "my_model",
},
"modelType": "BE_A_GOOD_ROLE_MODEL", # This model type does not exist.
}
got = target_class.from_api_repr(resource)
assert got.reference == ModelReference.from_string("my-project.my_dataset.my_model")
assert got.model_type == "BE_A_GOOD_ROLE_MODEL" # No checks for invalid types.
assert got._properties == resource
def test_from_api_repr_w_missing_reference(target_class):
resource = {}
got = target_class.from_api_repr(resource)
assert got.reference is None
assert got._properties == resource
@pytest.mark.parametrize(
"resource,filter_fields,expected",
[
(
{
"friendlyName": "hello",
"description": "world",
"expirationTime": "12345",
"labels": {"a-label": "a-value"},
},
["description"],
{"description": "world"},
),
(
{"friendlyName": "hello", "description": "world"},
["friendlyName"],
{"friendlyName": "hello"},
),
(
{
"friendlyName": "hello",
"description": "world",
"expirationTime": "12345",
"labels": {"a-label": "a-value"},
},
["expires"],
{"expirationTime": "12345"},
),
(
{
"friendlyName": "hello",
"description": "world",
"expirationTime": None,
"labels": {"a-label": "a-value"},
},
["expires"],
{"expirationTime": None},
),
(
{
"friendlyName": "hello",
"description": "world",
"expirationTime": None,
"labels": {"a-label": "a-value"},
},
["labels"],
{"labels": {"a-label": "a-value"}},
),
(
{
"friendlyName": "hello",
"description": "world",
"expirationTime": None,
"labels": {"a-label": "a-value"},
"encryptionConfiguration": {"kmsKeyName": KMS_KEY_NAME},
},
["encryptionConfiguration"],
{"encryptionConfiguration": {"kmsKeyName": KMS_KEY_NAME}},
),
],
)
def test_build_resource(object_under_test, resource, filter_fields, expected):
object_under_test._properties = resource
got = object_under_test._build_resource(filter_fields)
assert got == expected
def test_feature_columns(object_under_test):
from google.cloud.bigquery import standard_sql
object_under_test._properties["featureColumns"] = [
{"name": "col_1", "type": {"typeKind": "STRING"}},
{"name": "col_2", "type": {"typeKind": "FLOAT64"}},
]
expected = [
standard_sql.StandardSqlField(
"col_1",
standard_sql.StandardSqlDataType(standard_sql.StandardSqlTypeNames.STRING),
),
standard_sql.StandardSqlField(
"col_2",
standard_sql.StandardSqlDataType(standard_sql.StandardSqlTypeNames.FLOAT64),
),
]
assert object_under_test.feature_columns == expected
def test_label_columns(object_under_test):
from google.cloud.bigquery import standard_sql
object_under_test._properties["labelColumns"] = [
{"name": "col_1", "type": {"typeKind": "STRING"}},
{"name": "col_2", "type": {"typeKind": "FLOAT64"}},
]
expected = [
standard_sql.StandardSqlField(
"col_1",
standard_sql.StandardSqlDataType(standard_sql.StandardSqlTypeNames.STRING),
),
standard_sql.StandardSqlField(
"col_2",
standard_sql.StandardSqlDataType(standard_sql.StandardSqlTypeNames.FLOAT64),
),
]
assert object_under_test.label_columns == expected
def test_set_description(object_under_test):
assert not object_under_test.description
object_under_test.description = "A model description."
assert object_under_test.description == "A model description."
object_under_test.description = None
assert not object_under_test.description
def test_set_expires(object_under_test):
assert not object_under_test.expires
expiration_time = datetime.datetime(
2012, 12, 21, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
)
object_under_test.expires = expiration_time
assert object_under_test.expires == expiration_time
object_under_test.expires = None
assert not object_under_test.expires
def test_set_friendly_name(object_under_test):
assert not object_under_test.friendly_name
object_under_test.friendly_name = "A model name."
assert object_under_test.friendly_name == "A model name."
object_under_test.friendly_name = None
assert not object_under_test.friendly_name
def test_set_labels(object_under_test):
assert object_under_test.labels == {}
object_under_test.labels["data_owner"] = "someteam"
assert object_under_test.labels == {"data_owner": "someteam"}
del object_under_test.labels["data_owner"]
assert object_under_test.labels == {}
def test_replace_labels(object_under_test):
assert object_under_test.labels == {}
object_under_test.labels = {"data_owner": "someteam"}
assert object_under_test.labels == {"data_owner": "someteam"}
labels = {}
object_under_test.labels = labels
assert object_under_test.labels is labels
object_under_test.labels = None
assert object_under_test.labels == {}
def test_set_encryption_configuration(object_under_test):
from google.cloud.bigquery.encryption_configuration import EncryptionConfiguration
assert not object_under_test.encryption_configuration
object_under_test.encryption_configuration = EncryptionConfiguration(
kms_key_name=KMS_KEY_NAME
)
assert object_under_test.encryption_configuration.kms_key_name == KMS_KEY_NAME
object_under_test.encryption_configuration = None
assert not object_under_test.encryption_configuration
def test_repr(target_class):
model = target_class("my-proj.my_dset.my_model")
got = repr(model)
assert got == (
"Model(reference=ModelReference("
"project_id='my-proj', dataset_id='my_dset', model_id='my_model'))"
)
def test_to_api_repr(target_class):
model = target_class("my-proj.my_dset.my_model")
resource = {
"etag": "abcdefg",
"modelReference": {
"projectId": "my-project",
"datasetId": "my_dataset",
"modelId": "my_model",
},
"creationTime": "1274284800000",
"lastModifiedTime": "1317484800000",
"modelType": "LOGISTIC_REGRESSION",
"trainingRuns": [
{
"trainingOptions": {"initialLearnRate": 1.0},
"startTime": "2010-05-19T16:00:00Z",
},
{
"trainingOptions": {"initialLearnRate": 0.5},
"startTime": "2011-10-01T16:00:00Z",
},
{
"trainingOptions": {"initialLearnRate": 0.25},
"startTime": "2012-12-21T16:00:00Z",
},
],
"description": "A friendly description.",
"location": "US",
"friendlyName": "A friendly name.",
"labels": {"greeting": "こんにちは"},
"expirationTime": "1356105600000",
"encryptionConfiguration": {
"kmsKeyName": "projects/1/locations/us/keyRings/1/cryptoKeys/1"
},
}
model._properties = resource
got = model.to_api_repr()
assert got == resource
| {
"content_hash": "6754dea8c593db8c5624c17374e26d74",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 88,
"avg_line_length": 33.49009900990099,
"alnum_prop": 0.59490022172949,
"repo_name": "googleapis/python-bigquery",
"id": "1ae98841490865c92ffd48baece7801612fd59bb",
"size": "14162",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/model/test_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2520564"
},
{
"name": "Shell",
"bytes": "31939"
}
],
"symlink_target": ""
} |
import pathlib
from qiime2 import sdk
from qiime2.plugin import model
from qiime2.core import util
def identity_transformer(view):
return view
class ModelType:
@staticmethod
def from_view_type(view_type):
if issubclass(view_type, model.base.FormatBase):
if issubclass(view_type,
model.SingleFileDirectoryFormatBase):
# HACK: this is necessary because we need to be able to "act"
# like a FileFormat when looking up transformers, but our
# input/output coercion still needs to bridge the
# transformation as we do not have transitivity
# In other words we have DX and we have transformers of X
# In a perfect world we would automatically define DX -> X and
# let transitivity handle it, but since that doesn't exist, we
# need to treat DX as if it were X and coerce behind the scenes
# TODO: redo this when transformers are transitive
return SingleFileDirectoryFormatType(view_type)
# Normal format type
return FormatType(view_type)
else:
# TODO: supporting stdlib.typing may require an alternate
# model type as `isinstance` is a meaningless operation
# for them so validation would need to be handled differently
return ObjectType(view_type)
def __init__(self, view_type):
self._pm = sdk.PluginManager()
self._view_type = view_type
self._view_name = util.get_view_name(self._view_type)
self._record = None
if self._view_name in self._pm.views:
self._record = self._pm.views[self._view_name]
def make_transformation(self, other, recorder=None):
# record may be None in case of identity transformer
transformer, transformer_record = self._get_transformer_to(other)
if transformer is None:
raise Exception("No transformation from %r to %r" %
(self._view_type, other._view_type))
if recorder is not None:
recorder(transformer_record, input_name=self._view_name,
input_record=self._record, output_name=other._view_name,
output_record=other._record)
def transformation(view, validate_level='min'):
view = self.coerce_view(view)
self.validate(view, validate_level)
new_view = transformer(view)
new_view = other.coerce_view(new_view)
other.validate(new_view)
if transformer is not identity_transformer:
other.set_user_owned(new_view, False)
return new_view
return transformation
def _get_transformer_to(self, other):
transformer, record = self._lookup_transformer(self._view_type,
other._view_type)
if transformer is None:
return other._get_transformer_from(self)
return transformer, record
def has_transformation(self, other):
""" Checks to see if there exist transformers for other
Parameters
----------
other : ModelType subclass
The object being checked for transformer
Returns
-------
bool
Does the specified transformer exist for other?
"""
transformer, _ = self._get_transformer_to(other)
return transformer is not None
def _get_transformer_from(self, other):
return None, None
def coerce_view(self, view):
return view
def _lookup_transformer(self, from_, to_):
if from_ == to_:
return identity_transformer, None
try:
record = self._pm.transformers[from_][to_]
return record.transformer, record
except KeyError:
return None, None
def set_user_owned(self, view, value):
pass
class FormatType(ModelType):
def coerce_view(self, view):
if type(view) is str or isinstance(view, pathlib.Path):
return self._view_type(view, mode='r')
if isinstance(view, self._view_type):
# wrap original path (inheriting the lifetime) and return a
# read-only instance
return self._view_type(view.path, mode='r')
return view
def validate(self, view, level='min'):
if not isinstance(view, self._view_type):
raise TypeError("%r is not an instance of %r."
% (view, self._view_type))
# Formats have a validate method, so defer to it
view.validate(level)
def set_user_owned(self, view, value):
view.path._user_owned = value
class SingleFileDirectoryFormatType(FormatType):
def __init__(self, view_type):
# Single file directory formats have only one file named `file`
# allowing us to construct a model type from the format of `file`
self._wrapped_view_type = view_type.file.format
super().__init__(view_type)
def _get_transformer_to(self, other):
# Legend:
# - Dx: single directory format of x
# - Dy: single directory format of y
# - x: input format x
# - y: output format y
# - ->: implicit transformer
# - =>: registered transformer
# - :> final transformation
# - |: or, used when multiple situation are possible
# It looks like all permutations because it is...
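# Concrete illustration (hypothetical formats): if Dx is a single-file
# directory format wrapping x = FooFormat and only a FooFormat => BarFormat
# transformer is registered, the "Dx -> x => y" case below still turns a
# Dx view into BarFormat by reading Dx's single file as FooFormat first.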
# Dx :> y | Dy via Dx => y | Dy
transformer, record = self._wrap_transformer(self, other)
if transformer is not None:
return transformer, record
# Dx :> Dy via Dx -> x => y | Dy
transformer, record = self._wrap_transformer(self, other,
wrap_input=True)
if transformer is not None:
return transformer, record
if type(other) is type(self):
# Dx :> Dy via Dx -> x => y -> Dy
transformer, record = self._wrap_transformer(
self, other, wrap_input=True, wrap_output=True)
if transformer is not None:
return transformer, record
# Out of options, try for Dx :> Dy via Dx => y -> Dy
return other._get_transformer_from(self) # record is included
def _get_transformer_from(self, other):
# x | Dx :> Dy via x | Dx => y -> Dy
# IMPORTANT: reverse other and self, this method is like __radd__
return self._wrap_transformer(other, self, wrap_output=True)
def _wrap_transformer(self, in_, out_, wrap_input=False,
wrap_output=False):
input = in_._wrapped_view_type if wrap_input else in_._view_type
output = out_._wrapped_view_type if wrap_output else out_._view_type
transformer, record = self._lookup_transformer(input, output)
if transformer is None:
return None, None
if wrap_input:
transformer = in_._wrap_input(transformer)
if wrap_output:
transformer = out_._wrap_output(transformer)
return transformer, record
def _wrap_input(self, transformer):
def wrapped(view):
return transformer(view.file.view(self._wrapped_view_type))
return wrapped
def _wrap_output(self, transformer):
def wrapped(view):
new_view = self._view_type()
file_view = transformer(view)
if transformer is not identity_transformer:
self.set_user_owned(file_view, False)
new_view.file.write_data(file_view, self._wrapped_view_type)
return new_view
return wrapped
class ObjectType(ModelType):
def validate(self, view, level=None):
if not isinstance(view, self._view_type):
raise TypeError("%r is not of type %r, cannot transform further."
% (view, self._view_type))
| {
"content_hash": "6ad0f979377a3480edbda1d44652275a",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 79,
"avg_line_length": 35.4625550660793,
"alnum_prop": 0.58472049689441,
"repo_name": "jakereps/qiime2",
"id": "a5be41a13ed4ac90d52f27c36c7d2e993eef7b85",
"size": "8399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiime2/core/transform.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "506"
},
{
"name": "Python",
"bytes": "895865"
},
{
"name": "Shell",
"bytes": "217"
},
{
"name": "TeX",
"bytes": "5480"
}
],
"symlink_target": ""
} |
"""
WSGI config for contmon project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# Normally one would defer to a DJANGO_SETTINGS_MODULE already set in the
# environment, but that breaks if running multiple sites in the same mod_wsgi
# process. To fix this, use mod_wsgi daemon mode with each site in its own
# daemon process, or hard-code the settings module as done below.
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.demo"
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.org/
from whitenoise.django import DjangoWhiteNoise
application = DjangoWhiteNoise(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
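# Illustrative only: a standalone WSGI server such as gunicorn would serve
# this module with ``gunicorn config.wsgi_demo:application``.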
| {
"content_hash": "561337835823744f2916fdb127b8c726",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 41.9,
"alnum_prop": 0.7941527446300716,
"repo_name": "adandan01/contmon",
"id": "ace7d92e1d81afbbc261aa93046d2582daebc407",
"size": "1676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/wsgi_demo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2782"
},
{
"name": "HTML",
"bytes": "35817"
},
{
"name": "JavaScript",
"bytes": "36063"
},
{
"name": "Python",
"bytes": "74624"
},
{
"name": "Shell",
"bytes": "3896"
}
],
"symlink_target": ""
} |
from binascii import hexlify
from binascii import unhexlify
import os, sys, logging
try:
from Crypto.Hash import SHA256, HMAC
HAS_HASH = True
except ImportError:
HAS_HASH = False
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES
HAS_AES = True
except ImportError:
HAS_AES = False
# OpenSSL pbkdf2_hmac
try:
from cryptography.hazmat.primitives.hashes import SHA256 as c_SHA256
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.backends import default_backend
HAS_PBKDF2HMAC = True
except ImportError:
HAS_PBKDF2HMAC = False
HAS_ANY_PBKDF2HMAC = HAS_PBKDF2 or HAS_PBKDF2HMAC
def check_module():
if not HAS_AES or not HAS_COUNTER or not HAS_ANY_PBKDF2HMAC or not HAS_HASH:
return False
else :
return True
from library.utils.type_conv import string2bytes, bytes2string, obj2bytes, obj2string
class AES256_Algorithm():
def __init__(self, b_password):
'''
Encrypt and decrypt data using AES-256.
'''
self.check_result = check_module()
self.logger = logging.getLogger("security")
self.b_password = string2bytes(b_password)
self.err_msg = "可能您的python3环境中,pycrypto模块版本太低或者没有安装,请升级/安装,请SSH登陆到服务器上,执行pip3 install pycrypto或者pip3 install --upgrade pycrypto"
@staticmethod
def _create_key(b_password, b_salt, keylength, ivlength):
hash_function = SHA256
pbkdf2_prf = lambda p, s: HMAC.new(p, s, hash_function).digest()
b_derivedkey = PBKDF2(
b_password,
b_salt,
dkLen=(2 * keylength) + ivlength,
count=10000,
prf=pbkdf2_prf)
return b_derivedkey
@classmethod
def _gen_key_initctr(cls, b_password, b_salt):
# 16 for AES 128, 32 for AES256
keylength = 32
# match the size used for counter.new to avoid extra work
ivlength = 16
b_password = string2bytes(b_password)
if HAS_PBKDF2HMAC:
backend = default_backend()
kdf = PBKDF2HMAC(
algorithm=c_SHA256(),
length=2 * keylength + ivlength,
salt=b_salt,
iterations=10000,
backend=backend)
b_derivedkey = kdf.derive(b_password)
else:
b_derivedkey = cls._create_key(
b_password,
b_salt,
keylength,
ivlength)
b_key1 = b_derivedkey[:keylength]
b_key2 = b_derivedkey[keylength:(keylength * 2)]
b_iv = b_derivedkey[(keylength * 2):(keylength * 2) + ivlength]
return b_key1, b_key2, hexlify(b_iv)
def encrypt(self, data):
if not self.check_result :
self.logger.error('AES-256 encryption of data failed, reason: ' + self.err_msg)
return (False, self.err_msg)
if not self.b_password or self.b_password is None :
return (False, "加密密码为空")
data = obj2bytes(data)
if data[0] :
b_plaintext = data[1]
else :
return data
b_salt = os.urandom(32)
b_key1, b_key2, b_iv = self._gen_key_initctr(self.b_password, b_salt)
bs = AES.block_size
padding_length = (bs - len(b_plaintext) % bs) or bs
temp = obj2bytes(padding_length * chr(padding_length))
if temp[0] :
temp = temp[1]
else :
return temp
b_plaintext += temp
ctr = Counter.new(128, initial_value=int(b_iv, 16))
cipher = AES.new(b_key1, AES.MODE_CTR, counter=ctr)
b_ciphertext = cipher.encrypt(b_plaintext)
hmac = HMAC.new(b_key2, b_ciphertext, SHA256)
b_temp_ciphertext = b'\n'.join([hexlify(b_salt), string2bytes(hmac.hexdigest()), hexlify(b_ciphertext)])
b_new_ciphertext = hexlify(b_temp_ciphertext)
return self._handle_result(b_new_ciphertext)
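# The value returned above is hexlify(hexlify(salt) + b"\n" + hmac_hexdigest +
# b"\n" + hexlify(aes_ctr_ciphertext)); decrypt() below splits on the two
# newlines and reverses exactly this layout, so the two methods must stay in
# sync.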
def decrypt(self, data):
if not self.check_result :
self.logger.error('AES-256 decryption of data failed, reason: ' + self.err_msg)
return (False, self.err_msg)
if not self.b_password or self.b_password is None :
return (False, "加密密码为空")
data = obj2bytes(data)
if data[0] :
data = data[1]
else :
return data
ciphertext = unhexlify(data)
b_salt, b_cryptedHmac, b_ciphertext = ciphertext.split(b"\n", 2)
b_salt = unhexlify(b_salt)
b_ciphertext = unhexlify(b_ciphertext)
b_key1, b_key2, b_iv = self._gen_key_initctr(self.b_password, b_salt)
hmacDecrypt = HMAC.new(b_key2, b_ciphertext, SHA256)
if not self._is_equal(b_cryptedHmac, bytes2string(hmacDecrypt.hexdigest())):
self.logger.error('AES-256 decryption of data failed, reason: wrong password')
return (False, "decryption failed: wrong password")
ctr = Counter.new(128, initial_value=int(b_iv, 16))
cipher = AES.new(b_key1, AES.MODE_CTR, counter=ctr)
b_plaintext = cipher.decrypt(b_ciphertext)
padding_length = b_plaintext[-1]
b_plaintext = b_plaintext[:-padding_length]
return self._handle_result(b_plaintext)
def _handle_result(self, data):
new_data = obj2string(data)
if new_data[0] :
result = new_data[1]
else :
result = bytes2string(data)
return (True, result)
@staticmethod
def _is_equal(b_a, b_b):
b_b = string2bytes(b_b)
b_a = string2bytes(b_a)
if not (isinstance(b_a, bytes) and isinstance(b_b, bytes)):
return False
if len(b_a) != len(b_b):
return False
result = 0
for b_x, b_y in zip(b_a, b_b):
result |= b_x ^ b_y
return result == 0
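# Minimal round-trip sketch (illustrative only; assumes the pycrypto /
# cryptography dependencies imported above are available). Both methods
# return a (success, payload) tuple.
def _example_roundtrip():
    cipher = AES256_Algorithm('correct horse battery staple')
    ok, token = cipher.encrypt('secret data')
    if not ok:
        return (ok, token)  # token holds the error message in this case
    return cipher.decrypt(token)  # (True, 'secret data') on success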
| {
"content_hash": "d2934def8fb1610b6103e9ab343e8816",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 136,
"avg_line_length": 29.187793427230048,
"alnum_prop": 0.5739102460994049,
"repo_name": "lykops/lykops",
"id": "d22ea8ea955407c110faad533fc60c349eaf4bb7",
"size": "6445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/security/encryption/AES256/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1446589"
}
],
"symlink_target": ""
} |
from optparse import OptionParser
from task import Task
import logging
from model_param_space import param_space_dict
def train(model_name, data_name, params_dict, logger, eval_by_rel, if_save):
task = Task(model_name, data_name, 1, params_dict, logger, eval_by_rel)
task.refit(if_save)
def parse_args(parser):
parser.add_option("-m", "--model", dest="model_name", type="string", default="best_TransE_L2")
parser.add_option("-d", "--data", dest="data_name", type="string", default="wn18")
parser.add_option("-r", "--relation", dest="relation", action="store_true", default=False)
parser.add_option("-s", "--save", dest="save", action="store_true", default=False)
options, args = parser.parse_args()
return options, args
def main(options):
logger = logging.getLogger()
logging.basicConfig(format='[%(asctime)s] %(levelname)s: %(message)s', level=logging.INFO)
train(options.model_name, options.data_name,
params_dict=param_space_dict[options.model_name],
logger=logger, eval_by_rel=options.relation, if_save=options.save)
if __name__ == "__main__":
parser = OptionParser()
options, args = parse_args(parser)
main(options)
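# Example invocation (illustrative; assumes the model name exists in
# model_param_space.param_space_dict and the dataset is available locally):
#
#     python train.py -m best_TransE_L2 -d wn18 -r -s
#
# where -r toggles evaluation by relation and -s toggles saving the refit model.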
| {
"content_hash": "548eb3dbafc74c3af033ea5d62461722",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 98,
"avg_line_length": 41.310344827586206,
"alnum_prop": 0.6786310517529215,
"repo_name": "billy-inn/tensorflow-efe",
"id": "262a62d36a15d8fd30a0b6f856b8c3537d064616",
"size": "1198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59779"
}
],
"symlink_target": ""
} |
"""Launch an iPython shell with access to the current context."""
from IPython import embed
import unsync
import click
@unsync.command()
@click.pass_context
def ipython_shell(ctx):
"""Launch an IPython shell with access to the current context."""
data = ctx.obj # noqa
embed()
ipython_shell.display_name = 'shell'
| {
"content_hash": "47901f0ac6bb16cb63741474b57ddc9c",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 69,
"avg_line_length": 22,
"alnum_prop": 0.7181818181818181,
"repo_name": "PGower/Unsync",
"id": "bf150cc02b9c759a4cbd5f9894c5a6485f987012",
"size": "330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unsync/unsync/commands/utils/shell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "133964"
}
],
"symlink_target": ""
} |
from sys import byteorder
from array import array
from struct import pack
import pyaudio
import wave
THRESHOLD = 500
CHUNK_SIZE = 1024
FORMAT = pyaudio.paInt16
RATE = 44100
def is_silent(snd_data):
"Returns 'True' if below the 'silent' threshold"
return max(snd_data) < THRESHOLD
def normalize(snd_data):
"Average the volume out"
MAXIMUM = 16384
times = float(MAXIMUM)/max(abs(i) for i in snd_data)
r = array('h')
for i in snd_data:
r.append(int(i*times))
return r
def trim(snd_data):
"Trim the blank spots at the start and end"
def _trim(snd_data):
snd_started = False
r = array('h')
for i in snd_data:
if not snd_started and abs(i)>THRESHOLD:
snd_started = True
r.append(i)
elif snd_started:
r.append(i)
return r
# Trim to the left
snd_data = _trim(snd_data)
# Trim to the right
snd_data.reverse()
snd_data = _trim(snd_data)
snd_data.reverse()
return snd_data
def add_silence(snd_data, seconds):
"Add silence to the start and end of 'snd_data' of length 'seconds' (float)"
r = array('h', [0 for i in range(int(seconds*RATE))])
r.extend(snd_data)
r.extend([0 for i in range(int(seconds*RATE))])
return r
def record():
"""
Record a word or words from the microphone and
return the data as an array of signed shorts.
Normalizes the audio, trims silence from the
start and end, and pads with 0.5 seconds of
blank sound to make sure VLC et al can play
it without getting chopped off.
"""
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=1, rate=RATE,
input=True, output=True,
frames_per_buffer=CHUNK_SIZE)
num_silent = 0
snd_started = False
r = array('h')
while 1:
# little endian, signed short
snd_data = array('h', stream.read(CHUNK_SIZE))
if byteorder == 'big':
snd_data.byteswap()
r.extend(snd_data)
silent = is_silent(snd_data)
if silent and snd_started:
num_silent += 1
elif not silent and not snd_started:
snd_started = True
if snd_started and num_silent > 30:
break
sample_width = p.get_sample_size(FORMAT)
stream.stop_stream()
stream.close()
p.terminate()
r = normalize(r)
r = trim(r)
r = add_silence(r, 0.5)
return sample_width, r
def record_to_file(path):
"Records from the microphone and outputs the resulting data to 'path'"
sample_width, data = record()
data = pack('<' + ('h'*len(data)), *data)
wf = wave.open(path, 'wb')
wf.setnchannels(1)
wf.setsampwidth(sample_width)
wf.setframerate(RATE)
wf.writeframes(data)
wf.close()
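def _describe_wav(path):
    "Illustrative helper (not part of the original script): summarize a WAV file"
    # Read back the file written by record_to_file() and report its parameters.
    wf = wave.open(path, 'rb')
    try:
        return {'channels': wf.getnchannels(),
                'sample_width': wf.getsampwidth(),
                'framerate': wf.getframerate(),
                'duration_s': wf.getnframes() / float(wf.getframerate())}
    finally:
        wf.close()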
if __name__ == '__main__':
print("please speak a word into the microphone")
record_to_file('demo_j_qin.wav')
print("done - result written to demo.wav") | {
"content_hash": "f9d85d74bd607e10460904b63105d839",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 80,
"avg_line_length": 24.941666666666666,
"alnum_prop": 0.6030738389575676,
"repo_name": "bbcdli/xuexi",
"id": "dfee77ee5531d84afa5f17aa69d9b48584d5c94d",
"size": "2993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "audio/record.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "862"
},
{
"name": "C++",
"bytes": "30570"
},
{
"name": "HTML",
"bytes": "250"
},
{
"name": "Python",
"bytes": "2911994"
},
{
"name": "Shell",
"bytes": "14763"
}
],
"symlink_target": ""
} |
import argparse
import logging
from typing import Text, Union, Optional
from rasa.shared.constants import (
DEFAULT_CONFIG_PATH,
DEFAULT_DOMAIN_PATH,
DEFAULT_MODELS_PATH,
DEFAULT_DATA_PATH,
DEFAULT_ENDPOINTS_PATH,
)
def add_model_param(
parser: argparse.ArgumentParser,
model_name: Text = "Rasa",
add_positional_arg: bool = True,
default: Optional[Text] = DEFAULT_MODELS_PATH,
) -> None:
help_text = (
"Path to a trained {} model. If a directory is specified, it will "
"use the latest model in this directory.".format(model_name)
)
parser.add_argument("-m", "--model", type=str, default=default, help=help_text)
if add_positional_arg:
parser.add_argument(
"model-as-positional-argument", nargs="?", type=str, help=help_text
)
def add_stories_param(
parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
stories_name: Text = "training",
) -> None:
parser.add_argument(
"-s",
"--stories",
type=str,
default=DEFAULT_DATA_PATH,
help=f"File or folder containing your {stories_name} stories.",
)
def add_nlu_data_param(
parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
help_text: Text,
default: Optional[Text] = DEFAULT_DATA_PATH,
) -> None:
parser.add_argument("-u", "--nlu", type=str, default=default, help=help_text)
def add_domain_param(
parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
default: Optional[Text] = DEFAULT_DOMAIN_PATH,
) -> None:
parser.add_argument(
"-d",
"--domain",
type=str,
default=default,
help="Domain specification. This can be a single YAML file, or a directory "
"that contains several files with domain specifications in it. The content "
"of these files will be read and merged together.",
)
def add_config_param(
parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
default: Optional[Text] = DEFAULT_CONFIG_PATH,
) -> None:
parser.add_argument(
"-c",
"--config",
type=str,
default=default,
help="The policy and NLU pipeline configuration of your bot.",
)
def add_out_param(
parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
help_text: Text,
default: Optional[Text] = DEFAULT_MODELS_PATH,
required: bool = False,
) -> None:
parser.add_argument(
"--out",
type=str,
default=default,
help=help_text,
# The desired behaviour is that required indicates if this argument must
# have a value, but argparse interprets it as "must have a value
# from user input", so we toggle it only if our default is not set
required=required and default is None,
)
def add_endpoint_param(
parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
help_text: Text,
default: Optional[Text] = DEFAULT_ENDPOINTS_PATH,
) -> None:
"""Adds an option to an argument parser to configure endpoints path."""
parser.add_argument("--endpoints", type=str, default=default, help=help_text)
def add_data_param(
parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
default: Optional[Text] = DEFAULT_DATA_PATH,
required: bool = False,
data_type: Text = "Rasa ",
) -> None:
parser.add_argument(
"--data",
default=default,
nargs="+",
type=str,
help=f"Paths to the files or directories containing {data_type} data.",
# The desired behaviour is that required indicates if this argument must
# have a value, but argparse interprets it as "must have a value
# from user input", so we toggle it only if our default is not set
required=required and default is None,
)
def add_logging_options(parser: argparse.ArgumentParser) -> None:
"""Add options to an argument parser to configure logging levels."""
logging_arguments = parser.add_argument_group("Python Logging Options")
# arguments for logging configuration
logging_arguments.add_argument(
"-v",
"--verbose",
help="Be verbose. Sets logging level to INFO.",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
logging_arguments.add_argument(
"-vv",
"--debug",
help="Print lots of debugging statements. Sets logging level to DEBUG.",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
)
logging_arguments.add_argument(
"--quiet",
help="Be quiet! Sets logging level to WARNING.",
action="store_const",
dest="loglevel",
const=logging.WARNING,
)
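def _example_parser() -> argparse.ArgumentParser:
    """Illustrative only (not part of Rasa's CLI): compose the helpers above
    into a stand-alone parser."""
    parser = argparse.ArgumentParser(description="argument helper example")
    add_model_param(parser, model_name="Core")
    add_config_param(parser)
    add_logging_options(parser)
    # e.g. _example_parser().parse_args(["-m", "models/", "-v"]) yields
    # model="models/", config=DEFAULT_CONFIG_PATH, loglevel=logging.INFO
    return parser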
| {
"content_hash": "83c7e80f26a82303cff14161c2b0e386",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 84,
"avg_line_length": 31.136363636363637,
"alnum_prop": 0.6412930135557873,
"repo_name": "RasaHQ/rasa_nlu",
"id": "04e94da0daf4b1eba324ec4f29ed2ffed30d6449",
"size": "4795",
"binary": false,
"copies": "1",
"ref": "refs/heads/emptystring_10504",
"path": "rasa/cli/arguments/default_arguments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "705"
},
{
"name": "HTML",
"bytes": "3462"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "Python",
"bytes": "1467067"
},
{
"name": "Shell",
"bytes": "941"
}
],
"symlink_target": ""
} |
from pythonforandroid.recipe import PythonRecipe
class SixRecipe(PythonRecipe):
version = '1.10.0'
url = 'https://pypi.python.org/packages/source/s/six/six-{version}.tar.gz'
depends = ['setuptools']
recipe = SixRecipe()
| {
"content_hash": "85dec7507a9809fc9cf03fd4351d4bae",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 78,
"avg_line_length": 23.6,
"alnum_prop": 0.7076271186440678,
"repo_name": "rnixx/python-for-android",
"id": "43f15246a0657d4a41949d7ababe74fe8f192128",
"size": "236",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pythonforandroid/recipes/six/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "70942"
},
{
"name": "C++",
"bytes": "491"
},
{
"name": "CMake",
"bytes": "250"
},
{
"name": "CSS",
"bytes": "3487"
},
{
"name": "Dockerfile",
"bytes": "4440"
},
{
"name": "HTML",
"bytes": "11631"
},
{
"name": "Java",
"bytes": "517112"
},
{
"name": "Makefile",
"bytes": "27307"
},
{
"name": "Python",
"bytes": "1359684"
},
{
"name": "Shell",
"bytes": "5340"
}
],
"symlink_target": ""
} |
import importlib, logging, os
from contextlib import contextmanager
from tempfile import mkdtemp
import yaml
from datapackage_pipelines_knesset.common import object_storage
import json
import requests
import psutil
from datapackage_pipelines_metrics.influxdb import send_metric
import time
@contextmanager
def temp_loglevel(level=logging.INFO):
root_logging_handler = logging.root.handlers[0]
old_level = root_logging_handler.level
root_logging_handler.setLevel(level)
yield
root_logging_handler.setLevel(old_level)
def parse_import_func_parameter(value, *args):
if value and isinstance(value, str) and value.startswith("(") and value.endswith(")"):
cmdparts = value[1:-1].split(":")
cmdmodule = cmdparts[0]
cmdfunc = cmdparts[1]
cmdargs = cmdparts[2] if len(cmdparts) > 2 else None
func = importlib.import_module(cmdmodule)
for part in cmdfunc.split("."):
func = getattr(func, part)
if cmdargs == "args":
value = func(*args)
else:
value = func()
return value
@contextmanager
def temp_dir(*args, **kwargs):
dir = mkdtemp(*args, **kwargs)
try:
yield dir
except Exception:
if os.path.exists(dir):
os.rmdir(dir)
raise
@contextmanager
def temp_file(*args, **kwargs):
with temp_dir(*args, **kwargs) as dir:
file = os.path.join(dir, "temp")
try:
yield file
except Exception:
if os.path.exists(file):
os.unlink(file)
raise
def get_pipeline_run_step_parameters(pipeline_spec, pipeline_id, run_endswith, parameters_match=None):
with open(os.path.join(os.path.dirname(__file__), "..", "..", pipeline_spec, "pipeline-spec.yaml")) as f:
        pipeline_spec = yaml.safe_load(f.read())
for step in pipeline_spec[pipeline_id]["pipeline"]:
if step["run"].endswith(".{}".format(run_endswith)):
if not parameters_match:
parameters_match = {}
mismatch = False
for k, v in parameters_match.items():
if step["parameters"].get(k) != v:
mismatch=True
break
if not mismatch:
return step["parameters"]
raise Exception
def get_pipeline_schema(pipeline_spec, pipeline_id):
bucket = pipeline_spec
if pipeline_id == 'committee_meeting_protocols_parsed':
object_name = "table-schemas/committee_meeting_protocols_parsed.json"
elif pipeline_id == "committee-meeting-attendees":
object_name = "table-schemas/committee_meeting_attendees.json"
elif pipeline_id == "committee-meeting-speakers":
object_name = "table-schemas/committee_meeting_speakers.json"
else:
object_name = "table-schemas/{}.json".format(pipeline_id)
s3 = object_storage.get_s3()
if object_storage.exists(s3, bucket, object_name):
return json.loads(object_storage.read(s3, bucket, object_name))
else:
logging.warning("Missing local table schema, trying from remote")
url = "https://minio.oknesset.org/{}/{}".format(bucket, object_name)
res = requests.get(url)
res.raise_for_status()
return res.json()
@contextmanager
def process_metrics(measurement, tags, interval_seconds=5):
p = psutil.Process()
def callback(refresh=False):
if refresh:
callback.last_time_seconds = 0
cur_time_seconds = int(time.time())
if cur_time_seconds - callback.last_time_seconds >= interval_seconds:
send_metric(measurement, tags,
{'cpu_core_percent': p.cpu_percent(interval=1),
'memory_rss': p.memory_info().rss})
callback.last_time_seconds = cur_time_seconds
callback(True)
yield callback
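# Illustrative examples (not part of the original module):
#
# parse_import_func_parameter() accepts "(module:function[:args])" strings and
# returns the function's result; any other value is passed through unchanged:
#
#     parse_import_func_parameter("(os.path:join:args)", "a", "b")  # -> "a/b"
#     parse_import_func_parameter("plain value")                    # -> "plain value"
#
# temp_file() yields a path inside a throw-away temporary directory:
#
#     with temp_file() as path:
#         with open(path, "w") as f:
#             f.write("hello")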
| {
"content_hash": "9da30fd3a837b31d2f6e87af14600dab",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 109,
"avg_line_length": 34.4375,
"alnum_prop": 0.6245786880995593,
"repo_name": "OriHoch/knesset-data-pipelines",
"id": "016b6be395fcb43597dd19b4d2760708483ead95",
"size": "3857",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "datapackage_pipelines_knesset/common/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2992"
},
{
"name": "Jupyter Notebook",
"bytes": "33735"
},
{
"name": "Python",
"bytes": "268706"
},
{
"name": "Shell",
"bytes": "1456"
}
],
"symlink_target": ""
} |
from ..trezor.qt_generic import QtPlugin
from keepkey import KeepKeyPlugin
class Plugin(KeepKeyPlugin, QtPlugin):
icon_paired = ":icons/keepkey.png"
icon_unpaired = ":icons/keepkey_unpaired.png"
@classmethod
    def pin_matrix_widget_class(cls):
from keepkeylib.qt.pinmatrix import PinMatrixWidget
return PinMatrixWidget
| {
"content_hash": "503a56d800ef8a20103d11916f0ec0a8",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 59,
"avg_line_length": 29.416666666666668,
"alnum_prop": 0.7365439093484419,
"repo_name": "fireduck64/electrum",
"id": "189cc48269a00c83e3ade263bb9b820ce2ab05c2",
"size": "353",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "plugins/keepkey/qt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3867"
},
{
"name": "Makefile",
"bytes": "836"
},
{
"name": "NSIS",
"bytes": "7125"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Protocol Buffer",
"bytes": "2354"
},
{
"name": "Python",
"bytes": "1241321"
},
{
"name": "Shell",
"bytes": "7035"
}
],
"symlink_target": ""
} |
"""Unit tests for the DB API."""
import copy
import datetime
import iso8601
import mock
import netaddr
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import update_match
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_serialization import jsonutils
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
from sqlalchemy import Column
from sqlalchemy.dialects import sqlite
from sqlalchemy.exc import OperationalError
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy.orm import query
from sqlalchemy import sql
from sqlalchemy import Table
from nova import block_device
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova import context
from nova.db import api as db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import types as col_types
from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova import objects
from nova.objects import fields
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_console_auth_token
from nova import utils
CONF = nova.conf.CONF
get_engine = sqlalchemy_api.get_engine
def _reservation_get(context, uuid):
@sqlalchemy_api.pick_context_manager_reader
def doit(context):
return sqlalchemy_api.model_query(
context, models.Reservation, read_deleted="no").filter_by(
uuid=uuid).first()
result = doit(context)
if not result:
raise exception.ReservationNotFound(uuid=uuid)
return result
def _make_compute_node(host, node, hv_type, service_id):
compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
uuid=uuidutils.generate_uuid(),
vcpus_used=0, memory_mb_used=0,
local_gb_used=0, free_ram_mb=1024,
free_disk_gb=2048, hypervisor_type=hv_type,
hypervisor_version=1, cpu_info="",
running_vms=0, current_workload=0,
service_id=service_id,
host=host,
disk_available_least=100,
hypervisor_hostname=node,
host_ip='127.0.0.1',
supported_instances='',
pci_stats='',
metrics='',
extra_resources='',
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0,
stats='', numa_topology='')
# add some random stats
stats = dict(num_instances=3, num_proj_12345=2,
num_proj_23456=2, num_vm_building=3)
compute_node_dict['stats'] = jsonutils.dumps(stats)
return compute_node_dict
def _quota_create(context, project_id, user_id):
"""Create sample Quota objects."""
quotas = {}
user_quotas = {}
for i in range(3):
resource = 'resource%d' % i
if i == 2:
# test for project level resources
resource = 'fixed_ips'
quotas[resource] = db.quota_create(context,
project_id,
resource, i + 2).hard_limit
user_quotas[resource] = quotas[resource]
else:
quotas[resource] = db.quota_create(context,
project_id,
resource, i + 1).hard_limit
user_quotas[resource] = db.quota_create(context, project_id,
resource, i + 1,
user_id=user_id).hard_limit
class DbTestCase(test.TestCase):
def setUp(self):
super(DbTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def create_instance_with_args(self, **kwargs):
args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1',
'node': 'node1', 'project_id': self.project_id,
'vm_state': 'fake'}
if 'context' in kwargs:
ctxt = kwargs.pop('context')
args['project_id'] = ctxt.project_id
else:
ctxt = self.context
args.update(kwargs)
return db.instance_create(ctxt, args)
def fake_metadata(self, content):
meta = {}
for i in range(0, 10):
meta["foo%i" % i] = "this is %s item %i" % (content, i)
return meta
def create_metadata_for_instance(self, instance_uuid):
meta = self.fake_metadata('metadata')
db.instance_metadata_update(self.context, instance_uuid, meta, False)
sys_meta = self.fake_metadata('system_metadata')
db.instance_system_metadata_update(self.context, instance_uuid,
sys_meta, False)
return meta, sys_meta
class DecoratorTestCase(test.TestCase):
def _test_decorator_wraps_helper(self, decorator):
def test_func():
"""Test docstring."""
decorated_func = decorator(test_func)
self.assertEqual(test_func.__name__, decorated_func.__name__)
self.assertEqual(test_func.__doc__, decorated_func.__doc__)
self.assertEqual(test_func.__module__, decorated_func.__module__)
def test_require_context_decorator_wraps_functions_properly(self):
self._test_decorator_wraps_helper(sqlalchemy_api.require_context)
def test_require_deadlock_retry_wraps_functions_properly(self):
self._test_decorator_wraps_helper(
oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True))
@mock.patch.object(enginefacade._TransactionContextManager, 'using')
@mock.patch.object(enginefacade._TransactionContextManager, '_clone')
def test_select_db_reader_mode_select_sync(self, mock_clone, mock_using):
@db.select_db_reader_mode
def func(self, context, value, use_slave=False):
pass
mock_clone.return_value = enginefacade._TransactionContextManager(
mode=enginefacade._READER)
ctxt = context.get_admin_context()
value = 'some_value'
func(self, ctxt, value)
mock_clone.assert_called_once_with(mode=enginefacade._READER)
mock_using.assert_called_once_with(ctxt)
@mock.patch.object(enginefacade._TransactionContextManager, 'using')
@mock.patch.object(enginefacade._TransactionContextManager, '_clone')
def test_select_db_reader_mode_select_async(self, mock_clone, mock_using):
@db.select_db_reader_mode
def func(self, context, value, use_slave=False):
pass
mock_clone.return_value = enginefacade._TransactionContextManager(
mode=enginefacade._ASYNC_READER)
ctxt = context.get_admin_context()
value = 'some_value'
func(self, ctxt, value, use_slave=True)
mock_clone.assert_called_once_with(mode=enginefacade._ASYNC_READER)
mock_using.assert_called_once_with(ctxt)
@mock.patch.object(enginefacade._TransactionContextManager, 'using')
@mock.patch.object(enginefacade._TransactionContextManager, '_clone')
def test_select_db_reader_mode_no_use_slave_select_sync(self, mock_clone,
mock_using):
@db.select_db_reader_mode
def func(self, context, value):
pass
mock_clone.return_value = enginefacade._TransactionContextManager(
mode=enginefacade._READER)
ctxt = context.get_admin_context()
value = 'some_value'
func(self, ctxt, value)
mock_clone.assert_called_once_with(mode=enginefacade._READER)
mock_using.assert_called_once_with(ctxt)
def _get_fake_aggr_values():
return {'name': 'fake_aggregate'}
def _get_fake_aggr_metadata():
return {'fake_key1': 'fake_value1',
'fake_key2': 'fake_value2',
'availability_zone': 'fake_avail_zone'}
def _get_fake_aggr_hosts():
return ['foo.openstack.org']
def _create_aggregate(context=context.get_admin_context(),
values=_get_fake_aggr_values(),
metadata=_get_fake_aggr_metadata()):
return db.aggregate_create(context, values, metadata)
def _create_aggregate_with_hosts(context=context.get_admin_context(),
values=_get_fake_aggr_values(),
metadata=_get_fake_aggr_metadata(),
hosts=_get_fake_aggr_hosts()):
result = _create_aggregate(context=context,
values=values, metadata=metadata)
for host in hosts:
db.aggregate_host_add(context, result['id'], host)
return result
@mock.patch.object(sqlalchemy_api, '_get_regexp_ops',
return_value=(lambda x: x, 'LIKE'))
class UnsupportedDbRegexpTestCase(DbTestCase):
def test_instance_get_all_by_filters_paginate(self, mock_get_regexp):
test1 = self.create_instance_with_args(display_name='test1')
test2 = self.create_instance_with_args(display_name='test2')
test3 = self.create_instance_with_args(display_name='test3')
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
marker=None)
self.assertEqual(3, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
sort_dir="asc",
marker=test1['uuid'])
self.assertEqual(2, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
sort_dir="asc",
marker=test2['uuid'])
self.assertEqual(1, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
sort_dir="asc",
marker=test3['uuid'])
self.assertEqual(0, len(result))
self.assertRaises(exception.MarkerNotFound,
db.instance_get_all_by_filters,
self.context, {'display_name': '%test%'},
marker=uuidsentinel.uuid1)
def test_instance_get_all_uuids_by_host(self, mock_get_regexp):
test1 = self.create_instance_with_args(display_name='test1')
test2 = self.create_instance_with_args(display_name='test2')
test3 = self.create_instance_with_args(display_name='test3')
uuids = [i.uuid for i in (test1, test2, test3)]
found_uuids = db.instance_get_all_uuids_by_host(self.context,
test1.host)
self.assertEqual(sorted(uuids), sorted(found_uuids))
def _assert_equals_inst_order(self, correct_order, filters,
sort_keys=None, sort_dirs=None,
limit=None, marker=None,
match_keys=['uuid', 'vm_state',
'display_name', 'id']):
'''Retrieves instances based on the given filters and sorting
information and verifies that the instances are returned in the
correct sorted order by ensuring that the supplied keys match.
'''
result = db.instance_get_all_by_filters_sort(
self.context, filters, limit=limit, marker=marker,
sort_keys=sort_keys, sort_dirs=sort_dirs)
self.assertEqual(len(correct_order), len(result))
for inst1, inst2 in zip(result, correct_order):
for key in match_keys:
self.assertEqual(inst1.get(key), inst2.get(key))
return result
def test_instance_get_all_by_filters_sort_keys(self, mock_get_regexp):
'''Verifies sort order and direction for multiple instances.'''
# Instances that will reply to the query
test1_active = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ACTIVE)
test1_error = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
test1_error2 = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
test2_active = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ACTIVE)
test2_error = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
test2_error2 = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
# Other instances in the DB, will not match name filter
other_error = self.create_instance_with_args(
display_name='other',
vm_state=vm_states.ERROR)
other_active = self.create_instance_with_args(
display_name='other',
vm_state=vm_states.ACTIVE)
filters = {'display_name': '%test%'}
# Verify different sort key/direction combinations
sort_keys = ['display_name', 'vm_state', 'created_at']
sort_dirs = ['asc', 'asc', 'asc']
correct_order = [test1_active, test1_error, test1_error2,
test2_active, test2_error, test2_error2]
self._assert_equals_inst_order(correct_order, filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
sort_dirs = ['asc', 'desc', 'asc']
correct_order = [test1_error, test1_error2, test1_active,
test2_error, test2_error2, test2_active]
self._assert_equals_inst_order(correct_order, filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
sort_dirs = ['desc', 'desc', 'asc']
correct_order = [test2_error, test2_error2, test2_active,
test1_error, test1_error2, test1_active]
self._assert_equals_inst_order(correct_order, filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
# created_at is added by default if not supplied, descending order
sort_keys = ['display_name', 'vm_state']
sort_dirs = ['desc', 'desc']
correct_order = [test2_error2, test2_error, test2_active,
test1_error2, test1_error, test1_active]
self._assert_equals_inst_order(correct_order, filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
        # Now created_at should be in ascending order (it defaults to the
        # first sort direction)
sort_dirs = ['asc', 'asc']
correct_order = [test1_active, test1_error, test1_error2,
test2_active, test2_error, test2_error2]
self._assert_equals_inst_order(correct_order, filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
# Remove name filter, get all instances
correct_order = [other_active, other_error,
test1_active, test1_error, test1_error2,
test2_active, test2_error, test2_error2]
self._assert_equals_inst_order(correct_order, {},
sort_keys=sort_keys,
sort_dirs=sort_dirs)
# Default sorting, 'created_at' then 'id' in desc order
correct_order = [other_active, other_error,
test2_error2, test2_error, test2_active,
test1_error2, test1_error, test1_active]
self._assert_equals_inst_order(correct_order, {})
def test_instance_get_all_by_filters_sort_keys_paginate(self,
mock_get_regexp):
'''Verifies sort order with pagination.'''
# Instances that will reply to the query
test1_active = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ACTIVE)
test1_error = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
test1_error2 = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
test2_active = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ACTIVE)
test2_error = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
test2_error2 = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
# Other instances in the DB, will not match name filter
self.create_instance_with_args(display_name='other')
self.create_instance_with_args(display_name='other')
filters = {'display_name': '%test%'}
# Common sort information for every query
sort_keys = ['display_name', 'vm_state', 'created_at']
sort_dirs = ['asc', 'desc', 'asc']
# Overall correct instance order based on the sort keys
correct_order = [test1_error, test1_error2, test1_active,
test2_error, test2_error2, test2_active]
# Limits of 1, 2, and 3, verify that the instances returned are in the
# correct sorted order, update the marker to get the next correct page
for limit in range(1, 4):
marker = None
            # Include the maximum number of instances (i.e., 6) to ensure that
# the last query (with marker pointing to the last instance)
# returns 0 servers
for i in range(0, 7, limit):
if i == len(correct_order):
correct = []
else:
correct = correct_order[i:i + limit]
insts = self._assert_equals_inst_order(
correct, filters,
sort_keys=sort_keys, sort_dirs=sort_dirs,
limit=limit, marker=marker)
if correct:
marker = insts[-1]['uuid']
self.assertEqual(correct[-1]['uuid'], marker)
def test_instance_get_deleted_by_filters_sort_keys_paginate(self,
mock_get_regexp):
'''Verifies sort order with pagination for deleted instances.'''
ctxt = context.get_admin_context()
# Instances that will reply to the query
test1_active = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ACTIVE)
db.instance_destroy(ctxt, test1_active['uuid'])
test1_error = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
db.instance_destroy(ctxt, test1_error['uuid'])
test1_error2 = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
db.instance_destroy(ctxt, test1_error2['uuid'])
test2_active = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ACTIVE)
db.instance_destroy(ctxt, test2_active['uuid'])
test2_error = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
db.instance_destroy(ctxt, test2_error['uuid'])
test2_error2 = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
db.instance_destroy(ctxt, test2_error2['uuid'])
# Other instances in the DB, will not match name filter
self.create_instance_with_args(display_name='other')
self.create_instance_with_args(display_name='other')
filters = {'display_name': '%test%', 'deleted': True}
# Common sort information for every query
sort_keys = ['display_name', 'vm_state', 'created_at']
sort_dirs = ['asc', 'desc', 'asc']
# Overall correct instance order based on the sort keys
correct_order = [test1_error, test1_error2, test1_active,
test2_error, test2_error2, test2_active]
# Limits of 1, 2, and 3, verify that the instances returned are in the
# correct sorted order, update the marker to get the next correct page
for limit in range(1, 4):
marker = None
            # Include the maximum number of instances (i.e., 6) to ensure that
# the last query (with marker pointing to the last instance)
# returns 0 servers
for i in range(0, 7, limit):
if i == len(correct_order):
correct = []
else:
correct = correct_order[i:i + limit]
insts = self._assert_equals_inst_order(
correct, filters,
sort_keys=sort_keys, sort_dirs=sort_dirs,
limit=limit, marker=marker)
if correct:
marker = insts[-1]['uuid']
self.assertEqual(correct[-1]['uuid'], marker)
class ModelQueryTestCase(DbTestCase):
def test_model_query_invalid_arguments(self):
@sqlalchemy_api.pick_context_manager_reader
def test(context):
# read_deleted shouldn't accept invalid values
self.assertRaises(ValueError, sqlalchemy_api.model_query,
context, models.Instance,
read_deleted=False)
self.assertRaises(ValueError, sqlalchemy_api.model_query,
context, models.Instance,
read_deleted="foo")
# Check model is a valid model
self.assertRaises(TypeError, sqlalchemy_api.model_query,
context, "")
test(self.context)
@mock.patch.object(sqlalchemyutils, 'model_query')
def test_model_query_use_context_session(self, mock_model_query):
@sqlalchemy_api.main_context_manager.reader
def fake_method(context):
session = context.session
sqlalchemy_api.model_query(context, models.Instance)
return session
session = fake_method(self.context)
mock_model_query.assert_called_once_with(models.Instance, session,
None, deleted=False)
class EngineFacadeTestCase(DbTestCase):
def test_use_single_context_session_writer(self):
# Checks that session in context would not be overwritten by
# annotation @sqlalchemy_api.main_context_manager.writer if annotation
# is used twice.
@sqlalchemy_api.main_context_manager.writer
def fake_parent_method(context):
session = context.session
return fake_child_method(context), session
@sqlalchemy_api.main_context_manager.writer
def fake_child_method(context):
session = context.session
sqlalchemy_api.model_query(context, models.Instance)
return session
parent_session, child_session = fake_parent_method(self.context)
self.assertEqual(parent_session, child_session)
def test_use_single_context_session_reader(self):
# Checks that session in context would not be overwritten by
# annotation @sqlalchemy_api.main_context_manager.reader if annotation
# is used twice.
@sqlalchemy_api.main_context_manager.reader
def fake_parent_method(context):
session = context.session
return fake_child_method(context), session
@sqlalchemy_api.main_context_manager.reader
def fake_child_method(context):
session = context.session
sqlalchemy_api.model_query(context, models.Instance)
return session
parent_session, child_session = fake_parent_method(self.context)
self.assertEqual(parent_session, child_session)
class SqlAlchemyDbApiNoDbTestCase(test.NoDBTestCase):
"""No-DB test class for simple test cases that do not require a backend."""
def test_manual_join_columns_immutable_list(self):
# Tests that _manual_join_columns doesn't modify the list passed in.
columns_to_join = ['system_metadata', 'test']
manual_joins, columns_to_join2 = (
sqlalchemy_api._manual_join_columns(columns_to_join))
self.assertEqual(['system_metadata'], manual_joins)
self.assertEqual(['test'], columns_to_join2)
self.assertEqual(['system_metadata', 'test'], columns_to_join)
def test_convert_objects_related_datetimes(self):
t1 = timeutils.utcnow()
t2 = t1 + datetime.timedelta(seconds=10)
t3 = t2 + datetime.timedelta(hours=1)
t2_utc = t2.replace(tzinfo=iso8601.UTC)
t3_utc = t3.replace(tzinfo=iso8601.UTC)
datetime_keys = ('created_at', 'deleted_at')
test1 = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
sqlalchemy_api.convert_objects_related_datetimes(test1, *datetime_keys)
self.assertEqual(test1, expected_dict)
test2 = {'created_at': t1, 'deleted_at': t2_utc, 'updated_at': t3}
expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
sqlalchemy_api.convert_objects_related_datetimes(test2, *datetime_keys)
self.assertEqual(test2, expected_dict)
test3 = {'deleted_at': t2_utc, 'updated_at': t3_utc}
expected_dict = {'deleted_at': t2, 'updated_at': t3_utc}
sqlalchemy_api.convert_objects_related_datetimes(test3, *datetime_keys)
self.assertEqual(test3, expected_dict)
def test_convert_objects_related_datetimes_with_strings(self):
t1 = '2015-05-28T17:15:53.000000'
t2 = '2012-04-21T18:25:43-05:00'
t3 = '2012-04-23T18:25:43.511Z'
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
test1 = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
expected_dict = {
'created_at': timeutils.parse_strtime(t1).replace(tzinfo=None),
'deleted_at': timeutils.parse_isotime(t2).replace(tzinfo=None),
'updated_at': timeutils.parse_isotime(t3).replace(tzinfo=None)}
sqlalchemy_api.convert_objects_related_datetimes(test1)
self.assertEqual(test1, expected_dict)
sqlalchemy_api.convert_objects_related_datetimes(test1, *datetime_keys)
self.assertEqual(test1, expected_dict)
def test_get_regexp_op_for_database_sqlite(self):
filter, op = sqlalchemy_api._get_regexp_ops('sqlite:///')
self.assertEqual('|', filter('|'))
self.assertEqual('REGEXP', op)
def test_get_regexp_op_for_database_mysql(self):
filter, op = sqlalchemy_api._get_regexp_ops(
'mysql+pymysql://root@localhost')
self.assertEqual('\\|', filter('|'))
self.assertEqual('REGEXP', op)
def test_get_regexp_op_for_database_postgresql(self):
filter, op = sqlalchemy_api._get_regexp_ops(
'postgresql://localhost')
self.assertEqual('|', filter('|'))
self.assertEqual('~', op)
def test_get_regexp_op_for_database_unknown(self):
filter, op = sqlalchemy_api._get_regexp_ops('notdb:///')
self.assertEqual('|', filter('|'))
self.assertEqual('LIKE', op)
@mock.patch.object(sqlalchemy_api, 'main_context_manager')
def test_get_engine(self, mock_ctxt_mgr):
sqlalchemy_api.get_engine()
mock_ctxt_mgr.writer.get_engine.assert_called_once_with()
@mock.patch.object(sqlalchemy_api, 'main_context_manager')
def test_get_engine_use_slave(self, mock_ctxt_mgr):
sqlalchemy_api.get_engine(use_slave=True)
mock_ctxt_mgr.reader.get_engine.assert_called_once_with()
def test_get_db_conf_with_connection(self):
mock_conf_group = mock.MagicMock()
mock_conf_group.connection = 'fakemain://'
db_conf = sqlalchemy_api._get_db_conf(mock_conf_group,
connection='fake://')
self.assertEqual('fake://', db_conf['connection'])
@mock.patch.object(sqlalchemy_api, 'api_context_manager')
def test_get_api_engine(self, mock_ctxt_mgr):
sqlalchemy_api.get_api_engine()
mock_ctxt_mgr.writer.get_engine.assert_called_once_with()
@mock.patch.object(sqlalchemy_api, '_instance_get_by_uuid')
@mock.patch.object(sqlalchemy_api, '_instances_fill_metadata')
@mock.patch('oslo_db.sqlalchemy.utils.paginate_query')
def test_instance_get_all_by_filters_paginated_allows_deleted_marker(
self, mock_paginate, mock_fill, mock_get):
ctxt = mock.MagicMock()
ctxt.elevated.return_value = mock.sentinel.elevated
sqlalchemy_api.instance_get_all_by_filters_sort(ctxt, {}, marker='foo')
mock_get.assert_called_once_with(mock.sentinel.elevated, 'foo')
ctxt.elevated.assert_called_once_with(read_deleted='yes')
def test_replace_sub_expression(self):
ret = sqlalchemy_api._safe_regex_mysql('|')
self.assertEqual('\\|', ret)
ret = sqlalchemy_api._safe_regex_mysql('||')
self.assertEqual('\\|\\|', ret)
ret = sqlalchemy_api._safe_regex_mysql('a||')
self.assertEqual('a\\|\\|', ret)
ret = sqlalchemy_api._safe_regex_mysql('|a|')
self.assertEqual('\\|a\\|', ret)
ret = sqlalchemy_api._safe_regex_mysql('||a')
self.assertEqual('\\|\\|a', ret)
class SqlAlchemyDbApiTestCase(DbTestCase):
def test_instance_get_all_by_host(self):
ctxt = context.get_admin_context()
self.create_instance_with_args()
self.create_instance_with_args()
self.create_instance_with_args(host='host2')
@sqlalchemy_api.pick_context_manager_reader
def test(context):
return sqlalchemy_api.instance_get_all_by_host(
context, 'host1')
result = test(ctxt)
self.assertEqual(2, len(result))
# make sure info_cache and security_groups were auto-joined
instance = result[0]
self.assertIn('info_cache', instance)
self.assertIn('security_groups', instance)
def test_instance_get_all_by_host_no_joins(self):
"""Tests that we don't join on the info_cache and security_groups
tables if columns_to_join is an empty list.
"""
self.create_instance_with_args()
@sqlalchemy_api.pick_context_manager_reader
def test(ctxt):
return sqlalchemy_api.instance_get_all_by_host(
ctxt, 'host1', columns_to_join=[])
result = test(context.get_admin_context())
self.assertEqual(1, len(result))
# make sure info_cache and security_groups were not auto-joined
instance = result[0]
self.assertNotIn('info_cache', instance)
self.assertNotIn('security_groups', instance)
def test_instance_get_all_uuids_by_host(self):
ctxt = context.get_admin_context()
self.create_instance_with_args()
self.create_instance_with_args()
self.create_instance_with_args(host='host2')
@sqlalchemy_api.pick_context_manager_reader
def test(context):
return sqlalchemy_api._instance_get_all_uuids_by_host(
context, 'host1')
result = test(ctxt)
self.assertEqual(2, len(result))
self.assertEqual(six.text_type, type(result[0]))
@mock.patch('oslo_utils.uuidutils.generate_uuid')
def test_instance_get_active_by_window_joined_paging(self, mock_uuids):
mock_uuids.side_effect = ['BBB', 'ZZZ', 'AAA', 'CCC']
ctxt = context.get_admin_context()
now = datetime.datetime(2015, 10, 2)
self.create_instance_with_args(project_id='project-ZZZ')
self.create_instance_with_args(project_id='project-ZZZ')
self.create_instance_with_args(project_id='project-ZZZ')
self.create_instance_with_args(project_id='project-AAA')
# no limit or marker
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=now, columns_to_join=[])
actual_uuids = [row['uuid'] for row in result]
self.assertEqual(['CCC', 'AAA', 'BBB', 'ZZZ'], actual_uuids)
# just limit
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=now, columns_to_join=[], limit=2)
actual_uuids = [row['uuid'] for row in result]
self.assertEqual(['CCC', 'AAA'], actual_uuids)
# limit & marker
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=now, columns_to_join=[], limit=2, marker='CCC')
actual_uuids = [row['uuid'] for row in result]
self.assertEqual(['AAA', 'BBB'], actual_uuids)
# unknown marker
self.assertRaises(
exception.MarkerNotFound,
sqlalchemy_api.instance_get_active_by_window_joined,
ctxt, begin=now, columns_to_join=[], limit=2, marker='unknown')
def test_instance_get_active_by_window_joined(self):
now = datetime.datetime(2013, 10, 10, 17, 16, 37, 156701)
start_time = now - datetime.timedelta(minutes=10)
now1 = now + datetime.timedelta(minutes=1)
now2 = now + datetime.timedelta(minutes=2)
now3 = now + datetime.timedelta(minutes=3)
ctxt = context.get_admin_context()
# used for testing columns_to_join
network_info = jsonutils.dumps({'ckey': 'cvalue'})
sample_data = {
'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
'info_cache': {'network_info': network_info},
}
self.create_instance_with_args(launched_at=now, **sample_data)
self.create_instance_with_args(launched_at=now1, terminated_at=now2,
**sample_data)
self.create_instance_with_args(launched_at=now2, terminated_at=now3,
**sample_data)
self.create_instance_with_args(launched_at=now3, terminated_at=None,
**sample_data)
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=now)
self.assertEqual(4, len(result))
# verify that all default columns are joined
meta = utils.metadata_to_dict(result[0]['metadata'])
self.assertEqual(sample_data['metadata'], meta)
sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
self.assertEqual(sample_data['system_metadata'], sys_meta)
self.assertIn('info_cache', result[0])
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=now3, columns_to_join=['info_cache'])
self.assertEqual(2, len(result))
# verify that only info_cache is loaded
meta = utils.metadata_to_dict(result[0]['metadata'])
self.assertEqual({}, meta)
self.assertIn('info_cache', result[0])
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=start_time, end=now)
self.assertEqual(0, len(result))
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=start_time, end=now2,
columns_to_join=['system_metadata'])
self.assertEqual(2, len(result))
# verify that only system_metadata is loaded
meta = utils.metadata_to_dict(result[0]['metadata'])
self.assertEqual({}, meta)
sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
self.assertEqual(sample_data['system_metadata'], sys_meta)
self.assertNotIn('info_cache', result[0])
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=now2, end=now3,
columns_to_join=['metadata', 'info_cache'])
self.assertEqual(2, len(result))
# verify that only metadata and info_cache are loaded
meta = utils.metadata_to_dict(result[0]['metadata'])
self.assertEqual(sample_data['metadata'], meta)
sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
self.assertEqual({}, sys_meta)
self.assertIn('info_cache', result[0])
self.assertEqual(network_info, result[0]['info_cache']['network_info'])
@mock.patch('nova.db.sqlalchemy.api.instance_get_all_by_filters_sort')
def test_instance_get_all_by_filters_calls_sort(self,
mock_get_all_filters_sort):
'''Verifies instance_get_all_by_filters calls the sort function.'''
# sort parameters should be wrapped in a list, all other parameters
# should be passed through
ctxt = context.get_admin_context()
sqlalchemy_api.instance_get_all_by_filters(ctxt, {'foo': 'bar'},
'sort_key', 'sort_dir', limit=100, marker='uuid',
columns_to_join='columns')
mock_get_all_filters_sort.assert_called_once_with(ctxt, {'foo': 'bar'},
limit=100, marker='uuid', columns_to_join='columns',
sort_keys=['sort_key'], sort_dirs=['sort_dir'])
def test_instance_get_all_by_filters_sort_key_invalid(self):
'''InvalidSortKey raised if an invalid key is given.'''
for keys in [['foo'], ['uuid', 'foo']]:
self.assertRaises(exception.InvalidSortKey,
db.instance_get_all_by_filters_sort,
self.context,
filters={},
sort_keys=keys)
class ProcessSortParamTestCase(test.TestCase):
def test_process_sort_params_defaults(self):
'''Verifies default sort parameters.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params([], [])
self.assertEqual(['created_at', 'id'], sort_keys)
self.assertEqual(['asc', 'asc'], sort_dirs)
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(None, None)
self.assertEqual(['created_at', 'id'], sort_keys)
self.assertEqual(['asc', 'asc'], sort_dirs)
def test_process_sort_params_override_default_keys(self):
'''Verifies that the default keys can be overridden.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_keys=['key1', 'key2', 'key3'])
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)
def test_process_sort_params_override_default_dir(self):
'''Verifies that the default direction can be overridden.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_dir='dir1')
self.assertEqual(['created_at', 'id'], sort_keys)
self.assertEqual(['dir1', 'dir1'], sort_dirs)
def test_process_sort_params_override_default_key_and_dir(self):
'''Verifies that the default key and dir can be overridden.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_keys=['key1', 'key2', 'key3'],
default_dir='dir1')
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'dir1', 'dir1'], sort_dirs)
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_keys=[], default_dir='dir1')
self.assertEqual([], sort_keys)
self.assertEqual([], sort_dirs)
def test_process_sort_params_non_default(self):
'''Verifies that non-default keys are added correctly.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['key1', 'key2'], ['asc', 'desc'])
self.assertEqual(['key1', 'key2', 'created_at', 'id'], sort_keys)
# First sort_dir in list is used when adding the default keys
self.assertEqual(['asc', 'desc', 'asc', 'asc'], sort_dirs)
def test_process_sort_params_default(self):
'''Verifies that default keys are added correctly.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], ['asc', 'desc'])
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['asc', 'desc', 'asc'], sort_dirs)
# Include default key value, rely on default direction
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], [])
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)
def test_process_sort_params_default_dir(self):
'''Verifies that the default dir is applied to all keys.'''
# Direction is set, ignore default dir
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], ['desc'], default_dir='dir')
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['desc', 'desc', 'desc'], sort_dirs)
# But should be used if no direction is set
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], [], default_dir='dir')
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['dir', 'dir', 'dir'], sort_dirs)
def test_process_sort_params_unequal_length(self):
'''Verifies that a sort direction list is applied correctly.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2', 'key3'], ['desc'])
self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
self.assertEqual(['desc', 'desc', 'desc', 'desc'], sort_dirs)
        # Missing directions default to the first sort direction in the list
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2', 'key3'], ['desc', 'asc'])
self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
self.assertEqual(['desc', 'asc', 'desc', 'desc'], sort_dirs)
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2', 'key3'], ['desc', 'asc', 'asc'])
self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
self.assertEqual(['desc', 'asc', 'asc', 'desc'], sort_dirs)
def test_process_sort_params_extra_dirs_lengths(self):
'''InvalidInput raised if more directions are given.'''
self.assertRaises(exception.InvalidInput,
sqlalchemy_api.process_sort_params,
['key1', 'key2'],
['asc', 'desc', 'desc'])
def test_process_sort_params_invalid_sort_dir(self):
'''InvalidInput raised if invalid directions are given.'''
for dirs in [['foo'], ['asc', 'foo'], ['asc', 'desc', 'foo']]:
self.assertRaises(exception.InvalidInput,
sqlalchemy_api.process_sort_params,
['key'],
dirs)
class MigrationTestCase(test.TestCase):
def setUp(self):
super(MigrationTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self._create()
self._create()
self._create(status='reverted')
self._create(status='confirmed')
self._create(status='error')
self._create(status='failed')
self._create(status='accepted')
self._create(status='done')
self._create(status='completed')
self._create(status='cancelled')
self._create(source_compute='host2', source_node='b',
dest_compute='host1', dest_node='a')
self._create(source_compute='host2', dest_compute='host3')
self._create(source_compute='host3', dest_compute='host4')
def _create(self, status='migrating', source_compute='host1',
source_node='a', dest_compute='host2', dest_node='b',
system_metadata=None, migration_type=None, uuid=None,
created_at=None, updated_at=None):
values = {'host': source_compute}
instance = db.instance_create(self.ctxt, values)
if system_metadata:
db.instance_system_metadata_update(self.ctxt, instance['uuid'],
system_metadata, False)
values = {'status': status, 'source_compute': source_compute,
'source_node': source_node, 'dest_compute': dest_compute,
'dest_node': dest_node, 'instance_uuid': instance['uuid'],
'migration_type': migration_type, 'uuid': uuid}
if created_at:
values['created_at'] = created_at
if updated_at:
values['updated_at'] = updated_at
db.migration_create(self.ctxt, values)
return values
def _assert_in_progress(self, migrations):
for migration in migrations:
self.assertNotEqual('confirmed', migration['status'])
self.assertNotEqual('reverted', migration['status'])
self.assertNotEqual('error', migration['status'])
self.assertNotEqual('failed', migration['status'])
self.assertNotEqual('accepted', migration['status'])
self.assertNotEqual('done', migration['status'])
self.assertNotEqual('cancelled', migration['status'])
def test_migration_get_in_progress_joins(self):
self._create(source_compute='foo', system_metadata={'foo': 'bar'})
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'foo', 'a')
system_metadata = migrations[0]['instance']['system_metadata'][0]
self.assertEqual(system_metadata['key'], 'foo')
self.assertEqual(system_metadata['value'], 'bar')
def test_in_progress_host1_nodea(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host1', 'a')
# 2 as source + 1 as dest
self.assertEqual(3, len(migrations))
self._assert_in_progress(migrations)
def test_in_progress_host1_nodeb(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host1', 'b')
# some migrations are to/from host1, but none with a node 'b'
self.assertEqual(0, len(migrations))
def test_in_progress_host2_nodeb(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host2', 'b')
# 2 as dest, 1 as source
self.assertEqual(3, len(migrations))
self._assert_in_progress(migrations)
def test_instance_join(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host2', 'b')
for migration in migrations:
instance = migration['instance']
self.assertEqual(migration['instance_uuid'], instance['uuid'])
def test_migration_get_by_uuid(self):
migration1 = self._create(uuid=uuidsentinel.migration1_uuid)
self._create(uuid=uuidsentinel.other_uuid)
real_migration1 = db.migration_get_by_uuid(
self.ctxt, uuidsentinel.migration1_uuid)
for key in migration1:
self.assertEqual(migration1[key], real_migration1[key])
def test_migration_get_by_uuid_soft_deleted_and_deleted(self):
migration1 = self._create(uuid=uuidsentinel.migration1_uuid)
@sqlalchemy_api.pick_context_manager_writer
def soft_delete_it(context):
sqlalchemy_api.model_query(context, models.Migration).\
filter_by(uuid=uuidsentinel.migration1_uuid).\
soft_delete()
@sqlalchemy_api.pick_context_manager_writer
def delete_it(context):
sqlalchemy_api.model_query(context, models.Migration,
read_deleted="yes").\
filter_by(uuid=uuidsentinel.migration1_uuid).\
delete()
soft_delete_it(self.ctxt)
        soft_deleted_migration1 = db.migration_get_by_uuid(
            self.ctxt, uuidsentinel.migration1_uuid)
        for key in migration1:
            self.assertEqual(migration1[key], soft_deleted_migration1[key])
delete_it(self.ctxt)
self.assertRaises(exception.MigrationNotFound,
db.migration_get_by_uuid, self.ctxt,
uuidsentinel.migration1_uuid)
def test_migration_get_by_uuid_not_found(self):
"""Asserts that MigrationNotFound is raised if a migration is not
found by a given uuid.
"""
self.assertRaises(exception.MigrationNotFound,
db.migration_get_by_uuid, self.ctxt,
uuidsentinel.migration_not_found)
def test_get_migrations_by_filters(self):
filters = {"status": "migrating", "host": "host3",
"migration_type": None, "hidden": False}
migrations = db.migration_get_all_by_filters(self.ctxt, filters)
self.assertEqual(2, len(migrations))
for migration in migrations:
self.assertEqual(filters["status"], migration['status'])
hosts = [migration['source_compute'], migration['dest_compute']]
self.assertIn(filters["host"], hosts)
def test_get_migrations_by_uuid_filters(self):
mig_uuid1 = self._create(uuid=uuidsentinel.mig_uuid1)
filters = {"uuid": [uuidsentinel.mig_uuid1]}
mig_get = db.migration_get_all_by_filters(self.ctxt, filters)
self.assertEqual(1, len(mig_get))
for key in mig_uuid1:
self.assertEqual(mig_uuid1[key], mig_get[0][key])
def test_get_migrations_by_filters_with_multiple_statuses(self):
filters = {"status": ["reverted", "confirmed"],
"migration_type": None, "hidden": False}
migrations = db.migration_get_all_by_filters(self.ctxt, filters)
self.assertEqual(2, len(migrations))
for migration in migrations:
self.assertIn(migration['status'], filters['status'])
def test_get_migrations_by_filters_unicode_status(self):
self._create(status=u"unicode")
filters = {"status": u"unicode"}
migrations = db.migration_get_all_by_filters(self.ctxt, filters)
self.assertEqual(1, len(migrations))
for migration in migrations:
self.assertIn(migration['status'], filters['status'])
def test_get_migrations_by_filters_with_type(self):
self._create(status="special", source_compute="host9",
migration_type="evacuation")
self._create(status="special", source_compute="host9",
migration_type="live-migration")
filters = {"status": "special", "host": "host9",
"migration_type": "evacuation", "hidden": False}
migrations = db.migration_get_all_by_filters(self.ctxt, filters)
self.assertEqual(1, len(migrations))
def test_get_migrations_by_filters_source_compute(self):
filters = {'source_compute': 'host2'}
migrations = db.migration_get_all_by_filters(self.ctxt, filters)
self.assertEqual(2, len(migrations))
sources = [x['source_compute'] for x in migrations]
self.assertEqual(['host2', 'host2'], sources)
dests = [x['dest_compute'] for x in migrations]
self.assertEqual(['host1', 'host3'], dests)
def test_get_migrations_by_filters_instance_uuid(self):
migrations = db.migration_get_all_by_filters(self.ctxt, filters={})
for migration in migrations:
filters = {'instance_uuid': migration['instance_uuid']}
instance_migrations = db.migration_get_all_by_filters(
self.ctxt, filters)
self.assertEqual(1, len(instance_migrations))
self.assertEqual(migration['instance_uuid'],
instance_migrations[0]['instance_uuid'])
def test_migration_get_unconfirmed_by_dest_compute(self):
# Ensure no migrations are returned.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host')
self.assertEqual(0, len(results))
        # Ensure no migrations are returned for the other host either.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host2')
self.assertEqual(0, len(results))
updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
values = {"status": "finished", "updated_at": updated_at,
"dest_compute": "fake_host2"}
migration = db.migration_create(self.ctxt, values)
        # Ensure the migration for a different host is not returned.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host')
self.assertEqual(0, len(results))
# Ensure one migration older than 10 seconds is returned.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host2')
self.assertEqual(1, len(results))
db.migration_update(self.ctxt, migration['id'],
{"status": "CONFIRMED"})
# Ensure the new migration is not returned.
updated_at = timeutils.utcnow()
values = {"status": "finished", "updated_at": updated_at,
"dest_compute": "fake_host2"}
migration = db.migration_create(self.ctxt, values)
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
"fake_host2")
self.assertEqual(0, len(results))
db.migration_update(self.ctxt, migration['id'],
{"status": "CONFIRMED"})
def test_migration_get_in_progress_by_instance(self):
values = self._create(status='running',
migration_type="live-migration")
results = db.migration_get_in_progress_by_instance(
self.ctxt, values["instance_uuid"], "live-migration")
self.assertEqual(1, len(results))
for key in values:
self.assertEqual(values[key], results[0][key])
self.assertEqual("running", results[0]["status"])
def test_migration_get_in_progress_by_instance_not_in_progress(self):
values = self._create(migration_type="live-migration")
results = db.migration_get_in_progress_by_instance(
self.ctxt, values["instance_uuid"], "live-migration")
self.assertEqual(0, len(results))
def test_migration_get_in_progress_by_instance_not_live_migration(self):
values = self._create(migration_type="resize")
results = db.migration_get_in_progress_by_instance(
self.ctxt, values["instance_uuid"], "live-migration")
self.assertEqual(0, len(results))
results = db.migration_get_in_progress_by_instance(
self.ctxt, values["instance_uuid"])
self.assertEqual(0, len(results))
def test_migration_update_not_found(self):
self.assertRaises(exception.MigrationNotFound,
db.migration_update, self.ctxt, 42, {})
def test_get_migration_for_instance(self):
migrations = db.migration_get_all_by_filters(self.ctxt, [])
migration_id = migrations[0].id
instance_uuid = migrations[0].instance_uuid
instance_migration = db.migration_get_by_id_and_instance(
self.ctxt, migration_id, instance_uuid)
self.assertEqual(migration_id, instance_migration.id)
self.assertEqual(instance_uuid, instance_migration.instance_uuid)
def test_get_migration_for_instance_not_found(self):
self.assertRaises(exception.MigrationNotFoundForInstance,
db.migration_get_by_id_and_instance, self.ctxt,
'500', '501')
def _create_3_migration_after_time(self, time=None):
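        # Helper: create three migrations created 1, 2 and 3 days after
        # 'time' (default: now), each updated one hour after its creation.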
time = time or timeutils.utcnow()
tmp_time = time + datetime.timedelta(days=1)
after_1hour = datetime.timedelta(hours=1)
self._create(uuid=uuidsentinel.uuid_time1, created_at=tmp_time,
updated_at=tmp_time + after_1hour)
tmp_time = time + datetime.timedelta(days=2)
self._create(uuid=uuidsentinel.uuid_time2, created_at=tmp_time,
updated_at=tmp_time + after_1hour)
tmp_time = time + datetime.timedelta(days=3)
self._create(uuid=uuidsentinel.uuid_time3, created_at=tmp_time,
updated_at=tmp_time + after_1hour)
def test_get_migrations_by_filters_with_limit(self):
migrations = db.migration_get_all_by_filters(self.ctxt, {}, limit=3)
self.assertEqual(3, len(migrations))
def test_get_migrations_by_filters_with_limit_marker(self):
self._create_3_migration_after_time()
# order by created_at, desc: time3, time2, time1
migrations = db.migration_get_all_by_filters(
self.ctxt, {}, limit=2, marker=uuidsentinel.uuid_time3)
# time3 as marker: time2, time1
self.assertEqual(2, len(migrations))
self.assertEqual(migrations[0]['uuid'], uuidsentinel.uuid_time2)
self.assertEqual(migrations[1]['uuid'], uuidsentinel.uuid_time1)
        # time3 as marker, limit 1: time2
migrations = db.migration_get_all_by_filters(
self.ctxt, {}, limit=1, marker=uuidsentinel.uuid_time3)
self.assertEqual(1, len(migrations))
self.assertEqual(migrations[0]['uuid'], uuidsentinel.uuid_time2)
def test_get_migrations_by_filters_with_limit_marker_sort(self):
self._create_3_migration_after_time()
# order by created_at, desc: time3, time2, time1
migrations = db.migration_get_all_by_filters(
self.ctxt, {}, limit=2, marker=uuidsentinel.uuid_time3)
# time2, time1
self.assertEqual(2, len(migrations))
self.assertEqual(migrations[0]['uuid'], uuidsentinel.uuid_time2)
self.assertEqual(migrations[1]['uuid'], uuidsentinel.uuid_time1)
        # order by updated_at, asc: time1, time2, time3
migrations = db.migration_get_all_by_filters(
self.ctxt, {}, sort_keys=['updated_at'], sort_dirs=['asc'],
limit=2, marker=uuidsentinel.uuid_time1)
# time2, time3
self.assertEqual(2, len(migrations))
self.assertEqual(migrations[0]['uuid'], uuidsentinel.uuid_time2)
self.assertEqual(migrations[1]['uuid'], uuidsentinel.uuid_time3)
def test_get_migrations_by_filters_with_not_found_marker(self):
self.assertRaises(exception.MarkerNotFound,
db.migration_get_all_by_filters, self.ctxt, {},
marker=uuidsentinel.not_found_marker)
def test_get_migrations_by_filters_with_changes_since(self):
changes_time = timeutils.utcnow(with_timezone=True)
self._create_3_migration_after_time(changes_time)
after_1day_2hours = datetime.timedelta(days=1, hours=2)
filters = {"changes-since": changes_time + after_1day_2hours}
migrations = db.migration_get_all_by_filters(
self.ctxt, filters,
sort_keys=['updated_at'], sort_dirs=['asc'])
self.assertEqual(2, len(migrations))
self.assertEqual(migrations[0]['uuid'], uuidsentinel.uuid_time2)
self.assertEqual(migrations[1]['uuid'], uuidsentinel.uuid_time3)
def test_get_migrations_by_filters_with_changes_before(self):
changes_time = timeutils.utcnow(with_timezone=True)
self._create_3_migration_after_time(changes_time)
after_3day_2hours = datetime.timedelta(days=3, hours=2)
filters = {"changes-before": changes_time + after_3day_2hours}
migrations = db.migration_get_all_by_filters(
self.ctxt, filters,
sort_keys=['updated_at'], sort_dirs=['asc'])
self.assertEqual(3, len(migrations))
self.assertEqual(migrations[0]['uuid'], uuidsentinel.uuid_time1)
self.assertEqual(migrations[1]['uuid'], uuidsentinel.uuid_time2)
self.assertEqual(migrations[2]['uuid'], uuidsentinel.uuid_time3)
class ModelsObjectComparatorMixin(object):
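    """Helpers for comparing model objects, or lists of them, field by
    field, optionally ignoring selected keys.
    """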
def _dict_from_object(self, obj, ignored_keys):
if ignored_keys is None:
ignored_keys = []
return {k: v for k, v in obj.items()
if k not in ignored_keys}
def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
obj1 = self._dict_from_object(obj1, ignored_keys)
obj2 = self._dict_from_object(obj2, ignored_keys)
self.assertEqual(len(obj1),
len(obj2),
"Keys mismatch: %s" %
str(set(obj1.keys()) ^ set(obj2.keys())))
for key, value in obj1.items():
self.assertEqual(value, obj2[key])
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
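        # Order-insensitive comparison: both lists are converted to dicts
        # and sorted before being compared.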
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
sort_key = lambda d: [d[k] for k in sorted(d)]
conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key)
self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))
def _assertEqualOrderedListOfObjects(self, objs1, objs2,
ignored_keys=None):
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
conv = lambda objs: [obj_to_dict(obj) for obj in objs]
self.assertEqual(conv(objs1), conv(objs2))
def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
self.assertEqual(len(primitives1), len(primitives2))
for primitive in primitives1:
self.assertIn(primitive, primitives2)
for primitive in primitives2:
self.assertIn(primitive, primitives1)
class InstanceSystemMetadataTestCase(test.TestCase):
"""Tests for db.api.instance_system_metadata_* methods."""
def setUp(self):
super(InstanceSystemMetadataTestCase, self).setUp()
values = {'host': 'h1', 'project_id': 'p1',
'system_metadata': {'key': 'value'}}
self.ctxt = context.get_admin_context()
self.instance = db.instance_create(self.ctxt, values)
def test_instance_system_metadata_get(self):
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'key': 'value'})
def test_instance_system_metadata_update_new_pair(self):
db.instance_system_metadata_update(
self.ctxt, self.instance['uuid'],
{'new_key': 'new_value'}, False)
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})
def test_instance_system_metadata_update_existent_pair(self):
db.instance_system_metadata_update(
self.ctxt, self.instance['uuid'],
{'key': 'new_value'}, True)
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'key': 'new_value'})
def test_instance_system_metadata_update_delete_true(self):
db.instance_system_metadata_update(
self.ctxt, self.instance['uuid'],
{'new_key': 'new_value'}, True)
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'new_key': 'new_value'})
@test.testtools.skip("bug 1189462")
def test_instance_system_metadata_update_nonexistent(self):
self.assertRaises(exception.InstanceNotFound,
db.instance_system_metadata_update,
self.ctxt, 'nonexistent-uuid',
{'key': 'value'}, True)
class SecurityGroupRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(SecurityGroupRuleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'name': 'fake_sec_group',
'description': 'fake_sec_group_descr',
'user_id': 'fake',
'project_id': 'fake',
'instances': []
}
def _get_base_rule_values(self):
return {
'protocol': "tcp",
'from_port': 80,
'to_port': 8080,
'cidr': None,
'deleted': 0,
'deleted_at': None,
'grantee_group': None,
'updated_at': None
}
def _create_security_group(self, values):
v = self._get_base_values()
v.update(values)
return db.security_group_create(self.ctxt, v)
def _create_security_group_rule(self, values):
v = self._get_base_rule_values()
v.update(values)
return db.security_group_rule_create(self.ctxt, v)
def test_security_group_rule_create(self):
security_group_rule = self._create_security_group_rule({})
self.assertIsNotNone(security_group_rule['id'])
for key, value in self._get_base_rule_values().items():
self.assertEqual(value, security_group_rule[key])
def _test_security_group_rule_get_by_security_group(self, columns=None):
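        # columns=None requests the default joins (grantee_group and its
        # instances); an explicit list such as [] skips those joins.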
instance = db.instance_create(self.ctxt,
{'system_metadata': {'foo': 'bar'}})
security_group = self._create_security_group({
'instances': [instance]})
security_group_rule = self._create_security_group_rule(
{'parent_group': security_group, 'grantee_group': security_group})
security_group_rule1 = self._create_security_group_rule(
{'parent_group': security_group, 'grantee_group': security_group})
found_rules = db.security_group_rule_get_by_security_group(
self.ctxt, security_group['id'], columns_to_join=columns)
self.assertEqual(len(found_rules), 2)
rules_ids = [security_group_rule['id'], security_group_rule1['id']]
for rule in found_rules:
if columns is None:
self.assertIn('grantee_group', dict(rule))
self.assertIn('instances',
dict(rule.grantee_group))
self.assertIn(
'system_metadata',
dict(rule.grantee_group.instances[0]))
self.assertIn(rule['id'], rules_ids)
else:
self.assertNotIn('grantee_group', dict(rule))
def test_security_group_rule_get_by_security_group(self):
self._test_security_group_rule_get_by_security_group()
def test_security_group_rule_get_by_security_group_no_joins(self):
self._test_security_group_rule_get_by_security_group(columns=[])
def test_security_group_rule_get_by_instance(self):
instance = db.instance_create(self.ctxt, {})
security_group = self._create_security_group({
'instances': [instance]})
security_group_rule = self._create_security_group_rule(
{'parent_group': security_group, 'grantee_group': security_group})
security_group_rule1 = self._create_security_group_rule(
{'parent_group': security_group, 'grantee_group': security_group})
security_group_rule_ids = [security_group_rule['id'],
security_group_rule1['id']]
found_rules = db.security_group_rule_get_by_instance(self.ctxt,
instance['uuid'])
self.assertEqual(len(found_rules), 2)
for rule in found_rules:
self.assertIn('grantee_group', rule)
self.assertIn(rule['id'], security_group_rule_ids)
def test_security_group_rule_destroy(self):
self._create_security_group({'name': 'fake1'})
self._create_security_group({'name': 'fake2'})
security_group_rule1 = self._create_security_group_rule({})
security_group_rule2 = self._create_security_group_rule({})
db.security_group_rule_destroy(self.ctxt, security_group_rule1['id'])
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_get,
self.ctxt, security_group_rule1['id'])
self._assertEqualObjects(db.security_group_rule_get(self.ctxt,
security_group_rule2['id']),
security_group_rule2, ['grantee_group'])
def test_security_group_rule_destroy_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_destroy, self.ctxt, 100500)
def test_security_group_rule_get(self):
security_group_rule1 = (
self._create_security_group_rule({}))
self._create_security_group_rule({})
real_security_group_rule = db.security_group_rule_get(self.ctxt,
security_group_rule1['id'])
self._assertEqualObjects(security_group_rule1,
real_security_group_rule, ['grantee_group'])
def test_security_group_rule_get_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_get, self.ctxt, 100500)
def test_security_group_rule_count_by_group(self):
sg1 = self._create_security_group({'name': 'fake1'})
sg2 = self._create_security_group({'name': 'fake2'})
rules_by_group = {sg1: [], sg2: []}
for group in rules_by_group:
rules = rules_by_group[group]
for i in range(0, 10):
rules.append(
self._create_security_group_rule({'parent_group_id':
group['id']}))
db.security_group_rule_destroy(self.ctxt,
rules_by_group[sg1][0]['id'])
counted_groups = [db.security_group_rule_count_by_group(self.ctxt,
group['id'])
for group in [sg1, sg2]]
expected = [9, 10]
self.assertEqual(counted_groups, expected)
class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(SecurityGroupTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'name': 'fake_sec_group',
'description': 'fake_sec_group_descr',
'user_id': 'fake',
'project_id': 'fake',
'instances': []
}
def _create_security_group(self, values):
v = self._get_base_values()
v.update(values)
return db.security_group_create(self.ctxt, v)
def test_security_group_create(self):
security_group = self._create_security_group({})
self.assertIsNotNone(security_group['id'])
for key, value in self._get_base_values().items():
self.assertEqual(value, security_group[key])
def test_security_group_destroy(self):
security_group1 = self._create_security_group({})
security_group2 = \
self._create_security_group({'name': 'fake_sec_group2'})
db.security_group_destroy(self.ctxt, security_group1['id'])
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_get,
self.ctxt, security_group1['id'])
self._assertEqualObjects(db.security_group_get(
self.ctxt, security_group2['id'],
columns_to_join=['instances',
'rules']), security_group2)
def test_security_group_destroy_with_instance(self):
security_group1 = self._create_security_group({})
instance = db.instance_create(self.ctxt, {})
db.instance_add_security_group(self.ctxt, instance.uuid,
security_group1.id)
self.assertEqual(
1,
len(db.security_group_get_by_instance(self.ctxt, instance.uuid)))
db.security_group_destroy(self.ctxt, security_group1['id'])
self.assertEqual(
0,
len(db.security_group_get_by_instance(self.ctxt, instance.uuid)))
def test_security_group_get(self):
security_group1 = self._create_security_group({})
self._create_security_group({'name': 'fake_sec_group2'})
real_security_group = db.security_group_get(self.ctxt,
security_group1['id'],
columns_to_join=['instances',
'rules'])
self._assertEqualObjects(security_group1,
real_security_group)
def test_security_group_get_with_instance_columns(self):
instance = db.instance_create(self.ctxt,
{'system_metadata': {'foo': 'bar'}})
secgroup = self._create_security_group({'instances': [instance]})
secgroup = db.security_group_get(
self.ctxt, secgroup['id'],
columns_to_join=['instances.system_metadata'])
inst = secgroup.instances[0]
self.assertIn('system_metadata', dict(inst).keys())
def test_security_group_get_no_instances(self):
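        # 'instances' should only be loaded on the security group when it is
        # explicitly requested via columns_to_join.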
instance = db.instance_create(self.ctxt, {})
sid = self._create_security_group({'instances': [instance]})['id']
security_group = db.security_group_get(self.ctxt, sid,
columns_to_join=['instances'])
self.assertIn('instances', security_group.__dict__)
security_group = db.security_group_get(self.ctxt, sid)
self.assertNotIn('instances', security_group.__dict__)
def test_security_group_get_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_get, self.ctxt, 100500)
def test_security_group_get_by_name(self):
security_group1 = self._create_security_group({'name': 'fake1'})
security_group2 = self._create_security_group({'name': 'fake2'})
real_security_group1 = db.security_group_get_by_name(
self.ctxt,
security_group1['project_id'],
security_group1['name'],
columns_to_join=None)
real_security_group2 = db.security_group_get_by_name(
self.ctxt,
security_group2['project_id'],
security_group2['name'],
columns_to_join=None)
self._assertEqualObjects(security_group1, real_security_group1)
self._assertEqualObjects(security_group2, real_security_group2)
def test_security_group_get_by_project(self):
security_group1 = self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj1'})
security_group2 = self._create_security_group(
{'name': 'fake2', 'project_id': 'fake_proj2'})
real1 = db.security_group_get_by_project(
self.ctxt,
security_group1['project_id'])
real2 = db.security_group_get_by_project(
self.ctxt,
security_group2['project_id'])
expected1, expected2 = [security_group1], [security_group2]
self._assertEqualListsOfObjects(expected1, real1,
ignored_keys=['instances'])
self._assertEqualListsOfObjects(expected2, real2,
ignored_keys=['instances'])
def test_security_group_get_by_instance(self):
instance = db.instance_create(self.ctxt, dict(host='foo'))
values = [
{'name': 'fake1', 'instances': [instance]},
{'name': 'fake2', 'instances': [instance]},
{'name': 'fake3', 'instances': []},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = db.security_group_get_by_instance(self.ctxt,
instance['uuid'])
expected = security_groups[:2]
self._assertEqualListsOfObjects(expected, real,
ignored_keys=['instances'])
def test_security_group_get_all(self):
values = [
{'name': 'fake1', 'project_id': 'fake_proj1'},
{'name': 'fake2', 'project_id': 'fake_proj2'},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = db.security_group_get_all(self.ctxt)
self._assertEqualListsOfObjects(security_groups, real,
ignored_keys=['instances'])
def test_security_group_in_use(self):
instance = db.instance_create(self.ctxt, dict(host='foo'))
values = [
{'instances': [instance],
'name': 'fake_in_use'},
{'instances': []},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = []
for security_group in security_groups:
in_use = db.security_group_in_use(self.ctxt,
security_group['id'])
real.append(in_use)
expected = [True, False]
self.assertEqual(expected, real)
def test_security_group_ensure_default(self):
self.ctxt.project_id = 'fake'
self.ctxt.user_id = 'fake'
self.assertEqual(0, len(db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)))
db.security_group_ensure_default(self.ctxt)
security_groups = db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)
self.assertEqual(1, len(security_groups))
self.assertEqual("default", security_groups[0]["name"])
@mock.patch.object(sqlalchemy_api, '_security_group_get_by_names')
def test_security_group_ensure_default_called_concurrently(self, sg_mock):
        # make sure NotFound is always raised here to trick Nova into
        # inserting the duplicate security group entry
sg_mock.side_effect = exception.NotFound
# create the first db entry
self.ctxt.project_id = 1
db.security_group_ensure_default(self.ctxt)
security_groups = db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)
self.assertEqual(1, len(security_groups))
# create the second one and ensure the exception is handled properly
default_group = db.security_group_ensure_default(self.ctxt)
self.assertEqual('default', default_group.name)
def test_security_group_update(self):
security_group = self._create_security_group({})
new_values = {
'name': 'sec_group1',
'description': 'sec_group_descr1',
'user_id': 'fake_user1',
'project_id': 'fake_proj1',
}
updated_group = db.security_group_update(self.ctxt,
security_group['id'],
new_values,
columns_to_join=['rules.grantee_group'])
for key, value in new_values.items():
self.assertEqual(updated_group[key], value)
self.assertEqual(updated_group['rules'], [])
def test_security_group_update_to_duplicate(self):
self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj1'})
security_group2 = self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj2'})
self.assertRaises(exception.SecurityGroupExists,
db.security_group_update,
self.ctxt, security_group2['id'],
{'project_id': 'fake_proj1'})
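# time.sleep is patched out so retry logic (such as the deadlock retry
# exercised below) does not actually sleep while these tests run.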
@mock.patch('time.sleep', new=lambda x: None)
class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.instance_* methods."""
sample_data = {
'project_id': 'project1',
'hostname': 'example.com',
'host': 'h1',
'node': 'n1',
'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
'info_cache': {'ckey': 'cvalue'},
}
def setUp(self):
super(InstanceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _assertEqualInstances(self, instance1, instance2):
self._assertEqualObjects(instance1, instance2,
ignored_keys=['metadata', 'system_metadata', 'info_cache',
'extra'])
def _assertEqualListsOfInstances(self, list1, list2):
self._assertEqualListsOfObjects(list1, list2,
ignored_keys=['metadata', 'system_metadata', 'info_cache',
'extra'])
def create_instance_with_args(self, **kwargs):
if 'context' in kwargs:
context = kwargs.pop('context')
else:
context = self.ctxt
args = self.sample_data.copy()
args.update(kwargs)
return db.instance_create(context, args)
def test_instance_create(self):
instance = self.create_instance_with_args()
self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
@mock.patch.object(sqlalchemy_api, 'security_group_ensure_default')
def test_instance_create_with_deadlock_retry(self, mock_sg):
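        # The first call raises DBDeadlock and the second succeeds, so
        # instance_create should retry and still return a valid instance.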
mock_sg.side_effect = [db_exc.DBDeadlock(), None]
instance = self.create_instance_with_args()
self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
def test_instance_create_with_object_values(self):
values = {
'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1'),
}
dt_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at')
dt = timeutils.utcnow()
dt_utc = dt.replace(tzinfo=iso8601.UTC)
for key in dt_keys:
values[key] = dt_utc
inst = db.instance_create(self.ctxt, values)
self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
self.assertEqual(inst['access_ip_v6'], '::1')
for key in dt_keys:
self.assertEqual(inst[key], dt)
def test_instance_update_with_object_values(self):
values = {
'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1'),
}
dt_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at')
dt = timeutils.utcnow()
dt_utc = dt.replace(tzinfo=iso8601.UTC)
for key in dt_keys:
values[key] = dt_utc
inst = db.instance_create(self.ctxt, {})
inst = db.instance_update(self.ctxt, inst['uuid'], values)
self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
self.assertEqual(inst['access_ip_v6'], '::1')
for key in dt_keys:
self.assertEqual(inst[key], dt)
def test_instance_update_no_metadata_clobber(self):
meta = {'foo': 'bar'}
sys_meta = {'sfoo': 'sbar'}
values = {
'metadata': meta,
'system_metadata': sys_meta,
}
inst = db.instance_create(self.ctxt, {})
inst = db.instance_update(self.ctxt, inst['uuid'], values)
self.assertEqual(meta, utils.metadata_to_dict(inst['metadata']))
self.assertEqual(sys_meta,
utils.metadata_to_dict(inst['system_metadata']))
def test_instance_get_all_with_meta(self):
self.create_instance_with_args()
for inst in db.instance_get_all(self.ctxt):
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_update(self):
instance = self.create_instance_with_args()
metadata = {'host': 'bar', 'key2': 'wuff'}
system_metadata = {'original_image_ref': 'baz'}
# Update the metadata
db.instance_update(self.ctxt, instance['uuid'], {'metadata': metadata,
'system_metadata': system_metadata})
# Retrieve the user-provided metadata to ensure it was successfully
# updated
self.assertEqual(metadata,
db.instance_metadata_get(self.ctxt, instance['uuid']))
self.assertEqual(system_metadata,
db.instance_system_metadata_get(self.ctxt, instance['uuid']))
def test_instance_update_bad_str_dates(self):
instance = self.create_instance_with_args()
values = {'created_at': '123'}
self.assertRaises(ValueError,
db.instance_update,
self.ctxt, instance['uuid'], values)
def test_instance_update_good_str_dates(self):
instance = self.create_instance_with_args()
values = {'created_at': '2011-01-31T00:00:00.0'}
actual = db.instance_update(self.ctxt, instance['uuid'], values)
expected = datetime.datetime(2011, 1, 31)
self.assertEqual(expected, actual["created_at"])
def test_create_instance_unique_hostname(self):
context1 = context.RequestContext('user1', 'p1')
context2 = context.RequestContext('user2', 'p2')
self.create_instance_with_args(hostname='h1', project_id='p1')
# With scope 'global' any duplicate should fail, be it this project:
self.flags(osapi_compute_unique_server_name_scope='global')
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context1,
hostname='h1', project_id='p3')
# or another:
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context2,
hostname='h1', project_id='p2')
# With scope 'project' a duplicate in the project should fail:
self.flags(osapi_compute_unique_server_name_scope='project')
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context1,
hostname='h1', project_id='p1')
# With scope 'project' a duplicate in a different project should work:
self.flags(osapi_compute_unique_server_name_scope='project')
self.create_instance_with_args(context=context2, hostname='h2')
self.flags(osapi_compute_unique_server_name_scope=None)
def test_instance_get_all_by_filters_empty_list_filter(self):
filters = {'uuid': []}
instances = db.instance_get_all_by_filters_sort(self.ctxt, filters)
self.assertEqual([], instances)
@mock.patch('nova.db.sqlalchemy.api.undefer')
@mock.patch('nova.db.sqlalchemy.api.joinedload')
def test_instance_get_all_by_filters_extra_columns(self,
mock_joinedload,
mock_undefer):
db.instance_get_all_by_filters_sort(
self.ctxt, {},
columns_to_join=['info_cache', 'extra.pci_requests'])
mock_joinedload.assert_called_once_with('info_cache')
mock_undefer.assert_called_once_with('extra.pci_requests')
@mock.patch('nova.db.sqlalchemy.api.undefer')
@mock.patch('nova.db.sqlalchemy.api.joinedload')
def test_instance_get_active_by_window_extra_columns(self,
mock_joinedload,
mock_undefer):
now = datetime.datetime(2013, 10, 10, 17, 16, 37, 156701)
db.instance_get_active_by_window_joined(
self.ctxt, now,
columns_to_join=['info_cache', 'extra.pci_requests'])
mock_joinedload.assert_called_once_with('info_cache')
mock_undefer.assert_called_once_with('extra.pci_requests')
def test_instance_get_all_by_filters_with_meta(self):
self.create_instance_with_args()
for inst in db.instance_get_all_by_filters(self.ctxt, {}):
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_get_all_by_filters_without_meta(self):
self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt, {},
columns_to_join=[])
for inst in result:
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_all_by_filters_with_fault(self):
inst = self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt, {},
columns_to_join=['fault'])
self.assertIsNone(result[0]['fault'])
db.instance_fault_create(self.ctxt,
{'instance_uuid': inst['uuid'],
'code': 123})
fault2 = db.instance_fault_create(self.ctxt,
{'instance_uuid': inst['uuid'],
'code': 123})
result = db.instance_get_all_by_filters(self.ctxt, {},
columns_to_join=['fault'])
# Make sure we get the latest fault
self.assertEqual(fault2['id'], result[0]['fault']['id'])
def test_instance_get_all_by_filters(self):
instances = [self.create_instance_with_args() for i in range(3)]
filtered_instances = db.instance_get_all_by_filters(self.ctxt, {})
self._assertEqualListsOfInstances(instances, filtered_instances)
def test_instance_get_all_by_filters_zero_limit(self):
self.create_instance_with_args()
instances = db.instance_get_all_by_filters(self.ctxt, {}, limit=0)
self.assertEqual([], instances)
def test_instance_metadata_get_multi(self):
uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
@sqlalchemy_api.pick_context_manager_reader
def test(context):
return sqlalchemy_api._instance_metadata_get_multi(
context, uuids)
meta = test(self.ctxt)
for row in meta:
self.assertIn(row['instance_uuid'], uuids)
@mock.patch.object(query.Query, 'filter')
def test_instance_metadata_get_multi_no_uuids(self, mock_query_filter):
with sqlalchemy_api.main_context_manager.reader.using(self.ctxt):
sqlalchemy_api._instance_metadata_get_multi(self.ctxt, [])
self.assertFalse(mock_query_filter.called)
    def test_instance_system_metadata_get_multi(self):
uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
@sqlalchemy_api.pick_context_manager_reader
def test(context):
return sqlalchemy_api._instance_system_metadata_get_multi(
context, uuids)
sys_meta = test(self.ctxt)
for row in sys_meta:
self.assertIn(row['instance_uuid'], uuids)
@mock.patch.object(query.Query, 'filter')
def test_instance_system_metadata_get_multi_no_uuids(self,
mock_query_filter):
sqlalchemy_api._instance_system_metadata_get_multi(self.ctxt, [])
self.assertFalse(mock_query_filter.called)
def test_instance_get_all_by_filters_regex(self):
i1 = self.create_instance_with_args(display_name='test1')
i2 = self.create_instance_with_args(display_name='teeeest2')
self.create_instance_with_args(display_name='diff')
result = db.instance_get_all_by_filters(self.ctxt,
{'display_name': 't.*st.'})
self._assertEqualListsOfInstances(result, [i1, i2])
def test_instance_get_all_by_filters_changes_since(self):
i1 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:25.000000')
i2 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:26.000000')
changes_since = iso8601.parse_date('2013-12-05T15:03:25.000000')
result = db.instance_get_all_by_filters(self.ctxt,
{'changes-since':
changes_since})
self._assertEqualListsOfInstances([i1, i2], result)
changes_since = iso8601.parse_date('2013-12-05T15:03:26.000000')
result = db.instance_get_all_by_filters(self.ctxt,
{'changes-since':
changes_since})
self._assertEqualListsOfInstances([i2], result)
db.instance_destroy(self.ctxt, i1['uuid'])
filters = {}
filters['changes-since'] = changes_since
filters['marker'] = i1['uuid']
result = db.instance_get_all_by_filters(self.ctxt,
filters)
self._assertEqualListsOfInstances([i2], result)
def test_instance_get_all_by_filters_changes_before(self):
i1 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:25.000000')
i2 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:26.000000')
changes_before = iso8601.parse_date('2013-12-05T15:03:26.000000')
result = db.instance_get_all_by_filters(self.ctxt,
{'changes-before':
changes_before})
self._assertEqualListsOfInstances([i1, i2], result)
changes_before = iso8601.parse_date('2013-12-05T15:03:25.000000')
result = db.instance_get_all_by_filters(self.ctxt,
{'changes-before':
changes_before})
self._assertEqualListsOfInstances([i1], result)
db.instance_destroy(self.ctxt, i2['uuid'])
filters = {}
filters['changes-before'] = changes_before
filters['marker'] = i2['uuid']
result = db.instance_get_all_by_filters(self.ctxt,
filters)
self._assertEqualListsOfInstances([i1], result)
def test_instance_get_all_by_filters_changes_time_period(self):
i1 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:25.000000')
i2 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:26.000000')
i3 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:27.000000')
changes_since = iso8601.parse_date('2013-12-05T15:03:25.000000')
changes_before = iso8601.parse_date('2013-12-05T15:03:27.000000')
result = db.instance_get_all_by_filters(self.ctxt,
{'changes-since':
changes_since,
'changes-before':
changes_before})
self._assertEqualListsOfInstances([i1, i2, i3], result)
changes_since = iso8601.parse_date('2013-12-05T15:03:26.000000')
changes_before = iso8601.parse_date('2013-12-05T15:03:27.000000')
result = db.instance_get_all_by_filters(self.ctxt,
{'changes-since':
changes_since,
'changes-before':
changes_before})
self._assertEqualListsOfInstances([i2, i3], result)
db.instance_destroy(self.ctxt, i1['uuid'])
filters = {}
filters['changes-since'] = changes_since
filters['changes-before'] = changes_before
filters['marker'] = i1['uuid']
result = db.instance_get_all_by_filters(self.ctxt,
filters)
self._assertEqualListsOfInstances([i2, i3], result)
def test_instance_get_all_by_filters_exact_match(self):
instance = self.create_instance_with_args(host='host1')
self.create_instance_with_args(host='host12')
result = db.instance_get_all_by_filters(self.ctxt,
{'host': 'host1'})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_metadata(self):
instance = self.create_instance_with_args(metadata={'foo': 'bar'})
self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt,
{'metadata': {'foo': 'bar'}})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_system_metadata(self):
instance = self.create_instance_with_args(
system_metadata={'foo': 'bar'})
self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt,
{'system_metadata': {'foo': 'bar'}})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_unicode_value(self):
i1 = self.create_instance_with_args(display_name=u'test♥')
i2 = self.create_instance_with_args(display_name=u'test')
i3 = self.create_instance_with_args(display_name=u'test♥test')
self.create_instance_with_args(display_name='diff')
result = db.instance_get_all_by_filters(self.ctxt,
{'display_name': u'test'})
self._assertEqualListsOfInstances([i1, i2, i3], result)
result = db.instance_get_all_by_filters(self.ctxt,
{'display_name': u'test♥'})
self._assertEqualListsOfInstances(result, [i1, i3])
def test_instance_get_by_uuid(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'])
# instance_create() will return a fault=None, so delete it before
# comparing the result of instance_get_by_uuid()
del inst.fault
self._assertEqualInstances(inst, result)
def test_instance_get_by_uuid_join_empty(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
columns_to_join=[])
meta = utils.metadata_to_dict(result['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(result['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_by_uuid_join_meta(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
columns_to_join=['metadata'])
meta = utils.metadata_to_dict(result['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(result['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_by_uuid_join_sys_meta(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
columns_to_join=['system_metadata'])
meta = utils.metadata_to_dict(result['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(result['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_get_all_by_filters_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(reservation_id='b')
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt, {})
self._assertEqualListsOfObjects([inst1, inst2], result,
ignored_keys=['metadata', 'system_metadata',
'deleted', 'deleted_at', 'info_cache',
'pci_devices', 'extra'])
def test_instance_get_all_by_filters_deleted_and_soft_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
self.create_instance_with_args()
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': True})
self._assertEqualListsOfObjects([inst1, inst2], result,
ignored_keys=['metadata', 'system_metadata',
'deleted', 'deleted_at', 'info_cache',
'pci_devices', 'extra'])
def test_instance_get_all_by_filters_deleted_no_soft_deleted(self):
inst1 = self.create_instance_with_args()
self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
self.create_instance_with_args()
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': True,
'soft_deleted': False})
self._assertEqualListsOfObjects([inst1], result,
ignored_keys=['deleted', 'deleted_at', 'metadata',
'system_metadata', 'info_cache', 'pci_devices',
'extra'])
def test_instance_get_all_by_filters_alive_and_soft_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
inst3 = self.create_instance_with_args()
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': False,
'soft_deleted': True})
self._assertEqualListsOfInstances([inst2, inst3], result)
def test_instance_get_all_by_filters_not_deleted(self):
inst1 = self.create_instance_with_args()
self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
inst3 = self.create_instance_with_args()
inst4 = self.create_instance_with_args(vm_state=vm_states.ACTIVE)
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': False})
self.assertIsNone(inst3.vm_state)
self._assertEqualListsOfInstances([inst3, inst4], result)
def test_instance_get_all_by_filters_cleaned(self):
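        # Mark one of two instances as cleaned and verify the flag is
        # reported correctly for both.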
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(reservation_id='b')
db.instance_update(self.ctxt, inst1['uuid'], {'cleaned': 1})
result = db.instance_get_all_by_filters(self.ctxt, {})
self.assertEqual(2, len(result))
self.assertIn(inst1['uuid'], [result[0]['uuid'], result[1]['uuid']])
self.assertIn(inst2['uuid'], [result[0]['uuid'], result[1]['uuid']])
if inst1['uuid'] == result[0]['uuid']:
self.assertTrue(result[0]['cleaned'])
self.assertFalse(result[1]['cleaned'])
else:
self.assertTrue(result[1]['cleaned'])
self.assertFalse(result[0]['cleaned'])
def test_instance_get_all_by_host_and_node_no_join(self):
instance = self.create_instance_with_args()
result = db.instance_get_all_by_host_and_node(self.ctxt, 'h1', 'n1')
self.assertEqual(result[0]['uuid'], instance['uuid'])
self.assertEqual(result[0]['system_metadata'], [])
def test_instance_get_all_by_host_and_node(self):
instance = self.create_instance_with_args(
system_metadata={'foo': 'bar'})
result = db.instance_get_all_by_host_and_node(
self.ctxt, 'h1', 'n1',
columns_to_join=['system_metadata', 'extra'])
self.assertEqual(instance['uuid'], result[0]['uuid'])
self.assertEqual('bar', result[0]['system_metadata'][0]['value'])
self.assertEqual(instance['uuid'], result[0]['extra']['instance_uuid'])
@mock.patch('nova.db.sqlalchemy.api._instances_fill_metadata')
@mock.patch('nova.db.sqlalchemy.api._instance_get_all_query')
def test_instance_get_all_by_host_and_node_fills_manually(self,
mock_getall,
mock_fill):
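        # metadata and system_metadata are filled in manually after the
        # query, while the remaining requested columns become SQL joins.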
db.instance_get_all_by_host_and_node(
self.ctxt, 'h1', 'n1',
columns_to_join=['metadata', 'system_metadata', 'extra', 'foo'])
self.assertEqual(sorted(['extra', 'foo']),
sorted(mock_getall.call_args[1]['joins']))
self.assertEqual(sorted(['metadata', 'system_metadata']),
sorted(mock_fill.call_args[1]['manual_joins']))
def _get_base_values(self):
return {
'name': 'fake_sec_group',
'description': 'fake_sec_group_descr',
'user_id': 'fake',
'project_id': 'fake',
'instances': []
}
def _get_base_rule_values(self):
return {
'protocol': "tcp",
'from_port': 80,
'to_port': 8080,
'cidr': None,
'deleted': 0,
'deleted_at': None,
'grantee_group': None,
'updated_at': None
}
def _create_security_group(self, values):
v = self._get_base_values()
v.update(values)
return db.security_group_create(self.ctxt, v)
def _create_security_group_rule(self, values):
v = self._get_base_rule_values()
v.update(values)
return db.security_group_rule_create(self.ctxt, v)
def test_instance_get_all_by_grantee_security_groups(self):
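        # secgroup1 and secgroup2 are grantees of rules belonging to
        # secgroup3 and secgroup4, so the instances in those parent groups
        # (instance2 and instance3) are the ones returned.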
instance1 = self.create_instance_with_args()
instance2 = self.create_instance_with_args()
instance3 = self.create_instance_with_args()
secgroup1 = self._create_security_group(
{'name': 'fake-secgroup1', 'instances': [instance1]})
secgroup2 = self._create_security_group(
{'name': 'fake-secgroup2', 'instances': [instance1]})
secgroup3 = self._create_security_group(
{'name': 'fake-secgroup3', 'instances': [instance2]})
secgroup4 = self._create_security_group(
{'name': 'fake-secgroup4', 'instances': [instance2, instance3]})
self._create_security_group_rule({'grantee_group': secgroup1,
'parent_group': secgroup3})
self._create_security_group_rule({'grantee_group': secgroup2,
'parent_group': secgroup4})
group_ids = [secgroup['id'] for secgroup in [secgroup1, secgroup2]]
instances = db.instance_get_all_by_grantee_security_groups(self.ctxt,
group_ids)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertEqual(len(instances), 2)
self.assertIn(instance2['uuid'], instance_uuids)
self.assertIn(instance3['uuid'], instance_uuids)
def test_instance_get_all_by_grantee_security_groups_empty_group_ids(self):
results = db.instance_get_all_by_grantee_security_groups(self.ctxt, [])
self.assertEqual([], results)
def test_instance_get_all_hung_in_rebooting(self):
# Ensure no instances are returned.
results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
self.assertEqual([], results)
# Ensure one rebooting instance with updated_at older than 10 seconds
# is returned.
instance = self.create_instance_with_args(task_state="rebooting",
updated_at=datetime.datetime(2000, 1, 1, 12, 0, 0))
results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
self._assertEqualListsOfObjects([instance], results,
ignored_keys=['task_state', 'info_cache', 'security_groups',
'metadata', 'system_metadata', 'pci_devices',
'extra'])
db.instance_update(self.ctxt, instance['uuid'], {"task_state": None})
# Ensure the newly rebooted instance is not returned.
self.create_instance_with_args(task_state="rebooting",
updated_at=timeutils.utcnow())
results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
self.assertEqual([], results)
def test_instance_update_with_expected_vm_state(self):
instance = self.create_instance_with_args(vm_state='foo')
db.instance_update(self.ctxt, instance['uuid'], {'host': 'h1',
'expected_vm_state': ('foo', 'bar')})
def test_instance_update_with_unexpected_vm_state(self):
instance = self.create_instance_with_args(vm_state='foo')
self.assertRaises(exception.InstanceUpdateConflict,
db.instance_update, self.ctxt, instance['uuid'],
{'host': 'h1', 'expected_vm_state': ('spam', 'bar')})
def test_instance_update_with_instance_uuid(self):
# test instance_update() works when an instance UUID is passed.
ctxt = context.get_admin_context()
# Create an instance with some metadata
values = {'metadata': {'host': 'foo', 'key1': 'meow'},
'system_metadata': {'original_image_ref': 'blah'}}
instance = db.instance_create(ctxt, values)
# Update the metadata
values = {'metadata': {'host': 'bar', 'key2': 'wuff'},
'system_metadata': {'original_image_ref': 'baz'}}
db.instance_update(ctxt, instance['uuid'], values)
# Retrieve the user-provided metadata to ensure it was successfully
# updated
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
self.assertEqual('bar', instance_meta['host'])
self.assertEqual('wuff', instance_meta['key2'])
self.assertNotIn('key1', instance_meta)
# Retrieve the system metadata to ensure it was successfully updated
system_meta = db.instance_system_metadata_get(ctxt, instance['uuid'])
self.assertEqual('baz', system_meta['original_image_ref'])
def test_delete_block_device_mapping_on_instance_destroy(self):
# Makes sure that the block device mapping is deleted when the
# related instance is deleted.
ctxt = context.get_admin_context()
instance = db.instance_create(ctxt, dict(display_name='bdm-test'))
bdm = {
'volume_id': uuidsentinel.uuid1,
'device_name': '/dev/vdb',
'instance_uuid': instance['uuid'],
}
bdm = db.block_device_mapping_create(ctxt, bdm, legacy=False)
db.instance_destroy(ctxt, instance['uuid'])
# make sure the bdm is deleted as well
bdms = db.block_device_mapping_get_all_by_instance(
ctxt, instance['uuid'])
self.assertEqual([], bdms)
def test_delete_instance_metadata_on_instance_destroy(self):
ctxt = context.get_admin_context()
# Create an instance with some metadata
values = {'metadata': {'host': 'foo', 'key1': 'meow'},
'system_metadata': {'original_image_ref': 'blah'}}
instance = db.instance_create(ctxt, values)
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
self.assertEqual('foo', instance_meta['host'])
self.assertEqual('meow', instance_meta['key1'])
db.instance_destroy(ctxt, instance['uuid'])
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
# Make sure instance metadata is deleted as well
self.assertEqual({}, instance_meta)
def test_delete_instance_faults_on_instance_destroy(self):
ctxt = context.get_admin_context()
uuid = uuidsentinel.uuid1
# Create faults
db.instance_create(ctxt, {'uuid': uuid})
fault_values = {
'message': 'message',
'details': 'detail',
'instance_uuid': uuid,
'code': 404,
'host': 'localhost'
}
fault = db.instance_fault_create(ctxt, fault_values)
# Retrieve the fault to ensure it was successfully added
faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
self.assertEqual(1, len(faults[uuid]))
self._assertEqualObjects(fault, faults[uuid][0])
db.instance_destroy(ctxt, uuid)
faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
        # Make sure the instance faults are deleted as well
self.assertEqual(0, len(faults[uuid]))
def test_delete_migrations_on_instance_destroy(self):
ctxt = context.get_admin_context()
uuid = uuidsentinel.uuid1
db.instance_create(ctxt, {'uuid': uuid})
migrations_values = {'instance_uuid': uuid}
migration = db.migration_create(ctxt, migrations_values)
migrations = db.migration_get_all_by_filters(
ctxt, {'instance_uuid': uuid})
self.assertEqual(1, len(migrations))
self._assertEqualObjects(migration, migrations[0])
instance = db.instance_destroy(ctxt, uuid)
migrations = db.migration_get_all_by_filters(
ctxt, {'instance_uuid': uuid})
self.assertTrue(instance.deleted)
self.assertEqual(0, len(migrations))
def test_instance_update_and_get_original(self):
instance = self.create_instance_with_args(vm_state='building')
(old_ref, new_ref) = db.instance_update_and_get_original(self.ctxt,
instance['uuid'], {'vm_state': 'needscoffee'})
self.assertEqual('building', old_ref['vm_state'])
self.assertEqual('needscoffee', new_ref['vm_state'])
def test_instance_update_and_get_original_metadata(self):
instance = self.create_instance_with_args()
columns_to_join = ['metadata']
(old_ref, new_ref) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'vm_state': 'needscoffee'},
columns_to_join=columns_to_join)
meta = utils.metadata_to_dict(new_ref['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(new_ref['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_update_and_get_original_metadata_none_join(self):
instance = self.create_instance_with_args()
(old_ref, new_ref) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'metadata': {'mk1': 'mv3'}})
meta = utils.metadata_to_dict(new_ref['metadata'])
self.assertEqual(meta, {'mk1': 'mv3'})
def test_instance_update_and_get_original_no_conflict_on_session(self):
@sqlalchemy_api.pick_context_manager_writer
def test(context):
instance = self.create_instance_with_args()
(old_ref, new_ref) = db.instance_update_and_get_original(
context, instance['uuid'], {'metadata': {'mk1': 'mv3'}})
# test some regular persisted fields
self.assertEqual(old_ref.uuid, new_ref.uuid)
self.assertEqual(old_ref.project_id, new_ref.project_id)
# after a copy operation, we can assert:
# 1. the two states have their own InstanceState
old_insp = inspect(old_ref)
new_insp = inspect(new_ref)
self.assertNotEqual(old_insp, new_insp)
# 2. only one of the objects is still in our Session
self.assertIs(new_insp.session, self.ctxt.session)
self.assertIsNone(old_insp.session)
# 3. The "new" object remains persistent and ready
# for updates
self.assertTrue(new_insp.persistent)
# 4. the "old" object is detached from this Session.
self.assertTrue(old_insp.detached)
test(self.ctxt)
def test_instance_update_and_get_original_conflict_race(self):
        # Ensure that we retry if update_on_match fails for no discernible
# reason
instance = self.create_instance_with_args()
orig_update_on_match = update_match.update_on_match
# Reproduce the conditions of a race between fetching and updating the
        # instance by making update_on_match fail for no discernible reason the
# first time it is called, but work normally the second time.
with mock.patch.object(update_match, 'update_on_match',
side_effect=[update_match.NoRowsMatched,
orig_update_on_match]):
db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'metadata': {'mk1': 'mv3'}})
self.assertEqual(update_match.update_on_match.call_count, 2)
def test_instance_update_and_get_original_conflict_race_fallthrough(self):
        # Ensure that if update_on_match continuously fails for no discernible
        # reason, we eventually raise UnknownInstanceUpdateConflict
instance = self.create_instance_with_args()
# Reproduce the conditions of a race between fetching and updating the
        # instance by making update_on_match fail for no discernible reason.
with mock.patch.object(update_match, 'update_on_match',
side_effect=update_match.NoRowsMatched):
self.assertRaises(exception.UnknownInstanceUpdateConflict,
db.instance_update_and_get_original,
self.ctxt,
instance['uuid'],
{'metadata': {'mk1': 'mv3'}})
def test_instance_update_and_get_original_expected_host(self):
# Ensure that we allow update when expecting a host field
instance = self.create_instance_with_args()
(orig, new) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'host': None},
expected={'host': 'h1'})
self.assertIsNone(new['host'])
def test_instance_update_and_get_original_expected_host_fail(self):
# Ensure that we detect a changed expected host and raise
# InstanceUpdateConflict
instance = self.create_instance_with_args()
try:
db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'host': None},
expected={'host': 'h2'})
except exception.InstanceUpdateConflict as ex:
self.assertEqual(ex.kwargs['instance_uuid'], instance['uuid'])
self.assertEqual(ex.kwargs['actual'], {'host': 'h1'})
self.assertEqual(ex.kwargs['expected'], {'host': ['h2']})
else:
self.fail('InstanceUpdateConflict was not raised')
def test_instance_update_and_get_original_expected_host_none(self):
# Ensure that we allow update when expecting a host field of None
instance = self.create_instance_with_args(host=None)
(old, new) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'host': 'h1'},
expected={'host': None})
self.assertEqual('h1', new['host'])
def test_instance_update_and_get_original_expected_host_none_fail(self):
# Ensure that we detect a changed expected host of None and raise
# InstanceUpdateConflict
instance = self.create_instance_with_args()
try:
db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'host': None},
expected={'host': None})
except exception.InstanceUpdateConflict as ex:
self.assertEqual(ex.kwargs['instance_uuid'], instance['uuid'])
self.assertEqual(ex.kwargs['actual'], {'host': 'h1'})
self.assertEqual(ex.kwargs['expected'], {'host': [None]})
else:
self.fail('InstanceUpdateConflict was not raised')
def test_instance_update_and_get_original_expected_task_state_single_fail(self): # noqa
# Ensure that we detect a changed expected task and raise
# UnexpectedTaskStateError
instance = self.create_instance_with_args()
try:
db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {
'host': None,
'expected_task_state': task_states.SCHEDULING
})
except exception.UnexpectedTaskStateError as ex:
self.assertEqual(ex.kwargs['instance_uuid'], instance['uuid'])
self.assertEqual(ex.kwargs['actual'], {'task_state': None})
self.assertEqual(ex.kwargs['expected'],
{'task_state': [task_states.SCHEDULING]})
else:
self.fail('UnexpectedTaskStateError was not raised')
def test_instance_update_and_get_original_expected_task_state_single_pass(self): # noqa
# Ensure that we allow an update when expected task is correct
instance = self.create_instance_with_args()
(orig, new) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {
'host': None,
'expected_task_state': None
})
self.assertIsNone(new['host'])
def test_instance_update_and_get_original_expected_task_state_multi_fail(self): # noqa
# Ensure that we detect a changed expected task and raise
# UnexpectedTaskStateError when there are multiple potential expected
# tasks
instance = self.create_instance_with_args()
try:
db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {
'host': None,
'expected_task_state': [task_states.SCHEDULING,
task_states.REBUILDING]
})
except exception.UnexpectedTaskStateError as ex:
self.assertEqual(ex.kwargs['instance_uuid'], instance['uuid'])
self.assertEqual(ex.kwargs['actual'], {'task_state': None})
self.assertEqual(ex.kwargs['expected'],
{'task_state': [task_states.SCHEDULING,
task_states.REBUILDING]})
else:
self.fail('UnexpectedTaskStateError was not raised')
def test_instance_update_and_get_original_expected_task_state_multi_pass(self): # noqa
# Ensure that we allow an update when expected task is in a list of
# expected tasks
instance = self.create_instance_with_args()
(orig, new) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {
'host': None,
'expected_task_state': [task_states.SCHEDULING, None]
})
self.assertIsNone(new['host'])
def test_instance_update_and_get_original_expected_task_state_deleting(self): # noqa
# Ensure that we raise UnexpectedDeletingTaskStateError when task state
# is not as expected, and it is DELETING
instance = self.create_instance_with_args(
task_state=task_states.DELETING)
try:
db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {
'host': None,
'expected_task_state': task_states.SCHEDULING
})
except exception.UnexpectedDeletingTaskStateError as ex:
self.assertEqual(ex.kwargs['instance_uuid'], instance['uuid'])
self.assertEqual(ex.kwargs['actual'],
{'task_state': task_states.DELETING})
self.assertEqual(ex.kwargs['expected'],
{'task_state': [task_states.SCHEDULING]})
else:
self.fail('UnexpectedDeletingTaskStateError was not raised')
def test_instance_update_unique_name(self):
context1 = context.RequestContext('user1', 'p1')
context2 = context.RequestContext('user2', 'p2')
inst1 = self.create_instance_with_args(context=context1,
project_id='p1',
hostname='fake_name1')
inst2 = self.create_instance_with_args(context=context1,
project_id='p1',
hostname='fake_name2')
inst3 = self.create_instance_with_args(context=context2,
project_id='p2',
hostname='fake_name3')
# osapi_compute_unique_server_name_scope is unset so this should work:
db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name2'})
db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name1'})
# With scope 'global' any duplicate should fail.
self.flags(osapi_compute_unique_server_name_scope='global')
self.assertRaises(exception.InstanceExists,
db.instance_update,
context1,
inst2['uuid'],
{'hostname': 'fake_name1'})
self.assertRaises(exception.InstanceExists,
db.instance_update,
context2,
inst3['uuid'],
{'hostname': 'fake_name1'})
# But we should definitely be able to update our name if we aren't
# really changing it.
db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_NAME'})
# With scope 'project' a duplicate in the project should fail:
self.flags(osapi_compute_unique_server_name_scope='project')
self.assertRaises(exception.InstanceExists, db.instance_update,
context1, inst2['uuid'], {'hostname': 'fake_NAME'})
# With scope 'project' a duplicate in a different project should work:
self.flags(osapi_compute_unique_server_name_scope='project')
db.instance_update(context2, inst3['uuid'], {'hostname': 'fake_NAME'})
def _test_instance_update_updates_metadata(self, metadata_type):
instance = self.create_instance_with_args()
def set_and_check(meta):
inst = db.instance_update(self.ctxt, instance['uuid'],
{metadata_type: dict(meta)})
_meta = utils.metadata_to_dict(inst[metadata_type])
self.assertEqual(meta, _meta)
meta = {'speed': '88', 'units': 'MPH'}
set_and_check(meta)
meta['gigawatts'] = '1.21'
set_and_check(meta)
del meta['gigawatts']
set_and_check(meta)
self.ctxt.read_deleted = 'yes'
self.assertNotIn('gigawatts',
db.instance_system_metadata_get(self.ctxt, instance.uuid))
def test_security_group_in_use(self):
db.instance_create(self.ctxt, dict(host='foo'))
def test_instance_update_updates_system_metadata(self):
# Ensure that system_metadata is updated during instance_update
self._test_instance_update_updates_metadata('system_metadata')
def test_instance_update_updates_metadata(self):
# Ensure that metadata is updated during instance_update
self._test_instance_update_updates_metadata('metadata')
def test_instance_floating_address_get_all(self):
ctxt = context.get_admin_context()
instance1 = db.instance_create(ctxt, {'host': 'h1', 'hostname': 'n1'})
instance2 = db.instance_create(ctxt, {'host': 'h2', 'hostname': 'n2'})
fixed_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_addresses = ['2.1.1.1', '2.1.1.2', '2.1.1.3']
instance_uuids = [instance1['uuid'], instance1['uuid'],
instance2['uuid']]
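        # The first two fixed/floating pairs belong to instance1, the third
        # to instance2.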
for fixed_addr, float_addr, instance_uuid in zip(fixed_addresses,
float_addresses,
instance_uuids):
db.fixed_ip_create(ctxt, {'address': fixed_addr,
'instance_uuid': instance_uuid})
fixed_id = db.fixed_ip_get_by_address(ctxt, fixed_addr)['id']
db.floating_ip_create(ctxt,
{'address': float_addr,
'fixed_ip_id': fixed_id})
real_float_addresses = \
db.instance_floating_address_get_all(ctxt, instance_uuids[0])
self.assertEqual(set(float_addresses[:2]), set(real_float_addresses))
real_float_addresses = \
db.instance_floating_address_get_all(ctxt, instance_uuids[2])
self.assertEqual(set([float_addresses[2]]), set(real_float_addresses))
self.assertRaises(exception.InvalidUUID,
db.instance_floating_address_get_all,
ctxt, 'invalid_uuid')
def test_instance_stringified_ips(self):
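        # access_ip_v4/v6 are stored as strings, so netaddr address objects
        # should come back stringified both from the update call and from a
        # fresh instance_get_by_uuid.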
instance = self.create_instance_with_args()
instance = db.instance_update(
self.ctxt, instance['uuid'],
{'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1')})
self.assertIsInstance(instance['access_ip_v4'], six.string_types)
self.assertIsInstance(instance['access_ip_v6'], six.string_types)
instance = db.instance_get_by_uuid(self.ctxt, instance['uuid'])
self.assertIsInstance(instance['access_ip_v4'], six.string_types)
self.assertIsInstance(instance['access_ip_v6'], six.string_types)
@mock.patch('nova.db.sqlalchemy.api._check_instance_exists_in_project',
return_value=None)
def test_instance_destroy(self, mock_check_inst_exists):
ctxt = context.get_admin_context()
values = {
'metadata': {'key': 'value'},
'system_metadata': {'key': 'value'}
}
inst_uuid = self.create_instance_with_args(**values)['uuid']
db.instance_tag_set(ctxt, inst_uuid, [u'tag1', u'tag2'])
db.instance_destroy(ctxt, inst_uuid)
self.assertRaises(exception.InstanceNotFound,
db.instance_get, ctxt, inst_uuid)
self.assertIsNone(db.instance_info_cache_get(ctxt, inst_uuid))
self.assertEqual({}, db.instance_metadata_get(ctxt, inst_uuid))
self.assertEqual([], db.instance_tag_get_by_instance_uuid(
ctxt, inst_uuid))
@sqlalchemy_api.pick_context_manager_reader
def _assert_instance_id_mapping(_ctxt):
# NOTE(mriedem): We can't use ec2_instance_get_by_uuid to assert
# the instance_id_mappings record is gone because it hard-codes
# read_deleted='yes' and will read the soft-deleted record. So we
# do the model_query directly here. See bug 1061166.
inst_id_mapping = sqlalchemy_api.model_query(
_ctxt, models.InstanceIdMapping).filter_by(
uuid=inst_uuid).first()
self.assertFalse(inst_id_mapping,
'instance_id_mapping not deleted for '
'instance: %s' % inst_uuid)
_assert_instance_id_mapping(ctxt)
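        # System metadata is only soft-deleted, so it should still be
        # readable once we opt in to reading deleted rows.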
ctxt.read_deleted = 'yes'
self.assertEqual(values['system_metadata'],
db.instance_system_metadata_get(ctxt, inst_uuid))
def test_instance_destroy_already_destroyed(self):
ctxt = context.get_admin_context()
instance = self.create_instance_with_args()
db.instance_destroy(ctxt, instance['uuid'])
self.assertRaises(exception.InstanceNotFound,
db.instance_destroy, ctxt, instance['uuid'])
def test_check_instance_exists(self):
instance = self.create_instance_with_args()
@sqlalchemy_api.pick_context_manager_reader
def test(context):
self.assertIsNone(sqlalchemy_api._check_instance_exists_in_project(
context, instance['uuid']))
test(self.ctxt)
def test_check_instance_exists_non_existing_instance(self):
@sqlalchemy_api.pick_context_manager_reader
def test(ctxt):
self.assertRaises(exception.InstanceNotFound,
sqlalchemy_api._check_instance_exists_in_project,
self.ctxt, '123')
test(self.ctxt)
def test_check_instance_exists_from_different_tenant(self):
context1 = context.RequestContext('user1', 'project1')
context2 = context.RequestContext('user2', 'project2')
instance = self.create_instance_with_args(context=context1)
@sqlalchemy_api.pick_context_manager_reader
def test1(context):
self.assertIsNone(sqlalchemy_api._check_instance_exists_in_project(
context, instance['uuid']))
test1(context1)
@sqlalchemy_api.pick_context_manager_reader
def test2(context):
self.assertRaises(exception.InstanceNotFound,
sqlalchemy_api._check_instance_exists_in_project,
context, instance['uuid'])
test2(context2)
def test_check_instance_exists_admin_context(self):
some_context = context.RequestContext('some_user', 'some_project')
instance = self.create_instance_with_args(context=some_context)
@sqlalchemy_api.pick_context_manager_reader
def test(context):
# Check that method works correctly with admin context
self.assertIsNone(sqlalchemy_api._check_instance_exists_in_project(
context, instance['uuid']))
test(self.ctxt)
class InstanceMetadataTestCase(test.TestCase):
"""Tests for db.api.instance_metadata_* methods."""
def setUp(self):
super(InstanceMetadataTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_instance_metadata_get(self):
instance = db.instance_create(self.ctxt, {'metadata':
{'key': 'value'}})
self.assertEqual({'key': 'value'}, db.instance_metadata_get(
self.ctxt, instance['uuid']))
def test_instance_metadata_delete(self):
instance = db.instance_create(self.ctxt,
{'metadata': {'key': 'val',
'key1': 'val1'}})
db.instance_metadata_delete(self.ctxt, instance['uuid'], 'key1')
self.assertEqual({'key': 'val'}, db.instance_metadata_get(
self.ctxt, instance['uuid']))
def test_instance_metadata_update(self):
instance = db.instance_create(self.ctxt, {'host': 'h1',
'project_id': 'p1', 'metadata': {'key': 'value'}})
        # This should add a new key/value pair
db.instance_metadata_update(self.ctxt, instance['uuid'],
{'new_key': 'new_value'}, False)
metadata = db.instance_metadata_get(self.ctxt, instance['uuid'])
self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})
# This should leave only one key/value pair
db.instance_metadata_update(self.ctxt, instance['uuid'],
{'new_key': 'new_value'}, True)
metadata = db.instance_metadata_get(self.ctxt, instance['uuid'])
self.assertEqual(metadata, {'new_key': 'new_value'})
class InstanceExtraTestCase(test.TestCase):
def setUp(self):
super(InstanceExtraTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.instance = db.instance_create(self.ctxt, {})
def test_instance_extra_get_by_uuid_instance_create(self):
inst_extra = db.instance_extra_get_by_instance_uuid(
self.ctxt, self.instance['uuid'])
self.assertIsNotNone(inst_extra)
def test_instance_extra_update_by_uuid(self):
db.instance_extra_update_by_uuid(self.ctxt, self.instance['uuid'],
{'numa_topology': 'changed',
'trusted_certs': "['123', 'foo']",
})
inst_extra = db.instance_extra_get_by_instance_uuid(
self.ctxt, self.instance['uuid'])
self.assertEqual('changed', inst_extra.numa_topology)
# NOTE(jackie-truong): trusted_certs is stored as a Text type in
# instance_extra and read as a list of strings
self.assertEqual("['123', 'foo']", inst_extra.trusted_certs)
def test_instance_extra_update_by_uuid_and_create(self):
@sqlalchemy_api.pick_context_manager_writer
def test(context):
sqlalchemy_api.model_query(context, models.InstanceExtra).\
filter_by(instance_uuid=self.instance['uuid']).\
delete()
test(self.ctxt)
inst_extra = db.instance_extra_get_by_instance_uuid(
self.ctxt, self.instance['uuid'])
self.assertIsNone(inst_extra)
db.instance_extra_update_by_uuid(self.ctxt, self.instance['uuid'],
{'numa_topology': 'changed'})
inst_extra = db.instance_extra_get_by_instance_uuid(
self.ctxt, self.instance['uuid'])
self.assertEqual('changed', inst_extra.numa_topology)
def test_instance_extra_get_with_columns(self):
extra = db.instance_extra_get_by_instance_uuid(
self.ctxt, self.instance['uuid'],
columns=['numa_topology', 'vcpu_model', 'trusted_certs'])
self.assertRaises(SQLAlchemyError,
extra.__getitem__, 'pci_requests')
self.assertIn('numa_topology', extra)
self.assertIn('vcpu_model', extra)
self.assertIn('trusted_certs', extra)
class ServiceTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(ServiceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'uuid': None,
'host': 'fake_host',
'binary': 'fake_binary',
'topic': 'fake_topic',
'report_count': 3,
'disabled': False,
'forced_down': False
}
def _create_service(self, values):
v = self._get_base_values()
v.update(values)
return db.service_create(self.ctxt, v)
def test_service_create(self):
service = self._create_service({})
self.assertIsNotNone(service['id'])
for key, value in self._get_base_values().items():
self.assertEqual(value, service[key])
def test_service_create_disabled(self):
self.flags(enable_new_services=False)
service = self._create_service({'binary': 'nova-compute'})
self.assertTrue(service['disabled'])
def test_service_create_disabled_reason(self):
self.flags(enable_new_services=False)
service = self._create_service({'binary': 'nova-compute'})
msg = "New compute service disabled due to config option."
self.assertEqual(msg, service['disabled_reason'])
def test_service_create_disabled_non_compute_ignored(self):
"""Tests that enable_new_services=False has no effect on
auto-disabling a new non-nova-compute service.
"""
self.flags(enable_new_services=False)
service = self._create_service({'binary': 'nova-scheduler'})
self.assertFalse(service['disabled'])
self.assertIsNone(service['disabled_reason'])
def test_service_destroy(self):
service1 = self._create_service({})
service2 = self._create_service({'host': 'fake_host2'})
db.service_destroy(self.ctxt, service1['id'])
self.assertRaises(exception.ServiceNotFound,
db.service_get, self.ctxt, service1['id'])
self._assertEqualObjects(db.service_get(self.ctxt, service2['id']),
service2, ignored_keys=['compute_node'])
def test_service_update(self):
service = self._create_service({})
new_values = {
'uuid': uuidsentinel.service,
'host': 'fake_host1',
'binary': 'fake_binary1',
'topic': 'fake_topic1',
'report_count': 4,
'disabled': True
}
db.service_update(self.ctxt, service['id'], new_values)
updated_service = db.service_get(self.ctxt, service['id'])
for key, value in new_values.items():
self.assertEqual(value, updated_service[key])
def test_service_update_not_found_exception(self):
self.assertRaises(exception.ServiceNotFound,
db.service_update, self.ctxt, 100500, {})
def test_service_update_with_set_forced_down(self):
service = self._create_service({})
db.service_update(self.ctxt, service['id'], {'forced_down': True})
updated_service = db.service_get(self.ctxt, service['id'])
self.assertTrue(updated_service['forced_down'])
def test_service_update_with_unset_forced_down(self):
service = self._create_service({'forced_down': True})
db.service_update(self.ctxt, service['id'], {'forced_down': False})
updated_service = db.service_get(self.ctxt, service['id'])
self.assertFalse(updated_service['forced_down'])
def test_service_get(self):
service1 = self._create_service({})
self._create_service({'host': 'some_other_fake_host'})
real_service1 = db.service_get(self.ctxt, service1['id'])
self._assertEqualObjects(service1, real_service1,
ignored_keys=['compute_node'])
def test_service_get_by_uuid(self):
service1 = self._create_service({'uuid': uuidsentinel.service1_uuid})
self._create_service({'host': 'some_other_fake_host',
'uuid': uuidsentinel.other_uuid})
real_service1 = db.service_get_by_uuid(
self.ctxt, uuidsentinel.service1_uuid)
self._assertEqualObjects(service1, real_service1,
ignored_keys=['compute_node'])
def test_service_get_by_uuid_not_found(self):
"""Asserts that ServiceNotFound is raised if a service is not found by
a given uuid.
"""
self.assertRaises(exception.ServiceNotFound, db.service_get_by_uuid,
self.ctxt, uuidsentinel.service_not_found)
def test_service_get_minimum_version(self):
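        # Deleted and forced-down services should not count toward the
        # minimum, so the expected minimum version for 'compute' is 2.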
self._create_service({'version': 1,
'host': 'host3',
'binary': 'compute',
'forced_down': True})
self._create_service({'version': 2,
'host': 'host1',
'binary': 'compute'})
self._create_service({'version': 3,
'host': 'host2',
'binary': 'compute'})
self._create_service({'version': 0,
'host': 'host0',
'binary': 'compute',
'deleted': 1})
self.assertEqual({'compute': 2},
db.service_get_minimum_version(self.ctxt,
['compute']))
def test_service_get_not_found_exception(self):
self.assertRaises(exception.ServiceNotFound,
db.service_get, self.ctxt, 100500)
def test_service_get_by_host_and_topic(self):
service1 = self._create_service({'host': 'host1', 'topic': 'topic1'})
self._create_service({'host': 'host2', 'topic': 'topic2'})
real_service1 = db.service_get_by_host_and_topic(self.ctxt,
host='host1',
topic='topic1')
self._assertEqualObjects(service1, real_service1)
def test_service_get_by_host_and_binary(self):
service1 = self._create_service({'host': 'host1', 'binary': 'foo'})
self._create_service({'host': 'host2', 'binary': 'bar'})
real_service1 = db.service_get_by_host_and_binary(self.ctxt,
host='host1',
binary='foo')
self._assertEqualObjects(service1, real_service1)
def test_service_get_by_host_and_binary_raises(self):
self.assertRaises(exception.HostBinaryNotFound,
db.service_get_by_host_and_binary, self.ctxt,
host='host1', binary='baz')
def test_service_get_all(self):
values = [
{'host': 'host1', 'topic': 'topic1'},
{'host': 'host2', 'topic': 'topic2'},
{'disabled': True}
]
services = [self._create_service(vals) for vals in values]
disabled_services = [services[-1]]
non_disabled_services = services[:-1]
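        # The second positional argument to service_get_all filters on the
        # disabled flag: True returns only disabled services, False only
        # enabled ones.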
compares = [
(services, db.service_get_all(self.ctxt)),
(disabled_services, db.service_get_all(self.ctxt, True)),
(non_disabled_services, db.service_get_all(self.ctxt, False))
]
for comp in compares:
self._assertEqualListsOfObjects(*comp)
def test_service_get_all_by_topic(self):
values = [
{'host': 'host1', 'topic': 't1'},
{'host': 'host2', 'topic': 't1'},
{'disabled': True, 'topic': 't1'},
{'host': 'host3', 'topic': 't2'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:2]
real = db.service_get_all_by_topic(self.ctxt, 't1')
self._assertEqualListsOfObjects(expected, real)
def test_service_get_all_by_binary(self):
values = [
{'host': 'host1', 'binary': 'b1'},
{'host': 'host2', 'binary': 'b1'},
{'disabled': True, 'binary': 'b1'},
{'host': 'host3', 'binary': 'b2'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:2]
real = db.service_get_all_by_binary(self.ctxt, 'b1')
self._assertEqualListsOfObjects(expected, real)
def test_service_get_all_by_binary_include_disabled(self):
values = [
{'host': 'host1', 'binary': 'b1'},
{'host': 'host2', 'binary': 'b1'},
{'disabled': True, 'binary': 'b1'},
{'host': 'host3', 'binary': 'b2'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:3]
real = db.service_get_all_by_binary(self.ctxt, 'b1',
include_disabled=True)
self._assertEqualListsOfObjects(expected, real)
def test_service_get_all_computes_by_hv_type(self):
values = [
{'host': 'host1', 'binary': 'nova-compute'},
{'host': 'host2', 'binary': 'nova-compute', 'disabled': True},
{'host': 'host3', 'binary': 'nova-compute'},
{'host': 'host4', 'binary': 'b2'}
]
services = [self._create_service(vals) for vals in values]
compute_nodes = [
_make_compute_node('host1', 'node1', 'ironic', services[0]['id']),
_make_compute_node('host1', 'node2', 'ironic', services[0]['id']),
_make_compute_node('host2', 'node3', 'ironic', services[1]['id']),
_make_compute_node('host3', 'host3', 'kvm', services[2]['id']),
]
[db.compute_node_create(self.ctxt, cn) for cn in compute_nodes]
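        # Only the host1 service qualifies: host2 is disabled (and excluded)
        # and host3 runs kvm rather than ironic.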
expected = services[:1]
real = db.service_get_all_computes_by_hv_type(self.ctxt,
'ironic',
include_disabled=False)
self._assertEqualListsOfObjects(expected, real)
def test_service_get_all_computes_by_hv_type_include_disabled(self):
values = [
{'host': 'host1', 'binary': 'nova-compute'},
{'host': 'host2', 'binary': 'nova-compute', 'disabled': True},
{'host': 'host3', 'binary': 'nova-compute'},
{'host': 'host4', 'binary': 'b2'}
]
services = [self._create_service(vals) for vals in values]
compute_nodes = [
_make_compute_node('host1', 'node1', 'ironic', services[0]['id']),
_make_compute_node('host1', 'node2', 'ironic', services[0]['id']),
_make_compute_node('host2', 'node3', 'ironic', services[1]['id']),
_make_compute_node('host3', 'host3', 'kvm', services[2]['id']),
]
[db.compute_node_create(self.ctxt, cn) for cn in compute_nodes]
expected = services[:2]
real = db.service_get_all_computes_by_hv_type(self.ctxt,
'ironic',
include_disabled=True)
self._assertEqualListsOfObjects(expected, real)
def test_service_get_all_by_host(self):
values = [
{'host': 'host1', 'topic': 't11', 'binary': 'b11'},
{'host': 'host1', 'topic': 't12', 'binary': 'b12'},
{'host': 'host2', 'topic': 't1'},
{'host': 'host3', 'topic': 't1'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:2]
real = db.service_get_all_by_host(self.ctxt, 'host1')
self._assertEqualListsOfObjects(expected, real)
def test_service_get_by_compute_host(self):
values = [
{'host': 'host1', 'binary': 'nova-compute'},
{'host': 'host2', 'binary': 'nova-scheduler'},
{'host': 'host3', 'binary': 'nova-compute'}
]
services = [self._create_service(vals) for vals in values]
real_service = db.service_get_by_compute_host(self.ctxt, 'host1')
self._assertEqualObjects(services[0], real_service)
self.assertRaises(exception.ComputeHostNotFound,
db.service_get_by_compute_host,
self.ctxt, 'non-exists-host')
def test_service_get_by_compute_host_not_found(self):
self.assertRaises(exception.ComputeHostNotFound,
db.service_get_by_compute_host,
self.ctxt, 'non-exists-host')
def test_service_binary_exists_exception(self):
db.service_create(self.ctxt, self._get_base_values())
values = self._get_base_values()
values.update({'topic': 'top1'})
self.assertRaises(exception.ServiceBinaryExists, db.service_create,
self.ctxt, values)
def test_service_topic_exists_exceptions(self):
db.service_create(self.ctxt, self._get_base_values())
values = self._get_base_values()
values.update({'binary': 'bin1'})
self.assertRaises(exception.ServiceTopicExists, db.service_create,
self.ctxt, values)
def test_migrate_service_uuids(self):
# Start with nothing.
total, done = db.service_uuids_online_data_migration(self.ctxt, 10)
self.assertEqual(0, total)
self.assertEqual(0, done)
# Create two services, one with a uuid and one without.
db.service_create(self.ctxt,
dict(host='host1', binary='nova-compute',
topic='compute', report_count=1,
disabled=False))
db.service_create(self.ctxt,
dict(host='host2', binary='nova-compute',
topic='compute', report_count=1,
disabled=False, uuid=uuidsentinel.host2))
        # Now migrate them; we should find one and update one.
total, done = db.service_uuids_online_data_migration(
self.ctxt, 10)
self.assertEqual(1, total)
self.assertEqual(1, done)
# Get the services back to make sure the original uuid didn't change.
services = db.service_get_all_by_binary(self.ctxt, 'nova-compute')
self.assertEqual(2, len(services))
for service in services:
if service['host'] == 'host2':
self.assertEqual(uuidsentinel.host2, service['uuid'])
else:
self.assertIsNotNone(service['uuid'])
# Run the online migration again to see nothing was processed.
total, done = db.service_uuids_online_data_migration(
self.ctxt, 10)
self.assertEqual(0, total)
self.assertEqual(0, done)
def test_migration_migrate_to_uuid(self):
total, done = sqlalchemy_api.migration_migrate_to_uuid(self.ctxt, 10)
self.assertEqual(0, total)
self.assertEqual(0, done)
# Create two migrations, one with a uuid and one without.
db.migration_create(self.ctxt,
dict(source_compute='src', source_node='srcnode',
dest_compute='dst', dest_node='dstnode',
status='running'))
db.migration_create(self.ctxt,
dict(source_compute='src', source_node='srcnode',
dest_compute='dst', dest_node='dstnode',
status='running',
uuid=uuidsentinel.migration2))
        # Now migrate them; we should find one and update one.
total, done = sqlalchemy_api.migration_migrate_to_uuid(self.ctxt, 10)
self.assertEqual(1, total)
self.assertEqual(1, done)
# Get the migrations back to make sure the original uuid didn't change.
migrations = db.migration_get_all_by_filters(self.ctxt, {})
uuids = [m.uuid for m in migrations]
self.assertIn(uuidsentinel.migration2, uuids)
self.assertNotIn(None, uuids)
# Run the online migration again to see nothing was processed.
total, done = sqlalchemy_api.migration_migrate_to_uuid(self.ctxt, 10)
self.assertEqual(0, total)
self.assertEqual(0, done)
class InstanceActionTestCase(test.TestCase, ModelsObjectComparatorMixin):
IGNORED_FIELDS = [
'id',
'created_at',
'updated_at',
'deleted_at',
'deleted'
]
def setUp(self):
super(InstanceActionTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _create_action_values(self, uuid, action='run_instance',
ctxt=None, extra=None, instance_create=True):
if ctxt is None:
ctxt = self.ctxt
if instance_create:
db.instance_create(ctxt, {'uuid': uuid})
utc_now = timeutils.utcnow()
values = {
'action': action,
'instance_uuid': uuid,
'request_id': ctxt.request_id,
'user_id': ctxt.user_id,
'project_id': ctxt.project_id,
'start_time': utc_now,
'updated_at': utc_now,
'message': 'action-message'
}
if extra is not None:
values.update(extra)
return values
def _create_event_values(self, uuid, event='schedule',
ctxt=None, extra=None):
if ctxt is None:
ctxt = self.ctxt
values = {
'event': event,
'instance_uuid': uuid,
'request_id': ctxt.request_id,
'start_time': timeutils.utcnow(),
'host': 'fake-host',
'details': 'fake-details',
}
if extra is not None:
values.update(extra)
return values
def _assertActionSaved(self, action, uuid):
"""Retrieve the action to ensure it was successfully added."""
actions = db.actions_get(self.ctxt, uuid)
self.assertEqual(1, len(actions))
self._assertEqualObjects(action, actions[0])
def _assertActionEventSaved(self, event, action_id):
# Retrieve the event to ensure it was successfully added
events = db.action_events_get(self.ctxt, action_id)
self.assertEqual(1, len(events))
self._assertEqualObjects(event, events[0],
['instance_uuid', 'request_id'])
def test_instance_action_start(self):
"""Create an instance action."""
uuid = uuidsentinel.uuid1
action_values = self._create_action_values(uuid)
action = db.action_start(self.ctxt, action_values)
ignored_keys = self.IGNORED_FIELDS + ['finish_time']
self._assertEqualObjects(action_values, action, ignored_keys)
self._assertActionSaved(action, uuid)
def test_instance_action_finish(self):
"""Create an instance action."""
uuid = uuidsentinel.uuid1
action_values = self._create_action_values(uuid)
db.action_start(self.ctxt, action_values)
action_values['finish_time'] = timeutils.utcnow()
action = db.action_finish(self.ctxt, action_values)
self._assertEqualObjects(action_values, action, self.IGNORED_FIELDS)
self._assertActionSaved(action, uuid)
def test_instance_action_finish_without_started_event(self):
"""Create an instance finish action."""
uuid = uuidsentinel.uuid1
action_values = self._create_action_values(uuid)
action_values['finish_time'] = timeutils.utcnow()
self.assertRaises(exception.InstanceActionNotFound, db.action_finish,
self.ctxt, action_values)
def test_instance_actions_get_by_instance(self):
"""Ensure we can get actions by UUID."""
uuid1 = uuidsentinel.uuid1
expected = []
action_values = self._create_action_values(uuid1)
action = db.action_start(self.ctxt, action_values)
expected.append(action)
action_values['action'] = 'resize'
action = db.action_start(self.ctxt, action_values)
expected.append(action)
# Create some extra actions
uuid2 = uuidsentinel.uuid2
ctxt2 = context.get_admin_context()
action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
db.action_start(ctxt2, action_values)
db.action_start(ctxt2, action_values)
# Retrieve the action to ensure it was successfully added
actions = db.actions_get(self.ctxt, uuid1)
self._assertEqualListsOfObjects(expected, actions)
def test_instance_actions_get_are_in_order(self):
"""Ensure retrived actions are in order."""
uuid1 = uuidsentinel.uuid1
extra = {
'created_at': timeutils.utcnow()
}
action_values = self._create_action_values(uuid1, extra=extra)
action1 = db.action_start(self.ctxt, action_values)
action_values['action'] = 'delete'
action2 = db.action_start(self.ctxt, action_values)
actions = db.actions_get(self.ctxt, uuid1)
self.assertEqual(2, len(actions))
self._assertEqualOrderedListOfObjects([action2, action1], actions)
def test_instance_actions_get_with_limit(self):
"""Test list instance actions can support pagination."""
uuid1 = uuidsentinel.uuid1
extra = {
'created_at': timeutils.utcnow()
}
action_values = self._create_action_values(uuid1, extra=extra)
action1 = db.action_start(self.ctxt, action_values)
action_values['action'] = 'delete'
action_values['request_id'] = 'req-' + uuidsentinel.reqid1
db.action_start(self.ctxt, action_values)
actions = db.actions_get(self.ctxt, uuid1)
self.assertEqual(2, len(actions))
actions = db.actions_get(self.ctxt, uuid1, limit=1)
self.assertEqual(1, len(actions))
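        # Paginate using the second action's request_id as the marker; only
        # the first (older) action should be returned.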
actions = db.actions_get(
self.ctxt, uuid1, limit=1,
marker=action_values['request_id'])
self.assertEqual(1, len(actions))
self._assertEqualListsOfObjects([action1], actions)
def test_instance_actions_get_with_changes_since(self):
"""Test list instance actions can support timestamp filter."""
uuid1 = uuidsentinel.uuid1
extra = {
'created_at': timeutils.utcnow()
}
action_values = self._create_action_values(uuid1, extra=extra)
db.action_start(self.ctxt, action_values)
timestamp = timeutils.utcnow()
action_values['start_time'] = timestamp
action_values['updated_at'] = timestamp
action_values['action'] = 'delete'
action2 = db.action_start(self.ctxt, action_values)
actions = db.actions_get(self.ctxt, uuid1)
self.assertEqual(2, len(actions))
self.assertNotEqual(actions[0]['updated_at'],
actions[1]['updated_at'])
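        # Only the second action was updated at or after the timestamp, so
        # changes-since should return just that one.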
actions = db.actions_get(
self.ctxt, uuid1, filters={'changes-since': timestamp})
self.assertEqual(1, len(actions))
self._assertEqualListsOfObjects([action2], actions)
def test_instance_actions_get_with_changes_before(self):
"""Test list instance actions can support timestamp filter."""
uuid1 = uuidsentinel.uuid1
expected = []
extra = {
'created_at': timeutils.utcnow()
}
action_values = self._create_action_values(uuid1, extra=extra)
action = db.action_start(self.ctxt, action_values)
expected.append(action)
timestamp = timeutils.utcnow()
action_values['start_time'] = timestamp
action_values['updated_at'] = timestamp
action_values['action'] = 'delete'
action = db.action_start(self.ctxt, action_values)
expected.append(action)
actions = db.actions_get(self.ctxt, uuid1)
self.assertEqual(2, len(actions))
self.assertNotEqual(actions[0]['updated_at'],
actions[1]['updated_at'])
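        # Both actions were started at or before the timestamp, so
        # changes-before should return both of them.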
actions = db.actions_get(
self.ctxt, uuid1, filters={'changes-before': timestamp})
self.assertEqual(2, len(actions))
self._assertEqualListsOfObjects(expected, actions)
def test_instance_actions_get_with_not_found_marker(self):
self.assertRaises(exception.MarkerNotFound,
db.actions_get, self.ctxt, uuidsentinel.uuid1,
marker=uuidsentinel.not_found_marker)
def test_instance_action_get_by_instance_and_action(self):
"""Ensure we can get an action by instance UUID and action id."""
ctxt2 = context.get_admin_context()
uuid1 = uuidsentinel.uuid1
uuid2 = uuidsentinel.uuid2
action_values = self._create_action_values(uuid1)
db.action_start(self.ctxt, action_values)
request_id = action_values['request_id']
# NOTE(rpodolyaka): ensure we use a different req id for the 2nd req
action_values['action'] = 'resize'
action_values['request_id'] = 'req-00000000-7522-4d99-7ff-111111111111'
db.action_start(self.ctxt, action_values)
action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
db.action_start(ctxt2, action_values)
db.action_start(ctxt2, action_values)
action = db.action_get_by_request_id(self.ctxt, uuid1, request_id)
self.assertEqual('run_instance', action['action'])
self.assertEqual(self.ctxt.request_id, action['request_id'])
def test_instance_action_get_by_instance_and_action_by_order(self):
instance_uuid = uuidsentinel.uuid1
t1 = {
'created_at': timeutils.utcnow()
}
t2 = {
'created_at': timeutils.utcnow() + datetime.timedelta(seconds=5)
}
# Create a confirmResize action
action_values = self._create_action_values(
instance_uuid, action='confirmResize', extra=t1)
a1 = db.action_start(self.ctxt, action_values)
# Create a delete action with same instance uuid and req id
action_values = self._create_action_values(
instance_uuid, action='delete', extra=t2, instance_create=False)
a2 = db.action_start(self.ctxt, action_values)
self.assertEqual(a1['request_id'], a2['request_id'])
self.assertEqual(a1['instance_uuid'], a2['instance_uuid'])
self.assertTrue(a1['created_at'] < a2['created_at'])
action = db.action_get_by_request_id(self.ctxt, instance_uuid,
a1['request_id'])
        # Only get the delete action (last created)
self.assertEqual(action['action'], a2['action'])
def test_instance_action_event_start(self):
"""Create an instance action event."""
uuid = uuidsentinel.uuid1
action_values = self._create_action_values(uuid)
action = db.action_start(self.ctxt, action_values)
event_values = self._create_event_values(uuid)
event = db.action_event_start(self.ctxt, event_values)
event_values['action_id'] = action['id']
ignored = self.IGNORED_FIELDS + ['finish_time', 'traceback', 'result']
self._assertEqualObjects(event_values, event, ignored)
self._assertActionEventSaved(event, action['id'])
def test_instance_action_event_start_without_action(self):
"""Create an instance action event."""
uuid = uuidsentinel.uuid1
event_values = self._create_event_values(uuid)
self.assertRaises(exception.InstanceActionNotFound,
db.action_event_start, self.ctxt, event_values)
def test_instance_action_event_finish_without_started_event(self):
"""Finish an instance action event."""
uuid = uuidsentinel.uuid1
db.action_start(self.ctxt, self._create_action_values(uuid))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
self.assertRaises(exception.InstanceActionEventNotFound,
db.action_event_finish, self.ctxt, event_values)
def test_instance_action_event_finish_without_action(self):
"""Finish an instance action event."""
uuid = uuidsentinel.uuid1
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
self.assertRaises(exception.InstanceActionNotFound,
db.action_event_finish, self.ctxt, event_values)
def test_instance_action_event_finish_success(self):
"""Finish an instance action event."""
uuid = uuidsentinel.uuid1
action = db.action_start(self.ctxt, self._create_action_values(uuid))
db.action_event_start(self.ctxt, self._create_event_values(uuid))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
event = db.action_event_finish(self.ctxt, event_values)
self._assertActionEventSaved(event, action['id'])
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
self.assertNotEqual('Error', action['message'])
def test_instance_action_event_finish_error(self):
"""Finish an instance action event with an error."""
uuid = uuidsentinel.uuid1
action = db.action_start(self.ctxt, self._create_action_values(uuid))
db.action_event_start(self.ctxt, self._create_event_values(uuid))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Error'
}
event_values = self._create_event_values(uuid, extra=event_values)
event = db.action_event_finish(self.ctxt, event_values)
self._assertActionEventSaved(event, action['id'])
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
self.assertEqual('Error', action['message'])
def test_instance_action_and_event_start_string_time(self):
"""Create an instance action and event with a string start_time."""
uuid = uuidsentinel.uuid1
action = db.action_start(self.ctxt, self._create_action_values(uuid))
event_values = {'start_time': timeutils.utcnow().isoformat()}
event_values = self._create_event_values(uuid, extra=event_values)
event = db.action_event_start(self.ctxt, event_values)
self._assertActionEventSaved(event, action['id'])
def test_instance_action_events_get_are_in_order(self):
"""Ensure retrived action events are in order."""
uuid1 = uuidsentinel.uuid1
action = db.action_start(self.ctxt,
self._create_action_values(uuid1))
extra1 = {
'created_at': timeutils.utcnow()
}
extra2 = {
'created_at': timeutils.utcnow() + datetime.timedelta(seconds=5)
}
event_val1 = self._create_event_values(uuid1, 'schedule', extra=extra1)
event_val2 = self._create_event_values(uuid1, 'run', extra=extra1)
event_val3 = self._create_event_values(uuid1, 'stop', extra=extra2)
event1 = db.action_event_start(self.ctxt, event_val1)
event2 = db.action_event_start(self.ctxt, event_val2)
event3 = db.action_event_start(self.ctxt, event_val3)
events = db.action_events_get(self.ctxt, action['id'])
self.assertEqual(3, len(events))
self._assertEqualOrderedListOfObjects([event3, event2, event1], events,
['instance_uuid', 'request_id'])
def test_instance_action_event_get_by_id(self):
"""Get a specific instance action event."""
ctxt2 = context.get_admin_context()
uuid1 = uuidsentinel.uuid1
uuid2 = uuidsentinel.uuid2
action = db.action_start(self.ctxt,
self._create_action_values(uuid1))
db.action_start(ctxt2,
self._create_action_values(uuid2, 'reboot', ctxt2))
event = db.action_event_start(self.ctxt,
self._create_event_values(uuid1))
event_values = self._create_event_values(uuid2, 'reboot', ctxt2)
db.action_event_start(ctxt2, event_values)
# Retrieve the event to ensure it was successfully added
saved_event = db.action_event_get_by_id(self.ctxt,
action['id'],
event['id'])
self._assertEqualObjects(event, saved_event,
['instance_uuid', 'request_id'])
def test_instance_action_event_start_with_different_request_id(self):
uuid = uuidsentinel.uuid1
action_values = self._create_action_values(uuid)
action = db.action_start(self.ctxt, action_values)
# init_host case
fake_admin_context = context.get_admin_context()
event_values = self._create_event_values(uuid, ctxt=fake_admin_context)
event = db.action_event_start(fake_admin_context, event_values)
event_values['action_id'] = action['id']
ignored = self.IGNORED_FIELDS + ['finish_time', 'traceback', 'result']
self._assertEqualObjects(event_values, event, ignored)
self._assertActionEventSaved(event, action['id'])
def test_instance_action_event_finish_with_different_request_id(self):
uuid = uuidsentinel.uuid1
action = db.action_start(self.ctxt, self._create_action_values(uuid))
# init_host case
fake_admin_context = context.get_admin_context()
db.action_event_start(fake_admin_context, self._create_event_values(
uuid, ctxt=fake_admin_context))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, ctxt=fake_admin_context,
extra=event_values)
event = db.action_event_finish(fake_admin_context, event_values)
self._assertActionEventSaved(event, action['id'])
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
self.assertNotEqual('Error', action['message'])
def test_instance_action_updated_with_event_start_and_finish_action(self):
uuid = uuidsentinel.uuid1
action = db.action_start(self.ctxt, self._create_action_values(uuid))
updated_create = action['updated_at']
self.assertIsNotNone(updated_create)
event_values = self._create_event_values(uuid)
# event start action
time_start = timeutils.utcnow() + datetime.timedelta(seconds=5)
event_values['start_time'] = time_start
db.action_event_start(self.ctxt, event_values)
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
updated_event_start = action['updated_at']
self.assertEqual(time_start.isoformat(),
updated_event_start.isoformat())
self.assertTrue(updated_event_start > updated_create)
# event finish action
time_finish = timeutils.utcnow() + datetime.timedelta(seconds=10)
event_values = {
'finish_time': time_finish,
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
db.action_event_finish(self.ctxt, event_values)
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
updated_event_finish = action['updated_at']
self.assertEqual(time_finish.isoformat(),
updated_event_finish.isoformat())
self.assertTrue(updated_event_finish > updated_event_start)
def test_instance_action_not_updated_with_unknown_event_request(self):
"""Tests that we don't update the action.updated_at field when
starting or finishing an action event if we couldn't find the
action by the request_id.
"""
# Create a valid action - this represents an active user request.
uuid = uuidsentinel.uuid1
action = db.action_start(self.ctxt, self._create_action_values(uuid))
updated_create = action['updated_at']
self.assertIsNotNone(updated_create)
event_values = self._create_event_values(uuid)
# Now start an event on an unknown request ID and admin context where
# project_id won't be set.
time_start = timeutils.utcnow() + datetime.timedelta(seconds=5)
event_values['start_time'] = time_start
random_request_id = 'req-%s' % uuidsentinel.request_id
event_values['request_id'] = random_request_id
admin_context = context.get_admin_context()
event_ref = db.action_event_start(admin_context, event_values)
# The event would be created on the existing action.
self.assertEqual(action['id'], event_ref['action_id'])
        # And the action.updated_at should be the same as before the event was
        # started.
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
self.assertEqual(updated_create, action['updated_at'])
# Now finish the event on the unknown request ID and admin context.
time_finish = timeutils.utcnow() + datetime.timedelta(seconds=10)
event_values = {
'finish_time': time_finish,
'request_id': random_request_id,
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
db.action_event_finish(admin_context, event_values)
        # And the action.updated_at should be the same as before the event was
        # finished.
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
self.assertEqual(updated_create, action['updated_at'])
class InstanceFaultTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(InstanceFaultTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _create_fault_values(self, uuid, code=404):
return {
'message': 'message',
'details': 'detail',
'instance_uuid': uuid,
'code': code,
'host': 'localhost'
}
def test_instance_fault_create(self):
"""Ensure we can create an instance fault."""
uuid = uuidsentinel.uuid1
# Ensure no faults registered for this instance
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
self.assertEqual(0, len(faults[uuid]))
# Create a fault
fault_values = self._create_fault_values(uuid)
db.instance_create(self.ctxt, {'uuid': uuid})
fault = db.instance_fault_create(self.ctxt, fault_values)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id']
self._assertEqualObjects(fault_values, fault, ignored_keys)
# Retrieve the fault to ensure it was successfully added
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
self.assertEqual(1, len(faults[uuid]))
self._assertEqualObjects(fault, faults[uuid][0])
def test_instance_fault_get_by_instance(self):
"""Ensure we can retrieve faults for instance."""
uuids = [uuidsentinel.uuid1, uuidsentinel.uuid2]
fault_codes = [404, 500]
expected = {}
# Create faults
for uuid in uuids:
db.instance_create(self.ctxt, {'uuid': uuid})
expected[uuid] = []
for code in fault_codes:
fault_values = self._create_fault_values(uuid, code)
fault = db.instance_fault_create(self.ctxt, fault_values)
# We expect the faults to be returned ordered by created_at in
# descending order, so insert the newly created fault at the
# front of our list.
expected[uuid].insert(0, fault)
# Ensure faults are saved
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, uuids)
self.assertEqual(len(expected), len(faults))
for uuid in uuids:
self._assertEqualOrderedListOfObjects(expected[uuid], faults[uuid])
def test_instance_fault_get_latest_by_instance(self):
"""Ensure we can retrieve only latest faults for instance."""
uuids = [uuidsentinel.uuid1, uuidsentinel.uuid2]
fault_codes = [404, 500]
expected = {}
# Create faults
for uuid in uuids:
db.instance_create(self.ctxt, {'uuid': uuid})
expected[uuid] = []
for code in fault_codes:
fault_values = self._create_fault_values(uuid, code)
fault = db.instance_fault_create(self.ctxt, fault_values)
expected[uuid].append(fault)
# We are only interested in the latest fault for each instance
for uuid in expected:
expected[uuid] = expected[uuid][-1:]
# Ensure faults are saved
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, uuids,
latest=True)
self.assertEqual(len(expected), len(faults))
for uuid in uuids:
self._assertEqualListsOfObjects(expected[uuid], faults[uuid])
def test_instance_faults_get_by_instance_uuids_no_faults(self):
uuid = uuidsentinel.uuid1
        # An empty list (not None) should be returned when no faults exist.
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
expected = {uuid: []}
self.assertEqual(expected, faults)
@mock.patch.object(query.Query, 'filter')
def test_instance_faults_get_by_instance_uuids_no_uuids(self, mock_filter):
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [])
self.assertEqual({}, faults)
self.assertFalse(mock_filter.called)
@mock.patch('time.sleep', new=lambda x: None)
class FixedIPTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(FixedIPTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _timeout_test(self, ctxt, timeout, multi_host):
instance = db.instance_create(ctxt, dict(host='foo'))
net = db.network_create_safe(ctxt, dict(multi_host=multi_host,
host='bar'))
old = timeout - datetime.timedelta(seconds=5)
new = timeout + datetime.timedelta(seconds=5)
# should deallocate
db.fixed_ip_create(ctxt, dict(allocated=False,
instance_uuid=instance['uuid'],
network_id=net['id'],
updated_at=old))
# still allocated
db.fixed_ip_create(ctxt, dict(allocated=True,
instance_uuid=instance['uuid'],
network_id=net['id'],
updated_at=old))
# wrong network
db.fixed_ip_create(ctxt, dict(allocated=False,
instance_uuid=instance['uuid'],
network_id=None,
updated_at=old))
# too new
db.fixed_ip_create(ctxt, dict(allocated=False,
instance_uuid=instance['uuid'],
network_id=None,
updated_at=new))
def test_fixed_ip_disassociate_all_by_timeout_single_host(self):
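        # With multi_host=False, timed-out fixed IPs are matched against the
        # network's host ('bar') rather than the instance's host ('foo').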
now = timeutils.utcnow()
self._timeout_test(self.ctxt, now, False)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
self.assertEqual(result, 0)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
self.assertEqual(result, 1)
def test_fixed_ip_disassociate_all_by_timeout_multi_host(self):
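        # With multi_host=True the instance's host ('foo') is used instead,
        # so the counts are reversed.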
now = timeutils.utcnow()
self._timeout_test(self.ctxt, now, True)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
self.assertEqual(result, 1)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
self.assertEqual(result, 0)
def test_fixed_ip_get_by_floating_address(self):
fixed_ip = db.fixed_ip_create(self.ctxt, {'address': '192.168.0.2'})
values = {'address': '8.7.6.5',
'fixed_ip_id': fixed_ip['id']}
floating = db.floating_ip_create(self.ctxt, values)['address']
fixed_ip_ref = db.fixed_ip_get_by_floating_address(self.ctxt, floating)
self._assertEqualObjects(fixed_ip, fixed_ip_ref)
def test_fixed_ip_get_by_host(self):
host_ips = {
'host1': ['1.1.1.1', '1.1.1.2', '1.1.1.3'],
'host2': ['1.1.1.4', '1.1.1.5'],
'host3': ['1.1.1.6']
}
for host, ips in host_ips.items():
for ip in ips:
instance_uuid = self._create_instance(host=host)
db.fixed_ip_create(self.ctxt, {'address': ip})
db.fixed_ip_associate(self.ctxt, ip, instance_uuid)
for host, ips in host_ips.items():
ips_on_host = [x['address']
for x in db.fixed_ip_get_by_host(self.ctxt, host)]
self._assertEqualListsOfPrimitivesAsSets(ips_on_host, ips)
def test_fixed_ip_get_by_network_host_not_found_exception(self):
self.assertRaises(
exception.FixedIpNotFoundForNetworkHost,
db.fixed_ip_get_by_network_host,
self.ctxt, 1, 'ignore')
def test_fixed_ip_get_by_network_host_fixed_ip_found(self):
db.fixed_ip_create(self.ctxt, dict(network_id=1, host='host'))
fip = db.fixed_ip_get_by_network_host(self.ctxt, 1, 'host')
self.assertEqual(1, fip['network_id'])
self.assertEqual('host', fip['host'])
def _create_instance(self, **kwargs):
instance = db.instance_create(self.ctxt, kwargs)
return instance['uuid']
def test_fixed_ip_get_by_instance_fixed_ip_found(self):
instance_uuid = self._create_instance()
FIXED_IP_ADDRESS = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS))
ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
[ips_list[0].address])
def test_fixed_ip_get_by_instance_multiple_fixed_ips_found(self):
instance_uuid = self._create_instance()
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ip_get_by_instance_inappropriate_ignored(self):
instance_uuid = self._create_instance()
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
another_instance = db.instance_create(self.ctxt, {})
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=another_instance['uuid'], address="192.168.1.7"))
ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ip_get_by_instance_not_found_exception(self):
instance_uuid = self._create_instance()
self.assertRaises(exception.FixedIpNotFoundForInstance,
db.fixed_ip_get_by_instance,
self.ctxt, instance_uuid)
def test_fixed_ips_by_virtual_interface_fixed_ip_found(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
FIXED_IP_ADDRESS = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
[ips_list[0].address])
def test_fixed_ips_by_virtual_interface_multiple_fixed_ips_found(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ips_by_virtual_interface_inappropriate_ignored(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
another_vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=another_vif.id, address="192.168.1.7"))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ips_by_virtual_interface_no_ip_found(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self.assertEqual(0, len(ips_list))
def create_fixed_ip(self, **params):
default_params = {'address': '192.168.0.1'}
default_params.update(params)
return db.fixed_ip_create(self.ctxt, default_params)['address']
def test_fixed_ip_associate_fails_if_ip_not_in_network(self):
instance_uuid = self._create_instance()
self.assertRaises(exception.FixedIpNotFoundForNetwork,
db.fixed_ip_associate,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_fails_if_ip_in_use(self):
instance_uuid = self._create_instance()
address = self.create_fixed_ip(instance_uuid=instance_uuid)
self.assertRaises(exception.FixedIpAlreadyInUse,
db.fixed_ip_associate,
self.ctxt, address, instance_uuid)
def test_fixed_ip_associate_succeeds(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip(network_id=network['id'])
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
def test_fixed_ip_associate_succeeds_and_sets_network(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip()
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
self.assertEqual(fixed_ip['network_id'], network['id'])
def test_fixed_ip_associate_succeeds_retry_on_deadlock(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip()
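        # Simulate a deadlock on the first query; the associate call is
        # expected to retry and succeed on the second attempt, hence two
        # calls to first().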
def fake_first():
if mock_first.call_count == 1:
raise db_exc.DBDeadlock()
else:
return objects.Instance(id=1, address=address, reserved=False,
instance_uuid=None, network_id=None)
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
self.assertEqual(2, mock_first.call_count)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
self.assertEqual(fixed_ip['network_id'], network['id'])
def test_fixed_ip_associate_succeeds_retry_on_no_rows_updated(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip()
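        # The first lookup returns a row that does not end up being updated
        # (no rows affected), so the code retries; the second lookup returns
        # a row that can be associated.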
def fake_first():
if mock_first.call_count == 1:
return objects.Instance(id=2, address=address, reserved=False,
instance_uuid=None, network_id=None)
else:
return objects.Instance(id=1, address=address, reserved=False,
instance_uuid=None, network_id=None)
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
self.assertEqual(2, mock_first.call_count)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
self.assertEqual(fixed_ip['network_id'], network['id'])
def test_fixed_ip_associate_succeeds_retry_limit_exceeded(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip()
def fake_first():
return objects.Instance(id=2, address=address, reserved=False,
instance_uuid=None, network_id=None)
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
self.assertRaises(exception.FixedIpAssociateFailed,
db.fixed_ip_associate, self.ctxt, address,
instance_uuid, network_id=network['id'])
            # 5 retries + initial attempt
self.assertEqual(6, mock_first.call_count)
def test_fixed_ip_associate_ip_not_in_network_with_no_retries(self):
instance_uuid = self._create_instance()
with mock.patch('sqlalchemy.orm.query.Query.first',
return_value=None) as mock_first:
self.assertRaises(exception.FixedIpNotFoundForNetwork,
db.fixed_ip_associate,
self.ctxt, None, instance_uuid)
self.assertEqual(1, mock_first.call_count)
def test_fixed_ip_associate_no_network_id_with_no_retries(self):
# Tests that trying to associate an instance to a fixed IP on a network
# but without specifying the network ID during associate will fail.
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip(network_id=network['id'])
with mock.patch('sqlalchemy.orm.query.Query.first',
return_value=None) as mock_first:
self.assertRaises(exception.FixedIpNotFoundForNetwork,
db.fixed_ip_associate,
self.ctxt, address, instance_uuid)
self.assertEqual(1, mock_first.call_count)
def test_fixed_ip_associate_with_vif(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
vif = db.virtual_interface_create(self.ctxt, {})
address = self.create_fixed_ip()
fixed_ip = db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'],
virtual_interface_id=vif['id'])
self.assertTrue(fixed_ip['allocated'])
self.assertEqual(vif['id'], fixed_ip['virtual_interface_id'])
def test_fixed_ip_associate_not_allocated_without_vif(self):
instance_uuid = self._create_instance()
address = self.create_fixed_ip()
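        # Without a virtual_interface_id the fixed IP should not be marked as
        # allocated (contrast with test_fixed_ip_associate_with_vif above).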
fixed_ip = db.fixed_ip_associate(self.ctxt, address, instance_uuid)
self.assertFalse(fixed_ip['allocated'])
self.assertIsNone(fixed_ip['virtual_interface_id'])
def test_fixed_ip_associate_pool_invalid_uuid(self):
instance_uuid = '123'
self.assertRaises(exception.InvalidUUID, db.fixed_ip_associate_pool,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_pool_no_more_fixed_ips(self):
instance_uuid = self._create_instance()
self.assertRaises(exception.NoMoreFixedIps, db.fixed_ip_associate_pool,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_pool_ignores_leased_addresses(self):
instance_uuid = self._create_instance()
params = {'address': '192.168.1.5',
'leased': True}
db.fixed_ip_create(self.ctxt, params)
self.assertRaises(exception.NoMoreFixedIps, db.fixed_ip_associate_pool,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_pool_succeeds(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip(network_id=network['id'])
db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
def test_fixed_ip_associate_pool_order(self):
"""Test that fixed_ip always uses oldest fixed_ip.
We should always be using the fixed ip with the oldest
updated_at.
"""
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
self.addCleanup(timeutils.clear_time_override)
start = timeutils.utcnow()
for i in range(1, 4):
now = start - datetime.timedelta(hours=i)
timeutils.set_time_override(now)
address = self.create_fixed_ip(
updated_at=now,
address='10.1.0.%d' % i,
network_id=network['id'])
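        # The last address created ('10.1.0.3') has the oldest updated_at, so
        # the pool allocation below should pick it.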
db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
def test_fixed_ip_associate_pool_succeeds_fip_ref_network_id_is_none(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
self.create_fixed_ip(network_id=None)
fixed_ip = db.fixed_ip_associate_pool(self.ctxt,
network['id'], instance_uuid)
self.assertEqual(instance_uuid, fixed_ip['instance_uuid'])
self.assertEqual(network['id'], fixed_ip['network_id'])
def test_fixed_ip_associate_pool_succeeds_retry(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip(network_id=network['id'])
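        # The first SELECT returns a bogus address, so the update matches no
        # rows and the allocation is retried; the second attempt returns the
        # real address and succeeds.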
def fake_first():
if mock_first.call_count == 1:
return {'network_id': network['id'], 'address': 'invalid',
'instance_uuid': None, 'host': None, 'id': 1}
else:
return {'network_id': network['id'], 'address': address,
'instance_uuid': None, 'host': None, 'id': 1}
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid)
self.assertEqual(2, mock_first.call_count)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(instance_uuid, fixed_ip['instance_uuid'])
def test_fixed_ip_associate_pool_retry_limit_exceeded(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
self.create_fixed_ip(network_id=network['id'])
def fake_first():
return {'network_id': network['id'], 'address': 'invalid',
'instance_uuid': None, 'host': None, 'id': 1}
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
self.assertRaises(exception.FixedIpAssociateFailed,
db.fixed_ip_associate_pool, self.ctxt,
network['id'], instance_uuid)
# 5 retries + initial attempt
self.assertEqual(6, mock_first.call_count)
def test_fixed_ip_create_same_address(self):
address = '192.168.1.5'
params = {'address': address}
db.fixed_ip_create(self.ctxt, params)
self.assertRaises(exception.FixedIpExists, db.fixed_ip_create,
self.ctxt, params)
def test_fixed_ip_create_success(self):
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': '192.168.1.5',
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
fixed_ip_data = db.fixed_ip_create(self.ctxt, param)
self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
def test_fixed_ip_bulk_create_same_address(self):
address_1 = '192.168.1.5'
address_2 = '192.168.1.6'
instance_uuid = self._create_instance()
network_id_1 = db.network_create_safe(self.ctxt, {})['id']
network_id_2 = db.network_create_safe(self.ctxt, {})['id']
params = [
{'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': address_2, 'allocated': False,
'instance_uuid': instance_uuid, 'network_id': network_id_1,
'virtual_interface_id': None},
{'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': address_1, 'allocated': False,
'instance_uuid': instance_uuid, 'network_id': network_id_1,
'virtual_interface_id': None},
{'reserved': False, 'deleted': 0, 'leased': False,
'host': 'localhost', 'address': address_2, 'allocated': True,
'instance_uuid': instance_uuid, 'network_id': network_id_2,
'virtual_interface_id': None},
]
self.assertRaises(exception.FixedIpExists, db.fixed_ip_bulk_create,
self.ctxt, params)
# In this case the transaction will be rolled back and none of the ips
# will make it to the database.
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address, self.ctxt, address_1)
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address, self.ctxt, address_2)
def test_fixed_ip_bulk_create_success(self):
address_1 = '192.168.1.5'
address_2 = '192.168.1.6'
instance_uuid = self._create_instance()
network_id_1 = db.network_create_safe(self.ctxt, {})['id']
network_id_2 = db.network_create_safe(self.ctxt, {})['id']
params = [
{'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': address_1, 'allocated': False,
'instance_uuid': instance_uuid, 'network_id': network_id_1,
'virtual_interface_id': None},
{'reserved': False, 'deleted': 0, 'leased': False,
'host': 'localhost', 'address': address_2, 'allocated': True,
'instance_uuid': instance_uuid, 'network_id': network_id_2,
'virtual_interface_id': None}
]
db.fixed_ip_bulk_create(self.ctxt, params)
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at',
'virtual_interface', 'network', 'floating_ips']
fixed_ip_data = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
        # We have no `id` in the incoming data, so we cannot use
        # _assertEqualListsOfObjects to compare the incoming data with the
        # received objects.
fixed_ip_data = sorted(fixed_ip_data, key=lambda i: i['network_id'])
params = sorted(params, key=lambda i: i['network_id'])
for param, ip in zip(params, fixed_ip_data):
self._assertEqualObjects(param, ip, ignored_keys)
def test_fixed_ip_disassociate(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
values = {'address': '192.168.1.5', 'instance_uuid': instance_uuid}
vif = db.virtual_interface_create(self.ctxt, values)
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': vif['id']
}
db.fixed_ip_create(self.ctxt, param)
db.fixed_ip_disassociate(self.ctxt, address)
fixed_ip_data = db.fixed_ip_get_by_address(self.ctxt, address)
ignored_keys = ['created_at', 'id', 'deleted_at',
'updated_at', 'instance_uuid',
'virtual_interface_id']
self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
self.assertIsNone(fixed_ip_data['instance_uuid'])
self.assertIsNone(fixed_ip_data['virtual_interface_id'])
def test_fixed_ip_get_not_found_exception(self):
self.assertRaises(exception.FixedIpNotFound,
db.fixed_ip_get, self.ctxt, 0)
def test_fixed_ip_get_success2(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
fixed_ip_id = db.fixed_ip_create(self.ctxt, param)
self.ctxt.is_admin = False
self.assertRaises(exception.Forbidden, db.fixed_ip_get,
self.ctxt, fixed_ip_id)
def test_fixed_ip_get_success(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
db.fixed_ip_create(self.ctxt, param)
fixed_ip_id = db.fixed_ip_get_by_address(self.ctxt, address)['id']
fixed_ip_data = db.fixed_ip_get(self.ctxt, fixed_ip_id)
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
def test_fixed_ip_get_by_address(self):
instance_uuid = self._create_instance()
db.fixed_ip_create(self.ctxt, {'address': '1.2.3.4',
'instance_uuid': instance_uuid,
})
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, '1.2.3.4',
columns_to_join=['instance'])
self.assertIn('instance', fixed_ip.__dict__)
self.assertEqual(instance_uuid, fixed_ip.instance.uuid)
def test_fixed_ip_update_not_found_for_address(self):
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_update, self.ctxt,
'192.168.1.5', {})
def test_fixed_ip_update(self):
instance_uuid_1 = self._create_instance()
instance_uuid_2 = self._create_instance()
network_id_1 = db.network_create_safe(self.ctxt, {})['id']
network_id_2 = db.network_create_safe(self.ctxt, {})['id']
param_1 = {
'reserved': True, 'deleted': 0, 'leased': True,
'host': '192.168.133.1', 'address': '10.0.0.2',
'allocated': True, 'instance_uuid': instance_uuid_1,
'network_id': network_id_1, 'virtual_interface_id': '123',
}
param_2 = {
'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': '10.0.0.3', 'allocated': False,
'instance_uuid': instance_uuid_2, 'network_id': network_id_2,
'virtual_interface_id': None
}
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
fixed_ip_addr = db.fixed_ip_create(self.ctxt, param_1)['address']
db.fixed_ip_update(self.ctxt, fixed_ip_addr, param_2)
fixed_ip_after_update = db.fixed_ip_get_by_address(self.ctxt,
param_2['address'])
self._assertEqualObjects(param_2, fixed_ip_after_update, ignored_keys)
@mock.patch('time.sleep', new=lambda x: None)
class FloatingIpTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(FloatingIpTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'address': '1.1.1.1',
'fixed_ip_id': None,
'project_id': 'fake_project',
'host': 'fake_host',
'auto_assigned': False,
'pool': 'fake_pool',
'interface': 'fake_interface',
}
def _create_floating_ip(self, values):
if not values:
values = {}
vals = self._get_base_values()
vals.update(values)
return db.floating_ip_create(self.ctxt, vals)
def test_floating_ip_get(self):
values = [{'address': '0.0.0.0'}, {'address': '1.1.1.1'}]
floating_ips = [self._create_floating_ip(val) for val in values]
for floating_ip in floating_ips:
real_floating_ip = db.floating_ip_get(self.ctxt, floating_ip['id'])
self._assertEqualObjects(floating_ip, real_floating_ip,
ignored_keys=['fixed_ip'])
def test_floating_ip_get_not_found(self):
self.assertRaises(exception.FloatingIpNotFound,
db.floating_ip_get, self.ctxt, 100500)
@mock.patch.object(query.Query, 'first', side_effect=db_exc.DBError())
def test_floating_ip_get_with_long_id_not_found(self, mock_query):
self.assertRaises(exception.InvalidID,
db.floating_ip_get, self.ctxt, 123456789101112)
mock_query.assert_called_once_with()
def test_floating_ip_get_pools(self):
values = [
{'address': '0.0.0.0', 'pool': 'abc'},
{'address': '1.1.1.1', 'pool': 'abc'},
{'address': '2.2.2.2', 'pool': 'def'},
{'address': '3.3.3.3', 'pool': 'ghi'},
]
for val in values:
self._create_floating_ip(val)
expected_pools = [{'name': x}
for x in set(map(lambda x: x['pool'], values))]
real_pools = db.floating_ip_get_pools(self.ctxt)
self._assertEqualListsOfPrimitivesAsSets(real_pools, expected_pools)
def test_floating_ip_allocate_address(self):
pools = {
'pool1': ['0.0.0.0', '1.1.1.1'],
'pool2': ['2.2.2.2'],
'pool3': ['3.3.3.3', '4.4.4.4', '5.5.5.5']
}
for pool, addresses in pools.items():
for address in addresses:
vals = {'pool': pool, 'address': address, 'project_id': None}
self._create_floating_ip(vals)
project_id = self._get_base_values()['project_id']
for pool, addresses in pools.items():
alloc_addrs = []
for i in addresses:
float_addr = db.floating_ip_allocate_address(self.ctxt,
project_id, pool)
alloc_addrs.append(float_addr)
self._assertEqualListsOfPrimitivesAsSets(alloc_addrs, addresses)
def test_floating_ip_allocate_auto_assigned(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
float_ips = []
for i in range(0, 2):
float_ips.append(self._create_floating_ip(
{"address": addresses[i]}))
for i in range(2, 4):
float_ips.append(self._create_floating_ip({"address": addresses[i],
"auto_assigned": True}))
for i in range(0, 2):
float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
self.assertFalse(float_ip.auto_assigned)
for i in range(2, 4):
float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
self.assertTrue(float_ip.auto_assigned)
def test_floating_ip_allocate_address_no_more_floating_ips(self):
self.assertRaises(exception.NoMoreFloatingIps,
db.floating_ip_allocate_address,
self.ctxt, 'any_project_id', 'no_such_pool')
def test_floating_ip_allocate_not_authorized(self):
ctxt = context.RequestContext(user_id='a', project_id='abc',
is_admin=False)
self.assertRaises(exception.Forbidden,
db.floating_ip_allocate_address,
ctxt, 'other_project_id', 'any_pool')
def test_floating_ip_allocate_address_succeeds_retry(self):
pool = 'pool0'
address = '0.0.0.0'
vals = {'pool': pool, 'address': address, 'project_id': None}
floating_ip = self._create_floating_ip(vals)
project_id = self._get_base_values()['project_id']
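        # The first SELECT returns a row with a bogus id, so the update
        # matches nothing and the allocation is retried; the second attempt
        # returns the real row and succeeds.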
def fake_first():
if mock_first.call_count == 1:
return {'pool': pool, 'project_id': None, 'fixed_ip_id': None,
'address': address, 'id': 'invalid_id'}
else:
return {'pool': pool, 'project_id': None, 'fixed_ip_id': None,
'address': address, 'id': 1}
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
float_addr = db.floating_ip_allocate_address(self.ctxt,
project_id, pool)
self.assertEqual(address, float_addr)
self.assertEqual(2, mock_first.call_count)
float_ip = db.floating_ip_get(self.ctxt, floating_ip.id)
self.assertEqual(project_id, float_ip['project_id'])
def test_floating_ip_allocate_address_retry_limit_exceeded(self):
pool = 'pool0'
address = '0.0.0.0'
vals = {'pool': pool, 'address': address, 'project_id': None}
self._create_floating_ip(vals)
project_id = self._get_base_values()['project_id']
def fake_first():
return {'pool': pool, 'project_id': None, 'fixed_ip_id': None,
'address': address, 'id': 'invalid_id'}
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
self.assertRaises(exception.FloatingIpAllocateFailed,
db.floating_ip_allocate_address, self.ctxt,
project_id, pool)
# 5 retries + initial attempt
self.assertEqual(6, mock_first.call_count)
def test_floating_ip_allocate_address_no_more_ips_with_no_retries(self):
with mock.patch('sqlalchemy.orm.query.Query.first',
return_value=None) as mock_first:
self.assertRaises(exception.NoMoreFloatingIps,
db.floating_ip_allocate_address,
self.ctxt, 'any_project_id', 'no_such_pool')
self.assertEqual(1, mock_first.call_count)
def _get_existing_ips(self):
return [ip['address'] for ip in db.floating_ip_get_all(self.ctxt)]
def test_floating_ip_bulk_create(self):
expected_ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
result = db.floating_ip_bulk_create(self.ctxt,
[{'address': x} for x in expected_ips],
want_result=False)
self.assertIsNone(result)
self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
expected_ips)
def test_floating_ip_bulk_create_duplicate(self):
ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
prepare_ips = lambda x: {'address': x}
result = db.floating_ip_bulk_create(self.ctxt,
list(map(prepare_ips, ips)))
self.assertEqual(ips, [ip.address for ip in result])
self.assertRaises(exception.FloatingIpExists,
db.floating_ip_bulk_create,
self.ctxt,
list(map(prepare_ips, ['1.1.1.5', '1.1.1.4'])),
want_result=False)
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_get_by_address,
self.ctxt, '1.1.1.5')
def test_floating_ip_bulk_destroy(self):
ips_for_delete = []
ips_for_non_delete = []
def create_ips(i, j):
return [{'address': '1.1.%s.%s' % (i, k)} for k in range(1, j + 1)]
        # NOTE(boris-42): Create more than 256 IPs to check that
        #                 _ip_range_splitter works properly.
for i in range(1, 3):
ips_for_delete.extend(create_ips(i, 255))
ips_for_non_delete.extend(create_ips(3, 255))
result = db.floating_ip_bulk_create(self.ctxt,
ips_for_delete + ips_for_non_delete,
want_result=False)
self.assertIsNone(result)
non_bulk_ips_for_delete = create_ips(4, 3)
non_bulk_ips_for_non_delete = create_ips(5, 3)
non_bulk_ips = non_bulk_ips_for_delete + non_bulk_ips_for_non_delete
for dct in non_bulk_ips:
self._create_floating_ip(dct)
ips_for_delete.extend(non_bulk_ips_for_delete)
ips_for_non_delete.extend(non_bulk_ips_for_non_delete)
db.floating_ip_bulk_destroy(self.ctxt, ips_for_delete)
expected_addresses = [x['address'] for x in ips_for_non_delete]
self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
expected_addresses)
def test_floating_ip_create(self):
floating_ip = self._create_floating_ip({})
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self.assertIsNotNone(floating_ip['id'])
self._assertEqualObjects(floating_ip, self._get_base_values(),
ignored_keys)
def test_floating_ip_create_duplicate(self):
self._create_floating_ip({})
self.assertRaises(exception.FloatingIpExists,
self._create_floating_ip, {})
def _create_fixed_ip(self, params):
default_params = {'address': '192.168.0.1'}
default_params.update(params)
return db.fixed_ip_create(self.ctxt, default_params)['address']
def test_floating_ip_fixed_ip_associate(self):
float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
project_id = self.ctxt.project_id
float_ips = [self._create_floating_ip({'address': address,
'project_id': project_id})
for address in float_addresses]
fixed_addrs = [self._create_fixed_ip({'address': address})
for address in fixed_addresses]
for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
fixed_ip = db.floating_ip_fixed_ip_associate(self.ctxt,
float_ip.address,
fixed_addr, 'host')
self.assertEqual(fixed_ip.address, fixed_addr)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
self.assertEqual(fixed_ip.id, updated_float_ip.fixed_ip_id)
self.assertEqual('host', updated_float_ip.host)
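        # Re-associating an already associated pair should simply return the
        # existing fixed IP.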
fixed_ip = db.floating_ip_fixed_ip_associate(self.ctxt,
float_addresses[0],
fixed_addresses[0],
'host')
self.assertEqual(fixed_ip.address, fixed_addresses[0])
def test_floating_ip_fixed_ip_associate_float_ip_not_found(self):
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.floating_ip_fixed_ip_associate,
self.ctxt, '10.10.10.10', 'some', 'some')
def test_floating_ip_associate_failed(self):
fixed_ip = self._create_fixed_ip({'address': '7.7.7.7'})
self.assertRaises(exception.FloatingIpAssociateFailed,
db.floating_ip_fixed_ip_associate,
self.ctxt, '10.10.10.10', fixed_ip, 'some')
def test_floating_ip_deallocate(self):
values = {'address': '1.1.1.1', 'project_id': 'fake', 'host': 'fake'}
float_ip = self._create_floating_ip(values)
rows_updated = db.floating_ip_deallocate(self.ctxt, float_ip.address)
self.assertEqual(1, rows_updated)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
self.assertIsNone(updated_float_ip.project_id)
self.assertIsNone(updated_float_ip.host)
self.assertFalse(updated_float_ip.auto_assigned)
def test_floating_ip_deallocate_address_not_found(self):
self.assertEqual(0, db.floating_ip_deallocate(self.ctxt, '2.2.2.2'))
def test_floating_ip_deallocate_address_associated_ip(self):
float_address = '1.1.1.1'
fixed_address = '2.2.2.1'
project_id = self.ctxt.project_id
float_ip = self._create_floating_ip({'address': float_address,
'project_id': project_id})
fixed_addr = self._create_fixed_ip({'address': fixed_address})
db.floating_ip_fixed_ip_associate(self.ctxt, float_ip.address,
fixed_addr, 'host')
self.assertEqual(0, db.floating_ip_deallocate(self.ctxt,
float_address))
def test_floating_ip_destroy(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
for addr in addresses]
expected_len = len(addresses)
for float_ip in float_ips:
db.floating_ip_destroy(self.ctxt, float_ip.address)
self.assertRaises(exception.FloatingIpNotFound,
db.floating_ip_get, self.ctxt, float_ip.id)
expected_len -= 1
if expected_len > 0:
self.assertEqual(expected_len,
len(db.floating_ip_get_all(self.ctxt)))
else:
self.assertRaises(exception.NoFloatingIpsDefined,
db.floating_ip_get_all, self.ctxt)
def test_floating_ip_disassociate(self):
float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
project_id = self.ctxt.project_id
float_ips = [self._create_floating_ip({'address': address,
'project_id': project_id})
for address in float_addresses]
fixed_addrs = [self._create_fixed_ip({'address': address})
for address in fixed_addresses]
for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
db.floating_ip_fixed_ip_associate(self.ctxt,
float_ip.address,
fixed_addr, 'host')
for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
fixed = db.floating_ip_disassociate(self.ctxt, float_ip.address)
self.assertEqual(fixed.address, fixed_addr)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
self.assertIsNone(updated_float_ip.fixed_ip_id)
self.assertIsNone(updated_float_ip.host)
def test_floating_ip_disassociate_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_disassociate, self.ctxt,
'11.11.11.11')
def test_floating_ip_get_all(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
for addr in addresses]
self._assertEqualListsOfObjects(float_ips,
db.floating_ip_get_all(self.ctxt),
ignored_keys="fixed_ip")
def test_floating_ip_get_all_associated(self):
instance = db.instance_create(self.ctxt, {'uuid': 'fake'})
project_id = self.ctxt.project_id
float_ip = self._create_floating_ip({'address': '1.1.1.1',
'project_id': project_id})
fixed_ip = self._create_fixed_ip({'address': '2.2.2.2',
'instance_uuid': instance.uuid})
db.floating_ip_fixed_ip_associate(self.ctxt,
float_ip.address,
fixed_ip,
'host')
float_ips = db.floating_ip_get_all(self.ctxt)
self.assertEqual(1, len(float_ips))
self.assertEqual(float_ip.address, float_ips[0].address)
self.assertEqual(fixed_ip, float_ips[0].fixed_ip.address)
self.assertEqual(instance.uuid, float_ips[0].fixed_ip.instance_uuid)
def test_floating_ip_get_all_not_found(self):
self.assertRaises(exception.NoFloatingIpsDefined,
db.floating_ip_get_all, self.ctxt)
def test_floating_ip_get_all_by_host(self):
hosts = {
'host1': ['1.1.1.1', '1.1.1.2'],
'host2': ['2.1.1.1', '2.1.1.2'],
'host3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
}
hosts_with_float_ips = {}
for host, addresses in hosts.items():
hosts_with_float_ips[host] = []
for address in addresses:
float_ip = self._create_floating_ip({'host': host,
'address': address})
hosts_with_float_ips[host].append(float_ip)
for host, float_ips in hosts_with_float_ips.items():
real_float_ips = db.floating_ip_get_all_by_host(self.ctxt, host)
self._assertEqualListsOfObjects(float_ips, real_float_ips,
ignored_keys="fixed_ip")
def test_floating_ip_get_all_by_host_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForHost,
db.floating_ip_get_all_by_host,
self.ctxt, 'non_exists_host')
def test_floating_ip_get_all_by_project(self):
projects = {
'pr1': ['1.1.1.1', '1.1.1.2'],
'pr2': ['2.1.1.1', '2.1.1.2'],
'pr3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
}
projects_with_float_ips = {}
for project_id, addresses in projects.items():
projects_with_float_ips[project_id] = []
for address in addresses:
float_ip = self._create_floating_ip({'project_id': project_id,
'address': address})
projects_with_float_ips[project_id].append(float_ip)
for project_id, float_ips in projects_with_float_ips.items():
real_float_ips = db.floating_ip_get_all_by_project(self.ctxt,
project_id)
self._assertEqualListsOfObjects(float_ips, real_float_ips,
ignored_keys='fixed_ip')
def test_floating_ip_get_all_by_project_not_authorized(self):
ctxt = context.RequestContext(user_id='a', project_id='abc',
is_admin=False)
self.assertRaises(exception.Forbidden,
db.floating_ip_get_all_by_project,
ctxt, 'other_project')
def test_floating_ip_get_by_address(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
for addr in addresses]
for float_ip in float_ips:
real_float_ip = db.floating_ip_get_by_address(self.ctxt,
float_ip.address)
self._assertEqualObjects(float_ip, real_float_ip,
ignored_keys='fixed_ip')
def test_floating_ip_get_by_address_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_get_by_address,
self.ctxt, '20.20.20.20')
@mock.patch.object(query.Query, 'first', side_effect=db_exc.DBError())
def test_floating_ip_get_by_invalid_address(self, mock_query):
self.assertRaises(exception.InvalidIpAddressError,
db.floating_ip_get_by_address,
self.ctxt, 'non_exists_host')
mock_query.assert_called_once_with()
def test_floating_ip_get_by_fixed_address(self):
fixed_float = [
('1.1.1.1', '2.2.2.1'),
('1.1.1.2', '2.2.2.2'),
('1.1.1.3', '2.2.2.3')
]
for fixed_addr, float_addr in fixed_float:
project_id = self.ctxt.project_id
self._create_floating_ip({'address': float_addr,
'project_id': project_id})
self._create_fixed_ip({'address': fixed_addr})
db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
fixed_addr, 'some_host')
for fixed_addr, float_addr in fixed_float:
float_ip = db.floating_ip_get_by_fixed_address(self.ctxt,
fixed_addr)
self.assertEqual(float_addr, float_ip[0]['address'])
def test_floating_ip_get_by_fixed_ip_id(self):
fixed_float = [
('1.1.1.1', '2.2.2.1'),
('1.1.1.2', '2.2.2.2'),
('1.1.1.3', '2.2.2.3')
]
for fixed_addr, float_addr in fixed_float:
project_id = self.ctxt.project_id
self._create_floating_ip({'address': float_addr,
'project_id': project_id})
self._create_fixed_ip({'address': fixed_addr})
db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
fixed_addr, 'some_host')
for fixed_addr, float_addr in fixed_float:
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, fixed_addr)
float_ip = db.floating_ip_get_by_fixed_ip_id(self.ctxt,
fixed_ip['id'])
self.assertEqual(float_addr, float_ip[0]['address'])
def test_floating_ip_update(self):
float_ip = self._create_floating_ip({})
values = {
'project_id': 'some_pr',
'host': 'some_host',
'auto_assigned': True,
'interface': 'some_interface',
'pool': 'some_pool'
}
floating_ref = db.floating_ip_update(self.ctxt, float_ip['address'],
values)
self.assertIsNotNone(floating_ref)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip['id'])
self._assertEqualObjects(updated_float_ip, values,
ignored_keys=['id', 'address', 'updated_at',
'deleted_at', 'created_at',
'deleted', 'fixed_ip_id',
'fixed_ip'])
def test_floating_ip_update_to_duplicate(self):
float_ip1 = self._create_floating_ip({'address': '1.1.1.1'})
float_ip2 = self._create_floating_ip({'address': '1.1.1.2'})
self.assertRaises(exception.FloatingIpExists,
db.floating_ip_update,
self.ctxt, float_ip2['address'],
{'address': float_ip1['address']})
class InstanceDestroyConstraints(test.TestCase):
def test_destroy_with_equal_any_constraint_met_single_value(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'task_state': 'deleting'})
constraint = db.constraint(task_state=db.equal_any('deleting'))
db.instance_destroy(ctx, instance['uuid'], constraint)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
ctx, instance['uuid'])
def test_destroy_with_equal_any_constraint_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'task_state': 'deleting'})
constraint = db.constraint(task_state=db.equal_any('deleting',
'error'))
db.instance_destroy(ctx, instance['uuid'], constraint)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
ctx, instance['uuid'])
def test_destroy_with_equal_any_constraint_not_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'vm_state': 'resize'})
constraint = db.constraint(vm_state=db.equal_any('active', 'error'))
self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
ctx, instance['uuid'], constraint)
instance = db.instance_get_by_uuid(ctx, instance['uuid'])
self.assertFalse(instance['deleted'])
def test_destroy_with_not_equal_constraint_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'task_state': 'deleting'})
constraint = db.constraint(task_state=db.not_equal('error', 'resize'))
db.instance_destroy(ctx, instance['uuid'], constraint)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
ctx, instance['uuid'])
def test_destroy_with_not_equal_constraint_not_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'vm_state': 'active'})
constraint = db.constraint(vm_state=db.not_equal('active', 'error'))
self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
ctx, instance['uuid'], constraint)
instance = db.instance_get_by_uuid(ctx, instance['uuid'])
self.assertFalse(instance['deleted'])
class VolumeUsageDBApiTestCase(test.TestCase):
def setUp(self):
super(VolumeUsageDBApiTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.useFixture(test.TimeOverride())
def test_vol_usage_update_no_totals_update(self):
ctxt = context.get_admin_context()
now = timeutils.utcnow()
self.useFixture(utils_fixture.TimeFixture(now))
start_time = now - datetime.timedelta(seconds=10)
expected_vol_usages = {
u'1': {'volume_id': u'1',
'instance_uuid': 'fake-instance-uuid1',
'project_id': 'fake-project-uuid1',
'user_id': 'fake-user-uuid1',
'curr_reads': 1000,
'curr_read_bytes': 2000,
'curr_writes': 3000,
'curr_write_bytes': 4000,
'curr_last_refreshed': now,
'tot_reads': 0,
'tot_read_bytes': 0,
'tot_writes': 0,
'tot_write_bytes': 0,
'tot_last_refreshed': None},
u'2': {'volume_id': u'2',
'instance_uuid': 'fake-instance-uuid2',
'project_id': 'fake-project-uuid2',
'user_id': 'fake-user-uuid2',
'curr_reads': 100,
'curr_read_bytes': 200,
'curr_writes': 300,
'curr_write_bytes': 400,
'tot_reads': 0,
'tot_read_bytes': 0,
'tot_writes': 0,
'tot_write_bytes': 0,
'tot_last_refreshed': None}
}
def _compare(vol_usage, expected):
for key, value in expected.items():
self.assertEqual(vol_usage[key], value)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
db.vol_usage_update(ctxt, u'1', rd_req=10, rd_bytes=20,
wr_req=30, wr_bytes=40,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
user_id='fake-user-uuid1',
availability_zone='fake-az')
db.vol_usage_update(ctxt, u'2', rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid2',
project_id='fake-project-uuid2',
user_id='fake-user-uuid2',
availability_zone='fake-az')
db.vol_usage_update(ctxt, u'1', rd_req=1000, rd_bytes=2000,
wr_req=3000, wr_bytes=4000,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
user_id='fake-user-uuid1',
availability_zone='fake-az')
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 2)
for usage in vol_usages:
_compare(usage, expected_vol_usages[usage.volume_id])
def test_vol_usage_update_totals_update(self):
ctxt = context.get_admin_context()
now = datetime.datetime(1, 1, 1, 1, 0, 0)
start_time = now - datetime.timedelta(seconds=10)
now1 = now + datetime.timedelta(minutes=1)
now2 = now + datetime.timedelta(minutes=2)
now3 = now + datetime.timedelta(minutes=3)
time_fixture = self.useFixture(utils_fixture.TimeFixture(now))
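        # Alternate plain updates with update_totals=True updates: the totals
        # should accumulate while the curr_* counters reset to zero after
        # each totals update.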
db.vol_usage_update(ctxt, u'1', rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
user_id='fake-user-uuid',
availability_zone='fake-az')
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 0)
self.assertEqual(current_usage['curr_reads'], 100)
time_fixture.advance_time_delta(now1 - now)
db.vol_usage_update(ctxt, u'1', rd_req=200, rd_bytes=300,
wr_req=400, wr_bytes=500,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
user_id='fake-user-uuid',
availability_zone='fake-az',
update_totals=True)
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 200)
self.assertEqual(current_usage['curr_reads'], 0)
time_fixture.advance_time_delta(now2 - now1)
db.vol_usage_update(ctxt, u'1', rd_req=300, rd_bytes=400,
wr_req=500, wr_bytes=600,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
availability_zone='fake-az',
user_id='fake-user-uuid')
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 200)
self.assertEqual(current_usage['curr_reads'], 300)
time_fixture.advance_time_delta(now3 - now2)
db.vol_usage_update(ctxt, u'1', rd_req=400, rd_bytes=500,
wr_req=600, wr_bytes=700,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
user_id='fake-user-uuid',
availability_zone='fake-az',
update_totals=True)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
expected_vol_usages = {'volume_id': u'1',
'project_id': 'fake-project-uuid',
'user_id': 'fake-user-uuid',
'instance_uuid': 'fake-instance-uuid',
'availability_zone': 'fake-az',
'tot_reads': 600,
'tot_read_bytes': 800,
'tot_writes': 1000,
'tot_write_bytes': 1200,
'tot_last_refreshed': now3,
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'curr_last_refreshed': now2}
self.assertEqual(1, len(vol_usages))
for key, value in expected_vol_usages.items():
self.assertEqual(vol_usages[0][key], value, key)
def test_vol_usage_update_when_blockdevicestats_reset(self):
ctxt = context.get_admin_context()
now = timeutils.utcnow()
start_time = now - datetime.timedelta(seconds=10)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
db.vol_usage_update(ctxt, u'1',
rd_req=10000, rd_bytes=20000,
wr_req=30000, wr_bytes=40000,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
        # Instance rebooted or crashed. Block device stats were reset and are
        # less than the previous values.
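        # When a reset is detected, the previous current counters are rolled
        # into the totals and the new readings start a fresh current period
        # (see the expected tot_*/curr_* values asserted below).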
db.vol_usage_update(ctxt, u'1',
rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
db.vol_usage_update(ctxt, u'1',
rd_req=200, rd_bytes=300,
wr_req=400, wr_bytes=500,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
expected_vol_usage = {'volume_id': u'1',
'instance_uuid': 'fake-instance-uuid1',
'project_id': 'fake-project-uuid1',
'availability_zone': 'fake-az',
'user_id': 'fake-user-uuid1',
'curr_reads': 200,
'curr_read_bytes': 300,
'curr_writes': 400,
'curr_write_bytes': 500,
'tot_reads': 10000,
'tot_read_bytes': 20000,
'tot_writes': 30000,
'tot_write_bytes': 40000}
for key, value in expected_vol_usage.items():
self.assertEqual(vol_usage[key], value, key)
def test_vol_usage_update_totals_update_when_blockdevicestats_reset(self):
        # This is unlikely to happen, but could occur when a volume is
        # detached right after an instance has rebooted / recovered and
        # before the system polled and updated the volume usage cache table.
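        # The reset readings (100/200/300/400) are simply added on top of the
        # previously stored totals, giving the 10100/20200/30300/40400
        # expected below.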
ctxt = context.get_admin_context()
now = timeutils.utcnow()
start_time = now - datetime.timedelta(seconds=10)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
db.vol_usage_update(ctxt, u'1',
rd_req=10000, rd_bytes=20000,
wr_req=30000, wr_bytes=40000,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
        # Instance rebooted or crashed. Block device stats were reset and are
        # less than the previous values.
db.vol_usage_update(ctxt, u'1',
rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1',
update_totals=True)
vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
expected_vol_usage = {'volume_id': u'1',
'instance_uuid': 'fake-instance-uuid1',
'project_id': 'fake-project-uuid1',
'availability_zone': 'fake-az',
'user_id': 'fake-user-uuid1',
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'tot_reads': 10100,
'tot_read_bytes': 20200,
'tot_writes': 30300,
'tot_write_bytes': 40400}
for key, value in expected_vol_usage.items():
self.assertEqual(vol_usage[key], value, key)
class TaskLogTestCase(test.TestCase):
def setUp(self):
super(TaskLogTestCase, self).setUp()
self.context = context.get_admin_context()
now = timeutils.utcnow()
self.begin = (now - datetime.timedelta(seconds=10)).isoformat()
self.end = (now - datetime.timedelta(seconds=5)).isoformat()
self.task_name = 'fake-task-name'
self.host = 'fake-host'
self.message = 'Fake task message'
db.task_log_begin_task(self.context, self.task_name, self.begin,
self.end, self.host, message=self.message)
def test_task_log_get(self):
result = db.task_log_get(self.context, self.task_name, self.begin,
self.end, self.host)
self.assertEqual(result['task_name'], self.task_name)
self.assertEqual(result['period_beginning'],
timeutils.parse_strtime(self.begin))
self.assertEqual(result['period_ending'],
timeutils.parse_strtime(self.end))
self.assertEqual(result['host'], self.host)
self.assertEqual(result['message'], self.message)
def test_task_log_get_all(self):
result = db.task_log_get_all(self.context, self.task_name, self.begin,
self.end, host=self.host)
self.assertEqual(len(result), 1)
result = db.task_log_get_all(self.context, self.task_name, self.begin,
self.end, host=self.host, state='')
self.assertEqual(len(result), 0)
def test_task_log_begin_task(self):
db.task_log_begin_task(self.context, 'fake', self.begin,
self.end, self.host, task_items=42,
message=self.message)
result = db.task_log_get(self.context, 'fake', self.begin,
self.end, self.host)
self.assertEqual(result['task_name'], 'fake')
def test_task_log_begin_task_duplicate(self):
params = (self.context, 'fake', self.begin, self.end, self.host)
db.task_log_begin_task(*params, message=self.message)
self.assertRaises(exception.TaskAlreadyRunning,
db.task_log_begin_task,
*params, message=self.message)
def test_task_log_end_task(self):
errors = 1
db.task_log_end_task(self.context, self.task_name, self.begin,
self.end, self.host, errors, message=self.message)
result = db.task_log_get(self.context, self.task_name, self.begin,
self.end, self.host)
self.assertEqual(result['errors'], 1)
def test_task_log_end_task_task_not_running(self):
self.assertRaises(exception.TaskNotRunning,
db.task_log_end_task, self.context, 'nonexistent',
self.begin, self.end, self.host, 42,
message=self.message)
class BlockDeviceMappingTestCase(test.TestCase):
def setUp(self):
super(BlockDeviceMappingTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.instance = db.instance_create(self.ctxt, {})
def _create_bdm(self, values):
values.setdefault('instance_uuid', self.instance['uuid'])
values.setdefault('device_name', 'fake_device')
values.setdefault('source_type', 'volume')
values.setdefault('destination_type', 'volume')
block_dev = block_device.BlockDeviceDict(values)
db.block_device_mapping_create(self.ctxt, block_dev, legacy=False)
uuid = block_dev['instance_uuid']
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
for bdm in bdms:
if bdm['device_name'] == values['device_name']:
return bdm
def test_scrub_empty_str_values_no_effect(self):
values = {'volume_size': 5}
expected = copy.copy(values)
sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
self.assertEqual(values, expected)
def test_scrub_empty_str_values_empty_string(self):
values = {'volume_size': ''}
sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
self.assertEqual(values, {})
def test_scrub_empty_str_values_empty_unicode(self):
values = {'volume_size': u''}
sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
self.assertEqual(values, {})
def test_block_device_mapping_create(self):
bdm = self._create_bdm({})
self.assertIsNotNone(bdm)
self.assertTrue(uuidutils.is_uuid_like(bdm['uuid']))
def test_block_device_mapping_create_with_blank_uuid(self):
bdm = self._create_bdm({'uuid': ''})
self.assertIsNotNone(bdm)
self.assertTrue(uuidutils.is_uuid_like(bdm['uuid']))
def test_block_device_mapping_create_with_invalid_uuid(self):
self.assertRaises(exception.InvalidUUID,
self._create_bdm, {'uuid': 'invalid-uuid'})
def test_block_device_mapping_create_with_attachment_id(self):
bdm = self._create_bdm({'attachment_id': uuidsentinel.attachment_id})
self.assertEqual(uuidsentinel.attachment_id, bdm.attachment_id)
def test_block_device_mapping_update(self):
bdm = self._create_bdm({})
self.assertIsNone(bdm.attachment_id)
result = db.block_device_mapping_update(
self.ctxt, bdm['id'],
{'destination_type': 'moon',
'attachment_id': uuidsentinel.attachment_id},
legacy=False)
uuid = bdm['instance_uuid']
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(bdm_real[0]['destination_type'], 'moon')
self.assertEqual(uuidsentinel.attachment_id, bdm_real[0].attachment_id)
# Also make sure the update call returned correct data
self.assertEqual(dict(bdm_real[0]),
dict(result))
def test_block_device_mapping_update_or_create(self):
values = {
'instance_uuid': self.instance['uuid'],
'device_name': 'fake_name',
'source_type': 'volume',
'destination_type': 'volume'
}
# check create
bdm = db.block_device_mapping_update_or_create(self.ctxt,
copy.deepcopy(values),
legacy=False)
self.assertTrue(uuidutils.is_uuid_like(bdm['uuid']))
uuid = values['instance_uuid']
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 1)
self.assertEqual(bdm_real[0]['device_name'], 'fake_name')
# check update
bdm0 = copy.deepcopy(values)
bdm0['destination_type'] = 'camelot'
db.block_device_mapping_update_or_create(self.ctxt, bdm0,
legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 1)
bdm_real = bdm_real[0]
self.assertEqual(bdm_real['device_name'], 'fake_name')
self.assertEqual(bdm_real['destination_type'], 'camelot')
# check create without device_name
bdm1 = copy.deepcopy(values)
bdm1['device_name'] = None
db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
with_device_name = [b for b in bdms if b['device_name'] is not None]
without_device_name = [b for b in bdms if b['device_name'] is None]
self.assertEqual(2, len(bdms))
self.assertEqual(len(with_device_name), 1,
'expected 1 bdm with device_name, found %d' %
len(with_device_name))
self.assertEqual(len(without_device_name), 1,
'expected 1 bdm without device_name, found %d' %
len(without_device_name))
# check create multiple devices without device_name
bdm2 = dict(values)
bdm2['device_name'] = None
db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
with_device_name = [b for b in bdms if b['device_name'] is not None]
without_device_name = [b for b in bdms if b['device_name'] is None]
self.assertEqual(len(with_device_name), 1,
'expected 1 bdm with device_name, found %d' %
len(with_device_name))
self.assertEqual(len(without_device_name), 2,
'expected 2 bdms without device_name, found %d' %
len(without_device_name))
def test_block_device_mapping_update_or_create_with_uuid(self):
# Test that we are able to change device_name when calling
# block_device_mapping_update_or_create with a uuid.
bdm = self._create_bdm({})
values = {
'uuid': bdm['uuid'],
'instance_uuid': bdm['instance_uuid'],
'device_name': 'foobar',
}
db.block_device_mapping_update_or_create(self.ctxt, values,
legacy=False)
real_bdms = db.block_device_mapping_get_all_by_instance(
self.ctxt, bdm['instance_uuid'])
self.assertEqual(1, len(real_bdms))
self.assertEqual('foobar', real_bdms[0]['device_name'])
def test_block_device_mapping_update_or_create_with_blank_uuid(self):
        # Test that block_device_mapping_update_or_create generates a new
        # uuid when called with a blank uuid.
values = {
'uuid': '',
'instance_uuid': uuidsentinel.instance,
'device_name': 'foobar',
}
db.block_device_mapping_update_or_create(self.ctxt, values)
real_bdms = db.block_device_mapping_get_all_by_instance(
self.ctxt, uuidsentinel.instance)
self.assertEqual(1, len(real_bdms))
self.assertTrue(uuidutils.is_uuid_like(real_bdms[0]['uuid']))
def test_block_device_mapping_update_or_create_with_invalid_uuid(self):
# Test that create with block_device_mapping_update_or_create raises an
# exception if given an invalid uuid
values = {
'uuid': 'invalid-uuid',
'instance_uuid': uuidsentinel.instance,
'device_name': 'foobar',
}
self.assertRaises(exception.InvalidUUID,
db.block_device_mapping_update_or_create,
self.ctxt, values)
def test_block_device_mapping_update_or_create_multiple_ephemeral(self):
uuid = self.instance['uuid']
values = {
'instance_uuid': uuid,
'source_type': 'blank',
'guest_format': 'myformat',
}
bdm1 = dict(values)
bdm1['device_name'] = '/dev/sdb'
db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
bdm2 = dict(values)
bdm2['device_name'] = '/dev/sdc'
db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
bdm_real = sorted(
db.block_device_mapping_get_all_by_instance(self.ctxt, uuid),
key=lambda bdm: bdm['device_name']
)
self.assertEqual(len(bdm_real), 2)
for bdm, device_name in zip(bdm_real, ['/dev/sdb', '/dev/sdc']):
self.assertEqual(bdm['device_name'], device_name)
self.assertEqual(bdm['guest_format'], 'myformat')
def test_block_device_mapping_update_or_create_check_remove_virt(self):
uuid = self.instance['uuid']
values = {
'instance_uuid': uuid,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': 'swap',
}
# check that old swap bdms are deleted on create
val1 = dict(values)
val1['device_name'] = 'device1'
db.block_device_mapping_create(self.ctxt, val1, legacy=False)
val2 = dict(values)
val2['device_name'] = 'device2'
db.block_device_mapping_update_or_create(self.ctxt, val2, legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 1)
bdm_real = bdm_real[0]
self.assertEqual(bdm_real['device_name'], 'device2')
self.assertEqual(bdm_real['source_type'], 'blank')
self.assertEqual(bdm_real['guest_format'], 'swap')
db.block_device_mapping_destroy(self.ctxt, bdm_real['id'])
def test_block_device_mapping_get_all_by_instance_uuids(self):
uuid1 = self.instance['uuid']
uuid2 = db.instance_create(self.ctxt, {})['uuid']
bdms_values = [{'instance_uuid': uuid1,
'device_name': '/dev/vda'},
{'instance_uuid': uuid2,
'device_name': '/dev/vdb'},
{'instance_uuid': uuid2,
'device_name': '/dev/vdc'}]
for bdm in bdms_values:
self._create_bdm(bdm)
bdms = db.block_device_mapping_get_all_by_instance_uuids(
self.ctxt, [])
self.assertEqual(len(bdms), 0)
bdms = db.block_device_mapping_get_all_by_instance_uuids(
self.ctxt, [uuid2])
self.assertEqual(len(bdms), 2)
bdms = db.block_device_mapping_get_all_by_instance_uuids(
self.ctxt, [uuid1, uuid2])
self.assertEqual(len(bdms), 3)
def test_block_device_mapping_get_all_by_instance(self):
uuid1 = self.instance['uuid']
uuid2 = db.instance_create(self.ctxt, {})['uuid']
bdms_values = [{'instance_uuid': uuid1,
'device_name': '/dev/vda'},
{'instance_uuid': uuid2,
'device_name': '/dev/vdb'},
{'instance_uuid': uuid2,
'device_name': '/dev/vdc'}]
for bdm in bdms_values:
self._create_bdm(bdm)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid1)
self.assertEqual(len(bdms), 1)
self.assertEqual(bdms[0]['device_name'], '/dev/vda')
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid2)
self.assertEqual(len(bdms), 2)
def test_block_device_mapping_destroy(self):
bdm = self._create_bdm({})
db.block_device_mapping_destroy(self.ctxt, bdm['id'])
bdm = db.block_device_mapping_get_all_by_instance(self.ctxt,
bdm['instance_uuid'])
self.assertEqual(len(bdm), 0)
def test_block_device_mapping_destroy_by_instance_and_volume(self):
vol_id1 = '69f5c254-1a5b-4fff-acf7-cb369904f58f'
vol_id2 = '69f5c254-1a5b-4fff-acf7-cb369904f59f'
self._create_bdm({'device_name': '/dev/vda', 'volume_id': vol_id1})
self._create_bdm({'device_name': '/dev/vdb', 'volume_id': vol_id2})
uuid = self.instance['uuid']
db.block_device_mapping_destroy_by_instance_and_volume(self.ctxt, uuid,
vol_id1)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdms), 1)
self.assertEqual(bdms[0]['device_name'], '/dev/vdb')
def test_block_device_mapping_destroy_by_instance_and_device(self):
self._create_bdm({'device_name': '/dev/vda'})
self._create_bdm({'device_name': '/dev/vdb'})
uuid = self.instance['uuid']
params = (self.ctxt, uuid, '/dev/vdb')
db.block_device_mapping_destroy_by_instance_and_device(*params)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdms), 1)
self.assertEqual(bdms[0]['device_name'], '/dev/vda')
def test_block_device_mapping_get_all_by_volume_id(self):
self._create_bdm({'volume_id': 'fake_id'})
self._create_bdm({'volume_id': 'fake_id'})
bdms = db.block_device_mapping_get_all_by_volume_id(self.ctxt,
'fake_id')
self.assertEqual(bdms[0]['volume_id'], 'fake_id')
self.assertEqual(bdms[1]['volume_id'], 'fake_id')
self.assertEqual(2, len(bdms))
def test_block_device_mapping_get_all_by_volume_id_join_instance(self):
self._create_bdm({'volume_id': 'fake_id'})
bdms = db.block_device_mapping_get_all_by_volume_id(self.ctxt,
'fake_id',
['instance'])
self.assertEqual(bdms[0]['volume_id'], 'fake_id')
self.assertEqual(bdms[0]['instance']['uuid'], self.instance['uuid'])
def test_block_device_mapping_get_by_instance_and_volume_id(self):
self._create_bdm({'volume_id': 'fake_id'})
bdm = db.block_device_mapping_get_by_instance_and_volume_id(self.ctxt,
'fake_id', self.instance['uuid'])
self.assertEqual(bdm['volume_id'], 'fake_id')
self.assertEqual(bdm['instance_uuid'], self.instance['uuid'])
def test_block_device_mapping_get_by_instance_and_volume_id_multiplebdms(
self):
self._create_bdm({'volume_id': 'fake_id',
'instance_uuid': self.instance['uuid']})
self._create_bdm({'volume_id': 'fake_id',
'instance_uuid': self.instance['uuid']})
db_bdm = db.block_device_mapping_get_by_instance_and_volume_id(
self.ctxt, 'fake_id', self.instance['uuid'])
self.assertIsNotNone(db_bdm)
self.assertEqual(self.instance['uuid'], db_bdm['instance_uuid'])
def test_block_device_mapping_get_by_instance_and_volume_id_multiattach(
self):
self.instance2 = db.instance_create(self.ctxt, {})
self._create_bdm({'volume_id': 'fake_id',
'instance_uuid': self.instance['uuid']})
self._create_bdm({'volume_id': 'fake_id',
'instance_uuid': self.instance2['uuid']})
bdm = db.block_device_mapping_get_by_instance_and_volume_id(self.ctxt,
'fake_id', self.instance['uuid'])
self.assertEqual(bdm['volume_id'], 'fake_id')
self.assertEqual(bdm['instance_uuid'], self.instance['uuid'])
bdm2 = db.block_device_mapping_get_by_instance_and_volume_id(
self.ctxt, 'fake_id', self.instance2['uuid'])
self.assertEqual(bdm2['volume_id'], 'fake_id')
self.assertEqual(bdm2['instance_uuid'], self.instance2['uuid'])
class AgentBuildTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.agent_build_* methods."""
def setUp(self):
super(AgentBuildTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_agent_build_create_and_get_all(self):
self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
agent_build = db.agent_build_create(self.ctxt, {'os': 'GNU/HURD'})
all_agent_builds = db.agent_build_get_all(self.ctxt)
self.assertEqual(1, len(all_agent_builds))
self._assertEqualObjects(agent_build, all_agent_builds[0])
def test_agent_build_get_by_triple(self):
agent_build = db.agent_build_create(
self.ctxt, {'hypervisor': 'kvm', 'os': 'FreeBSD',
'architecture': fields.Architecture.X86_64})
self.assertIsNone(db.agent_build_get_by_triple(
self.ctxt, 'kvm', 'FreeBSD', 'i386'))
self._assertEqualObjects(agent_build, db.agent_build_get_by_triple(
self.ctxt, 'kvm', 'FreeBSD', fields.Architecture.X86_64))
def test_agent_build_destroy(self):
agent_build = db.agent_build_create(self.ctxt, {})
self.assertEqual(1, len(db.agent_build_get_all(self.ctxt)))
db.agent_build_destroy(self.ctxt, agent_build.id)
self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
def test_agent_build_update(self):
agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
db.agent_build_update(self.ctxt, agent_build.id, {'os': 'ReactOS'})
self.assertEqual('ReactOS', db.agent_build_get_all(self.ctxt)[0].os)
def test_agent_build_destroy_destroyed(self):
agent_build = db.agent_build_create(self.ctxt, {})
db.agent_build_destroy(self.ctxt, agent_build.id)
self.assertRaises(exception.AgentBuildNotFound,
db.agent_build_destroy, self.ctxt, agent_build.id)
def test_agent_build_update_destroyed(self):
agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
db.agent_build_destroy(self.ctxt, agent_build.id)
self.assertRaises(exception.AgentBuildNotFound,
db.agent_build_update, self.ctxt, agent_build.id, {'os': 'OS/2'})
def test_agent_build_exists(self):
values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
'architecture': fields.Architecture.X86_64}
db.agent_build_create(self.ctxt, values)
self.assertRaises(exception.AgentBuildExists, db.agent_build_create,
self.ctxt, values)
def test_agent_build_get_all_by_hypervisor(self):
values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
'architecture': fields.Architecture.X86_64}
created = db.agent_build_create(self.ctxt, values)
actual = db.agent_build_get_all(self.ctxt, hypervisor='kvm')
self._assertEqualListsOfObjects([created], actual)
class VirtualInterfaceTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(VirtualInterfaceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.instance_uuid = db.instance_create(self.ctxt, {})['uuid']
values = {'host': 'localhost', 'project_id': 'project1'}
self.network = db.network_create_safe(self.ctxt, values)
def _get_base_values(self):
return {
'instance_uuid': self.instance_uuid,
'address': 'fake_address',
'network_id': self.network['id'],
'uuid': uuidutils.generate_uuid(),
'tag': 'fake-tag',
}
def _create_virt_interface(self, values):
v = self._get_base_values()
v.update(values)
return db.virtual_interface_create(self.ctxt, v)
def test_virtual_interface_create(self):
vif = self._create_virt_interface({})
self.assertIsNotNone(vif['id'])
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at', 'uuid']
self._assertEqualObjects(vif, self._get_base_values(), ignored_keys)
def test_virtual_interface_create_with_duplicate_address(self):
vif = self._create_virt_interface({})
self.assertRaises(exception.VirtualInterfaceCreateException,
self._create_virt_interface, {"uuid": vif['uuid']})
def test_virtual_interface_get(self):
vifs = [self._create_virt_interface({'address': 'a'}),
self._create_virt_interface({'address': 'b'})]
for vif in vifs:
real_vif = db.virtual_interface_get(self.ctxt, vif['id'])
self._assertEqualObjects(vif, real_vif)
def test_virtual_interface_get_by_address(self):
vifs = [self._create_virt_interface({'address': 'first'}),
self._create_virt_interface({'address': 'second'})]
for vif in vifs:
real_vif = db.virtual_interface_get_by_address(self.ctxt,
vif['address'])
self._assertEqualObjects(vif, real_vif)
def test_virtual_interface_get_by_address_not_found(self):
self.assertIsNone(db.virtual_interface_get_by_address(self.ctxt,
"i.nv.ali.ip"))
@mock.patch.object(query.Query, 'first', side_effect=db_exc.DBError())
def test_virtual_interface_get_by_address_data_error_exception(self,
mock_query):
self.assertRaises(exception.InvalidIpAddressError,
db.virtual_interface_get_by_address,
self.ctxt,
"i.nv.ali.ip")
mock_query.assert_called_once_with()
def test_virtual_interface_get_by_uuid(self):
vifs = [self._create_virt_interface({"address": "address_1"}),
self._create_virt_interface({"address": "address_2"})]
for vif in vifs:
real_vif = db.virtual_interface_get_by_uuid(self.ctxt, vif['uuid'])
self._assertEqualObjects(vif, real_vif)
def test_virtual_interface_get_by_instance(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
vifs1 = [self._create_virt_interface({'address': 'fake1'}),
self._create_virt_interface({'address': 'fake2'})]
        # multiple NICs on the same instance
vifs2 = [self._create_virt_interface({'address': 'fake3',
'instance_uuid': inst_uuid2}),
self._create_virt_interface({'address': 'fake4',
'instance_uuid': inst_uuid2})]
vifs1_real = db.virtual_interface_get_by_instance(self.ctxt,
self.instance_uuid)
vifs2_real = db.virtual_interface_get_by_instance(self.ctxt,
inst_uuid2)
self._assertEqualListsOfObjects(vifs1, vifs1_real)
self._assertEqualOrderedListOfObjects(vifs2, vifs2_real)
def test_virtual_interface_get_by_instance_and_network(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = {'host': 'localhost', 'project_id': 'project2'}
network_id = db.network_create_safe(self.ctxt, values)['id']
vifs = [self._create_virt_interface({'address': 'fake1'}),
self._create_virt_interface({'address': 'fake2',
'network_id': network_id,
'instance_uuid': inst_uuid2}),
self._create_virt_interface({'address': 'fake3',
'instance_uuid': inst_uuid2})]
for vif in vifs:
params = (self.ctxt, vif['instance_uuid'], vif['network_id'])
r_vif = db.virtual_interface_get_by_instance_and_network(*params)
self._assertEqualObjects(r_vif, vif)
def test_virtual_interface_delete_by_instance(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = [dict(address='fake1'), dict(address='fake2'),
dict(address='fake3', instance_uuid=inst_uuid2)]
for vals in values:
self._create_virt_interface(vals)
db.virtual_interface_delete_by_instance(self.ctxt, self.instance_uuid)
real_vifs1 = db.virtual_interface_get_by_instance(self.ctxt,
self.instance_uuid)
real_vifs2 = db.virtual_interface_get_by_instance(self.ctxt,
inst_uuid2)
self.assertEqual(len(real_vifs1), 0)
self.assertEqual(len(real_vifs2), 1)
def test_virtual_interface_delete(self):
values = [dict(address='fake1'), dict(address='fake2'),
dict(address='fake3')]
vifs = []
for vals in values:
vifs.append(self._create_virt_interface(
dict(vals, instance_uuid=self.instance_uuid)))
db.virtual_interface_delete(self.ctxt, vifs[0]['id'])
real_vifs = db.virtual_interface_get_by_instance(self.ctxt,
self.instance_uuid)
self.assertEqual(2, len(real_vifs))
def test_virtual_interface_get_all(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = [dict(address='fake1'), dict(address='fake2'),
dict(address='fake3', instance_uuid=inst_uuid2)]
vifs = [self._create_virt_interface(val) for val in values]
real_vifs = db.virtual_interface_get_all(self.ctxt)
self._assertEqualListsOfObjects(vifs, real_vifs)
def test_virtual_interface_update(self):
instance_uuid = db.instance_create(self.ctxt, {})['uuid']
network_id = db.network_create_safe(self.ctxt, {})['id']
create = {'address': 'fake1',
'network_id': network_id,
'instance_uuid': instance_uuid,
'uuid': uuidsentinel.vif_uuid,
'tag': 'foo'}
update = {'tag': 'bar'}
updated = {'address': 'fake1',
'network_id': network_id,
'instance_uuid': instance_uuid,
'uuid': uuidsentinel.vif_uuid,
'tag': 'bar',
'deleted': 0}
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
vif_addr = db.virtual_interface_create(self.ctxt, create)['address']
db.virtual_interface_update(self.ctxt, vif_addr, update)
updated_vif = db.virtual_interface_get_by_address(self.ctxt,
updated['address'])
self._assertEqualObjects(updated, updated_vif, ignored_keys)
@mock.patch('time.sleep', new=lambda x: None)
class NetworkTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.network_* methods."""
def setUp(self):
super(NetworkTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_associated_fixed_ip(self, host, cidr, ip):
network = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'cidr': cidr})
self.assertFalse(db.network_in_use_on_host(self.ctxt, network.id,
host))
instance = db.instance_create(self.ctxt,
{'project_id': 'project1', 'host': host})
virtual_interface = db.virtual_interface_create(self.ctxt,
{'instance_uuid': instance.uuid, 'network_id': network.id,
'address': ip})
db.fixed_ip_create(self.ctxt, {'address': ip,
'network_id': network.id, 'allocated': True,
'virtual_interface_id': virtual_interface.id})
db.fixed_ip_associate(self.ctxt, ip, instance.uuid,
network.id, virtual_interface_id=virtual_interface['id'])
return network, instance
def test_network_get_associated_default_route(self):
network, instance = self._get_associated_fixed_ip('host.net',
'192.0.2.0/30', '192.0.2.1')
network2 = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'cidr': '192.0.3.0/30'})
ip = '192.0.3.1'
virtual_interface = db.virtual_interface_create(self.ctxt,
{'instance_uuid': instance.uuid, 'network_id': network2.id,
'address': ip})
db.fixed_ip_create(self.ctxt, {'address': ip,
'network_id': network2.id, 'allocated': True,
'virtual_interface_id': virtual_interface.id})
db.fixed_ip_associate(self.ctxt, ip, instance.uuid,
network2.id)
data = db.network_get_associated_fixed_ips(self.ctxt, network.id)
self.assertEqual(1, len(data))
self.assertTrue(data[0]['default_route'])
data = db.network_get_associated_fixed_ips(self.ctxt, network2.id)
self.assertEqual(1, len(data))
self.assertFalse(data[0]['default_route'])
def test_network_get_associated_fixed_ips(self):
network, instance = self._get_associated_fixed_ip('host.net',
'192.0.2.0/30', '192.0.2.1')
data = db.network_get_associated_fixed_ips(self.ctxt, network.id)
self.assertEqual(1, len(data))
self.assertEqual('192.0.2.1', data[0]['address'])
self.assertEqual('192.0.2.1', data[0]['vif_address'])
self.assertEqual(instance.uuid, data[0]['instance_uuid'])
        self.assertTrue(data[0]['allocated'])
def test_network_create_safe(self):
values = {'host': 'localhost', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
self.assertEqual(36, len(network['uuid']))
db_network = db.network_get(self.ctxt, network['id'])
self._assertEqualObjects(network, db_network)
def test_network_create_with_duplicate_vlan(self):
values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 1}
db.network_create_safe(self.ctxt, values1)
self.assertRaises(exception.DuplicateVlan,
db.network_create_safe, self.ctxt, values2)
def test_network_delete_safe(self):
values = {'host': 'localhost', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
db.network_get(self.ctxt, network['id'])
values = {'network_id': network['id'], 'address': '192.168.1.5'}
address1 = db.fixed_ip_create(self.ctxt, values)['address']
values = {'network_id': network['id'],
'address': '192.168.1.6',
'allocated': True}
address2 = db.fixed_ip_create(self.ctxt, values)['address']
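        # Deleting the network must fail while an allocated fixed IP
        # still exists on it.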
self.assertRaises(exception.NetworkInUse,
db.network_delete_safe, self.ctxt, network['id'])
db.fixed_ip_update(self.ctxt, address2, {'allocated': False})
network = db.network_delete_safe(self.ctxt, network['id'])
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address, self.ctxt, address1)
ctxt = self.ctxt.elevated(read_deleted='yes')
fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
self.assertTrue(fixed_ip['deleted'])
def test_network_in_use_on_host(self):
values = {'host': 'foo', 'hostname': 'myname'}
instance = db.instance_create(self.ctxt, values)
values = {'address': '192.168.1.5', 'instance_uuid': instance['uuid']}
vif = db.virtual_interface_create(self.ctxt, values)
values = {'address': '192.168.1.6',
'network_id': 1,
'allocated': True,
'instance_uuid': instance['uuid'],
'virtual_interface_id': vif['id']}
db.fixed_ip_create(self.ctxt, values)
self.assertTrue(db.network_in_use_on_host(self.ctxt, 1, 'foo'))
self.assertFalse(db.network_in_use_on_host(self.ctxt, 1, 'bar'))
def test_network_update_nonexistent(self):
self.assertRaises(exception.NetworkNotFound,
db.network_update, self.ctxt, 123456, {})
def test_network_update_with_duplicate_vlan(self):
values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 2}
network_ref = db.network_create_safe(self.ctxt, values1)
db.network_create_safe(self.ctxt, values2)
self.assertRaises(exception.DuplicateVlan,
db.network_update, self.ctxt,
network_ref["id"], values2)
def test_network_update(self):
network = db.network_create_safe(self.ctxt, {'project_id': 'project1',
'vlan': 1, 'host': 'test.com'})
db.network_update(self.ctxt, network.id, {'vlan': 2})
network_new = db.network_get(self.ctxt, network.id)
self.assertEqual(2, network_new.vlan)
def test_network_set_host_nonexistent_network(self):
self.assertRaises(exception.NetworkNotFound, db.network_set_host,
self.ctxt, 123456, 'nonexistent')
def test_network_set_host_already_set_correct(self):
values = {'host': 'example.com', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
self.assertIsNone(db.network_set_host(self.ctxt, network.id,
'example.com'))
def test_network_set_host_already_set_incorrect(self):
values = {'host': 'example.com', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
self.assertIsNone(db.network_set_host(self.ctxt, network.id,
'new.example.com'))
def test_network_set_host_with_initially_no_host(self):
values = {'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
db.network_set_host(self.ctxt, network.id, 'example.com')
self.assertEqual('example.com',
db.network_get(self.ctxt, network.id).host)
def test_network_set_host_succeeds_retry_on_deadlock(self):
values = {'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
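        # The first Query.update call raises DBDeadlock, which triggers the
        # retry decorator; the second call reports one row updated and the
        # operation succeeds.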
def fake_update(params):
if mock_update.call_count == 1:
raise db_exc.DBDeadlock()
else:
return 1
with mock.patch('sqlalchemy.orm.query.Query.update',
side_effect=fake_update) as mock_update:
db.network_set_host(self.ctxt, network.id, 'example.com')
self.assertEqual(2, mock_update.call_count)
def test_network_set_host_succeeds_retry_on_no_rows_updated(self):
values = {'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
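        # The first Query.update call reports zero rows updated, which
        # triggers a retry; the second call reports one row updated.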
def fake_update(params):
if mock_update.call_count == 1:
return 0
else:
return 1
with mock.patch('sqlalchemy.orm.query.Query.update',
side_effect=fake_update) as mock_update:
db.network_set_host(self.ctxt, network.id, 'example.com')
self.assertEqual(2, mock_update.call_count)
def test_network_set_host_failed_with_retry_on_no_rows_updated(self):
values = {'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
with mock.patch('sqlalchemy.orm.query.Query.update',
return_value=0) as mock_update:
self.assertRaises(exception.NetworkSetHostFailed,
db.network_set_host, self.ctxt, network.id,
'example.com')
# 5 retries + initial attempt
self.assertEqual(6, mock_update.call_count)
def test_network_get_all_by_host(self):
self.assertEqual([],
db.network_get_all_by_host(self.ctxt, 'example.com'))
host = 'h1.example.com'
# network with host set
net1 = db.network_create_safe(self.ctxt, {'host': host})
self._assertEqualListsOfObjects([net1],
db.network_get_all_by_host(self.ctxt, host))
# network with fixed ip with host set
net2 = db.network_create_safe(self.ctxt, {})
db.fixed_ip_create(self.ctxt, {'host': host, 'network_id': net2.id})
db.network_get_all_by_host(self.ctxt, host)
self._assertEqualListsOfObjects([net1, net2],
db.network_get_all_by_host(self.ctxt, host))
# network with instance with host set
net3 = db.network_create_safe(self.ctxt, {})
instance = db.instance_create(self.ctxt, {'host': host})
db.fixed_ip_create(self.ctxt, {'network_id': net3.id,
'instance_uuid': instance.uuid})
self._assertEqualListsOfObjects([net1, net2, net3],
db.network_get_all_by_host(self.ctxt, host))
def test_network_get_by_cidr(self):
cidr = '192.0.2.0/30'
cidr_v6 = '2001:db8:1::/64'
network = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'cidr': cidr, 'cidr_v6': cidr_v6})
self._assertEqualObjects(network,
db.network_get_by_cidr(self.ctxt, cidr))
self._assertEqualObjects(network,
db.network_get_by_cidr(self.ctxt, cidr_v6))
def test_network_get_by_cidr_nonexistent(self):
self.assertRaises(exception.NetworkNotFoundForCidr,
db.network_get_by_cidr, self.ctxt, '192.0.2.0/30')
def test_network_get_by_uuid(self):
network = db.network_create_safe(self.ctxt,
{'project_id': 'project_1'})
self._assertEqualObjects(network,
db.network_get_by_uuid(self.ctxt, network.uuid))
def test_network_get_by_uuid_nonexistent(self):
self.assertRaises(exception.NetworkNotFoundForUUID,
db.network_get_by_uuid, self.ctxt, 'non-existent-uuid')
def test_network_get_all_by_uuids_no_networks(self):
self.assertRaises(exception.NoNetworksFound,
db.network_get_all_by_uuids, self.ctxt, ['non-existent-uuid'])
def test_network_get_all_by_uuids(self):
net1 = db.network_create_safe(self.ctxt, {})
net2 = db.network_create_safe(self.ctxt, {})
self._assertEqualListsOfObjects([net1, net2],
db.network_get_all_by_uuids(self.ctxt, [net1.uuid, net2.uuid]))
def test_network_get_all_no_networks(self):
self.assertRaises(exception.NoNetworksFound,
db.network_get_all, self.ctxt)
def test_network_get_all(self):
network = db.network_create_safe(self.ctxt, {})
network_db = db.network_get_all(self.ctxt)
self.assertEqual(1, len(network_db))
self._assertEqualObjects(network, network_db[0])
def test_network_get_all_admin_user(self):
network1 = db.network_create_safe(self.ctxt, {})
network2 = db.network_create_safe(self.ctxt,
{'project_id': 'project1'})
self._assertEqualListsOfObjects([network1, network2],
db.network_get_all(self.ctxt,
project_only=True))
def test_network_get_all_normal_user(self):
normal_ctxt = context.RequestContext('fake', 'fake')
db.network_create_safe(self.ctxt, {})
db.network_create_safe(self.ctxt, {'project_id': 'project1'})
network1 = db.network_create_safe(self.ctxt,
{'project_id': 'fake'})
network_db = db.network_get_all(normal_ctxt, project_only=True)
self.assertEqual(1, len(network_db))
self._assertEqualObjects(network1, network_db[0])
def test_network_get(self):
network = db.network_create_safe(self.ctxt, {})
self._assertEqualObjects(db.network_get(self.ctxt, network.id),
network)
db.network_delete_safe(self.ctxt, network.id)
self.assertRaises(exception.NetworkNotFound,
db.network_get, self.ctxt, network.id)
def test_network_associate(self):
network = db.network_create_safe(self.ctxt, {})
self.assertIsNone(network.project_id)
db.network_associate(self.ctxt, "project1", network.id)
self.assertEqual("project1", db.network_get(self.ctxt,
network.id).project_id)
    def test_network_disassociate(self):
network = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'host': 'test.net'})
# disassociate project
db.network_disassociate(self.ctxt, network.id, False, True)
self.assertIsNone(db.network_get(self.ctxt, network.id).project_id)
# disassociate host
db.network_disassociate(self.ctxt, network.id, True, False)
self.assertIsNone(db.network_get(self.ctxt, network.id).host)
def test_network_count_reserved_ips(self):
net = db.network_create_safe(self.ctxt, {})
self.assertEqual(0, db.network_count_reserved_ips(self.ctxt, net.id))
db.fixed_ip_create(self.ctxt, {'network_id': net.id,
'reserved': True})
self.assertEqual(1, db.network_count_reserved_ips(self.ctxt, net.id))
class KeyPairTestCase(test.TestCase, ModelsObjectComparatorMixin):
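    """Tests for db.api.key_pair_* methods."""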
def setUp(self):
super(KeyPairTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _create_key_pair(self, values):
return db.key_pair_create(self.ctxt, values)
def test_key_pair_create(self):
param = {
'name': 'test_1',
'type': 'ssh',
'user_id': 'test_user_id_1',
'public_key': 'test_public_key_1',
'fingerprint': 'test_fingerprint_1'
}
key_pair = self._create_key_pair(param)
self.assertIsNotNone(key_pair['id'])
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id']
self._assertEqualObjects(key_pair, param, ignored_keys)
def test_key_pair_create_with_duplicate_name(self):
params = {'name': 'test_name', 'user_id': 'test_user_id',
'type': 'ssh'}
self._create_key_pair(params)
self.assertRaises(exception.KeyPairExists, self._create_key_pair,
params)
def test_key_pair_get(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'},
{'name': 'test_2', 'user_id': 'test_user_id_2', 'type': 'ssh'},
{'name': 'test_3', 'user_id': 'test_user_id_3', 'type': 'ssh'}
]
key_pairs = [self._create_key_pair(p) for p in params]
for key in key_pairs:
real_key = db.key_pair_get(self.ctxt, key['user_id'], key['name'])
self._assertEqualObjects(key, real_key)
def test_key_pair_get_no_results(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
self.ctxt, param['user_id'], param['name'])
def test_key_pair_get_deleted(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'}
key_pair_created = self._create_key_pair(param)
db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
self.ctxt, param['user_id'], param['name'])
ctxt = self.ctxt.elevated(read_deleted='yes')
key_pair_deleted = db.key_pair_get(ctxt, param['user_id'],
param['name'])
ignored_keys = ['deleted', 'created_at', 'updated_at', 'deleted_at']
self._assertEqualObjects(key_pair_deleted, key_pair_created,
ignored_keys)
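        # Soft deletion sets the 'deleted' column to the row id.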
self.assertEqual(key_pair_deleted['deleted'], key_pair_deleted['id'])
def test_key_pair_get_all_by_user(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'},
{'name': 'test_2', 'user_id': 'test_user_id_1', 'type': 'ssh'},
{'name': 'test_3', 'user_id': 'test_user_id_2', 'type': 'ssh'}
]
key_pairs_user_1 = [self._create_key_pair(p) for p in params
if p['user_id'] == 'test_user_id_1']
key_pairs_user_2 = [self._create_key_pair(p) for p in params
if p['user_id'] == 'test_user_id_2']
real_keys_1 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_1')
real_keys_2 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_2')
self._assertEqualListsOfObjects(key_pairs_user_1, real_keys_1)
self._assertEqualListsOfObjects(key_pairs_user_2, real_keys_2)
def test_key_pair_get_all_by_user_limit_and_marker(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id', 'type': 'ssh'},
{'name': 'test_2', 'user_id': 'test_user_id', 'type': 'ssh'},
{'name': 'test_3', 'user_id': 'test_user_id', 'type': 'ssh'}
]
# check all 3 keypairs
keys = [self._create_key_pair(p) for p in params]
db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id')
self._assertEqualListsOfObjects(keys, db_keys)
# check only 1 keypair
expected_keys = [keys[0]]
db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id',
limit=1)
self._assertEqualListsOfObjects(expected_keys, db_keys)
# check keypairs after 'test_1'
expected_keys = [keys[1], keys[2]]
db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id',
marker='test_1')
self._assertEqualListsOfObjects(expected_keys, db_keys)
        # check only 1 keypair after 'test_1'
expected_keys = [keys[1]]
db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id',
limit=1,
marker='test_1')
self._assertEqualListsOfObjects(expected_keys, db_keys)
# check non-existing keypair
self.assertRaises(exception.MarkerNotFound,
db.key_pair_get_all_by_user,
self.ctxt, 'test_user_id',
limit=1, marker='unknown_kp')
def test_key_pair_get_all_by_user_different_users(self):
params1 = [
{'name': 'test_1', 'user_id': 'test_user_1', 'type': 'ssh'},
{'name': 'test_2', 'user_id': 'test_user_1', 'type': 'ssh'},
{'name': 'test_3', 'user_id': 'test_user_1', 'type': 'ssh'}
]
params2 = [
{'name': 'test_1', 'user_id': 'test_user_2', 'type': 'ssh'},
{'name': 'test_2', 'user_id': 'test_user_2', 'type': 'ssh'},
{'name': 'test_3', 'user_id': 'test_user_2', 'type': 'ssh'}
]
# create keypairs for two users
keys1 = [self._create_key_pair(p) for p in params1]
keys2 = [self._create_key_pair(p) for p in params2]
        # check all 3 keypairs for test_user_1
db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_1')
self._assertEqualListsOfObjects(keys1, db_keys)
        # check all 3 keypairs for test_user_2
db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_2')
self._assertEqualListsOfObjects(keys2, db_keys)
# check only 1 keypair for test_user_1
expected_keys = [keys1[0]]
db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_1',
limit=1)
self._assertEqualListsOfObjects(expected_keys, db_keys)
# check keypairs after 'test_1' for test_user_2
expected_keys = [keys2[1], keys2[2]]
db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_2',
marker='test_1')
self._assertEqualListsOfObjects(expected_keys, db_keys)
        # check only 1 keypair after 'test_1' for test_user_1
expected_keys = [keys1[1]]
db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_1',
limit=1,
marker='test_1')
self._assertEqualListsOfObjects(expected_keys, db_keys)
# check non-existing keypair for test_user_2
self.assertRaises(exception.MarkerNotFound,
db.key_pair_get_all_by_user,
self.ctxt, 'test_user_2',
limit=1, marker='unknown_kp')
def test_key_pair_count_by_user(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'},
{'name': 'test_2', 'user_id': 'test_user_id_1', 'type': 'ssh'},
{'name': 'test_3', 'user_id': 'test_user_id_2', 'type': 'ssh'}
]
for p in params:
self._create_key_pair(p)
count_1 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_1')
self.assertEqual(count_1, 2)
count_2 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_2')
self.assertEqual(count_2, 1)
def test_key_pair_destroy(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'}
self._create_key_pair(param)
db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
self.ctxt, param['user_id'], param['name'])
def test_key_pair_destroy_no_such_key(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
self.assertRaises(exception.KeypairNotFound,
db.key_pair_destroy, self.ctxt,
param['user_id'], param['name'])
class QuotaTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.quota_* methods."""
def setUp(self):
super(QuotaTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_quota_create(self):
quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
self.assertEqual(quota.resource, 'resource')
self.assertEqual(quota.hard_limit, 99)
self.assertEqual(quota.project_id, 'project1')
def test_quota_get(self):
quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
quota_db = db.quota_get(self.ctxt, 'project1', 'resource')
self._assertEqualObjects(quota, quota_db)
def test_quota_get_all_by_project(self):
for i in range(3):
for j in range(3):
db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j, j)
for i in range(3):
quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i)
self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
'resource0': 0,
'resource1': 1,
'resource2': 2})
def test_quota_get_all_by_project_and_user(self):
for i in range(3):
for j in range(3):
db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j,
j - 1, user_id='user%d' % i)
for i in range(3):
quotas_db = db.quota_get_all_by_project_and_user(self.ctxt,
'proj%d' % i,
'user%d' % i)
self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
'user_id': 'user%d' % i,
'resource0': -1,
'resource1': 0,
'resource2': 1})
def test_quota_update(self):
db.quota_create(self.ctxt, 'project1', 'resource1', 41)
db.quota_update(self.ctxt, 'project1', 'resource1', 42)
quota = db.quota_get(self.ctxt, 'project1', 'resource1')
self.assertEqual(quota.hard_limit, 42)
self.assertEqual(quota.resource, 'resource1')
self.assertEqual(quota.project_id, 'project1')
def test_quota_update_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
db.quota_update, self.ctxt, 'project1', 'resource1', 42)
def test_quota_get_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
db.quota_get, self.ctxt, 'project1', 'resource1')
def test_quota_destroy_all_by_project(self):
_quota_create(self.ctxt, 'project1', 'user1')
db.quota_destroy_all_by_project(self.ctxt, 'project1')
self.assertEqual(db.quota_get_all_by_project(self.ctxt, 'project1'),
{'project_id': 'project1'})
self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
'project1', 'user1'),
{'project_id': 'project1', 'user_id': 'user1'})
def test_quota_destroy_all_by_project_and_user(self):
_quota_create(self.ctxt, 'project1', 'user1')
db.quota_destroy_all_by_project_and_user(self.ctxt, 'project1',
'user1')
self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
'project1', 'user1'),
{'project_id': 'project1',
'user_id': 'user1'})
def test_quota_create_exists(self):
db.quota_create(self.ctxt, 'project1', 'resource1', 41)
self.assertRaises(exception.QuotaExists, db.quota_create, self.ctxt,
'project1', 'resource1', 42)
class QuotaClassTestCase(test.TestCase, ModelsObjectComparatorMixin):
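    """Tests for db.api.quota_class_* methods."""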
def setUp(self):
super(QuotaClassTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_quota_class_get_default(self):
params = {
'test_resource1': '10',
'test_resource2': '20',
'test_resource3': '30',
}
for res, limit in params.items():
db.quota_class_create(self.ctxt, 'default', res, limit)
defaults = db.quota_class_get_default(self.ctxt)
self.assertEqual(defaults, dict(class_name='default',
test_resource1=10,
test_resource2=20,
test_resource3=30))
def test_quota_class_create(self):
qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
self.assertEqual(qc.class_name, 'class name')
self.assertEqual(qc.resource, 'resource')
self.assertEqual(qc.hard_limit, 42)
def test_quota_class_get(self):
qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
qc_db = db.quota_class_get(self.ctxt, 'class name', 'resource')
self._assertEqualObjects(qc, qc_db)
def test_quota_class_get_nonexistent(self):
self.assertRaises(exception.QuotaClassNotFound, db.quota_class_get,
self.ctxt, 'nonexistent', 'resource')
def test_quota_class_get_all_by_name(self):
for i in range(3):
for j in range(3):
db.quota_class_create(self.ctxt, 'class%d' % i,
'resource%d' % j, j)
for i in range(3):
classes = db.quota_class_get_all_by_name(self.ctxt, 'class%d' % i)
self.assertEqual(classes, {'class_name': 'class%d' % i,
'resource0': 0, 'resource1': 1, 'resource2': 2})
def test_quota_class_update(self):
db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
db.quota_class_update(self.ctxt, 'class name', 'resource', 43)
self.assertEqual(db.quota_class_get(self.ctxt, 'class name',
'resource').hard_limit, 43)
def test_quota_class_update_nonexistent(self):
self.assertRaises(exception.QuotaClassNotFound, db.quota_class_update,
self.ctxt, 'class name', 'resource', 42)
class S3ImageTestCase(test.TestCase):
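    """Tests for db.api.s3_image_* methods."""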
def setUp(self):
super(S3ImageTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.values = [uuidutils.generate_uuid() for i in range(3)]
self.images = [db.s3_image_create(self.ctxt, uuid)
for uuid in self.values]
def test_s3_image_create(self):
for ref in self.images:
self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
self.assertEqual(sorted(self.values),
sorted([ref.uuid for ref in self.images]))
def test_s3_image_get_by_uuid(self):
for uuid in self.values:
ref = db.s3_image_get_by_uuid(self.ctxt, uuid)
self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
self.assertEqual(uuid, ref.uuid)
def test_s3_image_get(self):
self.assertEqual(sorted(self.values),
sorted([db.s3_image_get(self.ctxt, ref.id).uuid
for ref in self.images]))
def test_s3_image_get_not_found(self):
self.assertRaises(exception.ImageNotFound, db.s3_image_get, self.ctxt,
100500)
def test_s3_image_get_by_uuid_not_found(self):
self.assertRaises(exception.ImageNotFound, db.s3_image_get_by_uuid,
self.ctxt, uuidsentinel.uuid1)
class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
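    """Tests for db.api.compute_node_* methods."""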
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(ComputeNodeTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.service_dict = dict(host='host1', binary='nova-compute',
topic=compute_rpcapi.RPC_TOPIC,
report_count=1, disabled=False)
self.service = db.service_create(self.ctxt, self.service_dict)
self.compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
uuid=uuidutils.generate_uuid(),
vcpus_used=0, memory_mb_used=0,
local_gb_used=0, free_ram_mb=1024,
free_disk_gb=2048, hypervisor_type="xen",
hypervisor_version=1, cpu_info="",
running_vms=0, current_workload=0,
service_id=self.service['id'],
host=self.service['host'],
disk_available_least=100,
hypervisor_hostname='abracadabra104',
host_ip='127.0.0.1',
supported_instances='',
pci_stats='',
metrics='',
mapped=0,
extra_resources='',
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0,
stats='', numa_topology='')
# add some random stats
self.stats = dict(num_instances=3, num_proj_12345=2,
num_proj_23456=2, num_vm_building=3)
self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
self.flags(reserved_host_memory_mb=0)
self.flags(reserved_host_disk_mb=0)
self.item = db.compute_node_create(self.ctxt, self.compute_node_dict)
def test_compute_node_create(self):
self._assertEqualObjects(self.compute_node_dict, self.item,
ignored_keys=self._ignored_keys + ['stats'])
new_stats = jsonutils.loads(self.item['stats'])
self.assertEqual(self.stats, new_stats)
def test_compute_node_get_all(self):
nodes = db.compute_node_get_all(self.ctxt)
self.assertEqual(1, len(nodes))
node = nodes[0]
self._assertEqualObjects(self.compute_node_dict, node,
ignored_keys=self._ignored_keys +
['stats', 'service'])
new_stats = jsonutils.loads(node['stats'])
self.assertEqual(self.stats, new_stats)
def test_compute_node_get_all_mapped_less_than(self):
cn = dict(self.compute_node_dict,
hostname='foo',
hypervisor_hostname='foo',
mapped=None,
uuid=uuidutils.generate_uuid())
db.compute_node_create(self.ctxt, cn)
cn = dict(self.compute_node_dict,
hostname='bar',
hypervisor_hostname='nar',
mapped=3,
uuid=uuidutils.generate_uuid())
db.compute_node_create(self.ctxt, cn)
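        # Expect self.item (mapped=0) and the mapped=None node; the node
        # with mapped=3 must be filtered out.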
cns = db.compute_node_get_all_mapped_less_than(self.ctxt, 1)
self.assertEqual(2, len(cns))
def test_compute_node_get_all_by_pagination(self):
service_dict = dict(host='host2', binary='nova-compute',
topic=compute_rpcapi.RPC_TOPIC,
report_count=1, disabled=False)
service = db.service_create(self.ctxt, service_dict)
compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
uuid=uuidsentinel.fake_compute_node,
vcpus_used=0, memory_mb_used=0,
local_gb_used=0, free_ram_mb=1024,
free_disk_gb=2048, hypervisor_type="xen",
hypervisor_version=1, cpu_info="",
running_vms=0, current_workload=0,
service_id=service['id'],
host=service['host'],
disk_available_least=100,
hypervisor_hostname='abcde11',
host_ip='127.0.0.1',
supported_instances='',
pci_stats='',
metrics='',
mapped=0,
extra_resources='',
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0,
stats='', numa_topology='')
stats = dict(num_instances=2, num_proj_12345=1,
num_proj_23456=1, num_vm_building=2)
compute_node_dict['stats'] = jsonutils.dumps(stats)
db.compute_node_create(self.ctxt, compute_node_dict)
nodes = db.compute_node_get_all_by_pagination(self.ctxt,
limit=1, marker=1)
self.assertEqual(1, len(nodes))
node = nodes[0]
self._assertEqualObjects(compute_node_dict, node,
ignored_keys=self._ignored_keys +
['stats', 'service'])
new_stats = jsonutils.loads(node['stats'])
self.assertEqual(stats, new_stats)
nodes = db.compute_node_get_all_by_pagination(self.ctxt)
self.assertEqual(2, len(nodes))
node = nodes[0]
self._assertEqualObjects(self.compute_node_dict, node,
ignored_keys=self._ignored_keys +
['stats', 'service'])
new_stats = jsonutils.loads(node['stats'])
self.assertEqual(self.stats, new_stats)
self.assertRaises(exception.MarkerNotFound,
db.compute_node_get_all_by_pagination,
self.ctxt, limit=1, marker=999)
def test_compute_node_get_all_deleted_compute_node(self):
# Create a service and compute node and ensure we can find its stats;
# delete the service and compute node when done and loop again
for x in range(2, 5):
# Create a service
service_data = self.service_dict.copy()
service_data['host'] = 'host-%s' % x
service = db.service_create(self.ctxt, service_data)
# Create a compute node
compute_node_data = self.compute_node_dict.copy()
compute_node_data['uuid'] = uuidutils.generate_uuid()
compute_node_data['service_id'] = service['id']
compute_node_data['stats'] = jsonutils.dumps(self.stats.copy())
compute_node_data['hypervisor_hostname'] = 'hypervisor-%s' % x
node = db.compute_node_create(self.ctxt, compute_node_data)
# Ensure the "new" compute node is found
nodes = db.compute_node_get_all(self.ctxt)
self.assertEqual(2, len(nodes))
found = None
for n in nodes:
if n['id'] == node['id']:
found = n
break
self.assertIsNotNone(found)
# Now ensure the match has stats!
self.assertNotEqual(jsonutils.loads(found['stats']), {})
# Now delete the newly-created compute node to ensure the related
# compute node stats are wiped in a cascaded fashion
db.compute_node_delete(self.ctxt, node['id'])
# Clean up the service
db.service_destroy(self.ctxt, service['id'])
def test_compute_node_get_all_mult_compute_nodes_one_service_entry(self):
service_data = self.service_dict.copy()
service_data['host'] = 'host2'
service = db.service_create(self.ctxt, service_data)
existing_node = dict(self.item.items())
expected = [existing_node]
for name in ['bm_node1', 'bm_node2']:
compute_node_data = self.compute_node_dict.copy()
compute_node_data['uuid'] = uuidutils.generate_uuid()
compute_node_data['service_id'] = service['id']
compute_node_data['stats'] = jsonutils.dumps(self.stats)
compute_node_data['hypervisor_hostname'] = name
node = db.compute_node_create(self.ctxt, compute_node_data)
node = dict(node)
expected.append(node)
result = sorted(db.compute_node_get_all(self.ctxt),
key=lambda n: n['hypervisor_hostname'])
self._assertEqualListsOfObjects(expected, result,
ignored_keys=['stats'])
def test_compute_node_get_all_by_host_with_distinct_hosts(self):
# Create another service with another node
service2 = self.service_dict.copy()
service2['host'] = 'host2'
db.service_create(self.ctxt, service2)
compute_node_another_host = self.compute_node_dict.copy()
compute_node_another_host['uuid'] = uuidutils.generate_uuid()
compute_node_another_host['stats'] = jsonutils.dumps(self.stats)
compute_node_another_host['hypervisor_hostname'] = 'node_2'
compute_node_another_host['host'] = 'host2'
node = db.compute_node_create(self.ctxt, compute_node_another_host)
result = db.compute_node_get_all_by_host(self.ctxt, 'host1')
self._assertEqualListsOfObjects([self.item], result)
result = db.compute_node_get_all_by_host(self.ctxt, 'host2')
self._assertEqualListsOfObjects([node], result)
def test_compute_node_get_all_by_host_with_same_host(self):
# Create another node on top of the same service
compute_node_same_host = self.compute_node_dict.copy()
compute_node_same_host['uuid'] = uuidutils.generate_uuid()
compute_node_same_host['stats'] = jsonutils.dumps(self.stats)
compute_node_same_host['hypervisor_hostname'] = 'node_3'
node = db.compute_node_create(self.ctxt, compute_node_same_host)
expected = [self.item, node]
result = sorted(db.compute_node_get_all_by_host(
self.ctxt, 'host1'),
key=lambda n: n['hypervisor_hostname'])
ignored = ['stats']
self._assertEqualListsOfObjects(expected, result,
ignored_keys=ignored)
def test_compute_node_get_all_by_host_not_found(self):
self.assertRaises(exception.ComputeHostNotFound,
db.compute_node_get_all_by_host, self.ctxt, 'wrong')
def test_compute_nodes_get_by_service_id_one_result(self):
expected = [self.item]
result = db.compute_nodes_get_by_service_id(
self.ctxt, self.service['id'])
ignored = ['stats']
self._assertEqualListsOfObjects(expected, result,
ignored_keys=ignored)
def test_compute_nodes_get_by_service_id_multiple_results(self):
# Create another node on top of the same service
compute_node_same_host = self.compute_node_dict.copy()
compute_node_same_host['uuid'] = uuidutils.generate_uuid()
compute_node_same_host['stats'] = jsonutils.dumps(self.stats)
compute_node_same_host['hypervisor_hostname'] = 'node_2'
node = db.compute_node_create(self.ctxt, compute_node_same_host)
expected = [self.item, node]
result = sorted(db.compute_nodes_get_by_service_id(
self.ctxt, self.service['id']),
key=lambda n: n['hypervisor_hostname'])
ignored = ['stats']
self._assertEqualListsOfObjects(expected, result,
ignored_keys=ignored)
def test_compute_nodes_get_by_service_id_not_found(self):
self.assertRaises(exception.ServiceNotFound,
db.compute_nodes_get_by_service_id, self.ctxt,
'fake')
def test_compute_node_get_by_host_and_nodename(self):
# Create another node on top of the same service
compute_node_same_host = self.compute_node_dict.copy()
compute_node_same_host['uuid'] = uuidutils.generate_uuid()
compute_node_same_host['stats'] = jsonutils.dumps(self.stats)
compute_node_same_host['hypervisor_hostname'] = 'node_2'
node = db.compute_node_create(self.ctxt, compute_node_same_host)
expected = node
result = db.compute_node_get_by_host_and_nodename(
self.ctxt, 'host1', 'node_2')
self._assertEqualObjects(expected, result,
ignored_keys=self._ignored_keys +
['stats', 'service'])
def test_compute_node_get_by_host_and_nodename_not_found(self):
self.assertRaises(exception.ComputeHostNotFound,
db.compute_node_get_by_host_and_nodename,
self.ctxt, 'host1', 'wrong')
def test_compute_node_get(self):
compute_node_id = self.item['id']
node = db.compute_node_get(self.ctxt, compute_node_id)
self._assertEqualObjects(self.compute_node_dict, node,
ignored_keys=self._ignored_keys +
['stats', 'service'])
new_stats = jsonutils.loads(node['stats'])
self.assertEqual(self.stats, new_stats)
def test_compute_node_update(self):
compute_node_id = self.item['id']
stats = jsonutils.loads(self.item['stats'])
# change some values:
stats['num_instances'] = 8
stats['num_tribbles'] = 1
values = {
'vcpus': 4,
'stats': jsonutils.dumps(stats),
}
item_updated = db.compute_node_update(self.ctxt, compute_node_id,
values)
self.assertEqual(4, item_updated['vcpus'])
new_stats = jsonutils.loads(item_updated['stats'])
self.assertEqual(stats, new_stats)
def test_compute_node_delete(self):
compute_node_id = self.item['id']
db.compute_node_delete(self.ctxt, compute_node_id)
nodes = db.compute_node_get_all(self.ctxt)
self.assertEqual(len(nodes), 0)
def test_compute_node_search_by_hypervisor(self):
nodes_created = []
new_service = copy.copy(self.service_dict)
for i in range(3):
new_service['binary'] += str(i)
new_service['topic'] += str(i)
service = db.service_create(self.ctxt, new_service)
self.compute_node_dict['service_id'] = service['id']
self.compute_node_dict['hypervisor_hostname'] = 'testhost' + str(i)
self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
self.compute_node_dict['uuid'] = uuidutils.generate_uuid()
node = db.compute_node_create(self.ctxt, self.compute_node_dict)
nodes_created.append(node)
nodes = db.compute_node_search_by_hypervisor(self.ctxt, 'host')
self.assertEqual(3, len(nodes))
self._assertEqualListsOfObjects(nodes_created, nodes,
ignored_keys=self._ignored_keys + ['stats', 'service'])
def test_compute_node_statistics(self):
service_dict = dict(host='hostA', binary='nova-compute',
topic=compute_rpcapi.RPC_TOPIC,
report_count=1, disabled=False)
service = db.service_create(self.ctxt, service_dict)
# Define the various values for the new compute node
new_vcpus = 4
new_memory_mb = 4096
new_local_gb = 2048
new_vcpus_used = 1
new_memory_mb_used = 1024
new_local_gb_used = 100
new_free_ram_mb = 3072
new_free_disk_gb = 1948
new_running_vms = 1
new_current_workload = 0
# Calculate the expected values by adding the values for the new
# compute node to those for self.item
itm = self.item
exp_count = 2
exp_vcpus = new_vcpus + itm['vcpus']
exp_memory_mb = new_memory_mb + itm['memory_mb']
exp_local_gb = new_local_gb + itm['local_gb']
exp_vcpus_used = new_vcpus_used + itm['vcpus_used']
exp_memory_mb_used = new_memory_mb_used + itm['memory_mb_used']
exp_local_gb_used = new_local_gb_used + itm['local_gb_used']
exp_free_ram_mb = new_free_ram_mb + itm['free_ram_mb']
exp_free_disk_gb = new_free_disk_gb + itm['free_disk_gb']
exp_running_vms = new_running_vms + itm['running_vms']
exp_current_workload = new_current_workload + itm['current_workload']
# Create the new compute node
compute_node_dict = dict(vcpus=new_vcpus,
memory_mb=new_memory_mb,
local_gb=new_local_gb,
uuid=uuidsentinel.fake_compute_node,
vcpus_used=new_vcpus_used,
memory_mb_used=new_memory_mb_used,
local_gb_used=new_local_gb_used,
free_ram_mb=new_free_ram_mb,
free_disk_gb=new_free_disk_gb,
hypervisor_type="xen",
hypervisor_version=1,
cpu_info="",
running_vms=new_running_vms,
current_workload=new_current_workload,
service_id=service['id'],
host=service['host'],
disk_available_least=100,
hypervisor_hostname='abracadabra',
host_ip='127.0.0.2',
supported_instances='',
pci_stats='',
metrics='',
extra_resources='',
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0,
stats='',
numa_topology='')
db.compute_node_create(self.ctxt, compute_node_dict)
# Get the stats, and make sure the stats agree with the expected
# amounts.
stats = db.compute_node_statistics(self.ctxt)
self.assertEqual(exp_count, stats['count'])
self.assertEqual(exp_vcpus, stats['vcpus'])
self.assertEqual(exp_memory_mb, stats['memory_mb'])
self.assertEqual(exp_local_gb, stats['local_gb'])
self.assertEqual(exp_vcpus_used, stats['vcpus_used'])
self.assertEqual(exp_memory_mb_used, stats['memory_mb_used'])
self.assertEqual(exp_local_gb_used, stats['local_gb_used'])
self.assertEqual(exp_free_ram_mb, stats['free_ram_mb'])
self.assertEqual(exp_free_disk_gb, stats['free_disk_gb'])
self.assertEqual(exp_running_vms, stats['running_vms'])
self.assertEqual(exp_current_workload, stats['current_workload'])
def test_compute_node_statistics_disabled_service(self):
serv = db.service_get_by_host_and_topic(
self.ctxt, 'host1', compute_rpcapi.RPC_TOPIC)
db.service_update(self.ctxt, serv['id'], {'disabled': True})
stats = db.compute_node_statistics(self.ctxt)
self.assertEqual(stats.pop('count'), 0)
def test_compute_node_statistics_with_old_service_id(self):
        # NOTE(sbauza): This test only checks backwards compatibility with
        # old compute_nodes records that do not provide the host column.
        # It can be removed once we are sure that all compute nodes populate
        # the host field thanks to the ResourceTracker.
service2 = self.service_dict.copy()
service2['host'] = 'host2'
db_service2 = db.service_create(self.ctxt, service2)
compute_node_old_host = self.compute_node_dict.copy()
compute_node_old_host['uuid'] = uuidutils.generate_uuid()
compute_node_old_host['stats'] = jsonutils.dumps(self.stats)
compute_node_old_host['hypervisor_hostname'] = 'node_2'
compute_node_old_host['service_id'] = db_service2['id']
compute_node_old_host.pop('host')
db.compute_node_create(self.ctxt, compute_node_old_host)
stats = db.compute_node_statistics(self.ctxt)
self.assertEqual(2, stats.pop('count'))
def test_compute_node_statistics_with_other_service(self):
other_service = self.service_dict.copy()
other_service['topic'] = 'fake-topic'
other_service['binary'] = 'nova-api'
db.service_create(self.ctxt, other_service)
stats = db.compute_node_statistics(self.ctxt)
data = {'count': 1,
'vcpus_used': 0,
'local_gb_used': 0,
'memory_mb': 1024,
'current_workload': 0,
'vcpus': 2,
'running_vms': 0,
'free_disk_gb': 2048,
'disk_available_least': 100,
'local_gb': 2048,
'free_ram_mb': 1024,
'memory_mb_used': 0}
for key, value in data.items():
self.assertEqual(value, stats.pop(key))
def test_compute_node_statistics_delete_and_recreate_service(self):
        # Test added for bug #1692397: a deleted service record must not be
        # selected when calculating compute node statistics.
# Let's first assert what we expect the setup to look like.
self.assertEqual(1, len(db.service_get_all_by_binary(
self.ctxt, 'nova-compute')))
self.assertEqual(1, len(db.compute_node_get_all_by_host(
self.ctxt, 'host1')))
# Get the statistics for the original node/service before we delete
# the service.
original_stats = db.compute_node_statistics(self.ctxt)
# At this point we have one compute_nodes record and one services
# record pointing at the same host. Now we need to simulate the user
# deleting the service record in the API, which will only delete very
# old compute_nodes records where the service and compute node are
# linked via the compute_nodes.service_id column, which is the case
# in this test class; at some point we should decouple those to be more
# modern.
db.service_destroy(self.ctxt, self.service['id'])
# Now we're going to simulate that the nova-compute service was
# restarted, which will create a new services record with a unique
# uuid but it will have the same host, binary and topic values as the
# deleted service. The unique constraints don't fail in this case since
# they include the deleted column and this service and the old service
# have a different deleted value.
service2_dict = self.service_dict.copy()
service2_dict['uuid'] = uuidsentinel.service2_uuid
db.service_create(self.ctxt, service2_dict)
# Again, because of the way the setUp is done currently, the compute
# node was linked to the original now-deleted service, so when we
# deleted that service it also deleted the compute node record, so we
# have to simulate the ResourceTracker in the nova-compute worker
# re-creating the compute nodes record.
new_compute_node = self.compute_node_dict.copy()
del new_compute_node['service_id'] # make it a new style compute node
new_compute_node['uuid'] = uuidsentinel.new_compute_uuid
db.compute_node_create(self.ctxt, new_compute_node)
# Now get the stats for all compute nodes (we just have one) and it
# should just be for a single service, not double, as we should ignore
# the (soft) deleted service.
stats = db.compute_node_statistics(self.ctxt)
self.assertDictEqual(original_stats, stats)
def test_compute_node_not_found(self):
self.assertRaises(exception.ComputeHostNotFound, db.compute_node_get,
self.ctxt, 100500)
def test_compute_node_update_always_updates_updated_at(self):
item_updated = db.compute_node_update(self.ctxt,
self.item['id'], {})
self.assertNotEqual(self.item['updated_at'],
item_updated['updated_at'])
def test_compute_node_update_override_updated_at(self):
# Update the record once so updated_at is set.
first = db.compute_node_update(self.ctxt, self.item['id'],
{'free_ram_mb': '12'})
self.assertIsNotNone(first['updated_at'])
# Update a second time. Make sure that the updated_at value we send
# is overridden.
second = db.compute_node_update(self.ctxt, self.item['id'],
{'updated_at': first.updated_at,
'free_ram_mb': '13'})
self.assertNotEqual(first['updated_at'], second['updated_at'])
def test_service_destroy_with_compute_node(self):
db.service_destroy(self.ctxt, self.service['id'])
self.assertRaises(exception.ComputeHostNotFound,
db.compute_node_get_model, self.ctxt,
self.item['id'])
def test_service_destroy_with_old_compute_node(self):
        # NOTE(sbauza): This test only checks backwards compatibility with
        # old compute_nodes records that do not provide the host column.
        # It can be removed once we are sure that all compute nodes populate
        # the host field thanks to the ResourceTracker.
compute_node_old_host_dict = self.compute_node_dict.copy()
compute_node_old_host_dict['uuid'] = uuidutils.generate_uuid()
compute_node_old_host_dict.pop('host')
item_old = db.compute_node_create(self.ctxt,
compute_node_old_host_dict)
db.service_destroy(self.ctxt, self.service['id'])
self.assertRaises(exception.ComputeHostNotFound,
db.compute_node_get_model, self.ctxt,
item_old['id'])
@mock.patch("nova.db.sqlalchemy.api.compute_node_get_model")
def test_dbapi_compute_node_get_model(self, mock_get_model):
cid = self.item["id"]
db.compute_node_get_model(self.ctxt, cid)
mock_get_model.assert_called_once_with(self.ctxt, cid)
@mock.patch("nova.db.sqlalchemy.api.model_query")
def test_compute_node_get_model(self, mock_model_query):
class FakeFiltered(object):
def first(self):
return mock.sentinel.first
fake_filtered_cn = FakeFiltered()
class FakeModelQuery(object):
def filter_by(self, id):
return fake_filtered_cn
mock_model_query.return_value = FakeModelQuery()
result = sqlalchemy_api.compute_node_get_model(self.ctxt,
self.item["id"])
self.assertEqual(result, mock.sentinel.first)
mock_model_query.assert_called_once_with(self.ctxt, models.ComputeNode)
class ProviderFwRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
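    """Tests for db.api.provider_fw_rule_* methods."""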
def setUp(self):
super(ProviderFwRuleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.values = self._get_rule_values()
self.rules = [db.provider_fw_rule_create(self.ctxt, rule)
for rule in self.values]
def _get_rule_values(self):
cidr_samples = ['192.168.0.0/24', '10.1.2.3/32',
'2001:4f8:3:ba::/64',
'2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128']
values = []
for i in range(len(cidr_samples)):
rule = {}
rule['protocol'] = 'foo' + str(i)
rule['from_port'] = 9999 + i
rule['to_port'] = 9898 + i
rule['cidr'] = cidr_samples[i]
values.append(rule)
return values
def test_provider_fw_rule_create(self):
ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at']
for i, rule in enumerate(self.values):
self._assertEqualObjects(self.rules[i], rule,
ignored_keys=ignored_keys)
def test_provider_fw_rule_get_all(self):
self._assertEqualListsOfObjects(self.rules,
db.provider_fw_rule_get_all(self.ctxt))
def test_provider_fw_rule_destroy(self):
for rule in self.rules:
db.provider_fw_rule_destroy(self.ctxt, rule.id)
self.assertEqual([], db.provider_fw_rule_get_all(self.ctxt))
class CertificateTestCase(test.TestCase, ModelsObjectComparatorMixin):
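    """Tests for db.api.certificate_* methods."""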
def setUp(self):
super(CertificateTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.created = self._certificates_create()
def _get_certs_values(self):
base_values = {
'user_id': 'user',
'project_id': 'project',
'file_name': 'filename'
}
return [{k: v + str(x) for k, v in base_values.items()}
for x in range(1, 4)]
def _certificates_create(self):
return [db.certificate_create(self.ctxt, cert)
for cert in self._get_certs_values()]
def test_certificate_create(self):
ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at']
for i, cert in enumerate(self._get_certs_values()):
self._assertEqualObjects(self.created[i], cert,
ignored_keys=ignored_keys)
def test_certificate_get_all_by_project(self):
cert = db.certificate_get_all_by_project(self.ctxt,
self.created[1].project_id)
self._assertEqualObjects(self.created[1], cert[0])
def test_certificate_get_all_by_user(self):
cert = db.certificate_get_all_by_user(self.ctxt,
self.created[1].user_id)
self._assertEqualObjects(self.created[1], cert[0])
def test_certificate_get_all_by_user_and_project(self):
cert = db.certificate_get_all_by_user_and_project(self.ctxt,
self.created[1].user_id, self.created[1].project_id)
self._assertEqualObjects(self.created[1], cert[0])
class ConsoleTestCase(test.TestCase, ModelsObjectComparatorMixin):
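    """Tests for db.api.console_* methods."""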
def setUp(self):
super(ConsoleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
pools_data = [
{'address': '192.168.10.10',
'username': 'user1',
'password': 'passwd1',
'console_type': 'type1',
'public_hostname': 'public_host1',
'host': 'host1',
'compute_host': 'compute_host1',
},
{'address': '192.168.10.11',
'username': 'user2',
'password': 'passwd2',
'console_type': 'type2',
'public_hostname': 'public_host2',
'host': 'host2',
'compute_host': 'compute_host2',
},
]
self.console_pools = [db.console_pool_create(self.ctxt, val)
for val in pools_data]
instance_uuid = uuidsentinel.uuid1
db.instance_create(self.ctxt, {'uuid': instance_uuid})
self.console_data = [{'instance_name': 'name' + str(x),
'instance_uuid': instance_uuid,
'password': 'pass' + str(x),
'port': 7878 + x,
'pool_id': self.console_pools[x]['id']}
for x in range(len(pools_data))]
self.consoles = [db.console_create(self.ctxt, val)
for val in self.console_data]
def test_console_create(self):
ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at']
for console in self.consoles:
self.assertIsNotNone(console['id'])
self._assertEqualListsOfObjects(self.console_data, self.consoles,
ignored_keys=ignored_keys)
def test_console_get_by_id(self):
console = self.consoles[0]
console_get = db.console_get(self.ctxt, console['id'])
self._assertEqualObjects(console, console_get,
ignored_keys=['pool'])
def test_console_get_by_id_uuid(self):
console = self.consoles[0]
console_get = db.console_get(self.ctxt, console['id'],
console['instance_uuid'])
self._assertEqualObjects(console, console_get,
ignored_keys=['pool'])
def test_console_get_by_pool_instance(self):
console = self.consoles[0]
console_get = db.console_get_by_pool_instance(self.ctxt,
console['pool_id'], console['instance_uuid'])
self._assertEqualObjects(console, console_get,
ignored_keys=['pool'])
def test_console_get_all_by_instance(self):
instance_uuid = self.consoles[0]['instance_uuid']
consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfObjects(self.consoles, consoles_get)
def test_console_get_all_by_instance_with_pool(self):
instance_uuid = self.consoles[0]['instance_uuid']
consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid,
columns_to_join=['pool'])
self._assertEqualListsOfObjects(self.consoles, consoles_get,
ignored_keys=['pool'])
self._assertEqualListsOfObjects([pool for pool in self.console_pools],
[c['pool'] for c in consoles_get])
def test_console_get_all_by_instance_empty(self):
consoles_get = db.console_get_all_by_instance(self.ctxt,
uuidsentinel.uuid2)
self.assertEqual(consoles_get, [])
def test_console_delete(self):
console_id = self.consoles[0]['id']
db.console_delete(self.ctxt, console_id)
self.assertRaises(exception.ConsoleNotFound, db.console_get,
self.ctxt, console_id)
def test_console_get_by_pool_instance_not_found(self):
self.assertRaises(exception.ConsoleNotFoundInPoolForInstance,
db.console_get_by_pool_instance, self.ctxt,
self.consoles[0]['pool_id'],
uuidsentinel.uuid2)
def test_console_get_not_found(self):
self.assertRaises(exception.ConsoleNotFound, db.console_get,
self.ctxt, 100500)
def test_console_get_not_found_instance(self):
self.assertRaises(exception.ConsoleNotFoundForInstance, db.console_get,
self.ctxt, self.consoles[0]['id'],
uuidsentinel.uuid2)
class CellTestCase(test.TestCase, ModelsObjectComparatorMixin):
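    """Tests for db.api.cell_* methods."""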
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(CellTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_cell_base_values(self):
return {
'name': 'myname',
'api_url': 'apiurl',
'transport_url': 'transporturl',
'weight_offset': 0.5,
'weight_scale': 1.5,
'is_parent': True,
}
def _cell_value_modify(self, value, step):
if isinstance(value, six.string_types):
return value + str(step)
elif isinstance(value, float):
return value + step + 0.6
elif isinstance(value, bool):
return bool(step % 2)
elif isinstance(value, int):
return value + step
def _create_cells(self):
test_values = []
for x in range(1, 4):
modified_val = {k: self._cell_value_modify(v, x)
for k, v in self._get_cell_base_values().items()}
db.cell_create(self.ctxt, modified_val)
test_values.append(modified_val)
return test_values
def test_cell_create(self):
cell = db.cell_create(self.ctxt, self._get_cell_base_values())
self.assertIsNotNone(cell['id'])
self._assertEqualObjects(cell, self._get_cell_base_values(),
ignored_keys=self._ignored_keys)
def test_cell_update(self):
db.cell_create(self.ctxt, self._get_cell_base_values())
new_values = {
'api_url': 'apiurl1',
'transport_url': 'transporturl1',
'weight_offset': 0.6,
'weight_scale': 1.6,
'is_parent': False,
}
test_cellname = self._get_cell_base_values()['name']
updated_cell = db.cell_update(self.ctxt, test_cellname, new_values)
self._assertEqualObjects(updated_cell, new_values,
ignored_keys=self._ignored_keys + ['name'])
def test_cell_delete(self):
new_cells = self._create_cells()
for cell in new_cells:
test_cellname = cell['name']
db.cell_delete(self.ctxt, test_cellname)
self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
test_cellname)
def test_cell_get(self):
new_cells = self._create_cells()
for cell in new_cells:
cell_get = db.cell_get(self.ctxt, cell['name'])
self._assertEqualObjects(cell_get, cell,
ignored_keys=self._ignored_keys)
def test_cell_get_all(self):
new_cells = self._create_cells()
cells = db.cell_get_all(self.ctxt)
self.assertEqual(len(new_cells), len(cells))
cells_byname = {newcell['name']: newcell
for newcell in new_cells}
for cell in cells:
self._assertEqualObjects(cell, cells_byname[cell['name']],
self._ignored_keys)
def test_cell_get_not_found(self):
self._create_cells()
self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
'cellnotinbase')
def test_cell_update_not_found(self):
self._create_cells()
self.assertRaises(exception.CellNotFound, db.cell_update, self.ctxt,
'cellnotinbase', self._get_cell_base_values())
def test_cell_create_exists(self):
db.cell_create(self.ctxt, self._get_cell_base_values())
self.assertRaises(exception.CellExists, db.cell_create,
self.ctxt, self._get_cell_base_values())
class ConsolePoolTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(ConsolePoolTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.test_console_pool_1 = {
'address': '192.168.2.10',
'username': 'user_1',
'password': 'secret_123',
'console_type': 'type_1',
'public_hostname': 'public_hostname_123',
'host': 'localhost',
'compute_host': '127.0.0.1',
}
self.test_console_pool_2 = {
'address': '192.168.2.11',
'username': 'user_2',
'password': 'secret_1234',
'console_type': 'type_2',
'public_hostname': 'public_hostname_1234',
'host': '127.0.0.1',
'compute_host': 'localhost',
}
self.test_console_pool_3 = {
'address': '192.168.2.12',
'username': 'user_3',
'password': 'secret_12345',
'console_type': 'type_2',
'public_hostname': 'public_hostname_12345',
'host': '127.0.0.1',
'compute_host': '192.168.1.1',
}
def test_console_pool_create(self):
console_pool = db.console_pool_create(
self.ctxt, self.test_console_pool_1)
self.assertIsNotNone(console_pool.get('id'))
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id']
self._assertEqualObjects(
console_pool, self.test_console_pool_1, ignored_keys)
def test_console_pool_create_duplicate(self):
db.console_pool_create(self.ctxt, self.test_console_pool_1)
self.assertRaises(exception.ConsolePoolExists, db.console_pool_create,
self.ctxt, self.test_console_pool_1)
def test_console_pool_get_by_host_type(self):
params = [
self.test_console_pool_1,
self.test_console_pool_2,
]
for p in params:
db.console_pool_create(self.ctxt, p)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id', 'consoles']
cp = self.test_console_pool_1
db_cp = db.console_pool_get_by_host_type(
self.ctxt, cp['compute_host'], cp['host'], cp['console_type']
)
self._assertEqualObjects(cp, db_cp, ignored_keys)
    def test_console_pool_get_by_host_type_no_results(self):
self.assertRaises(
exception.ConsolePoolNotFoundForHostType,
db.console_pool_get_by_host_type, self.ctxt, 'compute_host',
'host', 'console_type')
def test_console_pool_get_all_by_host_type(self):
params = [
self.test_console_pool_1,
self.test_console_pool_2,
self.test_console_pool_3,
]
for p in params:
db.console_pool_create(self.ctxt, p)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id', 'consoles']
cp = self.test_console_pool_2
db_cp = db.console_pool_get_all_by_host_type(
self.ctxt, cp['host'], cp['console_type'])
self._assertEqualListsOfObjects(
db_cp, [self.test_console_pool_2, self.test_console_pool_3],
ignored_keys)
def test_console_pool_get_all_by_host_type_no_results(self):
res = db.console_pool_get_all_by_host_type(
self.ctxt, 'cp_host', 'cp_console_type')
self.assertEqual([], res)
class DnsdomainTestCase(test.TestCase):
def setUp(self):
super(DnsdomainTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.domain = 'test.domain'
self.testzone = 'testzone'
self.project = 'fake'
def test_dnsdomain_register_for_zone(self):
db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
domain = db.dnsdomain_get(self.ctxt, self.domain)
self.assertEqual(domain['domain'], self.domain)
self.assertEqual(domain['availability_zone'], self.testzone)
self.assertEqual(domain['scope'], 'private')
def test_dnsdomain_register_for_project(self):
db.dnsdomain_register_for_project(self.ctxt, self.domain, self.project)
domain = db.dnsdomain_get(self.ctxt, self.domain)
self.assertEqual(domain['domain'], self.domain)
self.assertEqual(domain['project_id'], self.project)
self.assertEqual(domain['scope'], 'public')
def test_dnsdomain_unregister(self):
db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
db.dnsdomain_unregister(self.ctxt, self.domain)
domain = db.dnsdomain_get(self.ctxt, self.domain)
self.assertIsNone(domain)
def test_dnsdomain_get_all(self):
d_list = ['test.domain.one', 'test.domain.two']
db.dnsdomain_register_for_zone(self.ctxt, d_list[0], 'zone')
db.dnsdomain_register_for_zone(self.ctxt, d_list[1], 'zone')
db_list = db.dnsdomain_get_all(self.ctxt)
db_domain_list = [d.domain for d in db_list]
self.assertEqual(sorted(d_list), sorted(db_domain_list))
class BwUsageTestCase(test.TestCase, ModelsObjectComparatorMixin):
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(BwUsageTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.useFixture(test.TimeOverride())
def test_bw_usage_get_by_uuids(self):
now = timeutils.utcnow()
start_period = now - datetime.timedelta(seconds=10)
start_period_str = start_period.isoformat()
uuid3_refreshed = now - datetime.timedelta(seconds=5)
uuid3_refreshed_str = uuid3_refreshed.isoformat()
expected_bw_usages = {
'fake_uuid1': {'uuid': 'fake_uuid1',
'mac': 'fake_mac1',
'start_period': start_period,
'bw_in': 100,
'bw_out': 200,
'last_ctr_in': 12345,
'last_ctr_out': 67890,
'last_refreshed': now},
'fake_uuid2': {'uuid': 'fake_uuid2',
'mac': 'fake_mac2',
'start_period': start_period,
'bw_in': 200,
'bw_out': 300,
'last_ctr_in': 22345,
'last_ctr_out': 77890,
'last_refreshed': now},
'fake_uuid3': {'uuid': 'fake_uuid3',
'mac': 'fake_mac3',
'start_period': start_period,
'bw_in': 400,
'bw_out': 500,
'last_ctr_in': 32345,
'last_ctr_out': 87890,
'last_refreshed': uuid3_refreshed}
}
bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
['fake_uuid1', 'fake_uuid2'], start_period_str)
# No matches
self.assertEqual(len(bw_usages), 0)
# Add 3 entries
db.bw_usage_update(self.ctxt, 'fake_uuid1',
'fake_mac1', start_period_str,
100, 200, 12345, 67890)
db.bw_usage_update(self.ctxt, 'fake_uuid2',
'fake_mac2', start_period_str,
100, 200, 42, 42)
# Test explicit refreshed time
db.bw_usage_update(self.ctxt, 'fake_uuid3',
'fake_mac3', start_period_str,
400, 500, 32345, 87890,
last_refreshed=uuid3_refreshed_str)
# Update 2nd entry
db.bw_usage_update(self.ctxt, 'fake_uuid2',
'fake_mac2', start_period_str,
200, 300, 22345, 77890)
bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
['fake_uuid1', 'fake_uuid2', 'fake_uuid3'], start_period_str)
self.assertEqual(len(bw_usages), 3)
for usage in bw_usages:
self._assertEqualObjects(expected_bw_usages[usage['uuid']], usage,
ignored_keys=self._ignored_keys)
def _test_bw_usage_update(self, **expected_bw_usage):
bw_usage = db.bw_usage_update(self.ctxt, **expected_bw_usage)
self._assertEqualObjects(expected_bw_usage, bw_usage,
ignored_keys=self._ignored_keys)
uuid = expected_bw_usage['uuid']
mac = expected_bw_usage['mac']
start_period = expected_bw_usage['start_period']
bw_usage = db.bw_usage_get(self.ctxt, uuid, start_period, mac)
self._assertEqualObjects(expected_bw_usage, bw_usage,
ignored_keys=self._ignored_keys)
def _create_bw_usage(self, context, uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, id, last_refreshed=None):
with sqlalchemy_api.get_context_manager(context).writer.using(context):
bwusage = models.BandwidthUsage()
bwusage.start_period = start_period
bwusage.uuid = uuid
bwusage.mac = mac
bwusage.last_refreshed = last_refreshed
bwusage.bw_in = bw_in
bwusage.bw_out = bw_out
bwusage.last_ctr_in = last_ctr_in
bwusage.last_ctr_out = last_ctr_out
bwusage.id = id
bwusage.save(context.session)
def test_bw_usage_update_exactly_one_record(self):
now = timeutils.utcnow()
start_period = now - datetime.timedelta(seconds=10)
uuid = 'fake_uuid'
# create two equal bw_usages with IDs 1 and 2
for id in range(1, 3):
bw_usage = {'uuid': uuid,
'mac': 'fake_mac',
'start_period': start_period,
'bw_in': 100,
'bw_out': 200,
'last_ctr_in': 12345,
'last_ctr_out': 67890,
'last_refreshed': now,
'id': id}
self._create_bw_usage(self.ctxt, **bw_usage)
# check that we have two equal bw_usages
self.assertEqual(
2, len(db.bw_usage_get_by_uuids(self.ctxt, [uuid], start_period)))
# update 'last_ctr_in' field in one bw_usage
updated_bw_usage = {'uuid': uuid,
'mac': 'fake_mac',
'start_period': start_period,
'bw_in': 100,
'bw_out': 200,
'last_ctr_in': 54321,
'last_ctr_out': 67890,
'last_refreshed': now}
result = db.bw_usage_update(
self.ctxt, update_cells=False, **updated_bw_usage)
# check that only bw_usage with ID 1 was updated
self.assertEqual(1, result['id'])
self._assertEqualObjects(updated_bw_usage, result,
ignored_keys=self._ignored_keys)
def test_bw_usage_get(self):
now = timeutils.utcnow()
start_period = now - datetime.timedelta(seconds=10)
start_period_str = start_period.isoformat()
expected_bw_usage = {'uuid': 'fake_uuid1',
'mac': 'fake_mac1',
'start_period': start_period,
'bw_in': 100,
'bw_out': 200,
'last_ctr_in': 12345,
'last_ctr_out': 67890,
'last_refreshed': now}
bw_usage = db.bw_usage_get(self.ctxt, 'fake_uuid1', start_period_str,
'fake_mac1')
self.assertIsNone(bw_usage)
self._test_bw_usage_update(**expected_bw_usage)
def test_bw_usage_update_new(self):
now = timeutils.utcnow()
start_period = now - datetime.timedelta(seconds=10)
expected_bw_usage = {'uuid': 'fake_uuid1',
'mac': 'fake_mac1',
'start_period': start_period,
'bw_in': 100,
'bw_out': 200,
'last_ctr_in': 12345,
'last_ctr_out': 67890,
'last_refreshed': now}
self._test_bw_usage_update(**expected_bw_usage)
def test_bw_usage_update_existing(self):
now = timeutils.utcnow()
start_period = now - datetime.timedelta(seconds=10)
expected_bw_usage = {'uuid': 'fake_uuid1',
'mac': 'fake_mac1',
'start_period': start_period,
'bw_in': 100,
'bw_out': 200,
'last_ctr_in': 12345,
'last_ctr_out': 67890,
'last_refreshed': now}
self._test_bw_usage_update(**expected_bw_usage)
expected_bw_usage['bw_in'] = 300
expected_bw_usage['bw_out'] = 400
expected_bw_usage['last_ctr_in'] = 23456
expected_bw_usage['last_ctr_out'] = 78901
self._test_bw_usage_update(**expected_bw_usage)
class Ec2TestCase(test.TestCase):
def setUp(self):
super(Ec2TestCase, self).setUp()
self.ctxt = context.RequestContext('fake_user', 'fake_project')
def test_ec2_ids_not_found_are_printable(self):
def check_exc_format(method, value):
try:
method(self.ctxt, value)
except exception.NotFound as exc:
self.assertIn(six.text_type(value), six.text_type(exc))
check_exc_format(db.get_instance_uuid_by_ec2_id, 123456)
check_exc_format(db.ec2_snapshot_get_by_ec2_id, 123456)
check_exc_format(db.ec2_snapshot_get_by_uuid, 'fake')
def test_ec2_volume_create(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
self.assertIsNotNone(vol['id'])
self.assertEqual(vol['uuid'], 'fake-uuid')
def test_ec2_volume_get_by_id(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
vol2 = db.ec2_volume_get_by_id(self.ctxt, vol['id'])
self.assertEqual(vol2['uuid'], vol['uuid'])
def test_ec2_volume_get_by_uuid(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
vol2 = db.ec2_volume_get_by_uuid(self.ctxt, vol['uuid'])
self.assertEqual(vol2['id'], vol['id'])
def test_ec2_snapshot_create(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
self.assertIsNotNone(snap['id'])
self.assertEqual(snap['uuid'], 'fake-uuid')
def test_ec2_snapshot_get_by_ec2_id(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
snap2 = db.ec2_snapshot_get_by_ec2_id(self.ctxt, snap['id'])
self.assertEqual(snap2['uuid'], 'fake-uuid')
def test_ec2_snapshot_get_by_uuid(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
snap2 = db.ec2_snapshot_get_by_uuid(self.ctxt, 'fake-uuid')
self.assertEqual(snap['id'], snap2['id'])
def test_ec2_snapshot_get_by_ec2_id_not_found(self):
self.assertRaises(exception.SnapshotNotFound,
db.ec2_snapshot_get_by_ec2_id,
self.ctxt, 123456)
def test_ec2_snapshot_get_by_uuid_not_found(self):
self.assertRaises(exception.SnapshotNotFound,
db.ec2_snapshot_get_by_uuid,
self.ctxt, 'fake-uuid')
def test_ec2_instance_create(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
self.assertIsNotNone(inst['id'])
self.assertEqual(inst['uuid'], 'fake-uuid')
def test_ec2_instance_get_by_uuid(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
inst2 = db.ec2_instance_get_by_uuid(self.ctxt, 'fake-uuid')
self.assertEqual(inst['id'], inst2['id'])
def test_ec2_instance_get_by_id(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
inst2 = db.ec2_instance_get_by_id(self.ctxt, inst['id'])
self.assertEqual(inst['id'], inst2['id'])
def test_ec2_instance_get_by_uuid_not_found(self):
self.assertRaises(exception.InstanceNotFound,
db.ec2_instance_get_by_uuid,
self.ctxt, 'uuid-not-present')
def test_ec2_instance_get_by_id_not_found(self):
self.assertRaises(exception.InstanceNotFound,
                          db.ec2_instance_get_by_id,
self.ctxt, 12345)
def test_get_instance_uuid_by_ec2_id(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
inst_uuid = db.get_instance_uuid_by_ec2_id(self.ctxt, inst['id'])
self.assertEqual(inst_uuid, 'fake-uuid')
def test_get_instance_uuid_by_ec2_id_not_found(self):
self.assertRaises(exception.InstanceNotFound,
db.get_instance_uuid_by_ec2_id,
self.ctxt, 100500)
class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(ArchiveTestCase, self).setUp()
self.engine = get_engine()
self.conn = self.engine.connect()
self.instance_id_mappings = models.InstanceIdMapping.__table__
self.shadow_instance_id_mappings = sqlalchemyutils.get_table(
self.engine, "shadow_instance_id_mappings")
self.dns_domains = models.DNSDomain.__table__
self.shadow_dns_domains = sqlalchemyutils.get_table(
self.engine, "shadow_dns_domains")
self.consoles = models.Console.__table__
self.shadow_consoles = sqlalchemyutils.get_table(
self.engine, "shadow_consoles")
self.console_pools = models.ConsolePool.__table__
self.shadow_console_pools = sqlalchemyutils.get_table(
self.engine, "shadow_console_pools")
self.instances = models.Instance.__table__
self.shadow_instances = sqlalchemyutils.get_table(
self.engine, "shadow_instances")
self.migrations = models.Migration.__table__
self.shadow_migrations = sqlalchemyutils.get_table(
self.engine, "shadow_migrations")
self.uuidstrs = []
for _ in range(6):
self.uuidstrs.append(uuidutils.generate_uuid(dashed=False))
def _assert_shadow_tables_empty_except(self, *exceptions):
"""Ensure shadow tables are empty
This method ensures that all the shadow tables in the schema,
except for specificially named exceptions, are empty. This
makes sure that archiving isn't moving unexpected content.
"""
metadata = MetaData(bind=self.engine)
metadata.reflect()
for table in metadata.tables:
if table.startswith("shadow_") and table not in exceptions:
rows = self.conn.execute("select * from %s" % table).fetchall()
self.assertEqual(rows, [], "Table %s not empty" % table)
def test_shadow_tables(self):
metadata = MetaData(bind=self.engine)
metadata.reflect()
for table_name in metadata.tables:
# NOTE(rpodolyaka): migration 209 introduced a few new tables,
# which don't have shadow tables and it's
# completely OK, so we should skip them here
if table_name.startswith("dump_"):
continue
# NOTE(snikitin): migration 266 introduced a new table 'tags',
            #                 which has no shadow table and it's
# completely OK, so we should skip it here
            # NOTE(cdent): migration 314 introduced three new tables
# ('resource_providers', 'allocations' and 'inventories')
# with no shadow table and it's OK, so skip.
# 318 adds one more: 'resource_provider_aggregates'.
# NOTE(PaulMurray): migration 333 adds 'console_auth_tokens'
if table_name in ['tags', 'resource_providers', 'allocations',
'inventories', 'resource_provider_aggregates',
'console_auth_tokens']:
continue
if table_name.startswith("shadow_"):
self.assertIn(table_name[7:], metadata.tables)
continue
self.assertTrue(db_utils.check_shadow_table(self.engine,
table_name))
self._assert_shadow_tables_empty_except()
def test_archive_deleted_rows(self):
# Add 6 rows to table
for uuidstr in self.uuidstrs:
ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt)
# Set 4 to deleted
update_statement = self.instance_id_mappings.update().\
where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
qiim = sql.select([self.instance_id_mappings]).where(self.
instance_id_mappings.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qiim).fetchall()
# Verify we have 6 in main
self.assertEqual(len(rows), 6)
qsiim = sql.select([self.shadow_instance_id_mappings]).\
where(self.shadow_instance_id_mappings.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qsiim).fetchall()
# Verify we have 0 in shadow
self.assertEqual(len(rows), 0)
# Archive 2 rows
results = db.archive_deleted_rows(max_rows=2)
expected = dict(instance_id_mappings=2)
self._assertEqualObjects(expected, results[0])
rows = self.conn.execute(qiim).fetchall()
# Verify we have 4 left in main
self.assertEqual(len(rows), 4)
rows = self.conn.execute(qsiim).fetchall()
# Verify we have 2 in shadow
self.assertEqual(len(rows), 2)
# Archive 2 more rows
results = db.archive_deleted_rows(max_rows=2)
expected = dict(instance_id_mappings=2)
self._assertEqualObjects(expected, results[0])
rows = self.conn.execute(qiim).fetchall()
# Verify we have 2 left in main
self.assertEqual(len(rows), 2)
rows = self.conn.execute(qsiim).fetchall()
# Verify we have 4 in shadow
self.assertEqual(len(rows), 4)
# Try to archive more, but there are no deleted rows left.
results = db.archive_deleted_rows(max_rows=2)
expected = dict()
self._assertEqualObjects(expected, results[0])
rows = self.conn.execute(qiim).fetchall()
# Verify we still have 2 left in main
self.assertEqual(len(rows), 2)
rows = self.conn.execute(qsiim).fetchall()
# Verify we still have 4 in shadow
self.assertEqual(len(rows), 4)
# Ensure only deleted rows were deleted
self._assert_shadow_tables_empty_except(
'shadow_instance_id_mappings')
def test_archive_deleted_rows_for_every_uuid_table(self):
tablenames = []
for model_class in six.itervalues(models.__dict__):
if hasattr(model_class, "__tablename__"):
tablenames.append(model_class.__tablename__)
tablenames.sort()
for tablename in tablenames:
self._test_archive_deleted_rows_for_one_uuid_table(tablename)
def _test_archive_deleted_rows_for_one_uuid_table(self, tablename):
""":returns: 0 on success, 1 if no uuid column, 2 if insert failed."""
# NOTE(cdent): migration 314 adds the resource_providers
# table with a uuid column that does not archive, so skip.
skip_tables = ['resource_providers']
if tablename in skip_tables:
return 1
main_table = sqlalchemyutils.get_table(self.engine, tablename)
if not hasattr(main_table.c, "uuid"):
# Not a uuid table, so skip it.
return 1
shadow_table = sqlalchemyutils.get_table(
self.engine, "shadow_" + tablename)
# Add 6 rows to table
for uuidstr in self.uuidstrs:
ins_stmt = main_table.insert().values(uuid=uuidstr)
try:
self.conn.execute(ins_stmt)
except (db_exc.DBError, OperationalError):
# This table has constraints that require a table-specific
# insert, so skip it.
return 2
# Set 4 to deleted
update_statement = main_table.update().\
where(main_table.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
qmt = sql.select([main_table]).where(main_table.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qmt).fetchall()
# Verify we have 6 in main
self.assertEqual(len(rows), 6)
qst = sql.select([shadow_table]).\
where(shadow_table.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qst).fetchall()
# Verify we have 0 in shadow
self.assertEqual(len(rows), 0)
# Archive 2 rows
sqlalchemy_api._archive_deleted_rows_for_table(tablename, max_rows=2)
# Verify we have 4 left in main
rows = self.conn.execute(qmt).fetchall()
self.assertEqual(len(rows), 4)
# Verify we have 2 in shadow
rows = self.conn.execute(qst).fetchall()
self.assertEqual(len(rows), 2)
# Archive 2 more rows
sqlalchemy_api._archive_deleted_rows_for_table(tablename, max_rows=2)
# Verify we have 2 left in main
rows = self.conn.execute(qmt).fetchall()
self.assertEqual(len(rows), 2)
# Verify we have 4 in shadow
rows = self.conn.execute(qst).fetchall()
self.assertEqual(len(rows), 4)
# Try to archive more, but there are no deleted rows left.
sqlalchemy_api._archive_deleted_rows_for_table(tablename, max_rows=2)
# Verify we still have 2 left in main
rows = self.conn.execute(qmt).fetchall()
self.assertEqual(len(rows), 2)
# Verify we still have 4 in shadow
rows = self.conn.execute(qst).fetchall()
self.assertEqual(len(rows), 4)
return 0
def test_archive_deleted_rows_no_id_column(self):
uuidstr0 = self.uuidstrs[0]
ins_stmt = self.dns_domains.insert().values(domain=uuidstr0)
self.conn.execute(ins_stmt)
update_statement = self.dns_domains.update().\
where(self.dns_domains.c.domain == uuidstr0).\
values(deleted=True)
self.conn.execute(update_statement)
qdd = sql.select([self.dns_domains], self.dns_domains.c.domain ==
uuidstr0)
rows = self.conn.execute(qdd).fetchall()
self.assertEqual(len(rows), 1)
qsdd = sql.select([self.shadow_dns_domains],
self.shadow_dns_domains.c.domain == uuidstr0)
rows = self.conn.execute(qsdd).fetchall()
self.assertEqual(len(rows), 0)
db.archive_deleted_rows(max_rows=1)
rows = self.conn.execute(qdd).fetchall()
self.assertEqual(len(rows), 0)
rows = self.conn.execute(qsdd).fetchall()
self.assertEqual(len(rows), 1)
self._assert_shadow_tables_empty_except(
'shadow_dns_domains',
)
def test_archive_deleted_rows_shadow_insertions_equals_deletions(self):
# Add 2 rows to table
for uuidstr in self.uuidstrs[:2]:
ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt)
# Set both to deleted
update_statement = self.instance_id_mappings.update().\
where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:2]))\
.values(deleted=1)
self.conn.execute(update_statement)
qiim = sql.select([self.instance_id_mappings]).where(self.
instance_id_mappings.c.uuid.in_(self.uuidstrs[:2]))
rows = self.conn.execute(qiim).fetchall()
# Verify we have 2 in main
self.assertEqual(len(rows), 2)
qsiim = sql.select([self.shadow_instance_id_mappings]).\
where(self.shadow_instance_id_mappings.c.uuid.in_(
self.uuidstrs[:2]))
shadow_rows = self.conn.execute(qsiim).fetchall()
# Verify we have 0 in shadow
self.assertEqual(len(shadow_rows), 0)
# Archive the rows
db.archive_deleted_rows(max_rows=2)
main_rows = self.conn.execute(qiim).fetchall()
shadow_rows = self.conn.execute(qsiim).fetchall()
        # Verify the insertions into shadow match the deletions from main
self.assertEqual(len(shadow_rows), len(rows) - len(main_rows))
def _check_sqlite_version_less_than_3_7(self):
# SQLite doesn't enforce foreign key constraints without a pragma.
dialect = self.engine.url.get_dialect()
if dialect == sqlite.dialect:
            # We're seeing issues with foreign key support in SQLite 3.6.20;
            # SQLAlchemy doesn't support it at all with SQLite < 3.6.19.
            # It works fine in SQLite 3.7, so skip this test early if
            # running SQLite < 3.7.
import sqlite3
tup = sqlite3.sqlite_version_info
if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
self.skipTest(
'sqlite version too old for reliable SQLA foreign_keys')
self.conn.execute("PRAGMA foreign_keys = ON")
def test_archive_deleted_rows_fk_constraint(self):
# consoles.pool_id depends on console_pools.id
self._check_sqlite_version_less_than_3_7()
ins_stmt = self.console_pools.insert().values(deleted=1)
result = self.conn.execute(ins_stmt)
id1 = result.inserted_primary_key[0]
ins_stmt = self.consoles.insert().values(deleted=1,
pool_id=id1)
result = self.conn.execute(ins_stmt)
result.inserted_primary_key[0]
# The first try to archive console_pools should fail, due to FK.
num = sqlalchemy_api._archive_deleted_rows_for_table("console_pools",
max_rows=None)
self.assertEqual(num[0], 0)
# Then archiving consoles should work.
num = sqlalchemy_api._archive_deleted_rows_for_table("consoles",
max_rows=None)
self.assertEqual(num[0], 1)
# Then archiving console_pools should work.
num = sqlalchemy_api._archive_deleted_rows_for_table("console_pools",
max_rows=None)
self.assertEqual(num[0], 1)
self._assert_shadow_tables_empty_except(
'shadow_console_pools',
'shadow_consoles'
)
def test_archive_deleted_rows_for_migrations(self):
# migrations.instance_uuid depends on instances.uuid
self._check_sqlite_version_less_than_3_7()
instance_uuid = uuidsentinel.instance
ins_stmt = self.instances.insert().values(uuid=instance_uuid,
deleted=1)
self.conn.execute(ins_stmt)
ins_stmt = self.migrations.insert().values(instance_uuid=instance_uuid,
deleted=0)
self.conn.execute(ins_stmt)
# The first try to archive instances should fail, due to FK.
num = sqlalchemy_api._archive_deleted_rows_for_table("instances",
max_rows=None)
self.assertEqual(0, num[0])
# Then archiving migrations should work.
num = sqlalchemy_api._archive_deleted_rows_for_table("migrations",
max_rows=None)
self.assertEqual(1, num[0])
# Then archiving instances should work.
num = sqlalchemy_api._archive_deleted_rows_for_table("instances",
max_rows=None)
self.assertEqual(1, num[0])
self._assert_shadow_tables_empty_except(
'shadow_instances',
'shadow_migrations'
)
def test_archive_deleted_rows_2_tables(self):
# Add 6 rows to each table
for uuidstr in self.uuidstrs:
ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt)
ins_stmt2 = self.instances.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt2)
# Set 4 of each to deleted
update_statement = self.instance_id_mappings.update().\
where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
update_statement2 = self.instances.update().\
where(self.instances.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement2)
# Verify we have 6 in each main table
qiim = sql.select([self.instance_id_mappings]).where(
self.instance_id_mappings.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qiim).fetchall()
self.assertEqual(len(rows), 6)
qi = sql.select([self.instances]).where(self.instances.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(rows), 6)
# Verify we have 0 in each shadow table
qsiim = sql.select([self.shadow_instance_id_mappings]).\
where(self.shadow_instance_id_mappings.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qsiim).fetchall()
self.assertEqual(len(rows), 0)
qsi = sql.select([self.shadow_instances]).\
where(self.shadow_instances.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(rows), 0)
# Archive 7 rows, which should be 4 in one table and 3 in the other.
db.archive_deleted_rows(max_rows=7)
# Verify we have 5 left in the two main tables combined
iim_rows = self.conn.execute(qiim).fetchall()
i_rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(iim_rows) + len(i_rows), 5)
# Verify we have 7 in the two shadow tables combined.
siim_rows = self.conn.execute(qsiim).fetchall()
si_rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(siim_rows) + len(si_rows), 7)
# Archive the remaining deleted rows.
db.archive_deleted_rows(max_rows=1)
# Verify we have 4 total left in both main tables.
iim_rows = self.conn.execute(qiim).fetchall()
i_rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(iim_rows) + len(i_rows), 4)
# Verify we have 8 in shadow
siim_rows = self.conn.execute(qsiim).fetchall()
si_rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(siim_rows) + len(si_rows), 8)
# Try to archive more, but there are no deleted rows left.
db.archive_deleted_rows(max_rows=500)
# Verify we have 4 total left in both main tables.
iim_rows = self.conn.execute(qiim).fetchall()
i_rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(iim_rows) + len(i_rows), 4)
# Verify we have 8 in shadow
siim_rows = self.conn.execute(qsiim).fetchall()
si_rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(siim_rows) + len(si_rows), 8)
self._assert_shadow_tables_empty_except(
'shadow_instances',
'shadow_instance_id_mappings'
)
class PciDeviceDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(PciDeviceDBApiTestCase, self).setUp()
self.user_id = 'fake_user'
self.project_id = 'fake_project'
self.context = context.RequestContext(self.user_id, self.project_id)
self.admin_context = context.get_admin_context()
self.ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._compute_node = None
def _get_fake_pci_devs(self):
return {'id': 3353,
'uuid': uuidsentinel.pci_device1,
'compute_node_id': 1,
'address': '0000:0f:08.7',
'vendor_id': '8086',
'product_id': '1520',
'numa_node': 1,
'dev_type': fields.PciDeviceType.SRIOV_VF,
'dev_id': 'pci_0000:0f:08.7',
'extra_info': '{}',
'label': 'label_8086_1520',
'status': fields.PciDeviceStatus.AVAILABLE,
'instance_uuid': '00000000-0000-0000-0000-000000000010',
'request_id': None,
'parent_addr': '0000:0f:00.1',
}, {'id': 3356,
'uuid': uuidsentinel.pci_device3356,
'compute_node_id': 1,
'address': '0000:0f:03.7',
'parent_addr': '0000:0f:03.0',
'vendor_id': '8083',
'product_id': '1523',
'numa_node': 0,
'dev_type': fields.PciDeviceType.SRIOV_VF,
'dev_id': 'pci_0000:0f:08.7',
'extra_info': '{}',
'label': 'label_8086_1520',
'status': fields.PciDeviceStatus.AVAILABLE,
'instance_uuid': '00000000-0000-0000-0000-000000000010',
'request_id': None,
}
@property
def compute_node(self):
if self._compute_node is None:
self._compute_node = db.compute_node_create(self.admin_context, {
'vcpus': 0,
'memory_mb': 0,
'local_gb': 0,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 0,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'cpu_info': 'fake',
})
return self._compute_node
def _create_fake_pci_devs(self):
v1, v2 = self._get_fake_pci_devs()
for i in v1, v2:
i['compute_node_id'] = self.compute_node['id']
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
db.pci_device_update(self.admin_context, v2['compute_node_id'],
v2['address'], v2)
return (v1, v2)
def test_pci_device_get_by_addr(self):
v1, v2 = self._create_fake_pci_devs()
result = db.pci_device_get_by_addr(self.admin_context, 1,
'0000:0f:08.7')
self._assertEqualObjects(v1, result, self.ignored_keys)
def test_pci_device_get_by_addr_not_found(self):
self._create_fake_pci_devs()
self.assertRaises(exception.PciDeviceNotFound,
db.pci_device_get_by_addr, self.admin_context,
1, '0000:0f:08:09')
def test_pci_device_get_all_by_parent_addr(self):
v1, v2 = self._create_fake_pci_devs()
results = db.pci_device_get_all_by_parent_addr(self.admin_context, 1,
'0000:0f:00.1')
self._assertEqualListsOfObjects([v1], results, self.ignored_keys)
def test_pci_device_get_all_by_parent_addr_empty(self):
v1, v2 = self._create_fake_pci_devs()
results = db.pci_device_get_all_by_parent_addr(self.admin_context, 1,
'0000:0f:01.6')
self.assertEqual(len(results), 0)
def test_pci_device_get_by_id(self):
v1, v2 = self._create_fake_pci_devs()
result = db.pci_device_get_by_id(self.admin_context, 3353)
self._assertEqualObjects(v1, result, self.ignored_keys)
def test_pci_device_get_by_id_not_found(self):
self._create_fake_pci_devs()
self.assertRaises(exception.PciDeviceNotFoundById,
db.pci_device_get_by_id,
self.admin_context, 3354)
def test_pci_device_get_all_by_node(self):
v1, v2 = self._create_fake_pci_devs()
results = db.pci_device_get_all_by_node(self.admin_context, 1)
self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
def test_pci_device_get_all_by_node_empty(self):
v1, v2 = self._get_fake_pci_devs()
results = db.pci_device_get_all_by_node(self.admin_context, 9)
self.assertEqual(len(results), 0)
def test_pci_device_get_by_instance_uuid(self):
v1, v2 = self._create_fake_pci_devs()
v1['status'] = fields.PciDeviceStatus.ALLOCATED
v2['status'] = fields.PciDeviceStatus.ALLOCATED
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
db.pci_device_update(self.admin_context, v2['compute_node_id'],
v2['address'], v2)
results = db.pci_device_get_all_by_instance_uuid(
self.context,
'00000000-0000-0000-0000-000000000010')
self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
def test_pci_device_get_by_instance_uuid_check_status(self):
v1, v2 = self._create_fake_pci_devs()
v1['status'] = fields.PciDeviceStatus.ALLOCATED
v2['status'] = fields.PciDeviceStatus.CLAIMED
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
db.pci_device_update(self.admin_context, v2['compute_node_id'],
v2['address'], v2)
results = db.pci_device_get_all_by_instance_uuid(
self.context,
'00000000-0000-0000-0000-000000000010')
self._assertEqualListsOfObjects(results, [v1], self.ignored_keys)
def test_pci_device_update(self):
v1, v2 = self._create_fake_pci_devs()
v1['status'] = fields.PciDeviceStatus.ALLOCATED
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
result = db.pci_device_get_by_addr(
self.admin_context, 1, '0000:0f:08.7')
self._assertEqualObjects(v1, result, self.ignored_keys)
v1['status'] = fields.PciDeviceStatus.CLAIMED
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
result = db.pci_device_get_by_addr(
self.admin_context, 1, '0000:0f:08.7')
self._assertEqualObjects(v1, result, self.ignored_keys)
def test_pci_device_destroy(self):
v1, v2 = self._create_fake_pci_devs()
results = db.pci_device_get_all_by_node(self.admin_context,
self.compute_node['id'])
self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
db.pci_device_destroy(self.admin_context, v1['compute_node_id'],
v1['address'])
results = db.pci_device_get_all_by_node(self.admin_context,
self.compute_node['id'])
self._assertEqualListsOfObjects(results, [v2], self.ignored_keys)
def test_pci_device_destroy_exception(self):
v1, v2 = self._get_fake_pci_devs()
self.assertRaises(exception.PciDeviceNotFound,
db.pci_device_destroy,
self.admin_context,
v1['compute_node_id'],
v1['address'])
def _create_fake_pci_devs_old_format(self):
v1, v2 = self._get_fake_pci_devs()
for v in (v1, v2):
v['parent_addr'] = None
v['extra_info'] = jsonutils.dumps(
{'phys_function': 'fake-phys-func'})
db.pci_device_update(self.admin_context, v['compute_node_id'],
v['address'], v)
@mock.patch('time.sleep', new=lambda x: None)
class RetryOnDeadlockTestCase(test.TestCase):
def test_without_deadlock(self):
@oslo_db_api.wrap_db_retry(max_retries=5,
retry_on_deadlock=True)
def call_api(*args, **kwargs):
return True
self.assertTrue(call_api())
def test_raise_deadlock(self):
self.attempts = 2
@oslo_db_api.wrap_db_retry(max_retries=5,
retry_on_deadlock=True)
def call_api(*args, **kwargs):
while self.attempts:
self.attempts = self.attempts - 1
raise db_exc.DBDeadlock("fake exception")
return True
self.assertTrue(call_api())
class TestSqlalchemyTypesRepr(test_base.DbTestCase):
def setUp(self):
        # NOTE(sdague): the oslo_db base test case completely
        # invalidates our logging setup, so we actually have to do
        # that before it is called to keep it from vomiting all over
        # our test output.
self.useFixture(nova_fixtures.StandardLogging())
super(TestSqlalchemyTypesRepr, self).setUp()
meta = MetaData(bind=self.engine)
self.table = Table(
'cidr_tbl',
meta,
Column('id', Integer, primary_key=True),
Column('addr', col_types.CIDR())
)
self.table.create()
self.addCleanup(meta.drop_all)
def test_cidr_repr(self):
addrs = [('192.168.3.0/24', '192.168.3.0/24'),
('2001:db8::/64', '2001:db8::/64'),
('192.168.3.0', '192.168.3.0/32'),
('2001:db8::', '2001:db8::/128'),
(None, None)]
with self.engine.begin() as conn:
for i in addrs:
conn.execute(self.table.insert(), {'addr': i[0]})
query = self.table.select().order_by(self.table.c.id)
result = conn.execute(query)
for idx, row in enumerate(result):
self.assertEqual(addrs[idx][1], row.addr)
class TestMySQLSqlalchemyTypesRepr(TestSqlalchemyTypesRepr,
test_base.MySQLOpportunisticTestCase):
pass
class TestPostgreSQLSqlalchemyTypesRepr(TestSqlalchemyTypesRepr,
test_base.PostgreSQLOpportunisticTestCase):
pass
class TestDBInstanceTags(test.TestCase):
sample_data = {
'project_id': 'project1',
'hostname': 'example.com',
'host': 'h1',
'node': 'n1',
'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
'info_cache': {'ckey': 'cvalue'}
}
def setUp(self):
super(TestDBInstanceTags, self).setUp()
self.user_id = 'user1'
self.project_id = 'project1'
self.context = context.RequestContext(self.user_id, self.project_id)
def _create_instance(self):
inst = db.instance_create(self.context, self.sample_data)
return inst['uuid']
def _get_tags_from_resp(self, tag_refs):
return [(t.resource_id, t.tag) for t in tag_refs]
def test_instance_tag_add(self):
uuid = self._create_instance()
tag = u'tag'
tag_ref = db.instance_tag_add(self.context, uuid, tag)
self.assertEqual(uuid, tag_ref.resource_id)
self.assertEqual(tag, tag_ref.tag)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
# Check the tag for the instance was added
tags = self._get_tags_from_resp(tag_refs)
self.assertEqual([(uuid, tag)], tags)
def test_instance_tag_add_duplication(self):
uuid = self._create_instance()
tag = u'tag'
for x in range(5):
db.instance_tag_add(self.context, uuid, tag)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
        # Check that only one tag was added for the instance
tags = self._get_tags_from_resp(tag_refs)
self.assertEqual([(uuid, tag)], tags)
def test_instance_tag_set(self):
uuid = self._create_instance()
tag1 = u'tag1'
tag2 = u'tag2'
tag3 = u'tag3'
tag4 = u'tag4'
# Set tags to the instance
db.instance_tag_set(self.context, uuid, [tag1, tag2])
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
# Check the tags for the instance were set
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid, tag1), (uuid, tag2)]
self.assertEqual(expected, tags)
# Set new tags to the instance
db.instance_tag_set(self.context, uuid, [tag3, tag4, tag2])
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
# Check the tags for the instance were replaced
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid, tag3), (uuid, tag4), (uuid, tag2)]
self.assertEqual(set(expected), set(tags))
@mock.patch('nova.db.sqlalchemy.models.Tag.__table__.insert',
return_value=models.Tag.__table__.insert())
def test_instance_tag_set_empty_add(self, mock_insert):
uuid = self._create_instance()
tag1 = u'tag1'
tag2 = u'tag2'
db.instance_tag_set(self.context, uuid, [tag1, tag2])
# Check insert() was called to insert 'tag1' and 'tag2'
mock_insert.assert_called_once_with()
mock_insert.reset_mock()
db.instance_tag_set(self.context, uuid, [tag1])
# Check insert() wasn't called because there are no tags for creation
mock_insert.assert_not_called()
@mock.patch('sqlalchemy.orm.query.Query.delete')
def test_instance_tag_set_empty_delete(self, mock_delete):
uuid = self._create_instance()
db.instance_tag_set(self.context, uuid, [u'tag1', u'tag2'])
# Check delete() wasn't called because there are no tags for deletion
mock_delete.assert_not_called()
db.instance_tag_set(self.context, uuid, [u'tag1', u'tag3'])
# Check delete() was called to delete 'tag2'
mock_delete.assert_called_once_with(synchronize_session=False)
def test_instance_tag_get_by_instance_uuid(self):
uuid1 = self._create_instance()
uuid2 = self._create_instance()
tag1 = u'tag1'
tag2 = u'tag2'
tag3 = u'tag3'
db.instance_tag_add(self.context, uuid1, tag1)
db.instance_tag_add(self.context, uuid2, tag1)
db.instance_tag_add(self.context, uuid2, tag2)
db.instance_tag_add(self.context, uuid2, tag3)
# Check the tags for the first instance
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid1)
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid1, tag1)]
self.assertEqual(expected, tags)
# Check the tags for the second instance
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid2)
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid2, tag1), (uuid2, tag2), (uuid2, tag3)]
self.assertEqual(expected, tags)
def test_instance_tag_get_by_instance_uuid_no_tags(self):
uuid = self._create_instance()
self.assertEqual([], db.instance_tag_get_by_instance_uuid(self.context,
uuid))
def test_instance_tag_delete(self):
uuid = self._create_instance()
tag1 = u'tag1'
tag2 = u'tag2'
db.instance_tag_add(self.context, uuid, tag1)
db.instance_tag_add(self.context, uuid, tag2)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid, tag1), (uuid, tag2)]
# Check the tags for the instance were added
self.assertEqual(expected, tags)
db.instance_tag_delete(self.context, uuid, tag1)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid, tag2)]
self.assertEqual(expected, tags)
def test_instance_tag_delete_non_existent(self):
uuid = self._create_instance()
self.assertRaises(exception.InstanceTagNotFound,
db.instance_tag_delete, self.context, uuid, u'tag')
def test_instance_tag_delete_all(self):
uuid = self._create_instance()
tag1 = u'tag1'
tag2 = u'tag2'
db.instance_tag_add(self.context, uuid, tag1)
db.instance_tag_add(self.context, uuid, tag2)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid, tag1), (uuid, tag2)]
# Check the tags for the instance were added
self.assertEqual(expected, tags)
db.instance_tag_delete_all(self.context, uuid)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
tags = self._get_tags_from_resp(tag_refs)
self.assertEqual([], tags)
def test_instance_tag_exists(self):
uuid = self._create_instance()
tag1 = u'tag1'
tag2 = u'tag2'
db.instance_tag_add(self.context, uuid, tag1)
# NOTE(snikitin): Make sure it's actually a bool
self.assertTrue(db.instance_tag_exists(self.context, uuid,
tag1))
self.assertFalse(db.instance_tag_exists(self.context, uuid,
tag2))
def test_instance_tag_add_to_non_existing_instance(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, db.instance_tag_add,
self.context, 'fake_uuid', 'tag')
def test_instance_tag_set_to_non_existing_instance(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, db.instance_tag_set,
self.context, 'fake_uuid', ['tag1', 'tag2'])
def test_instance_tag_get_from_non_existing_instance(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound,
db.instance_tag_get_by_instance_uuid, self.context,
'fake_uuid')
def test_instance_tag_delete_from_non_existing_instance(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, db.instance_tag_delete,
self.context, 'fake_uuid', 'tag')
def test_instance_tag_delete_all_from_non_existing_instance(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound,
db.instance_tag_delete_all,
self.context, 'fake_uuid')
def test_instance_tag_exists_non_existing_instance(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound,
db.instance_tag_exists,
self.context, 'fake_uuid', 'tag')
@mock.patch('time.sleep', new=lambda x: None)
class TestInstanceInfoCache(test.TestCase):
def setUp(self):
super(TestInstanceInfoCache, self).setUp()
user_id = 'fake'
project_id = 'fake'
self.context = context.RequestContext(user_id, project_id)
def test_instance_info_cache_get(self):
instance = db.instance_create(self.context, {})
network_info = 'net'
db.instance_info_cache_update(self.context, instance.uuid,
{'network_info': network_info})
info_cache = db.instance_info_cache_get(self.context, instance.uuid)
self.assertEqual(network_info, info_cache.network_info)
def test_instance_info_cache_update(self):
instance = db.instance_create(self.context, {})
network_info1 = 'net1'
db.instance_info_cache_update(self.context, instance.uuid,
{'network_info': network_info1})
info_cache = db.instance_info_cache_get(self.context, instance.uuid)
self.assertEqual(network_info1, info_cache.network_info)
network_info2 = 'net2'
db.instance_info_cache_update(self.context, instance.uuid,
{'network_info': network_info2})
info_cache = db.instance_info_cache_get(self.context, instance.uuid)
self.assertEqual(network_info2, info_cache.network_info)
def test_instance_info_cache_delete(self):
instance = db.instance_create(self.context, {})
network_info = 'net'
db.instance_info_cache_update(self.context, instance.uuid,
{'network_info': network_info})
info_cache = db.instance_info_cache_get(self.context, instance.uuid)
self.assertEqual(network_info, info_cache.network_info)
db.instance_info_cache_delete(self.context, instance.uuid)
info_cache = db.instance_info_cache_get(self.context, instance.uuid)
self.assertIsNone(info_cache)
def test_instance_info_cache_update_duplicate(self):
instance1 = db.instance_create(self.context, {})
instance2 = db.instance_create(self.context, {})
network_info1 = 'net1'
db.instance_info_cache_update(self.context, instance1.uuid,
{'network_info': network_info1})
network_info2 = 'net2'
db.instance_info_cache_update(self.context, instance2.uuid,
{'network_info': network_info2})
        # Updating instance_uuid causes a unique constraint failure;
        # using a savepoint lets us keep working with the existing session
        # after the DB error, so the exception is handled successfully.
db.instance_info_cache_update(self.context, instance2.uuid,
{'instance_uuid': instance1.uuid})
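        # The savepoint behaviour mentioned above roughly corresponds to
        # SQLAlchemy's nested transactions (an illustrative sketch under
        # that assumption, not the exact code used by the DB API layer):
        #     with session.begin_nested():  # emits a SAVEPOINT
        #         session.add(duplicate_row)
        #     # an IntegrityError inside the block rolls back only to the
        #     # savepoint, so the outer session remains usable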
info_cache1 = db.instance_info_cache_get(self.context, instance1.uuid)
self.assertEqual(network_info1, info_cache1.network_info)
info_cache2 = db.instance_info_cache_get(self.context, instance2.uuid)
self.assertEqual(network_info2, info_cache2.network_info)
def test_instance_info_cache_create_using_update(self):
network_info = 'net'
instance_uuid = uuidsentinel.uuid1
db.instance_info_cache_update(self.context, instance_uuid,
{'network_info': network_info})
info_cache = db.instance_info_cache_get(self.context, instance_uuid)
self.assertEqual(network_info, info_cache.network_info)
self.assertEqual(instance_uuid, info_cache.instance_uuid)
@mock.patch.object(models.InstanceInfoCache, 'update')
def test_instance_info_cache_retried_on_deadlock(self, update):
update.side_effect = [db_exc.DBDeadlock(), db_exc.DBDeadlock(), None]
instance = db.instance_create(self.context, {})
network_info = 'net'
updated = db.instance_info_cache_update(self.context, instance.uuid,
{'network_info': network_info})
self.assertEqual(instance.uuid, updated.instance_uuid)
@mock.patch.object(models.InstanceInfoCache, 'update')
def test_instance_info_cache_not_retried_on_deadlock_forever(self, update):
update.side_effect = db_exc.DBDeadlock
instance = db.instance_create(self.context, {})
network_info = 'net'
self.assertRaises(db_exc.DBDeadlock,
db.instance_info_cache_update,
self.context, instance.uuid,
{'network_info': network_info})
class TestInstanceTagsFiltering(test.TestCase):
sample_data = {
'project_id': 'project1'
}
def setUp(self):
super(TestInstanceTagsFiltering, self).setUp()
self.ctxt = context.RequestContext('user1', 'project1')
def _create_instance_with_kwargs(self, **kw):
context = kw.pop('context', self.ctxt)
data = self.sample_data.copy()
data.update(kw)
return db.instance_create(context, data)
def _create_instances(self, count):
return [self._create_instance_with_kwargs()['uuid']
for i in range(count)]
def _assertEqualInstanceUUIDs(self, expected_uuids, observed_instances):
observed_uuids = [inst['uuid'] for inst in observed_instances]
self.assertEqual(sorted(expected_uuids), sorted(observed_uuids))
def test_instance_get_all_by_filters_tag_any(self):
uuids = self._create_instances(3)
db.instance_tag_set(self.ctxt, uuids[0], [u't1'])
db.instance_tag_set(self.ctxt, uuids[1], [u't1', u't2', u't3'])
db.instance_tag_set(self.ctxt, uuids[2], [u't3'])
result = db.instance_get_all_by_filters(self.ctxt,
{'tags-any': [u't1', u't2']})
self._assertEqualInstanceUUIDs([uuids[0], uuids[1]], result)
def test_instance_get_all_by_filters_tag_any_empty(self):
uuids = self._create_instances(2)
db.instance_tag_set(self.ctxt, uuids[0], [u't1'])
db.instance_tag_set(self.ctxt, uuids[1], [u't1', u't2'])
result = db.instance_get_all_by_filters(self.ctxt,
{'tags-any': [u't3', u't4']})
self.assertEqual([], result)
def test_instance_get_all_by_filters_tag(self):
uuids = self._create_instances(3)
db.instance_tag_set(self.ctxt, uuids[0], [u't1', u't3'])
db.instance_tag_set(self.ctxt, uuids[1], [u't1', u't2'])
db.instance_tag_set(self.ctxt, uuids[2], [u't1', u't2', u't3'])
result = db.instance_get_all_by_filters(self.ctxt,
{'tags': [u't1', u't2']})
self._assertEqualInstanceUUIDs([uuids[1], uuids[2]], result)
def test_instance_get_all_by_filters_tag_empty(self):
uuids = self._create_instances(2)
db.instance_tag_set(self.ctxt, uuids[0], [u't1'])
db.instance_tag_set(self.ctxt, uuids[1], [u't1', u't2'])
result = db.instance_get_all_by_filters(self.ctxt,
{'tags': [u't3']})
self.assertEqual([], result)
def test_instance_get_all_by_filters_tag_any_and_tag(self):
uuids = self._create_instances(3)
db.instance_tag_set(self.ctxt, uuids[0], [u't1', u't2'])
db.instance_tag_set(self.ctxt, uuids[1], [u't1', u't2', u't4'])
db.instance_tag_set(self.ctxt, uuids[2], [u't2', u't3'])
result = db.instance_get_all_by_filters(self.ctxt,
{'tags': [u't1', u't2'],
'tags-any': [u't3', u't4']})
self._assertEqualInstanceUUIDs([uuids[1]], result)
def test_instance_get_all_by_filters_tags_and_project_id(self):
context1 = context.RequestContext('user1', 'p1')
context2 = context.RequestContext('user2', 'p2')
uuid1 = self._create_instance_with_kwargs(
context=context1, project_id='p1')['uuid']
uuid2 = self._create_instance_with_kwargs(
context=context1, project_id='p1')['uuid']
uuid3 = self._create_instance_with_kwargs(
context=context2, project_id='p2')['uuid']
db.instance_tag_set(context1, uuid1, [u't1', u't2'])
db.instance_tag_set(context1, uuid2, [u't1', u't2', u't4'])
db.instance_tag_set(context2, uuid3, [u't1', u't2', u't3', u't4'])
result = db.instance_get_all_by_filters(context.get_admin_context(),
{'tags': [u't1', u't2'],
'tags-any': [u't3', u't4'],
'project_id': 'p1'})
self._assertEqualInstanceUUIDs([uuid2], result)
def test_instance_get_all_by_filters_not_tags(self):
uuids = self._create_instances(8)
db.instance_tag_set(self.ctxt, uuids[0], [u't1'])
db.instance_tag_set(self.ctxt, uuids[1], [u't2'])
db.instance_tag_set(self.ctxt, uuids[2], [u't1', u't2'])
db.instance_tag_set(self.ctxt, uuids[3], [u't2', u't3'])
db.instance_tag_set(self.ctxt, uuids[4], [u't3'])
db.instance_tag_set(self.ctxt, uuids[5], [u't1', u't2', u't3'])
db.instance_tag_set(self.ctxt, uuids[6], [u't3', u't4'])
db.instance_tag_set(self.ctxt, uuids[7], [])
result = db.instance_get_all_by_filters(
self.ctxt, {'not-tags': [u't1', u't2']})
self._assertEqualInstanceUUIDs([uuids[0], uuids[1], uuids[3], uuids[4],
uuids[6], uuids[7]], result)
def test_instance_get_all_by_filters_not_tags_multiple_cells(self):
"""Test added for bug 1682693.
In cells v2 scenario, db.instance_get_all_by_filters() will
be called multiple times to search across all cells. This
test tests that filters for all cells remain the same in the
loop.
"""
uuids = self._create_instances(8)
db.instance_tag_set(self.ctxt, uuids[0], [u't1'])
db.instance_tag_set(self.ctxt, uuids[1], [u't2'])
db.instance_tag_set(self.ctxt, uuids[2], [u't1', u't2'])
db.instance_tag_set(self.ctxt, uuids[3], [u't2', u't3'])
db.instance_tag_set(self.ctxt, uuids[4], [u't3'])
db.instance_tag_set(self.ctxt, uuids[5], [u't1', u't2', u't3'])
db.instance_tag_set(self.ctxt, uuids[6], [u't3', u't4'])
db.instance_tag_set(self.ctxt, uuids[7], [])
filters = {'not-tags': [u't1', u't2']}
result = db.instance_get_all_by_filters(self.ctxt, filters)
self._assertEqualInstanceUUIDs([uuids[0], uuids[1], uuids[3], uuids[4],
uuids[6], uuids[7]], result)
def test_instance_get_all_by_filters_not_tags_any(self):
uuids = self._create_instances(8)
db.instance_tag_set(self.ctxt, uuids[0], [u't1'])
db.instance_tag_set(self.ctxt, uuids[1], [u't2'])
db.instance_tag_set(self.ctxt, uuids[2], [u't1', u't2'])
db.instance_tag_set(self.ctxt, uuids[3], [u't2', u't3'])
db.instance_tag_set(self.ctxt, uuids[4], [u't3'])
db.instance_tag_set(self.ctxt, uuids[5], [u't1', u't2', u't3'])
db.instance_tag_set(self.ctxt, uuids[6], [u't3', u't4'])
db.instance_tag_set(self.ctxt, uuids[7], [])
result = db.instance_get_all_by_filters(
self.ctxt, {'not-tags-any': [u't1', u't2']})
self._assertEqualInstanceUUIDs([uuids[4], uuids[6], uuids[7]], result)
def test_instance_get_all_by_filters_not_tags_and_tags(self):
uuids = self._create_instances(5)
db.instance_tag_set(self.ctxt, uuids[0], [u't1', u't2', u't4', u't5'])
db.instance_tag_set(self.ctxt, uuids[1], [u't1', u't2', u't4'])
db.instance_tag_set(self.ctxt, uuids[2], [u't1', u't2', u't3'])
db.instance_tag_set(self.ctxt, uuids[3], [u't1', u't3'])
db.instance_tag_set(self.ctxt, uuids[4], [])
result = db.instance_get_all_by_filters(self.ctxt,
{'tags': [u't1', u't2'],
'not-tags': [u't4', u't5']})
self._assertEqualInstanceUUIDs([uuids[1], uuids[2]], result)
def test_instance_get_all_by_filters_tags_contradictory(self):
uuids = self._create_instances(4)
db.instance_tag_set(self.ctxt, uuids[0], [u't1'])
db.instance_tag_set(self.ctxt, uuids[1], [u't2', u't3'])
db.instance_tag_set(self.ctxt, uuids[2], [u't1', u't2'])
db.instance_tag_set(self.ctxt, uuids[3], [])
result = db.instance_get_all_by_filters(self.ctxt,
{'tags': [u't1'],
'not-tags': [u't1']})
self.assertEqual([], result)
result = db.instance_get_all_by_filters(self.ctxt,
{'tags': [u't1'],
'not-tags-any': [u't1']})
self.assertEqual([], result)
result = db.instance_get_all_by_filters(self.ctxt,
{'tags-any': [u't1'],
'not-tags-any': [u't1']})
self.assertEqual([], result)
result = db.instance_get_all_by_filters(self.ctxt,
{'tags-any': [u't1'],
'not-tags': [u't1']})
self.assertEqual([], result)
def test_instance_get_all_by_filters_not_tags_and_tags_any(self):
uuids = self._create_instances(6)
db.instance_tag_set(self.ctxt, uuids[0], [u't1'])
db.instance_tag_set(self.ctxt, uuids[1], [u't2'])
db.instance_tag_set(self.ctxt, uuids[2], [u't1', u't2'])
db.instance_tag_set(self.ctxt, uuids[3], [u't1', u't3'])
db.instance_tag_set(self.ctxt, uuids[4], [u't1', u't2', u't3'])
db.instance_tag_set(self.ctxt, uuids[5], [])
result = db.instance_get_all_by_filters(self.ctxt,
{'tags-any': [u't1', u't2'],
'not-tags': [u't1', u't2']})
self._assertEqualInstanceUUIDs([uuids[0], uuids[1], uuids[3]], result)
def test_instance_get_all_by_filters_not_tags_and_not_tags_any(self):
uuids = self._create_instances(6)
db.instance_tag_set(self.ctxt, uuids[0], [u't1'])
db.instance_tag_set(self.ctxt, uuids[1], [u't2', u't5'])
db.instance_tag_set(self.ctxt, uuids[2], [u't1', u't2'])
db.instance_tag_set(self.ctxt, uuids[3], [u't1', u't3'])
db.instance_tag_set(self.ctxt, uuids[4], [u't1', u't2', u't4', u't5'])
db.instance_tag_set(self.ctxt, uuids[5], [])
result = db.instance_get_all_by_filters(self.ctxt,
{'not-tags': [u't1', u't2'],
'not-tags-any': [u't3', u't4']})
self._assertEqualInstanceUUIDs([uuids[0], uuids[1], uuids[5]], result)
def test_instance_get_all_by_filters_all_tag_filters(self):
uuids = self._create_instances(9)
db.instance_tag_set(self.ctxt, uuids[0], [u't1', u't3', u't7'])
db.instance_tag_set(self.ctxt, uuids[1], [u't1', u't2'])
db.instance_tag_set(self.ctxt, uuids[2], [u't1', u't2', u't7'])
db.instance_tag_set(self.ctxt, uuids[3], [u't1', u't2', u't3', u't5'])
db.instance_tag_set(self.ctxt, uuids[4], [u't1', u't2', u't3', u't7'])
db.instance_tag_set(self.ctxt, uuids[5], [u't1', u't2', u't3'])
db.instance_tag_set(self.ctxt, uuids[6], [u't1', u't2', u't3', u't4',
u't5'])
db.instance_tag_set(self.ctxt, uuids[7], [u't1', u't2', u't3', u't4',
u't5', u't6'])
db.instance_tag_set(self.ctxt, uuids[8], [])
result = db.instance_get_all_by_filters(self.ctxt,
{'tags': [u't1', u't2'],
'tags-any': [u't3', u't4'],
'not-tags': [u't5', u't6'],
'not-tags-any': [u't7', u't8']})
self._assertEqualInstanceUUIDs([uuids[3], uuids[5], uuids[6]], result)
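# --- Illustrative sketch (not part of Nova): the set logic behind the four tag
# filters exercised by the tests above, written in pure Python. The names
# '_matches_tag_filters', 'instance_tags' and 'filters' are hypothetical; the
# real filtering happens in SQL inside instance_get_all_by_filters.
def _matches_tag_filters(instance_tags, filters):
    tags = set(instance_tags)
    if not set(filters.get('tags', ())) <= tags:
        return False          # 'tags': the instance must carry every listed tag
    if filters.get('tags-any') and not tags & set(filters['tags-any']):
        return False          # 'tags-any': at least one of the listed tags
    if filters.get('not-tags') and set(filters['not-tags']) <= tags:
        return False          # 'not-tags': must not carry ALL of the listed tags
    if tags & set(filters.get('not-tags-any', ())):
        return False          # 'not-tags-any': must carry none of the listed tags
    return True
# For example, with the data from test_instance_get_all_by_filters_all_tag_filters,
# only instances tagged like uuids[3], uuids[5] and uuids[6] satisfy
# {'tags': ['t1', 't2'], 'tags-any': ['t3', 't4'],
#  'not-tags': ['t5', 't6'], 'not-tags-any': ['t7', 't8']}.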
class ConsoleAuthTokenTestCase(test.TestCase):
def _create_instances(self, uuids):
for uuid in uuids:
db.instance_create(self.context,
{'uuid': uuid,
'project_id': self.context.project_id})
def _create(self, token_hash, instance_uuid, expire_offset, host=None):
t = copy.deepcopy(fake_console_auth_token.fake_token_dict)
del t['id']
t['token_hash'] = token_hash
t['instance_uuid'] = instance_uuid
t['expires'] = timeutils.utcnow_ts() + expire_offset
if host:
t['host'] = host
db.console_auth_token_create(self.context, t)
def setUp(self):
super(ConsoleAuthTokenTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
def test_console_auth_token_create_no_instance(self):
t = copy.deepcopy(fake_console_auth_token.fake_token_dict)
del t['id']
self.assertRaises(exception.InstanceNotFound,
db.console_auth_token_create,
self.context, t)
def test_console_auth_token_get_valid_deleted_instance(self):
uuid1 = uuidsentinel.uuid1
hash1 = utils.get_sha256_str(uuidsentinel.token1)
self._create_instances([uuid1])
self._create(hash1, uuid1, 100)
db_obj1 = db.console_auth_token_get_valid(self.context, hash1, uuid1)
self.assertIsNotNone(db_obj1, "a valid token should be in database")
db.instance_destroy(self.context, uuid1)
self.assertRaises(exception.InstanceNotFound,
db.console_auth_token_get_valid,
self.context, hash1, uuid1)
def test_console_auth_token_destroy_all_by_instance(self):
uuid1 = uuidsentinel.uuid1
uuid2 = uuidsentinel.uuid2
hash1 = utils.get_sha256_str(uuidsentinel.token1)
hash2 = utils.get_sha256_str(uuidsentinel.token2)
hash3 = utils.get_sha256_str(uuidsentinel.token3)
self._create_instances([uuid1, uuid2])
self._create(hash1, uuid1, 100)
self._create(hash2, uuid1, 100)
self._create(hash3, uuid2, 100)
db_obj1 = db.console_auth_token_get_valid(self.context, hash1, uuid1)
db_obj2 = db.console_auth_token_get_valid(self.context, hash2, uuid1)
db_obj3 = db.console_auth_token_get_valid(self.context, hash3, uuid2)
self.assertIsNotNone(db_obj1, "a valid token should be in database")
self.assertIsNotNone(db_obj2, "a valid token should be in database")
self.assertIsNotNone(db_obj3, "a valid token should be in database")
db.console_auth_token_destroy_all_by_instance(self.context, uuid1)
db_obj4 = db.console_auth_token_get_valid(self.context, hash1, uuid1)
db_obj5 = db.console_auth_token_get_valid(self.context, hash2, uuid1)
db_obj6 = db.console_auth_token_get_valid(self.context, hash3, uuid2)
self.assertIsNone(db_obj4, "no valid token should be in database")
self.assertIsNone(db_obj5, "no valid token should be in database")
self.assertIsNotNone(db_obj6, "a valid token should be in database")
def test_console_auth_token_get_valid_by_expiry(self):
uuid1 = uuidsentinel.uuid1
uuid2 = uuidsentinel.uuid2
hash1 = utils.get_sha256_str(uuidsentinel.token1)
hash2 = utils.get_sha256_str(uuidsentinel.token2)
self.addCleanup(timeutils.clear_time_override)
timeutils.set_time_override(timeutils.utcnow())
self._create_instances([uuid1, uuid2])
self._create(hash1, uuid1, 10)
timeutils.advance_time_seconds(100)
self._create(hash2, uuid2, 10)
db_obj1 = db.console_auth_token_get_valid(self.context, hash1, uuid1)
db_obj2 = db.console_auth_token_get_valid(self.context, hash2, uuid2)
self.assertIsNone(db_obj1, "the token should have expired")
self.assertIsNotNone(db_obj2, "a valid token should be found here")
def test_console_auth_token_get_valid_by_uuid(self):
uuid1 = uuidsentinel.uuid1
uuid2 = uuidsentinel.uuid2
hash1 = utils.get_sha256_str(uuidsentinel.token1)
self._create_instances([uuid1, uuid2])
self._create(hash1, uuid1, 10)
db_obj1 = db.console_auth_token_get_valid(self.context, hash1, uuid1)
db_obj2 = db.console_auth_token_get_valid(self.context, hash1, uuid2)
self.assertIsNotNone(db_obj1, "a valid token should be found here")
self.assertEqual(hash1, db_obj1['token_hash'])
self.assertIsNone(db_obj2, "the token uuid should not match")
def test_console_auth_token_destroy_expired_by_host(self):
uuid1 = uuidsentinel.uuid1
uuid2 = uuidsentinel.uuid2
uuid3 = uuidsentinel.uuid3
hash1 = utils.get_sha256_str(uuidsentinel.token1)
hash2 = utils.get_sha256_str(uuidsentinel.token2)
hash3 = utils.get_sha256_str(uuidsentinel.token3)
self.addCleanup(timeutils.clear_time_override)
timeutils.set_time_override(timeutils.utcnow())
self._create_instances([uuid1, uuid2, uuid3])
self._create(hash1, uuid1, 10)
self._create(hash2, uuid2, 10, host='other-host')
timeutils.advance_time_seconds(100)
self._create(hash3, uuid3, 10)
db.console_auth_token_destroy_expired_by_host(
self.context, 'fake-host')
# The API only supports fetching unexpired tokens, but by rolling back
# time we can check whether a token that should have been deleted is
# still present.
timeutils.advance_time_seconds(-100)
db_obj1 = db.console_auth_token_get_valid(self.context, hash1, uuid1)
db_obj2 = db.console_auth_token_get_valid(self.context, hash2, uuid2)
db_obj3 = db.console_auth_token_get_valid(self.context, hash3, uuid3)
self.assertIsNone(db_obj1, "the token should have been deleted")
self.assertIsNotNone(db_obj2, "a valid token should be found here")
self.assertIsNotNone(db_obj3, "a valid token should be found here")
def test_console_auth_token_get_valid_without_uuid_deleted_instance(self):
uuid1 = uuidsentinel.uuid1
hash1 = utils.get_sha256_str(uuidsentinel.token1)
self._create_instances([uuid1])
self._create(hash1, uuid1, 100)
db_obj1 = db.console_auth_token_get_valid(self.context, hash1)
self.assertIsNotNone(db_obj1, "a valid token should be in database")
db.instance_destroy(self.context, uuid1)
db_obj1 = db.console_auth_token_get_valid(self.context, hash1)
self.assertIsNone(db_obj1, "the token should have been deleted")
def test_console_auth_token_get_valid_without_uuid_by_expiry(self):
uuid1 = uuidsentinel.uuid1
uuid2 = uuidsentinel.uuid2
hash1 = utils.get_sha256_str(uuidsentinel.token1)
hash2 = utils.get_sha256_str(uuidsentinel.token2)
self.addCleanup(timeutils.clear_time_override)
timeutils.set_time_override(timeutils.utcnow())
self._create_instances([uuid1, uuid2])
self._create(hash1, uuid1, 10)
timeutils.advance_time_seconds(100)
self._create(hash2, uuid2, 10)
db_obj1 = db.console_auth_token_get_valid(self.context, hash1)
db_obj2 = db.console_auth_token_get_valid(self.context, hash2)
self.assertIsNone(db_obj1, "the token should have expired")
self.assertIsNotNone(db_obj2, "a valid token should be found here")
class SortMarkerHelper(test.TestCase):
def setUp(self):
super(SortMarkerHelper, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.instances = []
launched = datetime.datetime(2005, 4, 30, 13, 00, 00)
td = datetime.timedelta
values = {
'key_name': ['dan', 'dan', 'taylor', 'jax'],
'memory_mb': [512, 1024, 512, 256],
'launched_at': [launched + td(1), launched - td(256),
launched + td(32), launched - td(5000)],
}
for i in range(0, 4):
inst = {'user_id': self.context.user_id,
'project_id': self.context.project_id,
'auto_disk_config': bool(i % 2),
'vcpus': 1}
for key in values:
inst[key] = values[key].pop(0)
db_instance = db.instance_create(self.context, inst)
self.instances.append(db_instance)
def test_with_one_key(self):
"""Test instance_get_by_sort_filters() with one sort key."""
# If we sort ascending by key_name and our marker was something
# just after jax, taylor would be the next one.
marker = db.instance_get_by_sort_filters(
self.context,
['key_name'], ['asc'], ['jaxz'])
self.assertEqual(self.instances[2]['uuid'], marker)
def _test_with_multiple_keys(self, sort_keys, sort_dirs, value_fn):
"""Test instance_get_by_sort_filters() with multiple sort keys.
Since this returns the marker it's looking for, it's actually really
hard to test this like we normally would with pagination, i.e. marching
through the instances in order. Attempting to do so previously masked a
bug in this code.
So, for a list of marker values, query and assert we get the instance
we expect.
"""
# For the query below (ordering by memory_mb asc, key_name desc),
# the following is the expected ordering of the instances under test:
#
# 256-jax
# 512-taylor
# 512-dan
# 1024-dan
steps = [
(200, 'foo', 3), # all less than 256-jax
(256, 'xyz', 3), # name comes before jax
(256, 'jax', 3), # all equal to 256-jax
(256, 'abc', 2), # name after jax
(500, 'foo', 2), # all greater than 256-jax
(512, 'xyz', 2), # name before taylor and dan
(512, 'mno', 0), # name after taylor, before dan-512
(512, 'abc', 1), # name after dan-512
(999, 'foo', 1), # all greater than 512-taylor
(1024, 'xyz', 1), # name before dan
(1024, 'abc', None), # name after dan
(2048, 'foo', None), # all greater than 1024-dan
]
for mem, name, expected in steps:
marker = db.instance_get_by_sort_filters(
self.context,
sort_keys,
sort_dirs,
value_fn(mem, name))
if expected is None:
self.assertIsNone(marker)
else:
expected_inst = self.instances[expected]
got_inst = [inst for inst in self.instances
if inst['uuid'] == marker][0]
self.assertEqual(
expected_inst['uuid'],
marker,
'marker %s-%s expected %s-%s got %s-%s' % (
mem, name,
expected_inst['memory_mb'], expected_inst['key_name'],
got_inst['memory_mb'], got_inst['key_name']))
def test_with_two_keys(self):
"""Test instance_get_by_sort_filters() with two sort_keys."""
self._test_with_multiple_keys(
['memory_mb', 'key_name'],
['asc', 'desc'],
lambda mem, name: [mem, name])
def test_with_three_keys(self):
"""Test instance_get_by_sort_filters() with three sort_keys.
This inserts another key (vcpus) between memory_mb and key_name that is
equal across all the test instances. We do this to make sure the
equivalence fallback is only applied to the final sort_key; otherwise we
could stall in the middle of a run of instances with equivalent values
for one of the intermediate sort_keys.
"""
self._test_with_multiple_keys(
['memory_mb', 'vcpus', 'key_name'],
['asc', 'asc', 'desc'],
lambda mem, name: [mem, 1, name])
def test_no_match(self):
marker = db.instance_get_by_sort_filters(self.context,
['memory_mb'], ['asc'],
[4096])
# None of our instances have memory_mb >= 4096, so nothing matches
self.assertIsNone(marker)
def test_by_bool(self):
"""Verify that we can use booleans in sort_keys."""
# If we sort ascending by auto_disk_config, the first one
# with True for that value would be the second instance we
# create, because bool(1 % 2) == True.
marker = db.instance_get_by_sort_filters(
self.context,
['auto_disk_config', 'id'], ['asc', 'asc'], [True, 2])
self.assertEqual(self.instances[1]['uuid'], marker)
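# --- Illustrative sketch (not part of Nova): the "first row at or after the
# marker" comparison that instance_get_by_sort_filters performs in SQL, shown
# in pure Python for the mixed asc/desc orderings used above. The helper name
# and arguments are hypothetical; rows are tuples already in sort order.
def _first_row_at_or_after(rows_in_sort_order, marker, dirs):
    def at_or_after(row):
        for value, wanted, direction in zip(row, marker, dirs):
            if value == wanted:
                continue                  # tie on this key: look at the next one
            if direction == 'asc':
                return value > wanted
            return value < wanted         # 'desc'
        return True                       # equal on every key counts as a match
    for row in rows_in_sort_order:
        if at_or_after(row):
            return row
    return None
# With the ordering from _test_with_multiple_keys this reproduces the 'steps'
# table, e.g. _first_row_at_or_after(
#     [(256, 'jax'), (512, 'taylor'), (512, 'dan'), (1024, 'dan')],
#     (512, 'mno'), ('asc', 'desc')) returns (512, 'dan').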
| {
"content_hash": "dfb195b0599ed4b931abea50ebde9d8d",
"timestamp": "",
"source": "github",
"line_count": 9737,
"max_line_length": 92,
"avg_line_length": 45.122008832289204,
"alnum_prop": 0.5666764537854527,
"repo_name": "gooddata/openstack-nova",
"id": "a792a5ea67651497ba46058c5d414eeabe861764",
"size": "440108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/db/test_db_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3858"
},
{
"name": "HTML",
"bytes": "1386"
},
{
"name": "PHP",
"bytes": "43584"
},
{
"name": "Python",
"bytes": "23012372"
},
{
"name": "Shell",
"bytes": "32567"
},
{
"name": "Smarty",
"bytes": "429290"
}
],
"symlink_target": ""
} |
"""
Django settings for tutorial project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm%dm+o^_d51g1#buoianua5a%%k52xo73f16(9$i9fizhx*d1j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework_swagger',
'quickstart',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'PAGE_SIZE': 10
} | {
"content_hash": "ac7144eb36fd60cafa6d519b5241fb42",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 91,
"avg_line_length": 25.826771653543307,
"alnum_prop": 0.688109756097561,
"repo_name": "BubuYo/rest_api",
"id": "2945c9898638df3a6bd351f700ec17e312b96afd",
"size": "3280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorial/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14330"
}
],
"symlink_target": ""
} |
mi_diccionario_vacio = {}
mi_diccionario_vacio['llave-1'] = 2
mi_diccionario_vacio['llave-2'] = 4
mi_diccionario_vacio['llave-3'] = 8
# Delete an element from the dictionary
del mi_diccionario_vacio['llave-1']
# Check whether a key exists in the dictionary
if 'llave-2' in mi_diccionario_vacio:
print('llave-2 existe')
print(f"El valor de llave-2 es: {mi_diccionario_vacio['llave-2']}")
valor = mi_diccionario_vacio.get('llave-2')
print(f"El valor de llave-2 es: {valor}")
# The following line raises an error
# print(f"El valor de llave-1 es: {mi_diccionario_vacio['llave-1']}")
valor = mi_diccionario_vacio.get('llave-1', 'No existe esta llave')
print(f"El valor de llave-1 es: {valor}")
print(mi_diccionario_vacio)
########################
# Tuples
########################
ejemplo_lista = [1, 2, 3]
ejemplo_tupla = (1, 2, 3)
print(f"Lista: {ejemplo_lista} vs. Tupla {ejemplo_tupla}")
# Tuples that have already been defined cannot be modified
ejemplo_lista.append(4)
# The following line raises an error
# ejemplo_tupla.append(4)
print(f"Lista: {ejemplo_lista} vs. Tupla {ejemplo_tupla}")
ejemplo_lista[3] = 10
# The following line raises an error
# ejemplo_tupla[3] = 10
print(f"Lista: {ejemplo_lista} vs. Tupla {ejemplo_tupla}")
# Unpacking
x, y, z = ejemplo_tupla
print(f"x = {x}, y = {y}, z = {z}")
# The following line raises an error
# x, y = ejemplo_tupla
# The following line is the correct way to do it, using the "underscore" variable
a, _, _ = ejemplo_tupla
print(f"a = {a}")
########################
# Loops
########################
# 'for' loop
nueva_lista = [1, 2, 5, True, 'string', 10, False, 'otro string', ['otra', 'lista']]
# Default 'for' loop in Python. It behaves like a 'foreach'
for item in nueva_lista:
print(item)
# Simulating a classic 'for' loop from other languages: for (i = 0; i < N; i++)
N = len(nueva_lista)
for i in range(N):
print(f"Índice: {i} - Valor: {nueva_lista[i]}")
# Simulating a classic 'for' loop from other languages: for (i = 1; i < 10; i += 2)
for i in range(1, 10, 2):
print(i)
# Checking an object's type
tipo = type(nueva_lista)
print(f"El tipo de objeto es: {tipo}")
# Nested loops
for item in nueva_lista:
if type(item) == str or type(item) == list:
for item_2 in item:
print(item_2)
# Iterating over a dictionary
nuevo_diccionario = {
'k-1': 10,
'k-2': True,
'k-3': 'some string',
'k-4': [2, 4, 6, 8, 10]
}
# Accessing only the key
for key in nuevo_diccionario:
print(f"La llave '{key}' tiene el valor: {nuevo_diccionario[key]}")
# Accessing the key -> value pairs
# Dictionary keys
print(nuevo_diccionario.keys())
# Dictionary values
print(nuevo_diccionario.values())
# Key->value pairs of the dictionary
print(nuevo_diccionario.items())
for llave, valor in nuevo_diccionario.items():
print(f"La llave '{llave}' tiene el valor: {valor}")
# 'while' loop
otra_nueva_lista = [1, 2, 5, True, 'string', 10, False, 'otro string', ['otra', 'lista']]
iteracion = 1
while otra_nueva_lista != []:
item = otra_nueva_lista.pop()
print(f"El item que se sacó de la lista es: {item}\nLista en la iteración #{iteracion}: {otra_nueva_lista}")
iteracion += 1
else:
print("La lista está vacía")
########################
# Functions
########################
def suma(a, b):
return a + b
print(f"El valor de la función suma() es = {suma(5, 5)}")
########################
# Classes and Objects
########################
class Calculadora:
# TODO: Review class variables
# variable_de_clase = 'x'
# Constructor in Python
def __init__(self, a, b):
self.a = a
self.b = b
self._protected = 'protegido'
self.__private = 'privado'
def suma(self):
return self.a + self.b
def resta(self):
return self.a - self.b
def multiplicacion(self):
return self.a * self.b
def division(self):
if self.a % self.b == 0:
return self.a // self.b
return self.a / self.b
# Classic setter, now in Python
def set_private(self, valor):
self.__private = valor
# Classic getter, now in Python
def get_private(self):
return self.__private
# This is a private method (its name gets mangled); note it should also take 'self'
def __private_function(self):
pass
def __str__(self):
return f"Class Calculadora\n" \
+ f"Atributo A: {self.a}\n" \
+ f"Atributo B: {self.b}"
# Create an instance (object) of the 'Calculadora' class
mi_calculadora = Calculadora(5, 6)
# Access the attributes of the instance (object)
print(f"Atributo 'a' = {mi_calculadora.a}")
print(f"Atributo 'b' = {mi_calculadora.b}")
# Call the methods of the instance (object)
print(f"Función 'suma()' = {mi_calculadora.suma()}")
print(f"Función 'resta()' = {mi_calculadora.resta()}")
print(f"Función 'multiplicacion()' = {mi_calculadora.multiplicacion()}")
print(f"Función 'division()' = {mi_calculadora.division()}")
# Trying to access protected and private attributes
print(f"Atributo protegido = {mi_calculadora._protected}")
print(f"Atributo privado = {mi_calculadora.get_private()}")
# Accessing the instance directly (this uses __str__)
print(mi_calculadora)
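# Note (illustrative addition, not part of the original lesson): the "private"
# attribute is only name-mangled, not truly hidden. Python rewrites __private
# to _Calculadora__private, so it can still be reached from outside:
print(f"Name-mangled access = {mi_calculadora._Calculadora__private}")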
# TODO: Review class variables
# # Accessing a class attribute (class variable)
# print(f"Valor de la variable de clase: {mi_calculadora.variable_de_clase}")
# mi_calculadora_2 = Calculadora(100, 8)
# mi_calculadora_2.variable_de_clase = 'y'
# print(f"Valor de la variable de clase: {mi_calculadora.variable_de_clase}")
# # Access the attributes of the 2nd instance (object)
# print(f"Atributo 'a' = {mi_calculadora_2.a}")
# print(f"Atributo 'b' = {mi_calculadora_2.b}")
# print(f"Valor de la variable de clase: {mi_calculadora.variable_de_clase}")
# print(f"Valor de la variable de clase: {mi_calculadora_2.variable_de_clase}")
# Inheritance
class CalculadoraCientifica(Calculadora):
# Constructor that calls the parent class constructor
def __init__(self, a, b):
super().__init__(a, b)
def potencia(self):
return self.a ** self.b
# Create an instance (object) of the 'CalculadoraCientifica' class
mi_calculadora_cientifica = CalculadoraCientifica(9, 10)
# Access the attributes of the instance (object)
print(f"Atributo 'a' = {mi_calculadora_cientifica.a}")
print(f"Atributo 'b' = {mi_calculadora_cientifica.b}")
# Call the methods of the instance (object)
print(f"Función 'suma()' = {mi_calculadora_cientifica.suma()}")
print(f"Función 'resta()' = {mi_calculadora_cientifica.resta()}")
print(f"Función 'multiplicacion()' = {mi_calculadora_cientifica.multiplicacion()}")
print(f"Función 'division()' = {mi_calculadora_cientifica.division()}")
print(f"Función 'potencia()' = {mi_calculadora_cientifica.potencia()}") | {
"content_hash": "54d7c76ad76c887541623e68c7e5003f",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 112,
"avg_line_length": 30.398230088495577,
"alnum_prop": 0.6452692867540029,
"repo_name": "AnhellO/DAS_Sistemas",
"id": "b9c9d8b7b608134dbe8d8d5fe8b6d0ea9773a33c",
"size": "6961",
"binary": false,
"copies": "1",
"ref": "refs/heads/ene-jun-2022",
"path": "Ene-Jun-2022/Ejemplos/3er-clase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "8515"
},
{
"name": "Go",
"bytes": "25845"
},
{
"name": "HTML",
"bytes": "36671"
},
{
"name": "Python",
"bytes": "716604"
}
],
"symlink_target": ""
} |
"""Reading, editing, and writing XML files."""
import xml.etree.ElementTree as ET # noqa: N814
import logging
import re
logger = logging.getLogger(__name__)
class XML(object):
"""Class capable of reading, editing, and writing XML files."""
@staticmethod
def get_ns(text):
"""Get the namespace prefix from an XML element or attribute name.
Args:
text (str): Element name or attribute name, such as
"{http://schemas.dmtf.org/ovf/envelope/1}Element".
Returns:
str: "" if no prefix is present, or a namespace prefix, such as
"http://schemas.dmtf.org/ovf/envelope/1".
"""
match = re.match(r"\{(.*)\}", str(text))
if not match:
logger.error("Name '%s' has no associated namespace!", text)
return ""
return match.group(1)
@staticmethod
def strip_ns(text):
"""Remove a namespace prefix from an XML element or attribute name.
Args:
text (str): Element name or attribute name, such as
"{http://schemas.dmtf.org/ovf/envelope/1}Element".
Returns:
str: Bare name, such as "Element".
"""
match = re.match(r"\{.*\}(.*)", str(text))
if not match:
logger.error("Name '%s' has no associated namespace!", text)
return text
else:
return match.group(1)
def __init__(self, xml_file):
"""Read the given XML file and store it in memory.
The memory representation is available as properties :attr:`tree` and
:attr:`root`.
Args:
xml_file (str): File path to read.
Raises:
xml.etree.ElementTree.ParseError: if parsing fails
"""
# Parse the XML into memory
self.tree = ET.parse(xml_file)
""":class:`xml.etree.ElementTree.ElementTree` describing this file."""
self.root = self.tree.getroot()
"""Root :class:`xml.etree.ElementTree.Element` instance of the tree."""
def write_xml(self, xml_file):
"""Write pretty XML out to the given file.
Args:
xml_file (str): Filename to write to
"""
logger.verbose("Writing XML to %s", xml_file)
# Pretty-print the XML for readability
self.xml_reindent(self.root, 0)
# We could make cleaner XML by passing "default_namespace=NSM['ovf']",
# which will leave off the "ovf:" prefix on elements and attributes in
# the main OVF namespace, but unfortunately, this cleaner XML is not
# recognized as valid by ElementTree, resulting in a "write-once" OVF -
# subsequent attempts to read and re-write the XML will give the error:
#
# ValueError: cannot use non-qualified names with default_namespace
# option
#
# This is a bug - see http://bugs.python.org/issue17088
self.tree.write(xml_file, xml_declaration=True, encoding='utf-8')
@staticmethod
def xml_reindent(parent, depth=0):
"""Recursively add indentation to XML to make it look nice.
Args:
parent (xml.etree.ElementTree.Element): Current parent element
depth (int): How far down the rabbit hole we have recursed.
Increments by 2 for each successive level of nesting.
"""
depth += 2
last = None
for elem in list(parent):
elem.tail = "\n" + (" " * depth)
XML.xml_reindent(elem, depth)
last = elem
if last is not None:
# Parent indents to first child
parent.text = "\n" + (" " * depth)
# Last element indents back to parent
depth -= 2
last.tail = "\n" + (" " * depth)
if depth == 0:
# Add newline at end of file
parent.tail = "\n"
@classmethod
def find_child(cls, parent, tag, attrib=None, required=False):
"""Find the unique child element under the specified parent element.
Args:
parent (xml.etree.ElementTree.Element): Parent element
tag (str): Child tag to match on
attrib (dict): Child attributes to match on
required (boolean): Whether to raise an error if no child exists
Raises:
LookupError: if more than one matching child is found
KeyError: if no matching child is found and :attr:`required` is True
Returns:
xml.etree.ElementTree.Element: Child element found, or None
"""
matches = cls.find_all_children(parent, tag, attrib)
if len(matches) > 1:
raise LookupError(
"Found multiple matching <{0}> children (each with "
"attributes '{1}') under <{2}>:\n{3}"
.format(XML.strip_ns(tag),
attrib,
XML.strip_ns(parent.tag),
"\n".join([ET.tostring(e).decode() for e in matches])))
elif len(matches) == 0:
if required:
raise KeyError("Mandatory element <{0}> not found under <{1}>"
.format(XML.strip_ns(tag),
XML.strip_ns(parent.tag)))
return None
else:
return matches[0]
@classmethod
def find_all_children(cls, parent, tag, attrib=None):
"""Find all matching child elements under the specified parent element.
Args:
parent (xml.etree.ElementTree.Element): Parent element
tag (iterable): Child tag string (or list of tags) to match on
attrib (dict): Child attributes to match on
Returns:
list: (Possibly empty) list of matching child Elements
"""
assert parent is not None
if isinstance(tag, str):
elements = parent.findall(tag)
label = tag
else:
elements = []
for tag_entry in tag:
elements.extend(parent.findall(tag_entry))
label = [XML.strip_ns(t) for t in tag]
if not elements:
logger.spam("No children matching %s found under %s",
label, XML.strip_ns(parent.tag))
return elements
logger.spam("Examining %s %s elements under %s",
len(elements), label, XML.strip_ns(parent.tag))
child_list = []
for element in elements:
found = True
if attrib:
for key in attrib.keys():
if element.get(key, None) != attrib[key]:
logger.spam("Attribute '%s' (%s) does not match "
"expected value (%s)",
XML.strip_ns(key), element.get(key, ""),
attrib[key])
found = False
break
if found:
child_list.append(element)
logger.spam("Found %s matching %s elements", len(child_list), label)
return child_list
@classmethod
def add_child(cls, parent, new_child, ordering=None,
known_namespaces=None):
"""Add the given child element under the given parent element.
Args:
parent (xml.etree.ElementTree.Element): Parent element
new_child (xml.etree.ElementTree.Element): Child element to attach
ordering (list): (Optional) List describing the expected ordering of
child tags under the parent; if a new child element is created,
its placement under the parent will respect this sequence.
known_namespaces (list): (Optional) List of well-understood XML
namespaces. If a new child is created, and ``ordering`` is
given, any tag (new or existing) that is encountered but not
accounted for in ``ordering`` will result in COT logging a
warning **if and only if** the unaccounted-for tag is in a
known namespace.
"""
if ordering and new_child.tag not in ordering:
child_ns = XML.get_ns(new_child.tag)
if known_namespaces and child_ns in known_namespaces:
logger.warning("New child '%s' is in a known namespace '%s',"
" but is not in the list of expected children"
" in this namespace under '%s':\n%s",
XML.strip_ns(new_child.tag),
child_ns,
XML.strip_ns(parent.tag),
[XML.strip_ns(expected) for expected in ordering
if XML.get_ns(expected) == child_ns])
# Assume this is some sort of custom element, which
# implicitly goes at the end of the list.
ordering = None
if not ordering:
parent.append(new_child)
else:
new_index = ordering.index(new_child.tag)
index = 0
found_position = False
for child in list(parent):
try:
if ordering.index(child.tag) > new_index:
found_position = True
break
except ValueError:
child_ns = XML.get_ns(child.tag)
if known_namespaces and child_ns in known_namespaces:
logger.warning(
"Found unexpected child element '%s' under '%s' in"
" namespace '%s'. The list of expected children in"
" this namespace is only:\n%s",
XML.strip_ns(child.tag),
XML.strip_ns(parent.tag),
child_ns,
[XML.strip_ns(expected) for expected in ordering
if XML.get_ns(expected) == child_ns])
# Assume this is some sort of custom element - all known
# elements should implicitly come before it.
found_position = True
break
index += 1
if found_position:
parent.insert(index, new_child)
else:
parent.append(new_child)
@classmethod
def set_or_make_child(cls, parent, tag, text=None, attrib=None,
ordering=None, known_namespaces=None):
"""Update or create a child element under the specified parent element.
Args:
parent (xml.etree.ElementTree.Element): Parent element
tag (str): Child element text tag to find or create
text (str): Value to set the child's text attribute to
attrib (dict): Dict of child attributes to match on while
searching and set in the final child element
ordering (list): See :meth:`add_child`
known_namespaces (list): See :meth:`add_child`
Returns:
xml.etree.ElementTree.Element: New or updated child Element.
"""
assert parent is not None
if attrib is None:
attrib = {}
element = cls.find_child(parent, tag, attrib=attrib)
if element is None:
logger.spam("Creating new %s element under parent %s",
XML.strip_ns(tag), XML.strip_ns(parent.tag))
element = ET.Element(tag)
XML.add_child(parent, element, ordering, known_namespaces)
if text is not None:
element.text = str(text)
for attr in attrib:
element.set(attr, attrib[attr])
return element
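# --- Illustrative usage sketch (not part of COT): a quick demonstration of the
# namespace helpers above on an in-memory element. The namespace URI below is
# only an example value; run this module directly to see the output.
if __name__ == "__main__":
    _OVF = "http://schemas.dmtf.org/ovf/envelope/1"
    _elem = ET.Element("{%s}Envelope" % _OVF)
    print(XML.get_ns(_elem.tag))     # -> http://schemas.dmtf.org/ovf/envelope/1
    print(XML.strip_ns(_elem.tag))   # -> Envelope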
| {
"content_hash": "d595e9d82af193a2de3a899b8b9d0941",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 79,
"avg_line_length": 40,
"alnum_prop": 0.5386904761904762,
"repo_name": "glennmatthews/cot",
"id": "5f4b3e3461c045bb5520a192d8d5c82a72b16099",
"size": "12501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "COT/xml_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1293158"
},
{
"name": "Roff",
"bytes": "37442"
},
{
"name": "Shell",
"bytes": "3840"
}
],
"symlink_target": ""
} |
'''
Datadog
www.datadoghq.com
----
Make sense of your IT Data
Licensed under Simplified BSD License (see LICENSE)
(C) Boxed Ice 2010 all rights reserved
(C) Datadog, Inc. 2010-2013 all rights reserved
'''
# set up logging before importing any other components
from config import initialize_logging; initialize_logging('forwarder')
from config import get_logging_config
import os; os.umask(022)
# Standard imports
import logging
import os
import sys
import threading
import zlib
from Queue import Queue, Full
from subprocess import Popen
from hashlib import md5
from datetime import datetime, timedelta
from socket import gaierror, error as socket_error
# Tornado
import tornado.httpserver
import tornado.ioloop
import tornado.web
from tornado.escape import json_decode
from tornado.options import define, parse_command_line, options
# agent import
from util import Watchdog, get_uuid, get_hostname, json, get_tornado_ioloop
from emitter import http_emitter
from config import get_config, get_url_endpoint, get_version
from checks.check_status import ForwarderStatus
from transaction import Transaction, TransactionManager
import modules
# 3rd party
try:
import pycurl
except ImportError:
# For the source install, pycurl might not be installed
pycurl = None
log = logging.getLogger('forwarder')
log.setLevel(get_logging_config()['log_level'] or logging.INFO)
DD_ENDPOINT = "dd_url"
TRANSACTION_FLUSH_INTERVAL = 5000 # Every 5 seconds
WATCHDOG_INTERVAL_MULTIPLIER = 10 # 10x flush interval
HEADERS_TO_REMOVE = [
'Host',
'Content-Length',
]
# Maximum delay before replaying a transaction
MAX_WAIT_FOR_REPLAY = timedelta(seconds=90)
# Maximum queue size in bytes (when this is reached, old messages are dropped)
MAX_QUEUE_SIZE = 30 * 1024 * 1024 # 30MB
THROTTLING_DELAY = timedelta(microseconds=1000000/2) # 2 msg/second
class EmitterThread(threading.Thread):
def __init__(self, *args, **kwargs):
self.__name = kwargs['name']
self.__emitter = kwargs.pop('emitter')()
self.__logger = kwargs.pop('logger')
self.__config = kwargs.pop('config')
self.__max_queue_size = kwargs.pop('max_queue_size', 100)
self.__queue = Queue(self.__max_queue_size)
threading.Thread.__init__(self, *args, **kwargs)
self.daemon = True
def run(self):
while True:
(data, headers) = self.__queue.get()
try:
self.__logger.debug('Emitter %r handling a packet', self.__name)
self.__emitter(data, self.__logger, self.__config)
except Exception:
self.__logger.error('Failure during operation of emitter %r', self.__name, exc_info=True)
def enqueue(self, data, headers):
try:
self.__queue.put((data, headers), block=False)
except Full:
self.__logger.warn('Dropping packet for %r due to backlog', self.__name)
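# --- Illustrative sketch (not part of dd-agent): EmitterThread expects the
# 'emitter' kwarg to be a zero-argument factory returning a callable of
# (data, logger, config). A custom emitter module loaded via 'custom_emitters'
# would expose something like this hypothetical factory.
def _example_emitter():
    def _emit(data, logger, config):
        logger.debug('example emitter received a payload with %d entries', len(data))
    return _emit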
class EmitterManager(object):
"""Track custom emitters"""
def __init__(self, config):
self.agentConfig = config
self.emitterThreads = []
for emitter_spec in [s.strip() for s in self.agentConfig.get('custom_emitters', '').split(',')]:
if len(emitter_spec) == 0: continue
logging.info('Setting up custom emitter %r', emitter_spec)
try:
thread = EmitterThread(
name=emitter_spec,
emitter=modules.load(emitter_spec, 'emitter'),
logger=logging,
config=config,
)
thread.start()
self.emitterThreads.append(thread)
except Exception, e:
logging.error('Unable to start thread for emitter: %r', emitter_spec, exc_info=True)
logging.info('Done with custom emitters')
def send(self, data, headers=None):
if not self.emitterThreads:
return # bypass decompression/decoding
if headers and headers.get('Content-Encoding') == 'deflate':
data = zlib.decompress(data)
data = json_decode(data)
for emitterThread in self.emitterThreads:
logging.info('Queueing for emitter %r', emitterThread.name)
emitterThread.enqueue(data, headers)
class MetricTransaction(Transaction):
_application = None
_trManager = None
_endpoints = []
_emitter_manager = None
@classmethod
def set_application(cls, app):
cls._application = app
cls._emitter_manager = EmitterManager(cls._application._agentConfig)
@classmethod
def set_tr_manager(cls, manager):
cls._trManager = manager
@classmethod
def get_tr_manager(cls):
return cls._trManager
@classmethod
def set_endpoints(cls):
# Only send data to Datadog if an API key exists,
# i.e. the user is also a Datadog user
try:
is_dd_user = 'api_key' in cls._application._agentConfig\
and 'use_dd' in cls._application._agentConfig\
and cls._application._agentConfig['use_dd']\
and cls._application._agentConfig.get('api_key')
if is_dd_user:
log.warn("You are a Datadog user so we will send data to https://app.datadoghq.com")
cls._endpoints.append(DD_ENDPOINT)
except Exception:
log.info("Not a Datadog user")
def __init__(self, data, headers):
self._data = data
self._headers = headers
self._headers['DD-Forwarder-Version'] = get_version()
# Call after data has been set (size is computed in Transaction's init)
Transaction.__init__(self)
# Emitters operate outside the regular transaction framework
if self._emitter_manager is not None:
self._emitter_manager.send(data, headers)
# Insert the transaction in the Manager
self._trManager.append(self)
log.debug("Created transaction %d" % self.get_id())
self._trManager.flush()
def __sizeof__(self):
return sys.getsizeof(self._data)
def get_url(self, endpoint):
endpoint_base_url = get_url_endpoint(self._application._agentConfig[endpoint])
api_key = self._application._agentConfig.get('api_key')
if api_key:
return endpoint_base_url + '/intake?api_key=%s' % api_key
return endpoint_base_url + '/intake'
def flush(self):
for endpoint in self._endpoints:
url = self.get_url(endpoint)
log.debug("Sending metrics to endpoint %s at %s" % (endpoint, url))
# Getting proxy settings
proxy_settings = self._application._agentConfig.get('proxy_settings', None)
tornado_client_params = {
'url': url,
'method': 'POST',
'body': self._data,
'headers': self._headers,
'validate_cert': not self._application.skip_ssl_validation,
}
# Remove headers that were passed by the emitter; they no longer apply.
# This is pretty hacky, though, as it should really be done in pycurl, curl or tornado
for h in HEADERS_TO_REMOVE:
if h in tornado_client_params['headers']:
del tornado_client_params['headers'][h]
log.debug("Removing {0} header.".format(h))
force_use_curl = False
if proxy_settings is not None:
force_use_curl = True
if pycurl is not None:
log.debug("Configuring tornado to use proxy settings: %s:****@%s:%s" % (proxy_settings['user'],
proxy_settings['host'], proxy_settings['port']))
tornado_client_params['proxy_host'] = proxy_settings['host']
tornado_client_params['proxy_port'] = proxy_settings['port']
tornado_client_params['proxy_username'] = proxy_settings['user']
tornado_client_params['proxy_password'] = proxy_settings['password']
if self._application._agentConfig.get('proxy_forbid_method_switch'):
# See http://stackoverflow.com/questions/8156073/curl-violate-rfc-2616-10-3-2-and-switch-from-post-to-get
tornado_client_params['prepare_curl_callback'] = lambda curl: curl.setopt(pycurl.POSTREDIR, pycurl.REDIR_POST_ALL)
if (not self._application.use_simple_http_client or force_use_curl) and pycurl is not None:
ssl_certificate = self._application._agentConfig.get('ssl_certificate', None)
tornado_client_params['ca_certs'] = ssl_certificate
req = tornado.httpclient.HTTPRequest(**tornado_client_params)
use_curl = force_use_curl or self._application._agentConfig.get("use_curl_http_client") and not self._application.use_simple_http_client
if use_curl:
if pycurl is None:
log.error("dd-agent is configured to use the Curl HTTP Client, but pycurl is not available on this system.")
else:
log.debug("Using CurlAsyncHTTPClient")
tornado.httpclient.AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
else:
log.debug("Using SimpleHTTPClient")
http = tornado.httpclient.AsyncHTTPClient()
http.fetch(req, callback=self.on_response)
def on_response(self, response):
if response.error:
log.error("Response: %s" % response)
self._trManager.tr_error(self)
else:
self._trManager.tr_success(self)
self._trManager.flush_next()
class APIMetricTransaction(MetricTransaction):
def get_url(self, endpoint):
endpoint_base_url = get_url_endpoint(self._application._agentConfig[endpoint])
config = self._application._agentConfig
api_key = config['api_key']
url = endpoint_base_url + '/api/v1/series/?api_key=' + api_key
return url
def get_data(self):
return self._data
class StatusHandler(tornado.web.RequestHandler):
def get(self):
threshold = int(self.get_argument('threshold', -1))
m = MetricTransaction.get_tr_manager()
self.write("<table><tr><td>Id</td><td>Size</td><td>Error count</td><td>Next flush</td></tr>")
transactions = m.get_transactions()
for tr in transactions:
self.write("<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>" %
(tr.get_id(), tr.get_size(), tr.get_error_count(), tr.get_next_flush()))
self.write("</table>")
if threshold >= 0:
if len(transactions) > threshold:
self.set_status(503)
class AgentInputHandler(tornado.web.RequestHandler):
def post(self):
"""Read the message and forward it to the intake"""
# read message
msg = self.request.body
headers = self.request.headers
if msg is not None:
# Setup a transaction for this message
tr = MetricTransaction(msg, headers)
else:
raise tornado.web.HTTPError(500)
self.write("Transaction: %s" % tr.get_id())
class ApiInputHandler(tornado.web.RequestHandler):
def post(self):
"""Read the message and forward it to the intake"""
# read message
msg = self.request.body
headers = self.request.headers
if msg is not None:
# Setup a transaction for this message
tr = APIMetricTransaction(msg, headers)
else:
raise tornado.web.HTTPError(500)
class Application(tornado.web.Application):
def __init__(self, port, agentConfig, watchdog=True, skip_ssl_validation=False, use_simple_http_client=False):
self._port = int(port)
self._agentConfig = agentConfig
self._metrics = {}
MetricTransaction.set_application(self)
MetricTransaction.set_endpoints()
self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
MAX_QUEUE_SIZE, THROTTLING_DELAY)
MetricTransaction.set_tr_manager(self._tr_manager)
self._watchdog = None
self.skip_ssl_validation = skip_ssl_validation or agentConfig.get('skip_ssl_validation', False)
self.use_simple_http_client = use_simple_http_client
if self.skip_ssl_validation:
log.info("Skipping SSL hostname validation, useful when using a transparent proxy")
if watchdog:
watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER
self._watchdog = Watchdog(watchdog_timeout,
max_mem_mb=agentConfig.get('limit_memory_consumption', None))
def log_request(self, handler):
""" Override the tornado logging method.
If everything goes well, log level is DEBUG.
Otherwise it's WARNING or ERROR depending on the response code. """
if handler.get_status() < 400:
log_method = log.debug
elif handler.get_status() < 500:
log_method = log.warning
else:
log_method = log.error
request_time = 1000.0 * handler.request.request_time()
log_method("%d %s %.2fms", handler.get_status(),
handler._request_summary(), request_time)
def appendMetric(self, prefix, name, host, device, ts, value):
if self._metrics.has_key(prefix):
metrics = self._metrics[prefix]
else:
metrics = {}
self._metrics[prefix] = metrics
if metrics.has_key(name):
metrics[name].append([host, device, ts, value])
else:
metrics[name] = [[host, device, ts, value]]
def _postMetrics(self):
if len(self._metrics) > 0:
self._metrics['uuid'] = get_uuid()
self._metrics['internalHostname'] = get_hostname(self._agentConfig)
self._metrics['apiKey'] = self._agentConfig['api_key']
MetricTransaction(json.dumps(self._metrics),
headers={'Content-Type': 'application/json'})
self._metrics = {}
def run(self):
handlers = [
(r"/intake/?", AgentInputHandler),
(r"/api/v1/series/?", ApiInputHandler),
(r"/status/?", StatusHandler),
]
settings = dict(
cookie_secret="12oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
xsrf_cookies=False,
debug=False,
log_function=self.log_request
)
non_local_traffic = self._agentConfig.get("non_local_traffic", False)
tornado.web.Application.__init__(self, handlers, **settings)
http_server = tornado.httpserver.HTTPServer(self)
try:
# non_local_traffic must be == True to match, not just some non-false value
if non_local_traffic is True:
http_server.listen(self._port)
else:
# localhost in lieu of 127.0.0.1 to support IPv6
try:
http_server.listen(self._port, address = self._agentConfig['bind_host'])
except gaierror:
log.warning("localhost seems undefined in your host file, using 127.0.0.1 instead")
http_server.listen(self._port, address = "127.0.0.1")
except socket_error, e:
if "Errno 99" in str(e):
log.warning("IPv6 doesn't seem to be fully supported. Falling back to IPv4")
http_server.listen(self._port, address = "127.0.0.1")
else:
raise
except socket_error, e:
log.exception("Socket error %s. Is another application listening on the same port ? Exiting", e)
sys.exit(1)
except Exception, e:
log.exception("Uncaught exception. Forwarder is exiting.")
sys.exit(1)
log.info("Listening on port %d" % self._port)
# Register callbacks
self.mloop = get_tornado_ioloop()
logging.getLogger().setLevel(get_logging_config()['log_level'] or logging.INFO)
def flush_trs():
if self._watchdog:
self._watchdog.reset()
self._postMetrics()
self._tr_manager.flush()
tr_sched = tornado.ioloop.PeriodicCallback(flush_trs,TRANSACTION_FLUSH_INTERVAL,
io_loop = self.mloop)
# Register optional Graphite listener
gport = self._agentConfig.get("graphite_listen_port", None)
if gport is not None:
log.info("Starting graphite listener on port %s" % gport)
from graphite import GraphiteServer
gs = GraphiteServer(self, get_hostname(self._agentConfig), io_loop=self.mloop)
if non_local_traffic is True:
gs.listen(gport)
else:
gs.listen(gport, address = "localhost")
# Start everything
if self._watchdog:
self._watchdog.reset()
tr_sched.start()
self.mloop.start()
log.info("Stopped")
def stop(self):
self.mloop.stop()
def init(skip_ssl_validation=False, use_simple_http_client=False):
agentConfig = get_config(parse_args = False)
port = agentConfig.get('listen_port', 17123)
if port is None:
port = 17123
else:
port = int(port)
app = Application(port, agentConfig, skip_ssl_validation=skip_ssl_validation, use_simple_http_client=use_simple_http_client)
def sigterm_handler(signum, frame):
log.info("caught sigterm. stopping")
app.stop()
import signal
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
return app
def main():
define("sslcheck", default=1, help="Verify SSL hostname, on by default")
define("use_simple_http_client", default=0, help="Use Tornado SimpleHTTPClient instead of CurlAsyncHTTPClient")
args = parse_command_line()
skip_ssl_validation = False
use_simple_http_client = False
if unicode(options.sslcheck) == u"0":
skip_ssl_validation = True
if unicode(options.use_simple_http_client) == u"1":
use_simple_http_client = True
# If we don't have any arguments, run the server.
if not args:
import tornado.httpclient
app = init(skip_ssl_validation, use_simple_http_client=use_simple_http_client)
try:
app.run()
except Exception:
log.exception("Uncaught exception in the forwarder")
finally:
ForwarderStatus.remove_latest_status()
else:
usage = "%s [help|info]. Run with no commands to start the server" % (
sys.argv[0])
command = args[0]
if command == 'info':
logging.getLogger().setLevel(logging.ERROR)
return ForwarderStatus.print_latest_status()
elif command == 'help':
print usage
else:
print "Unknown command: %s" % command
print usage
return -1
return 0
if __name__ == "__main__":
sys.exit(main())
| {
"content_hash": "c0926966467bcb5ebd0b3ada15e07541",
"timestamp": "",
"source": "github",
"line_count": 528,
"max_line_length": 148,
"avg_line_length": 36.52840909090909,
"alnum_prop": 0.6026857468761342,
"repo_name": "JohnLZeller/dd-agent",
"id": "7b2d1d10cb07f8238084793562afddea673d9312",
"size": "19329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ddagent.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2717"
},
{
"name": "Go",
"bytes": "917"
},
{
"name": "HTML",
"bytes": "8758"
},
{
"name": "Nginx",
"bytes": "3404"
},
{
"name": "PowerShell",
"bytes": "2665"
},
{
"name": "Python",
"bytes": "1454456"
},
{
"name": "Ruby",
"bytes": "57718"
},
{
"name": "Shell",
"bytes": "38669"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
} |
import os
import wx
from contextlib import contextmanager
from robotide.namespace import Namespace
from robotide.controller import Project
from robotide.spec import librarydatabase
from robotide.ui import LoadProgressObserver
from robotide.ui.mainframe import RideFrame
from robotide.pluginapi import RideLogMessage
from robotide import context, contrib
from robotide.context import coreplugins
from robotide.preferences import Preferences, RideSettings
from robotide.application.pluginloader import PluginLoader
from robotide.application.editorprovider import EditorProvider
from robotide.application.releasenotes import ReleaseNotes
from robotide.application.updatenotifier import UpdateNotifierController, \
UpdateDialog
class RIDE(wx.App):
def __init__(self, path=None, updatecheck=True):
self._initial_path = path
self._updatecheck = updatecheck
context.APP = self
wx.App.__init__(self, redirect=False)
def OnInit(self):
# Needed for SetToolTipString to work
wx.HelpProvider_Set(wx.SimpleHelpProvider())
self.settings = RideSettings()
librarydatabase.initialize_database()
self.preferences = Preferences(self.settings)
self.namespace = Namespace(self.settings)
self._controller = Project(self.namespace, self.settings)
self.frame = RideFrame(self, self._controller)
self._editor_provider = EditorProvider()
self._plugin_loader = PluginLoader(self, self._get_plugin_dirs(),
coreplugins.get_core_plugins())
self._plugin_loader.enable_plugins()
self.editor = self._get_editor()
self.editor.show()
self._load_data()
self.frame.tree.populate(self.model)
self.frame.tree.set_editor(self.editor)
self._publish_system_info()
if self._updatecheck:
UpdateNotifierController(self.settings).notify_update_if_needed(UpdateDialog)
wx.CallLater(200, ReleaseNotes(self).bring_to_front)
return True
def _publish_system_info(self):
RideLogMessage(context.SYSTEM_INFO).publish()
@property
def model(self):
return self._controller
def _get_plugin_dirs(self):
return [self.settings.get_path('plugins'),
os.path.join(self.settings['install root'], 'site-plugins'),
contrib.CONTRIB_PATH]
def _get_editor(self):
from robotide.editor import EditorPlugin
for pl in self._plugin_loader.plugins:
if isinstance(pl._plugin, EditorPlugin):
return pl._plugin
def _load_data(self):
path = self._initial_path or self._get_latest_path()
if path:
with self.active_event_loop():
observer = LoadProgressObserver(self.frame)
self._controller.load_data(path, observer)
def _get_latest_path(self):
recent = self._get_recentfiles_plugin()
if not recent or not recent.recent_files:
return None
return recent.recent_files[0]
def _get_recentfiles_plugin(self):
from robotide.recentfiles import RecentFilesPlugin
for pl in self.get_plugins():
if isinstance(pl._plugin, RecentFilesPlugin):
return pl._plugin
def get_plugins(self):
return self._plugin_loader.plugins
def register_preference_panel(self, panel_class):
'''Add the given panel class to the list of known preference panels'''
self.preferences.add(panel_class)
def unregister_preference_panel(self, panel_class):
'''Remove the given panel class from the list of known preference panels'''
self.preferences.remove(panel_class)
def register_editor(self, object_class, editor_class, activate):
self._editor_provider.register_editor(object_class, editor_class,
activate)
def unregister_editor(self, object_class, editor_class):
self._editor_provider.unregister_editor(object_class, editor_class)
def activate_editor(self, object_class, editor_class):
self._editor_provider.set_active_editor(object_class, editor_class)
def get_editors(self, object_class):
return self._editor_provider.get_editors(object_class)
def get_editor(self, object_class):
return self._editor_provider.get_editor(object_class)
@contextmanager
def active_event_loop(self):
# With wxPython 2.9.1, ProgressBar.Pulse breaks if there's no active
# event loop.
# See http://code.google.com/p/robotframework-ride/issues/detail?id=798
loop = wx.EventLoop()
wx.EventLoop.SetActive(loop)
yield
del loop
| {
"content_hash": "57d1b1070e725de9bcc37700f953702d",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 89,
"avg_line_length": 38.04,
"alnum_prop": 0.670031545741325,
"repo_name": "caio2k/RIDE",
"id": "0c22d5148d30d78c24f1dd575e747591f0194dd5",
"size": "5363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robotide/application/application.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21370"
},
{
"name": "HTML",
"bytes": "110298"
},
{
"name": "JavaScript",
"bytes": "41401"
},
{
"name": "Python",
"bytes": "2908095"
}
],
"symlink_target": ""
} |
from pynos import device
from st2actions.runners.pythonrunner import Action
class interface_ip_address(Action):
def run(self, **kwargs):
conn = (str(kwargs.pop('ip')), str(kwargs.pop('port')))
auth = (str(kwargs.pop('username')), str(kwargs.pop('password')))
test = kwargs.pop('test', False)
callback = kwargs.pop('callback', None)
with device.Device(
conn=conn, auth=auth,
test=test,
callback=callback
) as dev:
dev.interface.ip_address(**kwargs)
return 0
| {
"content_hash": "b1d67dc13e1634e4d2944240e2461407",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 73,
"avg_line_length": 33.35294117647059,
"alnum_prop": 0.5908289241622575,
"repo_name": "StackStorm/st2contrib",
"id": "b09fac2e95cee0fc4ce707c4a8eb1ab1b2a3bbad",
"size": "567",
"binary": false,
"copies": "3",
"ref": "refs/heads/st2contrib-deprecated-archive",
"path": "archive/packs/vdx/actions/interface_ip_address.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "Makefile",
"bytes": "5581"
},
{
"name": "Python",
"bytes": "1362240"
},
{
"name": "Ruby",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "7781"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import pandas as pd
# Bokeh imports
from bokeh.util.testing import verify_all
# Module under test
#import bokeh.sampledata.unemployment1948 as bsu
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.unemployment1948", ALL))
@pytest.mark.sampledata
def test_data():
import bokeh.sampledata.unemployment1948 as bsu
assert isinstance(bsu.data, pd.DataFrame)
# check detail for package data
assert len(bsu.data) == 69
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
| {
"content_hash": "ac6f25569c70d2e25b77b4acb218c7f6",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 91,
"avg_line_length": 30.8125,
"alnum_prop": 0.34212305611899935,
"repo_name": "Karel-van-de-Plassche/bokeh",
"id": "2a0ece8de92a7ca4c0269f42458bdf1e42a972dc",
"size": "1999",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/sampledata/tests/test_unemployment1948.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102094"
},
{
"name": "CoffeeScript",
"bytes": "462899"
},
{
"name": "HTML",
"bytes": "46193"
},
{
"name": "JavaScript",
"bytes": "24563"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2705342"
},
{
"name": "Shell",
"bytes": "8995"
},
{
"name": "TypeScript",
"bytes": "1468291"
}
],
"symlink_target": ""
} |
from nova.compute import resource_tracker
class FakeResourceTracker(resource_tracker.ResourceTracker):
"""Version without a DB requirement."""
def _update(self, context, compute_node):
pass
| {
"content_hash": "99e07d0f3265c410c0a5bf00c1b6f016",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 60,
"avg_line_length": 26.125,
"alnum_prop": 0.7320574162679426,
"repo_name": "vmturbo/nova",
"id": "3a173a4b84db4d471a847718fcf4f60f98ab9fc8",
"size": "849",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "nova/tests/unit/compute/fake_resource_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "18983608"
},
{
"name": "Shell",
"bytes": "31813"
},
{
"name": "Smarty",
"bytes": "307089"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import subprocess
import sys
from rospkg.os_detect import OS_ARCH, OS_MANJARO
from ..installers import PackageManagerInstaller
from .source import SOURCE_INSTALLER
from .pip import PIP_INSTALLER
PACMAN_INSTALLER = 'pacman'
def register_installers(context):
context.set_installer(PACMAN_INSTALLER, PacmanInstaller())
def register_platforms(context):
context.add_os_installer_key(OS_ARCH, SOURCE_INSTALLER)
context.add_os_installer_key(OS_ARCH, PACMAN_INSTALLER)
context.add_os_installer_key(OS_ARCH, PIP_INSTALLER)
context.set_default_os_installer_key(OS_ARCH, lambda self: PACMAN_INSTALLER)
register_manjaro(context)
def register_manjaro(context):
# Manjaro uses the same packages as Arch Linux. Override to Arch if detected
(os_name, os_version) = context.get_os_name_and_version()
if os_name == OS_MANJARO and not context.os_override:
print('rosdep detected OS: [%s] aliasing it to: [%s]' %
(OS_MANJARO, OS_ARCH), file=sys.stderr)
context.set_os_override(OS_ARCH, context.os_detect.get_codename())
def pacman_detect_single(p):
return not subprocess.call(['pacman', '-T', p], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def pacman_detect(packages):
return [p for p in packages if pacman_detect_single(p)]
class PacmanInstaller(PackageManagerInstaller):
def __init__(self):
super(PacmanInstaller, self).__init__(pacman_detect)
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
packages = self.get_packages_to_install(resolved, reinstall=reinstall)
if not packages:
return []
command = ['pacman', '-S']
if not interactive:
command.append('--noconfirm')
if not reinstall:
command.append('--needed')
if quiet:
command.append('-q')
return [self.elevate_priv(command + packages)]
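# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original rosdep module: shows the shape
# of the command list composed by get_install_command() above.  The package
# name is hypothetical, and reinstall=True is used so the detection step
# (which would shell out to `pacman -T`) is skipped.
if __name__ == '__main__':
    _commands = PacmanInstaller().get_install_command(
        ['tinyxml'], interactive=False, reinstall=True)
    # Expected shape, roughly (elevate_priv prefixes sudo when not root):
    # [['sudo', ..., 'pacman', '-S', '--noconfirm', 'tinyxml']]
    print(_commands)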
| {
"content_hash": "d80eef170dd209bb5f95563343241405",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 99,
"avg_line_length": 31.285714285714285,
"alnum_prop": 0.6884830035514967,
"repo_name": "ros-infrastructure/rosdep",
"id": "d9301f43d95022d9109b43b4ec2224d182a0520e",
"size": "3596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rosdep2/platforms/arch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "466"
},
{
"name": "Python",
"bytes": "503079"
},
{
"name": "Shell",
"bytes": "296"
}
],
"symlink_target": ""
} |
class TestModule(object):
def tests(self):
return {'equalto': lambda a, b: a == b}
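# Usage sketch: with this test plugin in place, templates can filter with the
# ``equalto`` test even on Jinja2 versions that lack it built in, e.g.
#   {{ items | selectattr('state', 'equalto', 'present') | list }}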
| {
"content_hash": "bff1963f065b3acf228c790c4726cd63",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 47,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.5894736842105263,
"repo_name": "alem0lars/os-install",
"id": "968b63bb779fcc7fee1555b84dc44b8d280f6ca8",
"size": "95",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "roles/common/test_plugins/test_equality.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "71512"
}
],
"symlink_target": ""
} |
from lar import *
from scipy import *
import json
import scipy
import numpy as np
import time as tm
import gc
from pngstack2array3d import *
import struct
import getopt, os, sys
import traceback
#
import matplotlib.pyplot as plt
# ------------------------------------------------------------
# Logging & Timer
# ------------------------------------------------------------
logging_level = 0;
# 0 = no_logging
# 1 = few details
# 2 = many details
# 3 = many many details
def log(n, l):
if __name__=="__main__" and n <= logging_level:
for s in l:
print "Log:", s;
timer = 1;
timer_last = tm.time()
def timer_start(s):
global timer_last;
if __name__=="__main__" and timer == 1:
log(3, ["Timer start:" + s]);
timer_last = tm.time();
def timer_stop():
global timer_last;
if __name__=="__main__" and timer == 1:
log(3, ["Timer stop :" + str(tm.time() - timer_last)]);
# ------------------------------------------------------------
# Configuration parameters
# ------------------------------------------------------------
PNG_EXTENSION = ".png"
BIN_EXTENSION = ".bin"
# ------------------------------------------------------------
# Utility toolbox
# ------------------------------------------------------------
def countFilesInADir(directory):
return len(os.walk(directory).next()[2])
def isArrayEmpty(arr):
return all(e == 0 for e in arr)
# ------------------------------------------------------------
def writeOffsetToFile(file, offsetCurr):
file.write( struct.pack('>I', offsetCurr[0]) )
file.write( struct.pack('>I', offsetCurr[1]) )
file.write( struct.pack('>I', offsetCurr[2]) )
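# Note on the format written above: each block header is three 4-byte
# big-endian unsigned ints (the z, x, y offsets), i.e. 12 bytes in total;
# for example struct.pack('>I', 5) == '\x00\x00\x00\x05'.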
# ------------------------------------------------------------
def computeChains(imageHeight,imageWidth,imageDepth, imageDx,imageDy,imageDz, Nx,Ny,Nz, calculateout,bordo3, colors,pixelCalc,centroidsCalc, colorIdx,INPUT_DIR,DIR_O):
beginImageStack = 0
endImage = beginImageStack
MAX_CHAINS = colors
count = 0
fileName = "selettori-"
if (calculateout == True):
fileName = "output-"
saveTheColors = centroidsCalc
saveTheColors = sorted(saveTheColors.reshape(1,colors)[0])
# print str(imageHeight) + '-' + str(imageWidth) + '-' + str(imageDepth)
# print str(imageDx) + '-' + str(imageDy) + '-' + str(imageDz)
# print str(Nx) + '-' + str(Ny) + '-' + str(Nz)
with open(DIR_O+'/'+fileName+str(saveTheColors[colorIdx])+BIN_EXTENSION, "wb") as newFile:
for zBlock in xrange(imageDepth/imageDz):
startImage = endImage
endImage = startImage + imageDz
xEnd, yEnd = 0,0
theImage,colors,theColors = pngstack2array3d(INPUT_DIR, startImage, endImage, colors, pixelCalc, centroidsCalc)
theColors = theColors.reshape(1,colors)
if (sorted(theColors[0]) != saveTheColors):
log(1, [ "Error: colors have changed"] )
sys.exit(2)
for xBlock in xrange(imageHeight/imageDx):
for yBlock in xrange(imageWidth/imageDy):
xStart, yStart = xBlock * imageDx, yBlock * imageDy
xEnd, yEnd = xStart+imageDx, yStart+imageDy
image = theImage[:, xStart:xEnd, yStart:yEnd]
nz,nx,ny = image.shape
count += 1
# Compute a quotient complex of chains with constant field
# ------------------------------------------------------------
chains3D_old = [];
chains3D = None
if (calculateout != True):
chains3D = np.zeros(nx*ny*nz,dtype=int32);
zStart = startImage - beginImageStack;
def addr(x,y,z): return x + (nx) * (y + (ny) * (z))
hasSomeOne = False
if (calculateout == True):
for x in xrange(nx):
for y in xrange(ny):
for z in xrange(nz):
if (image[z,x,y] == saveTheColors[colorIdx]):
chains3D_old.append(addr(x,y,z))
else:
for x in xrange(nx):
for y in xrange(ny):
for z in xrange(nz):
if (image[z,x,y] == saveTheColors[colorIdx]):
hasSomeOne = True
chains3D[addr(x,y,z)] = 1
# Compute the boundary complex of the quotient cell
# ------------------------------------------------------------
objectBoundaryChain = None
if (calculateout == True) and (len(chains3D_old) > 0):
objectBoundaryChain = larBoundaryChain(bordo3,chains3D_old)
# Save
if (calculateout == True):
if (objectBoundaryChain != None):
writeOffsetToFile( newFile, np.array([zStart,xStart,yStart], dtype=int32) )
newFile.write( bytearray( np.array(objectBoundaryChain.toarray().astype('b').flatten()) ) )
else:
if (hasSomeOne != False):
writeOffsetToFile( newFile, np.array([zStart,xStart,yStart], dtype=int32) )
newFile.write( bytearray( np.array(chains3D, dtype=np.dtype('b')) ) )
def runComputation(imageDx,imageDy,imageDz, colors,coloridx,calculateout, V,FV, INPUT_DIR,BEST_IMAGE,BORDER_FILE,DIR_O):
bordo3 = None
if (calculateout == True):
with open(BORDER_FILE, "r") as file:
bordo3_json = json.load(file)
ROWCOUNT = bordo3_json['ROWCOUNT']
COLCOUNT = bordo3_json['COLCOUNT']
ROW = np.asarray(bordo3_json['ROW'], dtype=np.int32)
COL = np.asarray(bordo3_json['COL'], dtype=np.int32)
DATA = np.asarray(bordo3_json['DATA'], dtype=np.int8)
bordo3 = csr_matrix((DATA,COL,ROW),shape=(ROWCOUNT,COLCOUNT));
imageHeight,imageWidth = getImageData(INPUT_DIR+str(BEST_IMAGE)+PNG_EXTENSION)
imageDepth = countFilesInADir(INPUT_DIR)
	Nx,Ny,Nz = imageHeight/imageDx, imageWidth/imageDy, imageDepth/imageDz
try:
pixelCalc, centroidsCalc = centroidcalc(INPUT_DIR, BEST_IMAGE, colors)
computeChains(imageHeight,imageWidth,imageDepth, imageDx,imageDy,imageDz, Nx,Ny,Nz, calculateout,bordo3, colors,pixelCalc,centroidsCalc, coloridx,INPUT_DIR,DIR_O)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
log(1, [ "Error: " + ''.join('!! ' + line for line in lines) ]) # Log it or whatever here
sys.exit(2)
def main(argv):
ARGS_STRING = 'Args: -r -b <borderfile> -x <borderX> -y <borderY> -z <borderZ> -i <inputdirectory> -c <colors> -d <coloridx> -o <outputdir> -q <bestimage>'
try:
opts, args = getopt.getopt(argv,"rb:x:y:z:i:c:d:o:q:")
except getopt.GetoptError:
print ARGS_STRING
sys.exit(2)
nx = ny = nz = imageDx = imageDy = imageDz = 64
colors = 2
coloridx = 0
mandatory = 6
calculateout = False
#Files
BORDER_FILE = 'bordo3.json'
BEST_IMAGE = ''
DIR_IN = ''
DIR_O = ''
for opt, arg in opts:
if opt == '-x':
nx = ny = nz = imageDx = imageDy = imageDz = int(arg)
mandatory = mandatory - 1
elif opt == '-y':
ny = nz = imageDy = imageDz = int(arg)
elif opt == '-z':
nz = imageDz = int(arg)
elif opt == '-r':
calculateout = True
elif opt == '-i':
DIR_IN = arg + '/'
mandatory = mandatory - 1
elif opt == '-b':
BORDER_FILE = arg
mandatory = mandatory - 1
elif opt == '-o':
mandatory = mandatory - 1
DIR_O = arg
elif opt == '-c':
mandatory = mandatory - 1
colors = int(arg)
elif opt == '-d':
mandatory = mandatory - 1
coloridx = int(arg)
elif opt == '-q':
BEST_IMAGE = int(arg)
if mandatory != 0:
		print 'Not all arguments were given'
print ARGS_STRING
sys.exit(2)
if (coloridx >= colors):
		print 'Not all arguments were given (coloridx >= colors)'
print ARGS_STRING
sys.exit(2)
def ind(x,y,z): return x + (nx+1) * (y + (ny+1) * (z))
def invertIndex(nx,ny,nz):
nx,ny,nz = nx+1,ny+1,nz+1
def invertIndex0(offset):
a0, b0 = offset / nx, offset % nx
a1, b1 = a0 / ny, a0 % ny
a2, b2 = a1 / nz, a1 % nz
return b0,b1,b2
return invertIndex0
chunksize = nx * ny + nx * nz + ny * nz + 3 * nx * ny * nz
V = [[x,y,z] for z in range(nz+1) for y in range(ny+1) for x in range(nx+1) ]
v2coords = invertIndex(nx,ny,nz)
FV = []
for h in range(len(V)):
x,y,z = v2coords(h)
if (x < nx) and (y < ny): FV.append([h,ind(x+1,y,z),ind(x,y+1,z),ind(x+1,y+1,z)])
if (x < nx) and (z < nz): FV.append([h,ind(x+1,y,z),ind(x,y,z+1),ind(x+1,y,z+1)])
if (y < ny) and (z < nz): FV.append([h,ind(x,y+1,z),ind(x,y,z+1),ind(x,y+1,z+1)])
runComputation(imageDx, imageDy, imageDz, colors, coloridx, calculateout, V, FV, DIR_IN, BEST_IMAGE, BORDER_FILE, DIR_O)
if __name__ == "__main__":
main(sys.argv[1:]) | {
"content_hash": "03ed7f5587be28818fec4c24dcfbac23",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 167,
"avg_line_length": 31.32319391634981,
"alnum_prop": 0.5871570769604273,
"repo_name": "cvdlab/lar-running-demo",
"id": "ae67453c25b117e393a14ab6b00552924e4cf8be",
"size": "8263",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "py/computation/old/step_calcchains_serial_tobinary_filter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "134055"
},
{
"name": "Shell",
"bytes": "23171"
},
{
"name": "TeX",
"bytes": "10571"
}
],
"symlink_target": ""
} |
"""
================================================
:mod:`replacers` -- Survivor replacement methods
================================================
This module provides pre-defined replacers for evolutionary computations.
All replacer functions have the following arguments:
- *random* -- the random number generator object
- *population* -- the population of individuals
- *parents* -- the list of parent individuals
- *offspring* -- the list of offspring individuals
- *args* -- a dictionary of keyword arguments
Each replacer function returns the list of surviving individuals.
.. Copyright 2012 Aaron Garrett
.. Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
.. The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
.. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
.. module:: replacers
.. moduleauthor:: Aaron Garrett <[email protected]>
"""
import math
def default_replacement(random, population, parents, offspring, args):
"""Performs no replacement, returning the original population.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
return population
def truncation_replacement(random, population, parents, offspring, args):
"""Replaces population with the best of the population and offspring.
This function performs truncation replacement, which means that
the entire existing population is replaced by the best from among
the current population and offspring, keeping the existing population
size fixed. This is similar to so-called "plus" replacement in the
evolution strategies literature, except that "plus" replacement
considers only parents and offspring for survival. However, if the
entire population are parents (which is often the case in evolution
strategies), then truncation replacement and plus-replacement are
equivalent approaches.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
psize = len(population)
population.extend(list(offspring))
population.sort(reverse=True)
return population[:psize]
def steady_state_replacement(random, population, parents, offspring, args):
"""Performs steady-state replacement for the offspring.
This function performs steady-state replacement, which means that
the offspring replace the least fit individuals in the existing
population, even if those offspring are less fit than the individuals
that they replace.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
population.sort()
num_to_replace = min(len(offspring), len(population))
population[:num_to_replace] = offspring[:num_to_replace]
return population
def generational_replacement(random, population, parents, offspring, args):
"""Performs generational replacement with optional weak elitism.
This function performs generational replacement, which means that
the entire existing population is replaced by the offspring,
truncating to the population size if the number of offspring is
larger. Weak elitism may also be specified through the `num_elites`
keyword argument in args. If this is used, the best `num_elites`
individuals in the current population are allowed to survive if
they are better than the worst `num_elites` offspring.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *num_elites* -- number of elites to consider (default 0)
"""
num_elites = args.setdefault('num_elites', 0)
population.sort(reverse=True)
offspring.extend(population[:num_elites])
offspring.sort(reverse=True)
survivors = offspring[:len(population)]
return survivors
def random_replacement(random, population, parents, offspring, args):
"""Performs random replacement with optional weak elitism.
This function performs random replacement, which means that
the offspring replace random members of the population, keeping
the population size constant. Weak elitism may also be specified
through the `num_elites` keyword argument in args. If this is used,
the best `num_elites` individuals in the current population are
allowed to survive if they are better than the worst `num_elites`
offspring.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *num_elites* -- number of elites to consider (default 0)
"""
num_elites = args.setdefault('num_elites', 0)
population.sort(reverse=True)
num_to_replace = min(len(offspring), len(population) - num_elites)
valid_indices = range(num_elites, len(population))
rep_index = random.sample(valid_indices, num_to_replace)
for i, repind in enumerate(rep_index):
population[repind] = offspring[i]
return population
def plus_replacement(random, population, parents, offspring, args):
"""Performs "plus" replacement.
This function performs "plus" replacement, which means that
the entire existing population is replaced by the best
population-many elements from the combined set of parents and
offspring.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
pool = list(offspring)
pool.extend(parents)
pool.sort(reverse=True)
survivors = pool[:len(population)]
return survivors
def comma_replacement(random, population, parents, offspring, args):
"""Performs "comma" replacement.
This function performs "comma" replacement, which means that
the entire existing population is replaced by the best
population-many elements from the offspring. This function
makes the assumption that the size of the offspring is at
least as large as the original population. Otherwise, the
population size will not be constant.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
offspring.sort(reverse=True)
survivors = offspring[:len(population)]
return survivors
def crowding_replacement(random, population, parents, offspring, args):
"""Performs crowding replacement as a form of niching.
This function performs crowding replacement, which means that
the members of the population are replaced one-at-a-time with
each of the offspring. A random sample of `crowding_distance`
individuals is pulled from the current population, and the
closest individual to the current offspring (where "closest"
is determined by the `distance_function`) is replaced by that
offspring, if the offspring is better. It is possible for one
offspring to replace an earlier offspring in the same generation,
given the random sample that is taken of the current survivors
for each offspring.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *distance_function* -- a function that accepts two candidate
solutions and returns the distance between them (default
Euclidean L2 distance)
- *crowding_distance* -- a positive integer representing the
number of closest solutions to consider as a "crowd" (default 2)
"""
def distance(x, y):
return math.sqrt(sum([(a - b)**2 for a, b in zip(x, y)]))
try:
distance_function = args['distance_function']
except KeyError:
distance_function = distance
args['distance_function'] = distance_function
crowding_distance = args.setdefault('crowding_distance', 2)
survivors = population
for o in offspring:
pool = random.sample(survivors, crowding_distance)
closest = min(pool, key=lambda x: distance_function(o.candidate, x.candidate))
if o > closest:
survivors.remove(closest)
survivors.append(o)
return survivors
#-------------------------------------------
# Algorithm-specific Replacement Strategies
#-------------------------------------------
def simulated_annealing_replacement(random, population, parents, offspring, args):
"""Replaces population using the simulated annealing schedule.
This function performs simulated annealing replacement based
on a temperature and a cooling rate. These can be specified
by the keyword arguments `temperature`, which should be the
initial temperature, and `cooling_rate`, which should be the
coefficient by which the temperature is reduced. If these
keyword arguments are not present, then the function will
attempt to base the cooling schedule either on the ratio of
evaluations to the maximum allowed evaluations or on the
ratio of generations to the maximum allowed generations.
Each of these ratios is of the form ``(max - current)/max``
so that the cooling schedule moves smoothly from 1 to 0.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *temperature* -- the initial temperature
- *cooling_rate* -- a real-valued coefficient in the range (0, 1)
by which the temperature should be reduced
"""
try:
temp = args['temperature']
cooling_rate = args['cooling_rate']
temp = temp * cooling_rate
args['temperature'] = temp
except KeyError:
try:
num_evals = args['_ec'].num_evaluations
max_evals = args['max_evaluations']
temp = float(max_evals - num_evals) / float(max_evals)
except KeyError:
num_gens = args['_ec'].num_generations
max_gens = args['max_generations']
temp = 1 - float(max_gens - num_gens) / float(max_gens)
new_pop = []
for p, o in zip(parents, offspring):
if o >= p:
new_pop.append(o)
elif temp > 0 and random.random() < math.exp(-abs(p.fitness - o.fitness) / float(temp)):
new_pop.append(o)
else:
new_pop.append(p)
return new_pop
def nsga_replacement(random, population, parents, offspring, args):
"""Replaces population using the non-dominated sorting technique from NSGA-II.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
survivors = []
combined = list(population)
combined.extend(offspring)
# Perform the non-dominated sorting to determine the fronts.
fronts = []
pop = set(range(len(combined)))
while len(pop) > 0:
front = []
for p in pop:
dominated = False
for q in pop:
if combined[p] < combined[q]:
dominated = True
break
if not dominated:
front.append(p)
fronts.append([dict(individual=combined[f], index=f) for f in front])
pop = pop - set(front)
# Go through each front and add all the elements until doing so
# would put you above the population limit. At that point, fall
# back to the crowding distance to determine who to put into the
# next population. Individuals with higher crowding distances
# (i.e., more distance between neighbors) are preferred.
for i, front in enumerate(fronts):
if len(survivors) + len(front) > len(population):
# Determine the crowding distance.
distance = [0 for _ in range(len(combined))]
individuals = list(front)
num_individuals = len(individuals)
num_objectives = len(individuals[0]['individual'].fitness)
for obj in range(num_objectives):
individuals.sort(key=lambda x: x['individual'].fitness[obj])
distance[individuals[0]['index']] = float('inf')
distance[individuals[-1]['index']] = float('inf')
for i in range(1, num_individuals-1):
distance[individuals[i]['index']] = (distance[individuals[i]['index']] +
(individuals[i+1]['individual'].fitness[obj] -
individuals[i-1]['individual'].fitness[obj]))
crowd = [dict(dist=distance[f['index']], index=f['index']) for f in front]
crowd.sort(key=lambda x: x['dist'], reverse=True)
last_rank = [combined[c['index']] for c in crowd]
r = 0
num_added = 0
num_left_to_add = len(population) - len(survivors)
while r < len(last_rank) and num_added < num_left_to_add:
if last_rank[r] not in survivors:
survivors.append(last_rank[r])
num_added += 1
r += 1
# If we've filled out our survivor list, then stop.
# Otherwise, process the next front in the list.
if len(survivors) == len(population):
break
else:
for f in front:
if f['individual'] not in survivors:
survivors.append(f['individual'])
return survivors
def paes_replacement(random, population, parents, offspring, args):
"""Replaces population using the Pareto Archived Evolution Strategy method.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
archive = args['_ec'].archive
archiver = args['_ec'].archiver
survivors = []
for p, o in zip(parents, offspring):
if o == p:
survivors.append(p)
elif o in archive:
survivors.append(p)
elif o > p:
archive = archiver(random, [o], archive, args)
survivors.append(o)
elif o >= p:
for a in archive:
if o > a or o < a:
break
if o >= a:
archive = archiver(random, [o], archive, args)
if o > a or archiver.grid_population[o.grid_location] <= archiver.grid_population[p.grid_location]:
survivors.append(o)
else:
survivors.append(p)
else:
survivors.append(p)
else:
survivors.append(p)
return survivors
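#-------------------------------------------
# Illustrative sketch (not part of the original inspyred module): with plain
# floats standing in for individuals (sorting in reverse puts the fittest
# first), the two simplest replacers above behave as follows.
#-------------------------------------------
if __name__ == '__main__':
    import random as _rng
    _population = [3.0, 1.0, 2.0]
    _offspring = [4.0, 0.5]
    # Truncation keeps the best len(population) of population + offspring.
    print(truncation_replacement(_rng, list(_population), [], list(_offspring), {}))
    # -> [4.0, 3.0, 2.0]
    # Generational replacement with one weak elite carried over.
    print(generational_replacement(_rng, list(_population), [],
                                   list(_offspring), {'num_elites': 1}))
    # -> [4.0, 3.0, 0.5]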
| {
"content_hash": "85e22ea1ce0f9208000a85e1093fd689",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 115,
"avg_line_length": 40.32876712328767,
"alnum_prop": 0.6456068840579711,
"repo_name": "aarongarrett/inspyred",
"id": "10aa4e11150bc6066cb7b49ca96fac394cd51c52",
"size": "17664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inspyred/ec/replacers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2278"
},
{
"name": "Python",
"bytes": "323575"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 426c876ed746
Revises: b92dc277c384
Create Date: 2017-05-12 11:57:29.387852
"""
# revision identifiers, used by Alembic.
revision = '426c876ed746'
down_revision = 'b92dc277c384'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('organizations', sa.Column(
'default_locale_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'organizations', 'codings', [
'default_locale_id'], ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'organizations', type_='foreignkey')
op.drop_column('organizations', 'default_locale_id')
### end Alembic commands ###
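# Roughly equivalent DDL for the upgrade (backend-dependent; the FK constraint
# name is left to the database / naming convention since None is passed above):
#   ALTER TABLE organizations ADD COLUMN default_locale_id INTEGER;
#   ALTER TABLE organizations ADD FOREIGN KEY (default_locale_id)
#       REFERENCES codings (id);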
| {
"content_hash": "8a0f99e9d2806be67b2a9fcb85712254",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 65,
"avg_line_length": 27.8,
"alnum_prop": 0.6642685851318945,
"repo_name": "uwcirg/true_nth_usa_portal",
"id": "2ad232f8d2f3cc8047e86ecba1193af3fbe3b0e6",
"size": "834",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "portal/migrations/versions/426c876ed746_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1733344"
},
{
"name": "Dockerfile",
"bytes": "947"
},
{
"name": "HTML",
"bytes": "435596"
},
{
"name": "JavaScript",
"bytes": "588006"
},
{
"name": "Mako",
"bytes": "414"
},
{
"name": "Python",
"bytes": "1837126"
},
{
"name": "Shell",
"bytes": "13976"
},
{
"name": "Vue",
"bytes": "62901"
}
],
"symlink_target": ""
} |
import pytest
from openshift_checks.package_availability import PackageAvailability
@pytest.mark.parametrize('pkg_mgr,is_active', [
('yum', True),
('dnf', False),
])
def test_is_active(pkg_mgr, is_active):
task_vars = dict(
ansible_pkg_mgr=pkg_mgr,
)
assert PackageAvailability(None, task_vars).is_active() == is_active
@pytest.mark.parametrize('task_vars,must_have_packages,must_not_have_packages', [
(
dict(openshift_service_type='origin'),
set(),
set(['openshift-hyperkube', 'openshift-node']),
),
(
dict(
openshift_service_type='origin',
group_names=['oo_masters_to_config'],
),
set(['origin-hyperkube']),
set(['origin-node']),
),
(
dict(
openshift_service_type='atomic-openshift',
group_names=['oo_nodes_to_config'],
),
set(['atomic-openshift-node']),
set(['atomic-openshift-hyperkube']),
),
(
dict(
openshift_service_type='atomic-openshift',
group_names=['oo_masters_to_config', 'oo_nodes_to_config'],
),
set(['atomic-openshift-hyperkube', 'atomic-openshift-node']),
set(),
),
])
def test_package_availability(task_vars, must_have_packages, must_not_have_packages):
return_value = {}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
assert 'packages' in module_args
assert set(module_args['packages']).issuperset(must_have_packages)
assert not set(module_args['packages']).intersection(must_not_have_packages)
return {'foo': return_value}
result = PackageAvailability(execute_module, task_vars).run()
assert result['foo'] is return_value
| {
"content_hash": "54fdcd7e037b87b6a50f18abf0e4729a",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 85,
"avg_line_length": 30.694915254237287,
"alnum_prop": 0.6024295969077857,
"repo_name": "maxamillion/openshift-ansible",
"id": "206e21318fefd5660ba26fdef891b2c4f2f520a2",
"size": "1811",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "roles/openshift_health_checker/test/package_availability_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3740"
},
{
"name": "Go",
"bytes": "5047"
},
{
"name": "HTML",
"bytes": "14650"
},
{
"name": "JavaScript",
"bytes": "155"
},
{
"name": "Python",
"bytes": "3349100"
},
{
"name": "Shell",
"bytes": "83968"
}
],
"symlink_target": ""
} |
"""Tests for registry."""
from absl.testing import absltest
from language.mentionmemory.encoders import base_encoder
from language.mentionmemory.encoders import encoder_registry
@encoder_registry.register_encoder('decorated_encoder')
class DecoratedEncoder(base_encoder.BaseEncoder):
pass
class UnDecoratedEncoder(base_encoder.BaseEncoder):
pass
class InvalidEncoder(object):
pass
class EncoderRegistryTest(absltest.TestCase):
def test_decorated_encoder(self):
"""Simple test to verify that decorated encoders have been registered."""
encoder_name = encoder_registry.get_registered_encoder('decorated_encoder')
self.assertIsNotNone(encoder_name)
self.assertEqual(encoder_name.__name__, 'DecoratedEncoder')
def test_undecorated_encoder(self):
"""Simple test to verify that we can register encoders at runtime."""
# Register the encoder.
encoder_registry.register_encoder('undecorated_encoder')(UnDecoratedEncoder)
# Retrieve it.
encoder_name = encoder_registry.get_registered_encoder(
'undecorated_encoder')
self.assertIsNotNone(encoder_name)
self.assertEqual(encoder_name.__name__, 'UnDecoratedEncoder')
    # Verify that we can still access previously registered decorated encoders.
encoder_name = encoder_registry.get_registered_encoder('decorated_encoder')
self.assertIsNotNone(encoder_name)
self.assertEqual(encoder_name.__name__, 'DecoratedEncoder')
def test_invalid_encoder(self):
"""Verify we get an exception when trying to register invalid encoder."""
with self.assertRaises(TypeError):
encoder_registry.register_encoder('invalid_encoder')(InvalidEncoder) # pytype: disable=wrong-arg-types
def test_multiple_encoder_registrations(self):
"""Verify that re-using an already registered name raises an exception."""
with self.assertRaises(ValueError):
encoder_registry.register_encoder('decorated_encoder')(UnDecoratedEncoder)
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "d9ae1c41899f30515b17c7bb938a2cb1",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 109,
"avg_line_length": 34.46551724137931,
"alnum_prop": 0.7518759379689844,
"repo_name": "google-research/language",
"id": "30dbd763ad8e7efe3b48778647584f3e6b325a4d",
"size": "2614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "language/mentionmemory/encoders/encoder_registry_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "9834"
},
{
"name": "CSS",
"bytes": "602"
},
{
"name": "HTML",
"bytes": "25162"
},
{
"name": "JavaScript",
"bytes": "8857"
},
{
"name": "Jupyter Notebook",
"bytes": "1505066"
},
{
"name": "Python",
"bytes": "7139472"
},
{
"name": "Shell",
"bytes": "183709"
}
],
"symlink_target": ""
} |
from L500analysis.derived_fields.derived_fields import *
from L500analysis.plotting.profiles.tools.radial_normalizations import *
from L500analysis.derived_fields.collections.peak_height.derived_field_functions \
import *
from L500analysis.utils.constants import hubble,Msun2g,kpc2cm
from L500analysis.plotting.profiles.tools.make_profile import make_profile
def _bulk_all_ratio_profile(data, *args, **kwargs) :
M_gas_all = data.profiles['M_gas_bin']
M_gas_bulk = data.profiles['M_gas_bulk_bin']
Rmid = data.profiles['r_mid']
Rin = data.profiles['r_in']
Rout = data.profiles['r_out']
Rvir = data.halo_properties[kwargs['R_delta_key']]
Rscaled = {hid: Rmid[hid]/Rvir[hid] for hid in data.halo_ids}
return dict({ 'aexp':data.aexp, 'M_gas_bulk':M_gas_bulk, 'M_gas_all':M_gas_all,
'Rmid':Rmid,
'Rin':Rin, 'Rout':Rout,
'halo_ids':data.halo_ids,
'Rscaled':Rscaled },
**kwargs)
def calculate_bulk_all_ratio_profile(input_data) :
d = input_data
mass_ratio = {}
for hid in d['halo_ids'] :
mass_ratio[hid] = d['M_gas_bulk'][hid]/d['M_gas_all'][hid]
mass_ratio[hid] = make_profile(x=d['Rscaled'][hid],y=mass_ratio[hid])
return mass_ratio
add_derived_field('Mg_bulk/Mg_all_500c',function=_bulk_all_ratio_profile,
combine_function=calculate_bulk_all_ratio_profile,
R_delta_key='r500c')
add_derived_field('Mg_bulk/Mg_all_200m',function=_bulk_all_ratio_profile,
combine_function=calculate_bulk_all_ratio_profile,
R_delta_key='r200m')
| {
"content_hash": "2f6c97f864f3ddffc0f9b73e12bb01e8",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 84,
"avg_line_length": 38.74418604651163,
"alnum_prop": 0.6308523409363745,
"repo_name": "cavestruz/L500analysis",
"id": "713ea22c3bf218153e5f8f2194f1c7364ab17a45",
"size": "1666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plotting/profiles/Mg_bulk_all_ratio/derived_field_functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "509320"
}
],
"symlink_target": ""
} |
from datetime import datetime
import requests
from scraper.database import get_session
from scraper.models import Champion, Player, Match, PlayerMatchMap
from scraper.settings import Config
def fetch_base(base, resource, **kwargs):
url = base + resource
kwargs['api_key'] = Config.TOKEN
response = requests.get(url, params=kwargs)
return response.json()
def fetch_static(resource, **kwargs):
base = 'https://br.api.pvp.net/api/lol/static-data/br/'
return fetch_base(base, resource, **kwargs)
def fetch(resource, **kwargs):
base = 'https://br.api.pvp.net/api/lol/br/'
return fetch_base(base, resource, **kwargs)
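# Example of the URL composed by fetch() (the api_key value is a placeholder):
#   fetch('v2.2/match/123') performs
#   GET https://br.api.pvp.net/api/lol/br/v2.2/match/123?api_key=<TOKEN>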
def scrap_match(match_id):
session = get_session()
print 'fetching data for match', match_id
match_data = fetch('v2.2/match/%s' % match_id)
participants = match_data['participants']
# Create the match
creation = datetime.fromtimestamp(match_data['matchCreation'] / 1000.0)
data = {
'type': match_data['matchType'],
'version': match_data['matchVersion'],
'queue_type': match_data['queueType'],
'creation': creation,
'duration': match_data['matchDuration'],
}
Match.get_or_create(session, {'id': match_id}, **data)
for participant in match_data['participantIdentities']:
summoner_id = participant['player']['summonerId']
summoner_name = participant['player']['summonerName']
print 'parsing', summoner_name
Player.get_or_create(session, {'id': summoner_id},
username=summoner_name)
details = [p for p in participants
if participant['participantId'] == p['participantId']][0]
data = {
'champion_id': details['championId'],
'season': match_data['season'],
'deaths': details['stats']['deaths'],
'kills': details['stats']['kills'],
'assists': details['stats']['assists'],
'has_won': details['stats']['winner'],
}
PlayerMatchMap.get_or_create(session, {
'player_id': summoner_id,
'match_id': match_id,
}, **data)
session.commit()
def scrap_summoner(summoner_ids=None, details=False):
session = get_session()
if summoner_ids is None:
summoner_ids = [p.id for p in session.query(Player.id)]
    summoners_data = fetch('v1.4/summoner/%s' %
                           ','.join(str(s) for s in summoner_ids))
for id, data in summoners_data.iteritems():
Player.get_or_create(session, {'id': id},
username=data['name'])
for summoner_id in summoner_ids:
print 'fetching data for summoner', summoner_id
summoner_data = fetch('v2.2/matchlist/by-summoner/%s' % summoner_id)
for match in summoner_data['matches']:
print 'parsing', match['matchId']
Champion.get_or_create(session, {'id': match['champion']})
Match.get_or_create(session, {'id': match['matchId']})
data = {
'player_id': summoner_id,
'match_id': match['matchId'],
'champion_id': match['champion'],
'lane': match['lane'],
'role': match['role'],
'queue': match['queue'],
'season': match['season'],
}
PlayerMatchMap.get_or_create(session, {
'player_id': summoner_id,
'match_id': data['match_id'],
}, **data)
if details:
scrap_match(data['match_id'])
session.commit()
def scrap_champions():
session = get_session()
for name, data in fetch_static('v1.2/champion')['data'].iteritems():
print 'parsing', name
Champion.get_or_create(session, {'id': data['id']}, name=name)
session.commit()
| {
"content_hash": "38eef219e11a8fb7f6952dabfc58d497",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 76,
"avg_line_length": 34.160714285714285,
"alnum_prop": 0.5763199163617355,
"repo_name": "Doomsk/dblolscrapper",
"id": "e23a7a2ab420ee22ca6fdd335411de7f41966c2f",
"size": "3826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scraper/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4653"
}
],
"symlink_target": ""
} |
from vic import lib as vic_lib
def test_str_to_bool():
for s, expected in [('TRUE', True), ('FALSE', False),
('true', True), ('false', False),
('TrUe', True), ('FaLsE', False)]:
assert vic_lib.str_to_bool(s.encode()) == expected
def test_str_to_agg_type():
assert vic_lib.str_to_agg_type(''.encode()) == vic_lib.AGG_TYPE_DEFAULT
assert vic_lib.str_to_agg_type('*'.encode()) == vic_lib.AGG_TYPE_DEFAULT
for s in ['AGG_TYPE_AVG', 'AGG_TYPE_BEG', 'AGG_TYPE_END',
'AGG_TYPE_MAX', 'AGG_TYPE_MIN', 'AGG_TYPE_SUM']:
expected = getattr(vic_lib, s)
assert vic_lib.str_to_agg_type(s.encode()) == expected
assert vic_lib.str_to_agg_type(s.lower().encode()) == expected
def test_str_to_out_type():
assert vic_lib.str_to_out_type(''.encode()) == vic_lib.OUT_TYPE_DEFAULT
assert vic_lib.str_to_out_type('*'.encode()) == vic_lib.OUT_TYPE_DEFAULT
for s in ['OUT_TYPE_USINT', 'OUT_TYPE_SINT', 'OUT_TYPE_FLOAT',
'OUT_TYPE_DOUBLE']:
expected = getattr(vic_lib, s)
assert vic_lib.str_to_out_type(s.encode()) == expected
assert vic_lib.str_to_out_type(s.lower().encode()) == expected
def test_str_to_out_mult():
assert vic_lib.str_to_out_mult(''.encode()) == 0
assert vic_lib.str_to_out_mult('*'.encode()) == 0
for mult in range(0, 10000, 100):
assert vic_lib.str_to_out_mult(str(mult).encode()) == float(mult)
def test_str_to_freq_flag():
for s in ['NEVER', 'NSTEPS', 'NSECONDS', 'NMINUTES', 'NHOURS', 'NDAYS',
'NMONTHS', 'NYEARS', 'DATE', 'END']:
expected = getattr(vic_lib, 'FREQ_{}'.format(s))
assert vic_lib.str_to_freq_flag(s.encode()) == expected
assert vic_lib.str_to_freq_flag(s.lower().encode()) == expected
def test_str_to_ascii_format():
# TODO: figure out the best way to pass a mutable string to ffi
pass
def test_str_to_calendar():
for s in ['STANDARD', 'GREGORIAN', 'PROLEPTIC_GREGORIAN', 'NOLEAP',
'365_DAY', '360_DAY', 'JULIAN', 'ALL_LEAP', '366_DAY']:
expected = getattr(vic_lib, 'CALENDAR_{}'.format(s))
assert vic_lib.str_to_calendar(s.encode()) == expected
assert vic_lib.str_to_calendar(s.lower().encode()) == expected
# NOLEAP calendar has an alternate spelling
# s = 'CALENDAR_NO_LEAP'
# assert vic_lib.str_to_calendar(s.encode()) == vic_lib.CALENDAR_NOLEAP
# assert vic_lib.str_to_calendar(s.lower().encode()) == vic_lib.CALENDAR_NOLEAP
def test_str_to_timeunits():
for s in ['SECONDS', 'MINUTES', 'HOURS', 'DAYS']:
expected = getattr(vic_lib, 'TIME_UNITS_{}'.format(s))
assert vic_lib.str_to_timeunits(s.encode()) == expected
assert vic_lib.str_to_timeunits(s.lower().encode()) == expected
def test_str_from_time_units():
# TODO: figure out the best way to pass a mutable string to ffi
pass
def test_str_from_calendar():
# TODO: figure out the best way to pass a mutable string to ffi
pass
def test_cell_method_from_agg_type():
# TODO: figure out the best way to pass a mutable string to ffi
pass
| {
"content_hash": "7020a5a7bbe423314de1aa1de21eedda",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 83,
"avg_line_length": 38.59756097560975,
"alnum_prop": 0.6066350710900474,
"repo_name": "UW-Hydro/VIC",
"id": "2db57b442fa6ae06b6826617283d2b79731a2d85",
"size": "3165",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/unit/shared/test_input_tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2344409"
},
{
"name": "Dockerfile",
"bytes": "792"
},
{
"name": "Fortran",
"bytes": "55922"
},
{
"name": "Makefile",
"bytes": "11578"
},
{
"name": "Python",
"bytes": "175656"
},
{
"name": "Shell",
"bytes": "10906"
}
],
"symlink_target": ""
} |
from rally import consts
class Endpoint(object):
def __init__(self, auth_url, username, password, tenant_name=None,
permission=consts.EndpointPermission.USER,
region_name=None, endpoint_type=consts.EndpointType.PUBLIC,
admin_port=None, domain_name=None, endpoint=None,
user_domain_name="Default", project_domain_name="Default",
https_insecure=None, https_cacert=None):
self.auth_url = auth_url
self.username = username
self.password = password
self.tenant_name = tenant_name
self.permission = permission
self.region_name = region_name
self.endpoint_type = endpoint_type
self.domain_name = domain_name
self.user_domain_name = user_domain_name
self.project_domain_name = project_domain_name
self.endpoint = endpoint
self.insecure = https_insecure
self.cacert = https_cacert
if admin_port:
import warnings
warnings.warn("'admin_port' argument is deprecated and will "
"be ignored.")
def to_dict(self, include_permission=False):
dct = {"auth_url": self.auth_url, "username": self.username,
"password": self.password, "tenant_name": self.tenant_name,
"region_name": self.region_name,
"endpoint_type": self.endpoint_type,
"domain_name": self.domain_name,
"endpoint": self.endpoint,
"https_insecure": self.insecure,
"https_cacert": self.cacert,
"user_domain_name": self.user_domain_name,
"project_domain_name": self.project_domain_name}
if include_permission:
dct["permission"] = self.permission
return dct
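# Minimal usage sketch (not part of the original module); the credential
# values below are placeholders.
if __name__ == "__main__":
    _endpoint = Endpoint("http://127.0.0.1:5000/v2.0", "admin", "secret",
                         tenant_name="demo")
    print(_endpoint.to_dict(include_permission=True))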
| {
"content_hash": "34b72999f4bc1e54dc428bc2351fb66f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 42.55813953488372,
"alnum_prop": 0.5896174863387978,
"repo_name": "varunarya10/rally",
"id": "24ac9077f77a5c016c65c925654c9e2a76717083",
"size": "2460",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rally/objects/endpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "46741"
},
{
"name": "Python",
"bytes": "2053831"
},
{
"name": "Shell",
"bytes": "18078"
}
],
"symlink_target": ""
} |
"""
calico.etcddriver.test_protocol
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for Felix/etcddriver protocol read/write function.
"""
import logging
import socket
from unittest import TestCase
import errno
from mock import Mock, call, patch
import msgpack
from calico.etcddriver.protocol import (
MessageWriter, STATUS_RESYNC, MSG_KEY_STATUS, MSG_TYPE_STATUS,
MSG_KEY_TYPE, STATUS_IN_SYNC, MessageReader,
SocketClosed, WriteFailed)
_log = logging.getLogger(__name__)
class StubWriterSocket(object):
def __init__(self):
self.chunks = []
self.unpacker = msgpack.Unpacker()
self.exception = None
def sendall(self, data):
if self.exception:
raise self.exception
self.chunks.append(data)
self.unpacker.feed(data)
def next_msg(self):
return next(self.unpacker)
class TestMessageWriter(TestCase):
def setUp(self):
self.sck = StubWriterSocket()
self.writer = MessageWriter(self.sck)
self.unpacker = msgpack.Unpacker()
def test_send_message(self):
self.writer.send_message(MSG_TYPE_STATUS,
{
MSG_KEY_STATUS: STATUS_RESYNC
})
self.assert_message_sent({
MSG_KEY_TYPE: MSG_TYPE_STATUS,
MSG_KEY_STATUS: STATUS_RESYNC
})
self.assert_no_more_messages()
def test_send_message_error(self):
self.sck.exception = socket.error()
self.assertRaises(WriteFailed, self.writer.send_message,
MSG_TYPE_STATUS,
{
MSG_KEY_STATUS: STATUS_RESYNC
})
def test_send_message_buffered(self):
# First message gets buffered.
self.writer.send_message(MSG_TYPE_STATUS,
flush=False)
self.assert_no_more_messages()
# Second message triggers a flush of both messages, in order.
self.writer.send_message(MSG_TYPE_STATUS,
{
MSG_KEY_STATUS: STATUS_IN_SYNC
})
self.assert_message_sent({
MSG_KEY_TYPE: MSG_TYPE_STATUS
})
self.assert_message_sent({
MSG_KEY_TYPE: MSG_TYPE_STATUS,
MSG_KEY_STATUS: STATUS_IN_SYNC
})
self.assert_no_more_messages()
def test_eventual_flush(self):
# First 200 messages should be buffered.
for _ in xrange(200):
self.writer.send_message(MSG_TYPE_STATUS,
{
MSG_KEY_STATUS: STATUS_RESYNC
},
flush=False)
self.assert_no_more_messages()
# 201st message triggers them all to be sent.
self.writer.send_message(MSG_TYPE_STATUS,
{
MSG_KEY_STATUS: STATUS_RESYNC
},
flush=False)
for _ in xrange(201):
self.assert_message_sent({
MSG_KEY_TYPE: MSG_TYPE_STATUS,
MSG_KEY_STATUS: STATUS_RESYNC
})
self.assert_no_more_messages()
def test_flush_no_content(self):
self.writer.flush()
self.assertFalse(self.sck.chunks)
def assert_message_sent(self, msg):
try:
received_msg = self.sck.next_msg()
except StopIteration:
self.fail("No messages received")
self.assertEqual(received_msg, msg,
"Received incorrect message: %s "
"while expecting: %s" % (received_msg, msg))
def assert_no_more_messages(self):
try:
msg = self.sck.next_msg()
except StopIteration:
return
else:
self.fail("Unexpected message: %s" % msg)
class TestMessageReader(TestCase):
def setUp(self):
self.sck = Mock(spec=socket.socket)
self.reader = MessageReader(self.sck)
@patch("select.select", autospec=True)
def test_mainline(self, m_select):
m_select.side_effect = iter([
([self.sck], [], []),
([self.sck], [], []),
])
exp_msg = {MSG_KEY_TYPE: MSG_TYPE_STATUS,
MSG_KEY_STATUS: STATUS_RESYNC}
self.sck.recv.return_value = msgpack.dumps(exp_msg)
for _ in xrange(2):
msg_gen = self.reader.new_messages(timeout=1)
msg_type, msg = next(msg_gen)
self.assertEqual(msg_type, MSG_TYPE_STATUS)
self.assertEqual(msg, exp_msg)
self.assertEqual(
self.sck.recv.mock_calls,
[
call(16384),
call(16384),
]
)
@patch("select.select", autospec=True)
def test_partial_read(self, m_select):
m_select.side_effect = iter([
([self.sck], [], []),
([self.sck], [], []),
])
exp_msg = {MSG_KEY_TYPE: MSG_TYPE_STATUS}
msg_bytes = msgpack.dumps(exp_msg)
self.sck.recv.side_effect = iter([
msg_bytes[:len(msg_bytes)/2],
msg_bytes[len(msg_bytes)/2:],
])
self.assertRaises(StopIteration, next,
self.reader.new_messages(timeout=None))
self.assertEqual(next(self.reader.new_messages(timeout=None)),
(MSG_TYPE_STATUS, exp_msg))
@patch("select.select", autospec=True)
def test_retryable_error(self, m_select):
m_select.side_effect = iter([
([self.sck], [], []),
([self.sck], [], []),
([self.sck], [], []),
([self.sck], [], []),
])
errors = []
for no in [errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR]:
err = socket.error()
err.errno = no
errors.append(err)
exp_msg = {MSG_KEY_TYPE: MSG_TYPE_STATUS,
MSG_KEY_STATUS: STATUS_RESYNC}
self.sck.recv.side_effect = iter(errors + [msgpack.dumps(exp_msg)])
for _ in errors:
msg_gen = self.reader.new_messages(timeout=1)
self.assertRaises(StopIteration, next, msg_gen)
msg_gen = self.reader.new_messages(timeout=1)
msg_type, msg = next(msg_gen)
self.assertEqual(msg_type, MSG_TYPE_STATUS)
self.assertEqual(msg, exp_msg)
@patch("select.select", autospec=True)
def test_non_retryable_error(self, m_select):
m_select.side_effect = iter([
([self.sck], [], []),
])
err = socket.error()
err.errno = errno.E2BIG
self.sck.recv.side_effect = err
msg_gen = self.reader.new_messages(timeout=1)
self.assertRaises(socket.error, next, msg_gen)
@patch("select.select", autospec=True)
def test_timeout(self, m_select):
m_select.side_effect = iter([
([], [], []),
])
msg_gen = self.reader.new_messages(timeout=1)
self.assertRaises(StopIteration, next, msg_gen)
self.assertFalse(self.sck.recv.called)
@patch("select.select", autospec=True)
def test_shutdown(self, m_select):
self.sck.recv.return_value = ""
msg_gen = self.reader.new_messages(timeout=None)
self.assertRaises(SocketClosed, next, msg_gen)
| {
"content_hash": "9abd425e752b8fce2025c3e639bf9698",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 75,
"avg_line_length": 34.122727272727275,
"alnum_prop": 0.5260423604635673,
"repo_name": "alexhersh/calico",
"id": "14d36bf5ad1dd2511a641c6417afb5e10f06cd72",
"size": "8114",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "calico/etcddriver/test/test_protocol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "812015"
},
{
"name": "Shell",
"bytes": "13082"
}
],
"symlink_target": ""
} |
import ctypes
import itertools
import windows
import windows.hooks
from windows.generated_def.winstructs import *
class Ressource(object):
def __init__(self, filename, lpName, lpType):
self.filename = filename
self.lpName = lpName
self.lpType = lpType
self.driver_data = None
self.loaded_ressource = None
def match(self, hModule, lpName, lpType):
x = not hModule and self.lpName == lpName and self.lpType == lpType
return x
def get_driver_data(self):
if self.driver_data is not None:
return self.driver_data
self.driver_data = open(self.filename, 'rb').read()
return self.driver_data
def load_resource(self):
driver_data = self.get_driver_data()
char_p = ctypes.c_char_p(driver_data)
real_addr = ctypes.cast(char_p, ctypes.c_void_p).value
return real_addr
def resource_len(self):
return len(self.get_driver_data())
resource_list = []
HRSRC_dict = {}
HRSRC_attibution = itertools.count(0x42424242)
@windows.hooks.Callback(PVOID, PVOID, PVOID, PVOID)
def FindResourceWHook(hModule, lpName, lpType, real_function):
for res in resource_list:
if res.match(hModule, lpName, lpType):
HRSRC = next(HRSRC_attibution)
HRSRC_dict[HRSRC] = res
return HRSRC
return real_function()
@windows.hooks.SizeofResourceCallback
def SizeofResourceHook(hModule, hResInfo, real_function):
if hResInfo in HRSRC_dict:
return HRSRC_dict[hResInfo].resource_len()
return real_function()
@windows.hooks.LoadResourceCallback
def LoadResourceHook(hModule, hResInfo, real_function):
if hResInfo in HRSRC_dict:
return HRSRC_dict[hResInfo].load_resource()
return real_function()
@windows.hooks.LockResourceCallback
def LockResourceHook(hResData, real_function):
x = real_function()
return x
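# Usage sketch: appending an entry to resource_list makes the hooks above
# serve the file's bytes whenever FindResourceW is called with a matching
# (lpName, lpType) and a NULL hModule, e.g.
#   resource_list.append(Ressource("my_driver.sys", lpName, lpType))
# (the filename here is a placeholder; enabling the hooks themselves is done
# elsewhere via the windows.hooks machinery).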
| {
"content_hash": "95df8fe369b92448d6bd8d7a93b19a9a",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 75,
"avg_line_length": 28.16176470588235,
"alnum_prop": 0.6725848563968668,
"repo_name": "sogeti-esec-lab/LKD",
"id": "0e6f977cf27cacfcaf3bae96a85dedd2ecd19132",
"size": "1915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resource_emulation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "763807"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from mock import patch, Mock
from fabutils.git import get_changed_files
@patch('fabutils.git.quiet')
@patch('fabutils.git.local', Mock())
def test_called_within_quiet(quiet):
get_changed_files()
quiet.assert_called_once()
@patch('fabutils.git.quiet')
@patch('fabutils.git.local', Mock(return_value='first\nsecond\nthird'))
def test_new_line_separated_entries_are_converted_to_a_list(quiet):
changed = get_changed_files()
assert isinstance(changed, list)
assert len(changed) == 3
assert changed[0] == 'first'
assert changed[1] == 'second'
assert changed[2] == 'third'
@patch('fabutils.git.quiet')
@patch('fabutils.git.local', Mock(return_value=''))
def test_returns_empty_list_when_no_changes_are_present(quiet):
changed = get_changed_files()
assert changed == []
| {
"content_hash": "62c9670107fee91bbb97a0d9218b1e93",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 71,
"avg_line_length": 27.451612903225808,
"alnum_prop": 0.700352526439483,
"repo_name": "novopl/fabutils",
"id": "0abf3ca6719d5b2822c80e764823ec39c82c1231",
"size": "875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/fabutils/git/test_get_changed_files.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26565"
},
{
"name": "Shell",
"bytes": "738"
}
],
"symlink_target": ""
} |
from twitter.common import app
from twitter.common.log.options import LogOptions
from apache.aurora.admin import help as help_commands
from apache.aurora.admin import admin, maintenance
from apache.aurora.common.auth.auth_module_manager import register_auth_module
from .help import add_verbosity_options, generate_terse_usage
app.register_commands_from(admin, help_commands, maintenance)
add_verbosity_options()
def main():
app.help()
try:
from apache.aurora.kerberos.auth_module import KerberosAuthModule
register_auth_module(KerberosAuthModule())
except ImportError:
# Use default auth implementation if kerberos is not available.
pass
LogOptions.set_stderr_log_level('INFO')
LogOptions.disable_disk_logging()
app.set_name('aurora-admin')
app.set_usage(generate_terse_usage())
def proxy_main():
app.main()
| {
"content_hash": "b74b42895f8839889b89812c4f3ae287",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 78,
"avg_line_length": 26.774193548387096,
"alnum_prop": 0.791566265060241,
"repo_name": "wfarner/aurora",
"id": "470b2d2339284de9246b35daa0af1c2b832c232d",
"size": "1379",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/main/python/apache/aurora/admin/aurora_admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6971"
},
{
"name": "Groovy",
"bytes": "12868"
},
{
"name": "HTML",
"bytes": "57299"
},
{
"name": "Java",
"bytes": "3526949"
},
{
"name": "JavaScript",
"bytes": "104493"
},
{
"name": "Makefile",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "1409644"
},
{
"name": "Ruby",
"bytes": "4315"
},
{
"name": "Shell",
"bytes": "58757"
},
{
"name": "Smalltalk",
"bytes": "79"
},
{
"name": "Smarty",
"bytes": "25233"
},
{
"name": "Thrift",
"bytes": "56058"
}
],
"symlink_target": ""
} |
"""
Driver for Linux servers running LVM.
"""
import math
import os
import socket
from oslo.config import cfg
from cinder.brick import exception as brick_exception
from cinder.brick.local_dev import lvm as lvm
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import units
from cinder import utils
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('volume_group',
default='cinder-volumes',
help='Name for the VG that will contain exported volumes'),
cfg.IntOpt('lvm_mirrors',
default=0,
help='If set, create lvms with multiple mirrors. Note that '
'this requires lvm_mirrors + 2 pvs with available space'),
cfg.StrOpt('lvm_type',
default='default',
help='Type of LVM volumes to deploy; (default or thin)'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class LVMVolumeDriver(driver.VolumeDriver):
"""Executes commands relating to Volumes."""
VERSION = '2.0.0'
def __init__(self, vg_obj=None, *args, **kwargs):
super(LVMVolumeDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
self.hostname = socket.gethostname()
self.vg = vg_obj
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'LVM'
self.protocol = 'local'
def set_execute(self, execute):
self._execute = execute
def check_for_setup_error(self):
"""Verify that requirements are in place to use LVM driver."""
if self.vg is None:
root_helper = utils.get_root_helper()
try:
self.vg = lvm.LVM(self.configuration.volume_group,
root_helper,
lvm_type=self.configuration.lvm_type,
executor=self._execute)
except brick_exception.VolumeGroupNotFound:
message = ("Volume Group %s does not exist" %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
vg_list = volutils.get_all_volume_groups(
self.configuration.volume_group)
vg_dict = \
(vg for vg in vg_list if vg['name'] == self.vg.vg_name).next()
if vg_dict is None:
message = ("Volume Group %s does not exist" %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
if self.configuration.lvm_type == 'thin':
# Specific checks for using Thin provisioned LV's
if not volutils.supports_thin_provisioning():
message = ("Thin provisioning not supported "
"on this version of LVM.")
raise exception.VolumeBackendAPIException(data=message)
pool_name = "%s-pool" % self.configuration.volume_group
if self.vg.get_volume(pool_name) is None:
try:
self.vg.create_thin_pool(pool_name)
except processutils.ProcessExecutionError as exc:
exception_message = ("Failed to create thin pool, "
"error message was: %s"
% exc.stderr)
raise exception.VolumeBackendAPIException(
data=exception_message)
def _sizestr(self, size_in_g):
if int(size_in_g) == 0:
return '100m'
return '%sg' % size_in_g
def _volume_not_present(self, volume_name):
return self.vg.get_volume(volume_name) is None
def _delete_volume(self, volume, is_snapshot=False):
"""Deletes a logical volume."""
if self.configuration.volume_clear != 'none' and \
self.configuration.lvm_type != 'thin':
self._clear_volume(volume, is_snapshot)
name = volume['name']
if is_snapshot:
name = self._escape_snapshot(volume['name'])
self.vg.delete(name)
def _clear_volume(self, volume, is_snapshot=False):
# zero out old volumes to prevent data leaking between users
# TODO(ja): reclaiming space should be done lazy and low priority
if is_snapshot:
# if the volume to be cleared is a snapshot of another volume
# we need to clear out the volume using the -cow instead of the
            # direct volume path. We need to skip this if we are using
# thin provisioned LVs.
# bug# lp1191812
dev_path = self.local_path(volume) + "-cow"
else:
dev_path = self.local_path(volume)
# TODO(jdg): Maybe we could optimize this for snaps by looking at
# the cow table and only overwriting what's necessary?
# for now we're still skipping on snaps due to hang issue
if not os.path.exists(dev_path):
msg = (_('Volume device file path %s does not exist.')
% dev_path)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
size_in_g = volume.get('size', volume.get('volume_size', None))
if size_in_g is None:
msg = (_("Size for volume: %s not found, "
"cannot secure delete.") % volume['id'])
LOG.error(msg)
raise exception.InvalidParameterValue(msg)
# clear_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
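        # (illustrative: a 2 GiB volume passes 2 * 1024 = 2048 MiB below)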
vol_sz_in_meg = size_in_g * units.KiB
volutils.clear_volume(
vol_sz_in_meg, dev_path,
volume_clear=self.configuration.volume_clear,
volume_clear_size=self.configuration.volume_clear_size)
def _escape_snapshot(self, snapshot_name):
        # Linux LVM reserves names that start with snapshot, so such a
        # volume name can't be created. Mangle it.
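        # Illustrative: 'snapshot-1234' becomes '_snapshot-1234', while
        # 'volume-1234' is returned unchanged.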
if not snapshot_name.startswith('snapshot'):
return snapshot_name
return '_' + snapshot_name
def _create_volume(self, name, size, lvm_type, mirror_count, vg=None):
vg_ref = self.vg
if vg is not None:
vg_ref = vg
vg_ref.create_volume(name, size, lvm_type, mirror_count)
def create_volume(self, volume):
"""Creates a logical volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
self.configuration.lvm_mirrors)
# Some configurations of LVM do not automatically activate
# ThinLVM snapshot LVs.
self.vg.activate_lv(snapshot['name'], is_snapshot=True)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
volutils.copy_volume(self.local_path(snapshot),
self.local_path(volume),
snapshot['volume_size'] * units.KiB,
self.configuration.volume_dd_blocksize,
execute=self._execute)
def delete_volume(self, volume):
"""Deletes a logical volume."""
# NOTE(jdg): We don't need to explicitly call
# remove export here because we already did it
# in the manager before we got here.
if self._volume_not_present(volume['name']):
# If the volume isn't present, then don't attempt to delete
return True
if self.vg.lv_has_snapshot(volume['name']):
            LOG.error(_('Unable to delete due to existing snapshot '
'for volume: %s') % volume['name'])
raise exception.VolumeIsBusy(volume_name=volume['name'])
self._delete_volume(volume)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']),
snapshot['volume_name'],
self.configuration.lvm_type)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
# If the snapshot isn't present, then don't attempt to delete
LOG.warning(_("snapshot: %s not found, "
"skipping delete operations") % snapshot['name'])
return True
# TODO(yamahata): zeroing out the whole snapshot triggers COW.
# it's quite slow.
self._delete_volume(snapshot, is_snapshot=True)
def local_path(self, volume, vg=None):
if vg is None:
vg = self.configuration.volume_group
# NOTE(vish): stops deprecation warning
escaped_group = vg.replace('-', '--')
escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
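        # Illustrative: VG 'cinder-volumes' and LV 'volume-1234' map to
        # '/dev/mapper/cinder--volumes-volume--1234'; device-mapper doubles
        # any dash that appears inside a name.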
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'])
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
volume_name = src_vref['name']
temp_id = 'tmp-snap-%s' % volume['id']
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'name': 'clone-snap-%s' % volume['id'],
'id': temp_id}
self.create_snapshot(temp_snapshot)
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
try:
volutils.copy_volume(
self.local_path(temp_snapshot),
self.local_path(volume),
src_vref['size'] * units.KiB,
self.configuration.volume_dd_blocksize,
execute=self._execute)
finally:
self.delete_snapshot(temp_snapshot)
def clone_image(self, volume, image_location, image_id, image_meta):
return None, False
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):
with fileutils.file_open(volume_path) as volume_file:
backup_service.backup(backup, volume_file)
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):
with fileutils.file_open(volume_path, 'wb') as volume_file:
backup_service.restore(backup, volume['id'], volume_file)
def get_volume_stats(self, refresh=False):
"""Get volume status.
        If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(_("Updating volume stats"))
if self.vg is None:
LOG.warning(_('Unable to update stats on non-initialized '
'Volume Group: %s'), self.configuration.volume_group)
return
self.vg.update_volume_group_info()
data = {}
        # Note(zhiteng): This information is driver/backend specific;
# each driver may define these values in its own config options
# or fetch from driver specific configuration file.
data["volume_backend_name"] = self.backend_name
data["vendor_name"] = 'Open Source'
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.protocol
if self.configuration.lvm_mirrors > 0:
data['total_capacity_gb'] =\
self.vg.vg_mirror_size(self.configuration.lvm_mirrors)
data['free_capacity_gb'] =\
self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors)
elif self.configuration.lvm_type == 'thin':
data['total_capacity_gb'] = self.vg.vg_thin_pool_size
data['free_capacity_gb'] = self.vg.vg_thin_pool_free_space
else:
data['total_capacity_gb'] = self.vg.vg_size
data['free_capacity_gb'] = self.vg.vg_free_space
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = False
data['location_info'] =\
('LVMVolumeDriver:%(hostname)s:%(vg)s'
':%(lvm_type)s:%(lvm_mirrors)s' %
{'hostname': self.hostname,
'vg': self.configuration.volume_group,
'lvm_type': self.configuration.lvm_type,
'lvm_mirrors': self.configuration.lvm_mirrors})
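        # Illustrative value (hostname and VG below are examples only):
        #   'LVMVolumeDriver:node1.example.com:cinder-volumes:default:0'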
self._stats = data
def extend_volume(self, volume, new_size):
"""Extend an existing volume's size."""
self.vg.extend_volume(volume['name'],
self._sizestr(new_size))
def manage_existing(self, volume, existing_ref):
"""Manages an existing LV.
Renames the LV to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated.
"""
lv_name = existing_ref['lv_name']
lv = self.vg.get_volume(lv_name)
# Attempt to rename the LV to match the OpenStack internal name.
try:
self.vg.rename_volume(lv_name, volume['name'])
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to rename logical volume %(name)s, "
"error message was: %(err_msg)s")
% {'name': lv_name,
'err_msg': exc.stderr})
raise exception.VolumeBackendAPIException(
data=exception_message)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of an existing LV for manage_existing.
existing_ref is a dictionary of the form:
{'lv_name': <name of LV>}
"""
# Check that the reference is valid
if 'lv_name' not in existing_ref:
reason = _('Reference must contain lv_name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
lv_name = existing_ref['lv_name']
lv = self.vg.get_volume(lv_name)
# Raise an exception if we didn't find a suitable LV.
if not lv:
kwargs = {'existing_ref': lv_name,
'reason': 'Specified logical volume does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
# LV size is returned in gigabytes. Attempt to parse size as a float
# and round up to the next integer.
try:
lv_size = int(math.ceil(float(lv['size'])))
except ValueError:
exception_message = (_("Failed to manage existing volume "
"%(name)s, because reported size %(size)s "
"was not a floating-point number.")
% {'name': lv_name,
'size': lv['size']})
raise exception.VolumeBackendAPIException(
data=exception_message)
return lv_size
class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
"""Executes commands relating to ISCSI volumes.
We make use of model provider properties as follows:
``provider_location``
if present, contains the iSCSI target information in the same
format as an ietadm discovery
i.e. '<ip>:<port>,<portal> <target IQN>'
``provider_auth``
if present, contains a space-separated triple:
'<auth method> <auth username> <auth password>'.
`CHAP` is the only auth_method in use at the moment.
"""
def __init__(self, *args, **kwargs):
self.db = kwargs.get('db')
self.target_helper = self.get_target_helper(self.db)
super(LVMISCSIDriver, self).__init__(*args, **kwargs)
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'LVM_iSCSI'
self.protocol = 'iSCSI'
def set_execute(self, execute):
super(LVMISCSIDriver, self).set_execute(execute)
if self.target_helper is not None:
self.target_helper.set_execute(execute)
def _create_target(self, iscsi_name, iscsi_target,
volume_path, chap_auth, lun=0,
check_exit_code=False, old_name=None):
# NOTE(jdg): tgt driver has an issue where with a lot of activity
# (or sometimes just randomly) it will get *confused* and attempt
# to reuse a target ID, resulting in a target already exists error
# Typically a simple retry will address this
# For now we have this while loop, might be useful in the
# future to throw a retry decorator in common or utils
attempts = 2
while attempts > 0:
attempts -= 1
try:
# NOTE(jdg): For TgtAdm case iscsi_name is all we need
# should clean this all up at some point in the future
tid = self.target_helper.create_iscsi_target(
iscsi_name,
iscsi_target,
0,
volume_path,
chap_auth,
check_exit_code=check_exit_code,
old_name=old_name)
break
except brick_exception.ISCSITargetCreateFailed:
if attempts == 0:
raise
else:
LOG.warning(_('Error creating iSCSI target, retrying '
'creation for target: %s') % iscsi_name)
return tid
def ensure_export(self, context, volume):
volume_name = volume['name']
iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
volume_name)
volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
volume_name)
# NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
# should clean this all up at some point in the future
model_update = self.target_helper.ensure_export(context, volume,
iscsi_name,
volume_path)
if model_update:
self.db.volume_update(context, volume['id'], model_update)
def create_export(self, context, volume):
return self._create_export(context, volume)
def _create_export(self, context, volume, vg=None):
"""Creates an export for a logical volume."""
if vg is None:
vg = self.configuration.volume_group
volume_path = "/dev/%s/%s" % (vg, volume['name'])
data = self.target_helper.create_export(context, volume, volume_path)
return {
'provider_location': data['location'],
'provider_auth': data['auth'],
}
def remove_export(self, context, volume):
self.target_helper.remove_export(context, volume)
def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
"""Optimize the migration if the destination is on the same server.
If the specified host is another back-end on the same server, and
the volume is not attached, we can do the migration locally without
going through iSCSI.
"""
false_ret = (False, None)
if volume['status'] != 'available':
return false_ret
if 'location_info' not in host['capabilities']:
return false_ret
info = host['capabilities']['location_info']
try:
(dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
info.split(':')
lvm_mirrors = int(lvm_mirrors)
except ValueError:
return false_ret
if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
return false_ret
if dest_vg != self.vg.vg_name:
vg_list = volutils.get_all_volume_groups()
try:
(vg for vg in vg_list if vg['name'] == dest_vg).next()
except StopIteration:
message = ("Destination Volume Group %s does not exist" %
dest_vg)
LOG.error(_('%s'), message)
return false_ret
helper = utils.get_root_helper()
dest_vg_ref = lvm.LVM(dest_vg, helper,
lvm_type=lvm_type,
executor=self._execute)
self.remove_export(ctxt, volume)
self._create_volume(volume['name'],
self._sizestr(volume['size']),
lvm_type,
lvm_mirrors,
dest_vg_ref)
volutils.copy_volume(self.local_path(volume),
self.local_path(volume, vg=dest_vg),
volume['size'],
self.configuration.volume_dd_blocksize,
execute=self._execute)
self._delete_volume(volume)
model_update = self._create_export(ctxt, volume, vg=dest_vg)
return (True, model_update)
def _iscsi_location(self, ip, target, iqn, lun=None):
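        # Illustrative result (values below are examples only):
        #   '10.0.0.1:3260,1 iqn.2010-10.org.openstack:volume-0001 0'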
return "%s:%s,%s %s %s" % (ip, self.configuration.iscsi_port,
target, iqn, lun)
def _iscsi_authentication(self, chap, name, password):
return "%s %s %s" % (chap, name, password)
class LVMISERDriver(LVMISCSIDriver, driver.ISERDriver):
"""Executes commands relating to ISER volumes.
We make use of model provider properties as follows:
``provider_location``
if present, contains the iSER target information in the same
format as an ietadm discovery
i.e. '<ip>:<port>,<portal> <target IQN>'
``provider_auth``
if present, contains a space-separated triple:
'<auth method> <auth username> <auth password>'.
`CHAP` is the only auth_method in use at the moment.
"""
def __init__(self, *args, **kwargs):
self.target_helper = self.get_target_helper(kwargs.get('db'))
LVMVolumeDriver.__init__(self, *args, **kwargs)
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'LVM_iSER'
self.protocol = 'iSER'
| {
"content_hash": "a4f9557375ee36bf622bb612ef070195",
"timestamp": "",
"source": "github",
"line_count": 610,
"max_line_length": 79,
"avg_line_length": 40.87377049180328,
"alnum_prop": 0.5625877351301488,
"repo_name": "spring-week-topos/cinder-week",
"id": "2dc2fdaf2f62d82d3258f3ba8494f0d590dcba87",
"size": "25664",
"binary": false,
"copies": "1",
"ref": "refs/heads/spring-week",
"path": "cinder/volume/drivers/lvm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6134883"
},
{
"name": "Shell",
"bytes": "8998"
}
],
"symlink_target": ""
} |
"""
AUTHOR: KEITH WILLIAMS
DATE: 14/2/2017
DESCRIPTION: Manage all database interactions
"""
# PyMongo was the recommended python driver for MongoDB at
# https://docs.mongodb.com/ecosystem/drivers/python/
import pymongo
import redis
# http://api.mongodb.com/python/current/api/bson/json_util.html
from bson.json_util import dumps, ObjectId
import json
# Redis information
REDIS_HOST='localhost'
REDIS_PORT=6379
REDIS_PASSWORD=''
# Connect to Redis
red = redis.Redis(
host=REDIS_HOST,
port=REDIS_PORT,
password=REDIS_PASSWORD,
decode_responses=True)
# Mongo information
MONGO_HOST='localhost'
MONGO_PORT=27017
# Connect to Mongo
mongo = pymongo.MongoClient(
MONGO_HOST,
MONGO_PORT)
# Get the Mongo database
mongodb = mongo['test-database']
# Get the collection of users
users_collection = mongodb['users-collection']
# Get the collection of contacts
contacts_collection = mongodb['contacts-collection']
# Get the collection of messages
messages_collection = mongodb['messages-collection']
def is_username_unique(username):
# Return the user document with the given username if it exists
# Otherwise return None
users_with_username = users_collection.find_one({'username': username})
    # If None was returned the username is unique, so return True
    return users_with_username is None
# Create a new user document in MongoDB
def create_user(user):
# Create a new document with the users data in the users collection in the mongo database
users_collection.insert_one(user)
# Retrieve and return a user document with a matching username from MongoDB
def get_user(username):
# Get the document or None if no user document with the given username exists
user = users_collection.find_one({'username': username})
return user
# Retrieve and return all user documents from MongoDB that match the search
def search_users(search, current_user):
# Get a list of the current users contacts
contacts = contacts_collection.aggregate([
# Get contact documents where the current user is in the list of users.
# Only return the username, ignore all other properties.
{ "$match": {
'users': current_user,
}
},
        # Break array into separate objects
{ "$unwind": "$users" },
# Ignore the resulting objects that have the current users username
{ "$match": {
'users': {"$ne": current_user}
}
},
{ "$project": {
'_id': 0,
'username': "$users",
}
}
])
# Define an empty list called usernames
usernames = []
# Loop through the contact documents returned from mongoDB and
# append each username to the usernames list
for doc in contacts:
usernames.append(doc['username'])
# This query will return all users that match the search except for the
# user that is currently logged in and their contacts. This is a case
# insensitive search. Only return the usernames.
# NOTE: A more efficient way to perform this search would be to store
# another property in every user document called lower_username which will
# be the lowercase value of the username and then match the lowercase search
# string against that rather than using a case insensitive search. This
    # would be more efficient as it could use indexes.
users = users_collection.find({"$and": [ {'username': {'$regex': search, '$options': 'i'}}, {'username': { '$ne': current_user}}, {'username': {'$nin': usernames}} ]}, { 'username': 1, '_id': 0 })
# The dumps() method is used to create a JSON representation of the data.
return dumps(users)
# Return true if the given users are already contacts
def has_contact(current_user, username):
# Return the contact document where the two given usernames are in the list of usernames
# if it exists, otherwise return None
contacts = contacts_collection.find_one({ "$and": [ { 'users': current_user }, { 'users': username } ] })
    # If None was returned the users are not contacts, so return False
    return contacts is not None
# Add the two users to a new contact document with a unique channel identifier
def add_contact(current_user, username, timestamp, channel):
# Create a new JSON object containing the contact data
contact = {"users": [current_user, username], "created": timestamp, "channel": channel}
# Create a new document with the contact data in the contacts collection in MongoDB
contacts_collection.insert_one(contact)
# Delete the contact document that has the two given users in the users list
def delete_contact(current_user, username):
# Delete the document
contacts_collection.delete_one({'users': { "$all" : [current_user, username] }})
# Return all the users who are listed as the given users contact
def get_contacts(username):
# Query MongoDB for a list of the current users contacts
# Return a list of objects containing the contacts usernames
# Sort the list alphabetically by username
contacts = contacts_collection.aggregate([
# Get documents where the current user is in the list of users
{ "$match": {
'users': username,
}
},
        # Break array into separate objects
{ "$unwind": "$users" },
# Ignore the resulting objects that have the current users username
{ "$match": {
'users': {"$ne": username}
}
},
# Only return username and channel
{ "$project": {
'_id': 0,
'contact': "$users",
'channel': "$channel"
}
},
# Sort by username in ascending value
{ "$sort" : {
'contact': 1
}
}
])
# Convert the cursor object to JSON format
return dumps(contacts)
def event_stream(channel):
pubsub = red.pubsub()
pubsub.subscribe(channel)
for message in pubsub.listen():
# Ignore all types except for 'message' which are the messages sent by a client.
# For example, subscribe.
if (message['type'] == 'message'):
# Get the data property of the object. The data property contains the data that
# was published to the channel, in this case it is the message.
data = message['data']
# The new line characters (\n) are required
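            # by the Server-Sent Events format: each event is framed as
            # "data: <payload>\n\n" and the blank line marks the end of the event.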
yield 'data: %s\n\n' % data
# Publish data to the Redis channel and save to MongoDB for persistence
def post_message(channel, data):
# Publish to Redis channel first as it should be published before being written to mongo
    # for persistence. The JSON must be converted to a string or it will be returned as a
# byte string.
red.publish(channel, json.dumps(data))
    # Then add the message to the messages collection in the mongo database for persistence
messages_collection.insert_one(data)
# Get all the previous messages from the given channel
def get_messages(channel, last_id, step):
# Get at most X documents from the messages-collection where the channel
# value in the document is the same as the channel that is given in the URL.
# These documents must have an _id property that is less than the last_id.
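    # Illustrative call (channel name is an example only):
    #   get_messages('channel-1', '0', 20) returns the 20 most recent messages;
    #   passing the oldest returned _id as last_id on the next call pages back.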
if last_id == '0':
messages = messages_collection.find({'channel': channel})
else:
messages = messages_collection.find({"$and": [ {'channel': channel}, {'_id': {"$lt": ObjectId(last_id)}} ]})
messages = list(messages.sort('_id', -1).limit(step))
# The find() method returns a cursor. The dumps() method is used
# to create a JSON representation of the data. Reverse the list.
    return dumps(messages[::-1])
| {
"content_hash": "24a47b3153180e84a8a96d5d19483e2d",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 197,
"avg_line_length": 33.24770642201835,
"alnum_prop": 0.7128863134657837,
"repo_name": "KeithWilliamsGMIT/3rd-Year-Project",
"id": "bd930dacc1f6d89321eb04de1ee79d5e28ea339e",
"size": "7248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/app/databases.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10813"
},
{
"name": "HTML",
"bytes": "7941"
},
{
"name": "JavaScript",
"bytes": "6253"
},
{
"name": "Python",
"bytes": "15076"
},
{
"name": "TypeScript",
"bytes": "25194"
}
],
"symlink_target": ""
} |
from rsqueakvm.plugins.foreign_language.process import W_ForeignLanguageProcess
from rsqueakvm.plugins.ruby import utils
from rsqueakvm.plugins.ruby.frame import WR_FrameObject
from rsqueakvm.plugins.ruby.model import W_RubyObject
from rsqueakvm.plugins.ruby.objspace import ruby_space
from topaz.error import RubyError, print_traceback
from topaz.executioncontext import ExecutionContext
class W_RubyProcess(W_ForeignLanguageProcess):
_attrs_ = ['source', 'filepath', 'ec']
repr_classname = 'W_RubyProcess'
def __init__(self, space, w_rcvr=None, method_name='', args_w=None,
source='', filepath='-e',
is_send=False, break_on_exceptions=False):
W_ForeignLanguageProcess.__init__(
self, space, w_rcvr, method_name, args_w,
is_send, break_on_exceptions)
self.source = source
self.filepath = filepath
self.ec = ExecutionContext()
self.init_runner()
def eval(self):
if self.source == '':
return self.fail('Invalid Ruby eval')
try:
retval = ruby_space.execute(self.source, filepath=self.filepath)
self.set_result(W_RubyObject(retval))
except RubyError as e:
self.set_result(W_RubyObject(e.w_value))
def send(self):
st_to_rb = utils.smalltalk_to_ruby
wr_rcvr = st_to_rb(self.space(), self.w_rcvr)
args_rw = [st_to_rb(self.space(), w_arg) for w_arg in self.args_w]
try:
wr_result = ruby_space.send(
wr_rcvr, self.method_name, args_w=args_rw)
self.set_result(W_RubyObject(wr_result))
except RubyError as e:
print_traceback(ruby_space, e.w_value)
error = W_RubyObject(e.w_value)
self.set_error(error)
self.set_result(error)
except Exception as e:
# import pdb; pdb.set_trace()
self.fail(
'No result in send prim (wr_rcvr: %s, methodname: %s, "%s")'
% (wr_rcvr, self.method_name, e))
def pre_resume(self):
ruby_space.current_ruby_process.set(self)
def post_resume(self):
# unset `current_ruby_process` to restore original behavior
ruby_space.current_ruby_process.set(None)
def w_top_frame(self):
if self.ec is None:
return None
topframe = self.ec.gettoprubyframe()
if topframe is None:
return None
return W_RubyObject(WR_FrameObject(topframe))
def guess_classname(self):
return self.repr_classname
| {
"content_hash": "973df493f3161a3a43b5162dfcc52fc0",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 36.91428571428571,
"alnum_prop": 0.6133900928792569,
"repo_name": "HPI-SWA-Lab/RSqueak",
"id": "5a2877100d2c2bc0bae59b1d4c270dfc5a205af7",
"size": "2584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rsqueakvm/plugins/ruby/process.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1638"
},
{
"name": "C",
"bytes": "115644"
},
{
"name": "HTML",
"bytes": "4754"
},
{
"name": "PowerShell",
"bytes": "1691"
},
{
"name": "Python",
"bytes": "1140634"
},
{
"name": "Shell",
"bytes": "18715"
},
{
"name": "Smalltalk",
"bytes": "71208"
}
],
"symlink_target": ""
} |
import inspect
from mocha import (register_package,
config,
decorators as h_deco,
render,
abort
)
from mocha.core import (set_view_attr,
get_view_attr)
# `contrib` prefix is set so all templates in this package
# get accessed via `contrib/`
register_package(__package__, "contrib")
# ------------------------------------------------------------------------------
# ADMIN
ADMIN_LAYOUT = "contrib/admin/layout.jade"
ADMIN_TAG = "ADMIN"
def disable_admin(*a, **kw):
abort(404)
# @admin
def admin(f):
"""
@admin
A decorator that turns a class into ADMIN
"""
import auth.decorators as a_deco
if not inspect.isclass(f):
raise TypeError("@ADMIN expects a Mocha class")
if config("ADMIN_ENABLED", True):
# Index route
index_route = config("ADMIN_INDEX_ROUTE", "/")
# ROLES
min_role = config("ADMIN_MIN_ACL", "ADMIN")
role_name = "accepts_%s_roles" % min_role.lower()
if not hasattr(a_deco, role_name):
raise ValueError("Invalid ADMIN_MIN_ACL: %s" % min_role)
getattr(a_deco, role_name)(f)
a_deco.login_required(f)
set_view_attr(f, "nav_tags", [ADMIN_TAG])
layout = config("ADMIN_LAYOUT") or ADMIN_LAYOUT
return render.template(layout=layout)(f)
else:
set_view_attr(f, "nav_visible", False)
f.before_request = disable_admin
return f
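# Illustrative usage of the @admin decorator (class and import names below are
# hypothetical, not part of this module):
#
#   from mocha import Mocha
#   from mocha.contrib import admin
#
#   @admin
#   class Dashboard(Mocha):
#       def index(self):
#           return {}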
| {
"content_hash": "3c99f68597c9f953967deb076a9f29d0",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 80,
"avg_line_length": 25.049180327868854,
"alnum_prop": 0.5399214659685864,
"repo_name": "mardix/Mocha",
"id": "4b4477c97e814560c1505fd9092356ea23b540b2",
"size": "1528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mocha/contrib/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22749"
},
{
"name": "HTML",
"bytes": "73805"
},
{
"name": "JavaScript",
"bytes": "83936"
},
{
"name": "Python",
"bytes": "267202"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import ConfigParser
import datetime
import logging
import os
from Queue import Queue
import re
import subprocess
import sys
import yaml
import concurrent.futures
from rackspace_monitoring.drivers import rackspace
from rackspace_monitoring.providers import get_driver
from rackspace_monitoring.types import Provider
import alarmparser
DEFAULT_CONFIG_FILE = '/root/.raxrc'
logging.basicConfig(level=logging.DEBUG,
datefmt="",
format="%(message)s",
stream=sys.stdout)
LOGGER = logging.getLogger(__name__)
# Exclude checks that RAX MaaS includes by default
EXCLUDEDCHECK_BASE = [
'filesystem',
'load_average',
'memory',
'network'
]
class ParseException(Exception):
def __init__(self, message, alarm=None, check=None):
super(ParseException, self).__init__(message)
self.check = check
self.alarm = alarm
class RpcMaas(object):
"""Class representing a connection to the MAAS Service"""
def __init__(self, entity_match='', entities=None,
config_file=DEFAULT_CONFIG_FILE, use_api=True):
self.entity_label_whitelist = entities
self.entity_match = entity_match
self.config_file = config_file
self.use_api = use_api
if self.use_api:
self.driver = get_driver(Provider.RACKSPACE)
self._get_conn()
self._get_overview()
self._add_links()
self._filter_entities()
self.q = Queue()
def _filter_entities(self):
if not self.entity_label_whitelist:
self.entities = [e['entity'] for e in self.overview
if self.entity_match in e['entity'].label]
else:
self.entities = []
for entry in self.overview:
entity = entry['entity']
for label in self.entity_label_whitelist:
if entity.label == label:
self.entities.append(entity)
if not self.entities:
raise Exception("No Entities found matching --entity or "
"--entitymatch")
def _get_conn(self):
"""Read config file and use extracted creds to connect to MAAS"""
self.config = ConfigParser.RawConfigParser()
self.config.read(self.config_file)
self.conn = None
try:
user = self.config.get('credentials', 'username')
api_key = self.config.get('credentials', 'api_key')
self.conn = self.driver(user, api_key)
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
url = self.config.get('api', 'url')
token = self.config.get('api', 'token')
self.conn = self.driver(None, None, ex_force_base_url=url,
ex_force_auth_token=token)
def _get_overview(self):
self.overview = self.conn.ex_views_overview()
def _add_links(self):
"""Add missing parent/child links to objects"""
# Entity --> Check
for entry in self.overview:
entity = entry['entity']
entity.checks = entry['checks']
entity.alarms = []
entity.metrics = []
# Check --> Entity
for check in entity.checks:
check.entity = entity
check.metrics = []
# Check <--> Alarm
check.alarms = []
for alarm in self.get_alarms(check=check, entry=entry):
alarm.check = check
alarm.entity = entity
check.alarms.append(alarm)
entity.alarms.append(alarm)
def _add_metrics_list_to_check(self, check):
"""Called via ThreadPoolExecutor, result returned via queue."""
metrics = self.conn.list_metrics(check.entity.id, check.id)
self.q.put((check, metrics))
def add_metrics(self):
"""Add metrics list to each checks
Requires a call per check, so ThreadPoolExecutor is used to
parallelise and reduce time taken
"""
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
for entity in self.get_entities():
for check in entity.checks:
executor.submit(self._add_metrics_list_to_check, check)
while not self.q.empty():
check, metrics = self.q.get()
for metric in metrics:
metric.check = check
metric.entity = check.entity
check.metrics.append(metric)
check.entity.metrics.append(metric)
def get_entities(self):
"""Return list of known entities
entity_match filter not required as this is done in __init__
"""
return self.entities
def get_checks(self, check_match=''):
"""List checks for entities matching a string"""
checks = []
for entity in self.entities:
checks.extend([c for c in entity.checks
if check_match in c.label])
return checks
def get_alarms(self, entry, check):
"""Get list of alarms
Params:
entry: overview dictionary for one entity.
This function adds a state field to each alarm object
using information from the 'latest_alarm_states' entry key.
"""
alarms = []
for alarm in entry['alarms']:
if not alarm.check_id == check.id:
continue
# add state to the alarm object from latest_alarm_states
alarm_states = sorted(
(als for als in entry['latest_alarm_states']
if als.alarm_id == alarm.id),
key=lambda x: x.timestamp
)
if alarm_states:
alarm.state = alarm_states[-1].state
else:
alarm.state = "UNKNOWN"
alarms.append(alarm)
return alarms
def str2bool(boolean):
if not boolean:
return False
elif boolean.lower() in ("yes", "true", "1"):
return True
elif boolean.lower() in ("no", "false", "0"):
return False
else:
raise BaseException('Not a Boolean')
class RpcMaasAgentConfig(object):
"""Read MAAS Agent configuration files
Parse them as yaml and store that.
"""
def __init__(self, agentconfdpath):
self.agentconfdpath = agentconfdpath
self.checks = self._parse_maas_configs()
def _parse_maas_configs(self):
"""Read all config files in agentconfdpath"""
self.checks = {}
for path in os.listdir(self.agentconfdpath):
if path.endswith('.yaml'):
check = self._parse_config_file(
os.path.join(self.agentconfdpath, path)
)
if not str2bool(check.get('disabled')):
self.checks[check['label']] = check
else:
return self.checks
def _parse_config_file(self, path):
"""Parse one yaml config file"""
with open(path, 'r') as config_file:
blob = yaml.safe_load(config_file)
return blob
class RpcMassCli(object):
"""CLI interface for RPC Maas"""
def __init__(self):
self.parse_args()
LOGGER.addHandler(logging.FileHandler(self.args.logfile))
use_api = True
if self.args.command in ['verify-alarm-syntax', 'verify-local']:
use_api = False
self.rpcm = RpcMaas(self.args.entitymatch,
self.args.entity,
self.args.raxrcpath,
use_api)
self.rpcmac = RpcMaasAgentConfig(self.args.agentconfdir)
def parse_args(self):
parser = argparse.ArgumentParser(description='Test MaaS checks')
parser.add_argument('command',
type=str,
choices=['list-alarms', 'run-checks',
'list-checks', 'delete',
'compare-checks',
'compare-alarms',
'checks-without-alarms',
'overview',
'verify-created',
'verify-status',
'verify-local',
'remove-defunct-checks',
'remove-defunct-alarms'],
help='Command to execute')
parser.add_argument('--force',
action="store_true",
help='Do stuff irrespective of consequence'),
parser.add_argument('--entitymatch',
type=str,
help='Limit testing to checks on entities '
' whose label contains this string.',
default='')
parser.add_argument('--entity',
type=str,
help='Limit testing to entities whose labels'
' exactly match this string. Can be specified'
' multiple times',
action='append',
default=[])
parser.add_argument('--checkmatch',
type=str,
help='Limit testing to checks '
' whose label contains this string',
default='')
parser.add_argument('--tab',
action="store_true",
help='Output in tab-separated format, applies '
'only to alarms and checks commands')
parser.add_argument('--raxrcpath',
type=str,
help='path to config file to read',
default=DEFAULT_CONFIG_FILE)
parser.add_argument('--agentconfdir',
type=str,
help='path to config file to read',
default='/etc/rackspace-monitoring-agent.conf.d')
parser.add_argument('--logfile',
type=str,
help='path to log file to write',
default='/var/log/rpc_maas_tool.log')
parser.add_argument('--verbose',
action="store_true",
                            help='Show items without failures when listing '
'alarms or running checks')
parser.add_argument('--excludedcheck',
action="append",
                            help='A check that should not be present '
'can be specified multiple times',
default=[])
self.args = parser.parse_args()
def main(self):
if self.rpcm.use_api is True and self.rpcm.conn is None:
LOGGER.error("Unable to get a connection to MaaS, exiting")
sys.exit(1)
dd = {'list-alarms': self.alarms,
'run-checks': self.run_checks,
'list-checks': self.checks,
'compare-checks': self.compare_checks,
'checks-without-alarms': self.checks_without_alarms,
'compare-alarms': self.compare_alarms,
'overview': self.overview,
'verify-created': self.verify_created,
'verify-status': self.verify_status,
'delete': self.delete,
'remove-defunct-checks': self.remove_defunct_checks,
'remove-defunct-alarms': self.remove_defunct_alarms,
'verify-alarm-syntax': self.verify_alarm_syntax,
'verify-local': self.verify_local
}
result = dd[self.args.command]()
if result is None:
return 0
else:
return result
def _parse_alarm_criteria(self, alarm):
"""Use the waxeye generated parser to parse the alarm critera DSL"""
try:
            # Waxeye requires deep recursion, 10000 stack frames should
# use about 5mb of memory excluding stored data.
sys.setrecursionlimit(10000)
p = alarmparser.Parser()
ast = p.parse(alarm['criteria'])
if ast.__class__.__name__ == 'AST':
return ast
else:
raise ParseException(
"Cannot parse alarm criteria: {alarm} Error: {ast}"
.format(alarm=alarm['label'],
ast=ast), alarm=alarm)
except RuntimeError as e:
message = ("Failed to parse {name}: {criteria}."
" Message: {message}"
.format(name=alarm['name'],
criteria=alarm['criteria'],
message=e.message))
raise ParseException(message, alarm=alarm)
def verify_alarm_syntax(self):
"""Verify syntax by parsing the alarm criteria for all known checks """
rc = 0
        for check in self.rpcmac.checks.values():
for alarm in check.alarms:
try:
self._parse_alarm_criteria(alarm)
except ValueError as e:
LOGGER.info(e)
rc = 1
return rc
def _find_metrics(self, ast, metrics):
"""Recursively descend AST looking for metricName elements
        When a metric name is found, a string is constructed from all its
        single-character child nodes, and a list of metric name strings is
        returned.
"""
if hasattr(ast, 'type') and ast.type == 'metricName':
name_node = ast.children[0]
name_str = ''.join(map(str, name_node.children))
metrics.append(name_str)
if hasattr(ast, 'children'):
for child in ast.children:
child_metrics = self._find_metrics(child, [])
if child_metrics:
metrics.extend(child_metrics)
return metrics
def verify_local(self):
"""Checks MaaS configuration without using MaaS API
Checks three things:
1) Execute the command defined in each check and check its return code
2) Compile the alarm criteria and check for syntax
3) Check the metrics required by the alarm criteria against the metrics
produced by executing the check commands.
"""
status_line_re = re.compile('status\s+(?P<status>.*)')
metric_line_re = re.compile(
'metric\s+(?P<name>[^\s]+)\s+(?P<unit>[^\s]+)\s+(?P<value>[^\s]+)')
def _alarm_metrics_from_check(check):
"""Get all the metrics referenced by a check's alarms"""
metrics = []
for alarm in check['alarms'].values():
ast = self._parse_alarm_criteria(alarm)
return self._find_metrics(ast, metrics)
def _execute_check(args, check, rpcm):
"""Execute one check
This function will be called from a threadpool thread.
"""
try:
result = {'success': True,
'output': subprocess.check_output(
args,
stderr=subprocess.STDOUT),
'check': check
}
except subprocess.CalledProcessError as e:
result = {'success': False,
'output': e.output,
'check': check
}
rpcm.q.put(result)
# Checks are executed using a threadpool to speed up verification.
execution_results = []
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
for check in self.rpcmac.checks.values():
if check['type'] != 'agent.plugin':
continue
args = ["{plugin_path}{plugin}".format(
plugin_path='/usr/lib/rackspace-monitoring-agent/plugins/',
plugin=check['details']['file'])]
try:
args.extend(check['details']['args'])
except KeyError:
check['details']['args'] = ""
args.extend(check['details']['args'])
executor.submit(_execute_check, args, check, self.rpcm)
while not self.rpcm.q.empty():
execution_results.append(self.rpcm.q.get())
available_metrics = set() # metrics produced by check commands
required_metrics = set() # metrics used in alarm criteria
failed_checks = [] # checks that failed to execute or not 'okay'
invalid_criteria = [] # alarms with invalid criteria
for result in execution_results:
check = result['check']
if result['success'] is not True:
failed_checks.append(result)
continue
# use the output of executing the checks to find available metrics
for line in result['output'].splitlines():
# check the status line and return code
match = status_line_re.match(line)
if match:
status = match.groupdict()['status']
if status != 'okay':
failed_checks.append(result)
# store all the metrics that are returned by the check
match = metric_line_re.match(line)
if match:
available_metrics.add(match.groupdict()['name'])
# Parse alarm criteria
for check in self.rpcmac.checks.values():
try:
metrics = _alarm_metrics_from_check(check)
# Non agent metrics are not added to required_metrics because
# we can't determine locally the metrics that will be
# available to alarms for remote checks.
if check['type'] == 'agent.plugin':
required_metrics.update(metrics)
except ParseException as e:
invalid_criteria.append({
'check': check,
'alarm': e.alarm,
'error': e.message
})
missing_metrics = required_metrics - available_metrics
if (failed_checks == [] and
missing_metrics == set() and
invalid_criteria == []):
LOGGER.info("All checks executed OK, "
"All alarm criteria syntax OK, "
"All required metrics are present")
return 0
if missing_metrics:
LOGGER.info(
"The following metrics are required by alarms but not"
" produced by any checks: {missing_metrics}".format(
missing_metrics=missing_metrics))
if failed_checks:
LOGGER.info(
"The following checks failed to execute or didn't return "
"'okay' as their status: {failed_checks}".format(
failed_checks=[(r['check']['label'], r['output'])
for r in failed_checks]))
if invalid_criteria:
LOGGER.info(
"The following alarms have critera that could not be parsed:"
" {alarms}".format(
alarms="\n".join([
"Alarm: {name} Criteria: {criteria}"
" Error: {error}".format(
name=ic['alarm']['label'],
criteria=ic['alarm']['criteria'],
error=ic['error'])
for ic in invalid_criteria
])
)
)
return 1
def checks_without_alarms(self):
"""list checks with no alarms"""
no_alarms = list()
for c in self.rpcm.get_checks(self.args.checkmatch):
if not c.alarms and c.type != 'remote.ping':
for base_exclude in EXCLUDEDCHECK_BASE:
                    if base_exclude in c.label.lower():
break
else:
no_alarms.append(c)
if no_alarms:
LOGGER.info("The following checks have 0 alarms "
"registered with the maas api:")
self._write(no_alarms)
return 1
def excluded_checks(self):
"""List checks that are present but shouldn't be"""
present_but_excluded_checks = []
checks = self.rpcm.get_checks(self.args.entitymatch)
for check in checks:
for exclude in self.args.excludedcheck:
# ping is a default check and may not have an alarm
if exclude in check.label and not check.disabled:
present_but_excluded_checks.append(check)
if present_but_excluded_checks:
LOGGER.info("The following checks are in the excluded_checks list"
" but are still present in the API:")
self._write(present_but_excluded_checks)
return 1
def compare_checks(self):
"""Compare checks
        Check that all checks found in config files are registered with the
maas api.
"""
api_checks = self.rpcm.get_checks(self.args.checkmatch)
missing_checks = [configcheck for configcheck in
self.rpcmac.checks.keys()
if configcheck not in
[apicheck.label for apicheck in api_checks]]
if missing_checks:
LOGGER.info("The following checks have config files but are not "
"registered with the maas api: %(missing_checks)s "
% {'missing_checks': missing_checks})
return 1
def _compare_alarm(self, config_alarm, api_alarm):
"""Compare one config alarm with one api alarm"""
return (config_alarm['label'] == api_alarm.label and
config_alarm['criteria'].strip() == api_alarm.criteria.strip())
def compare_alarms(self):
"""Compare alarms.
Check that all alarms found in MAAS agent config files are also
listed by the maas api.
"""
api_alarms = []
for entity in self.rpcm.get_entities():
api_alarms.extend(entity.alarms)
config_alarms = {}
for check in self.rpcmac.checks.values():
config_alarms.update(check['alarms'])
missing_alarms = []
for config_alarm in config_alarms.values():
if not any([self._compare_alarm(config_alarm, api_alarm)
for api_alarm in api_alarms]):
missing_alarms.append(config_alarm)
if missing_alarms:
LOGGER.info("The following alarms are present in config files but "
"are not registered with the maas api: "
"%(missing_alarms)s "
% {'missing_alarms':
[a['label'] for a in missing_alarms]})
return 1
def verify_created(self):
"""Verify that all checks and alarms have been created"""
LOGGER.info("--- %(datestamp)s ---"
% {'datestamp': datetime.datetime.now()})
result = 0
for step in [self.compare_checks,
self.compare_alarms,
self.checks_without_alarms,
self.excluded_checks]:
step_result = step()
if step_result is not None:
result += step_result
if result > 0:
return 1
else:
LOGGER.info("All expected checks and alarms are present")
def verify_status(self):
"""Verify MAAS configuration and status"""
LOGGER.info("--- %(datestamp)s ---"
% {'datestamp': datetime.datetime.now()})
alarms, failed_alarms = self._get_failed_alarms()
if self.args.verbose:
checks, failed_checks = self._get_failed_checks()
else:
# run checks that have at least one failed alarm
            # in most situations this is much quicker than executing all checks
checks_with_failed_alarms = set(a.check for a in failed_alarms)
checks, failed_checks = self._get_failed_checks(
checks_with_failed_alarms)
failures = failed_checks + failed_alarms
if self.args.verbose:
LOGGER.info("Registered Checks and Alarms:")
self._write(checks + alarms)
elif failures:
LOGGER.info("Checks and Alarms with failures:")
self._write(failures)
return 1
else:
LOGGER.info("MAAS Verify completed succesfully")
def _get_failed_alarms(self):
alarms = []
failed_alarms = []
for entity in self.rpcm.get_entities():
for alarm in entity.alarms:
alarms.append(alarm)
if alarm.state not in ["OK", "UNKNOWN", "DISABLED"]:
failed_alarms.append(alarm)
alarm.bullet = "!"
return (alarms, failed_alarms)
def alarms(self):
"""List Alarms"""
alarms, failed_alarms = self._get_failed_alarms()
if self.args.verbose:
self._write(alarms)
else:
self._write(failed_alarms)
if len(failed_alarms) > 0:
return 1
def checks(self):
"""List Checks"""
self._write(self.rpcm.get_checks(self.args.checkmatch))
def _get_failed_checks(self, checks=None):
failed_checks = []
if checks is None:
checks = []
for entity in self.rpcm.get_entities():
for check in entity.checks:
if self.args.checkmatch not in check.label:
continue
checks.append(check)
for check in checks:
try:
result = self.rpcm.conn.test_existing_check(check)
except rackspace.RackspaceMonitoringValidationError:
completed = False
else:
status = result[0]['status']
completed = result[0]['available']
check.state = (" Completed:%(completed)s Status:%(status)s"
% {'completed': completed, 'status': status})
if not completed:
check.bullet = "!"
failed_checks.append(check)
return (checks, failed_checks)
def run_checks(self):
"""Execute Checks and list results"""
checks, failed_checks = self._get_failed_checks()
if self.args.verbose:
self._write(checks)
else:
self._write(failed_checks)
if len(failed_checks) > 0:
return 1
def overview(self):
"""List checks alarms and metrics"""
entities = self.rpcm.get_entities()
checks = []
alarms = []
metrics = []
self.rpcm.add_metrics()
for entity in entities:
checks.extend(entity.checks)
alarms.extend(entity.alarms)
for check in entity.checks:
metrics.extend(check.metrics)
self._write(checks + alarms + metrics)
def delete(self):
count = 0
if self.args.force is False:
print("*** Proceeding WILL delete ALL your checks (and data) ****")
sys.stdout.flush()
if raw_input("Type 'from orbit' to continue: ") != 'from orbit':
return
for entity in self.rpcm.get_entities():
for check in entity.checks:
self.rpcm.conn.delete_check(check)
count += 1
LOGGER.info("Number of checks deleted: %s" % count)
def remove_defunct_checks(self):
check_count = 0
for entity in self.rpcm.get_entities():
for check in entity.checks:
if re.match('filesystem--.*', check.label):
self.rpcm.conn.delete_check(check)
check_count += 1
LOGGER.info("Number of checks deleted: %s" % check_count)
def remove_defunct_alarms(self):
alarm_count = 0
defunct_alarms = {'rabbit_mq_container': ['disk_free_alarm',
'mem_alarm'],
'galera_container': ['WSREP_CLUSTER_SIZE',
'WSREP_LOCAL_STATE_COMMENT']}
for entity in self.rpcm.get_entities():
for alarm in entity.alarms:
for container in defunct_alarms:
for defunct_alarm in defunct_alarms[container]:
if re.match(
'%s--.*%s' % (defunct_alarm, container),
alarm.label):
self.rpcm.conn.delete_alarm(alarm)
alarm_count += 1
LOGGER.info("Number of alarms deleted: %s" % alarm_count)
def _os(self, obj, indent=0, ps=''):
"""ObjectString
        Create a string from an object.
"""
bullet = " %s " % getattr(obj, 'bullet', "-")
objclass = obj.__class__.__name__
nameattr = 'label'
checktype = ""
if hasattr(obj, 'name'):
nameattr = 'name'
if hasattr(obj, 'type'):
checktype = ":%(type)s" % {'type': obj.type}
if hasattr(obj, 'state'):
ps += ":%(state)s" % {'state': obj.state}
if not hasattr(obj, 'id'):
obj.id = ""
if self.args.tab:
bullet = ""
return ("%(indent)s%(bullet)s%(objclass)s%(checktype)s"
":%(id)s:%(label)s%(ps)s"
% {'id': obj.id,
'bullet': bullet,
'indent': ' ' * indent * 2,
'objclass': objclass,
'label': getattr(obj, nameattr),
'checktype': checktype,
'ps': ps})
def _write(self,
objs,
sep="\t"):
if self.args.tab:
            # Tab-separated output
def _line_segment(obj, line=""):
# entity
if hasattr(obj, 'checks'):
return self._os(obj) + sep + line
# check
elif hasattr(obj, 'alarms'):
return (_line_segment(obj.entity, self._os(obj)) +
sep + line)
# alarm or metric
else:
return (_line_segment(obj.check, self._os(obj)) +
sep + line)
for obj in objs:
LOGGER.info(_line_segment(obj))
else:
# Tree style output
entities = set()
checks = set()
for obj in objs:
entities.add(getattr(obj, 'entity', obj))
checks.add(getattr(obj, 'check', obj))
for entity in entities:
LOGGER.info(self._os(entity))
for check in [c for c in checks if c.entity == entity]:
LOGGER.info(self._os(check, indent=1))
for obj in objs:
if (getattr(obj, 'check', None) == check and
getattr(obj, 'entity', None) == entity):
LOGGER.info(self._os(obj, indent=2))
if __name__ == "__main__":
cli = RpcMassCli()
sys.exit(cli.main())
| {
"content_hash": "1469de43b70df9759ae84bc723a878d7",
"timestamp": "",
"source": "github",
"line_count": 841,
"max_line_length": 79,
"avg_line_length": 38.18668252080856,
"alnum_prop": 0.507613264829519,
"repo_name": "npawelek/rpc-maas",
"id": "0c05bfccc0aec5fafe33ab5d7fa420a25648cc9c",
"size": "32719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "playbooks/files/rax-maas/tools/rpc-maas-tool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3215"
},
{
"name": "Python",
"bytes": "368284"
},
{
"name": "Shell",
"bytes": "50085"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import six
from sentry.models import AuthIdentity, AuthProvider
from sentry.testutils import AuthProviderTestCase
from sentry.utils.auth import SSO_SESSION_KEY
class OrganizationAuthLoginTest(AuthProviderTestCase):
def test_sso_auth_required(self):
user = self.create_user("[email protected]", is_superuser=False)
organization = self.create_organization(name="foo")
member = self.create_member(user=user, organization=organization)
setattr(member.flags, "sso:linked", True)
member.save()
auth_provider = AuthProvider.objects.create(
organization=organization, provider="dummy", flags=0
)
AuthIdentity.objects.create(auth_provider=auth_provider, user=user)
self.login_as(user)
path = u"/{}/".format(organization.slug)
redirect_uri = u"/auth/login/{}/".format(organization.slug)
# we should be redirecting the user to the authentication form as they
# haven't verified this specific organization
resp = self.client.get(path)
self.assertRedirects(resp, redirect_uri)
# superuser should still require SSO as they're a member of the org
user.update(is_superuser=True)
resp = self.client.get(path)
self.assertRedirects(resp, redirect_uri)
# XXX(dcramer): using internal API as exposing a request object is hard
self.session[SSO_SESSION_KEY] = six.text_type(organization.id)
self.save_session()
# now that SSO is marked as complete, we should be able to access dash
resp = self.client.get(path)
assert resp.status_code == 200
| {
"content_hash": "4da041c729bb7fe0f729d450643a954a",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 37.422222222222224,
"alnum_prop": 0.6828978622327792,
"repo_name": "beeftornado/sentry",
"id": "9d25276f8a56a68804e3bf744eeb42d9f3a4bff4",
"size": "1684",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/integration/test_sso.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
from __future__ import print_function, absolute_import, division
import sys
import inspect
import socket
import numpy as np
from sklearn.utils import check_random_state
from sklearn.grid_search import ParameterGrid
try:
from hyperopt import (Trials, tpe, fmin, STATUS_OK, STATUS_RUNNING,
STATUS_FAIL)
except ImportError:
# hyperopt is optional, but required for hyperopt_tpe()
pass
try:
from GPy import kern
from GPy.kern import RBF, Fixed, Bias
from GPy.util.linalg import tdot
from GPy.models import GPRegression
from scipy.optimize import minimize
from scipy.stats import norm
# If the GPy modules fail we won't do this unnecessarily.
from .entry_point import load_entry_point
KERNEL_BASE_CLASS = kern.src.kern.Kern
except:
# GPy is optional, but required for gp
GPRegression = kern = minimize = None
pass
from .search_space import EnumVariable
DEFAULT_TIMEOUT = socket._GLOBAL_DEFAULT_TIMEOUT
class BaseStrategy(object):
short_name = None
def suggest(self, history, searchspace):
"""
Parameters
----------
history : list of 3-tuples
History of past function evaluations. Each element in history
should be a tuple `(params, score, status)`, where `params` is a
dict mapping parameter names to values
searchspace : SearchSpace
Instance of search_space.SearchSpace
        random_state : integer or numpy.RandomState, optional
The random seed for sampling. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Returns
-------
new_params : dict
"""
raise NotImplementedError()
@staticmethod
def is_repeated_suggestion(params, history):
"""
Parameters
----------
params : dict
Trial param set
history : list of 3-tuples
History of past function evaluations. Each element in history
should be a tuple `(params, score, status)`, where `params` is a
dict mapping parameter names to values
Returns
-------
is_repeated_suggestion : bool
"""
if any(params == hparams and hstatus == 'SUCCEEDED' for hparams, hscore, hstatus in history):
return True
else:
return False
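# A minimal, hypothetical sketch of the (params, score, status) history format
# that BaseStrategy.suggest() and is_repeated_suggestion() expect. The
# parameter names and scores below are illustrative only.
if __name__ == "__main__":
    _example_history = [
        ({"C": 1.0, "gamma": 0.1}, 0.82, "SUCCEEDED"),
        ({"C": 10.0, "gamma": 0.1}, None, "FAILED"),
    ]
    assert BaseStrategy.is_repeated_suggestion(
        {"C": 1.0, "gamma": 0.1}, _example_history)
    assert not BaseStrategy.is_repeated_suggestion(
        {"C": 5.0, "gamma": 0.1}, _example_history)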
class RandomSearch(BaseStrategy):
short_name = 'random'
def __init__(self, seed=None):
self.seed = seed
def suggest(self, history, searchspace):
"""Randomly suggest params from searchspace.
"""
return searchspace.rvs(self.seed)
class HyperoptTPE(BaseStrategy):
short_name = 'hyperopt_tpe'
def __init__(self, seed=None):
self.seed = seed
def suggest(self, history, searchspace):
"""
Suggest params to maximize an objective function based on the
function evaluation history using a tree of Parzen estimators (TPE),
as implemented in the hyperopt package.
Use of this function requires that hyperopt be installed.
"""
        # This function is very odd, because as far as I can tell there's
        # no real documented API for any of the internals of hyperopt. Its
        # execution model is that hyperopt calls your objective function
        # (instead of merely providing you with suggested points, and then
        # you calling the function yourself), and it's very tricky (for me)
        # to use the internal hyperopt data structures to get these
        # predictions out directly.
        # So the path we take in this function is to construct a synthetic
        # hyperopt.Trials database from the `history`, then call
        # hyperopt.fmin with a dummy objective function that logs the value
        # it was called with, and return that value to our client.
        # The form of the hyperopt.Trials database isn't really documented in
        # the code -- most of this comes from reverse engineering it, by
        # running fmin() on a simple function and then inspecting the form of
        # the resulting trials object.
if 'hyperopt' not in sys.modules:
raise ImportError('No module named hyperopt')
random = check_random_state(self.seed)
hp_searchspace = searchspace.to_hyperopt()
trials = Trials()
for i, (params, scores, status) in enumerate(history):
if status == 'SUCCEEDED':
# we're doing maximization, hyperopt.fmin() does minimization,
# so we need to swap the sign
result = {'loss': -np.mean(scores), 'status': STATUS_OK}
elif status == 'PENDING':
result = {'status': STATUS_RUNNING}
elif status == 'FAILED':
result = {'status': STATUS_FAIL}
else:
raise RuntimeError('unrecognized status: %s' % status)
# the vals key in the trials dict is basically just the params
# dict, but enum variables (hyperopt hp.choice() nodes) are
# different, because the index of the parameter is specified
# in vals, not the parameter itself.
vals = {}
for var in searchspace:
if isinstance(var, EnumVariable):
# get the index in the choices of the parameter, and use
# that.
matches = [i for i, c in enumerate(var.choices)
if c == params[var.name]]
assert len(matches) == 1
vals[var.name] = matches
else:
# the other big difference is that all of the param values
# are wrapped in length-1 lists.
vals[var.name] = [params[var.name]]
trials.insert_trial_doc({
'misc': {
'cmd': ('domain_attachment', 'FMinIter_Domain'),
'idxs': dict((k, [i]) for k in hp_searchspace.keys()),
'tid': i,
'vals': vals,
'workdir': None},
'result': result,
'tid': i,
# bunch of fixed fields that hyperopt seems to require
'owner': None, 'spec': None, 'state': 2, 'book_time': None,
'exp_key': None, 'refresh_time': None, 'version': 0
})
trials.refresh()
chosen_params_container = []
def mock_fn(x):
# http://stackoverflow.com/a/3190783/1079728
            # to get around the lack of a nonlocal keyword in python2
chosen_params_container.append(x)
return 0
fmin(fn=mock_fn, algo=tpe.suggest, space=hp_searchspace, trials=trials,
max_evals=len(trials.trials)+1,
**self._hyperopt_fmin_random_kwarg(random))
chosen_params = chosen_params_container[0]
return chosen_params
@staticmethod
def _hyperopt_fmin_random_kwarg(random):
if 'rstate' in inspect.getargspec(fmin).args:
# 0.0.3-dev version uses this argument
kwargs = {'rstate': random, 'allow_trials_fmin': False}
elif 'rseed' in inspect.getargspec(fmin).args:
# 0.0.2 version uses different argument
kwargs = {'rseed': random.randint(2**32-1)}
return kwargs
class GP(BaseStrategy):
short_name = 'gp'
def __init__(self, acquisition=None, seed=None, seeds=1, max_feval=5E4, max_iter=1E5):
self.seed = seed
self.seeds = seeds
self.max_feval = max_feval
self.max_iter = max_iter
self.model = None
self.n_dims = None
self.kernel = None
self._kerns = None
self._kernf = None
self._kernb = None
if acquisition is None:
acquisition = {'name': 'osprey', 'params': {}}
self.acquisition_function = acquisition
self._acquisition_function = None
self._set_acquisition()
def _create_kernel(self, V):
self._kerns = [RBF(1, ARD=True, active_dims=[i])
for i in range(self.n_dims)]
self._kernf = Fixed(self.n_dims, tdot(V))
self._kernb = Bias(self.n_dims)
self.kernel = np.sum(self._kerns) + self._kernf + self._kernb
def _fit_model(self, X, Y):
model = GPRegression(X, Y, self.kernel)
model.optimize(messages=False, max_f_eval=self.max_feval)
self.model = model
def _get_random_point(self):
return np.array([np.random.uniform(low=0., high=1.)
for i in range(self.n_dims)])
def _is_var_positive(self, var):
if np.any(var < 0):
# RuntimeError may be overkill
raise RuntimeError('Negative variance predicted from regression model.')
else:
return True
def _ei(self, x, y_mean, y_var):
y_std = np.sqrt(y_var)
y_best = self.model.Y.max(axis=0)
z = (y_mean - y_best)/y_std
result = y_std*(z*norm.cdf(z) + norm.pdf(z))
return result
def _ucb(self, x, y_mean, y_var, kappa=1.0):
result = y_mean + kappa*np.sqrt(y_var)
return result
def _osprey(self, x, y_mean, y_var):
return (y_mean+y_var).flatten()
def _optimize(self, init=None):
# TODO start minimization from a range of points and take minimum
if not init:
init = self._get_random_point()
def z(x):
# TODO make spread of points around x and take mean value.
x = x.copy().reshape(-1, self.n_dims)
y_mean, y_var = self.model.predict(x, kern=(np.sum(self._kerns).copy() +
self._kernb.copy()))
# This code is for debug/testing phase only.
# Ideally we should test for negative variance regardless of the AF.
# However, we want to recover the original functionality of Osprey, hence the conditional block.
# TODO remove this.
if self.acquisition_function['name'] == 'osprey':
af = self._acquisition_function(x, y_mean=y_mean, y_var=y_var)
elif self.acquisition_function['name'] in ['ei', 'ucb']:
# y_var = np.abs(y_var)
if self._is_var_positive(y_var):
af = self._acquisition_function(x, y_mean=y_mean, y_var=y_var)
return (-1)*af
res = minimize(z, init, bounds=self.n_dims*[(0., 1.)],
options={'maxiter': self.max_iter, 'disp': 0})
return res.x
def _set_acquisition(self):
if isinstance(self.acquisition_function, list):
raise RuntimeError('Must specify only one acquisition function')
if sorted(self.acquisition_function.keys()) != ['name', 'params']:
raise RuntimeError('strategy/params/acquisition must contain keys '
'"name" and "params"')
if self.acquisition_function['name'] not in ['ei', 'ucb', 'osprey']:
raise RuntimeError('strategy/params/acquisition name must be one of '
'"ei", "ucb", "osprey"')
f = eval('self._'+self.acquisition_function['name'])
def g(x, y_mean, y_var):
return f(x, y_mean, y_var, **self.acquisition_function['params'])
self._acquisition_function = g
def _get_data(self, history, searchspace):
X = []
Y = []
V = []
ignore = []
for param_dict, scores, status in history:
            # transform points into the GP domain. This involves bringing
# int and enum variables to floating point, etc.
if status == 'FAILED':
# not sure how to deal with these yet
continue
point = searchspace.point_to_gp(param_dict)
if status == 'SUCCEEDED':
X.append(point)
Y.append(np.mean(scores))
V.append(np.var(scores))
elif status == 'PENDING':
ignore.append(point)
else:
raise RuntimeError('unrecognized status: %s' % status)
return (np.array(X).reshape(-1, self.n_dims),
np.array(Y).reshape(-1, 1),
np.array(V).reshape(-1, 1),
np.array(ignore).reshape(-1, self.n_dims))
def _from_gp(self, result, searchspace):
# Note that GP only deals with float-valued variables, so we have
# a transform step on either side, where int and enum valued variables
# are transformed before calling gp, and then the result suggested by
# GP needs to be reverse-transformed.
out = {}
for gpvalue, var in zip(result, searchspace):
out[var.name] = var.point_from_gp(float(gpvalue))
return out
def _is_within(self, point, X, tol=1E-2):
if True in (np.sqrt(((point - X)**2).sum(axis=0)) <= tol):
return True
return False
def suggest(self, history, searchspace, max_tries=5):
if not GPRegression:
raise ImportError('No module named GPy')
if not minimize:
raise ImportError('No module named SciPy')
if len(history) < self.seeds:
return RandomSearch().suggest(history, searchspace)
self.n_dims = searchspace.n_dims
X, Y, V, ignore = self._get_data(history, searchspace)
# TODO make _create_kernel accept optional args.
self._create_kernel(V)
self._fit_model(X, Y)
suggestion = self._optimize()
if suggestion in ignore or self._is_within(suggestion, X):
return RandomSearch().suggest(history, searchspace)
return self._from_gp(suggestion, searchspace)
class GridSearch(BaseStrategy):
short_name = 'grid'
def __init__(self):
self.param_grid = None
self.current = -1
def suggest(self, history, searchspace):
# Convert searchspace to param_grid
if self.param_grid is None:
if not all(isinstance(v, EnumVariable) for v in searchspace):
raise RuntimeError("GridSearchStrategy is defined only for all-enum search space")
self.param_grid = ParameterGrid(dict((v.name, v.choices) for v in searchspace))
        # NOTE: there is no way of signaling the end of the parameter grid,
        # so the user should choose the number of evaluations accordingly
self.current += 1
return self.param_grid[self.current % len(self.param_grid)]
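# Illustrative sketch of the wrap-around indexing used by GridSearch.suggest():
# once every grid point has been suggested, suggestions start again from the
# beginning of the grid. The parameter grid below is hypothetical.
if __name__ == "__main__":
    _grid = ParameterGrid({"kernel": ["rbf", "linear"], "C": [1, 10]})
    for _i in range(2 * len(_grid)):
        print(_i, _grid[_i % len(_grid)])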
| {
"content_hash": "c219f5d67caae3c77c1ab3b90b1b3d04",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 108,
"avg_line_length": 37.62307692307692,
"alnum_prop": 0.5782730184692974,
"repo_name": "msultan/osprey",
"id": "7d666409d735882fe07b30f41227929433d96b2e",
"size": "14673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "osprey/strategies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "164914"
},
{
"name": "Shell",
"bytes": "4718"
},
{
"name": "TeX",
"bytes": "2620"
}
],
"symlink_target": ""
} |
import base64
from typing import List
from xdrlib import Packer, Unpacker
from .base import Integer
from .operation_meta import OperationMeta
from .transaction_meta_v1 import TransactionMetaV1
from .transaction_meta_v2 import TransactionMetaV2
__all__ = ["TransactionMeta"]
class TransactionMeta:
"""
XDR Source Code::
union TransactionMeta switch (int v)
{
case 0:
OperationMeta operations<>;
case 1:
TransactionMetaV1 v1;
case 2:
TransactionMetaV2 v2;
};
"""
def __init__(
self,
v: int,
operations: List[OperationMeta] = None,
v1: TransactionMetaV1 = None,
v2: TransactionMetaV2 = None,
) -> None:
_expect_max_length = 4294967295
if operations and len(operations) > _expect_max_length:
raise ValueError(
f"The maximum length of `operations` should be {_expect_max_length}, but got {len(operations)}."
)
self.v = v
self.operations = operations
self.v1 = v1
self.v2 = v2
def pack(self, packer: Packer) -> None:
Integer(self.v).pack(packer)
if self.v == 0:
if self.operations is None:
raise ValueError("operations should not be None.")
packer.pack_uint(len(self.operations))
for operations_item in self.operations:
operations_item.pack(packer)
return
if self.v == 1:
if self.v1 is None:
raise ValueError("v1 should not be None.")
self.v1.pack(packer)
return
if self.v == 2:
if self.v2 is None:
raise ValueError("v2 should not be None.")
self.v2.pack(packer)
return
@classmethod
def unpack(cls, unpacker: Unpacker) -> "TransactionMeta":
v = Integer.unpack(unpacker)
if v == 0:
length = unpacker.unpack_uint()
operations = []
for _ in range(length):
operations.append(OperationMeta.unpack(unpacker))
return cls(v=v, operations=operations)
if v == 1:
v1 = TransactionMetaV1.unpack(unpacker)
return cls(v=v, v1=v1)
if v == 2:
v2 = TransactionMetaV2.unpack(unpacker)
return cls(v=v, v2=v2)
return cls(v=v)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "TransactionMeta":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "TransactionMeta":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.v == other.v
and self.operations == other.operations
and self.v1 == other.v1
and self.v2 == other.v2
)
def __str__(self):
out = []
out.append(f"v={self.v}")
out.append(
f"operations={self.operations}"
) if self.operations is not None else None
out.append(f"v1={self.v1}") if self.v1 is not None else None
out.append(f"v2={self.v2}") if self.v2 is not None else None
return f"<TransactionMeta [{', '.join(out)}]>"
| {
"content_hash": "92ad5d8af16f48d8c2df76e94b50073d",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 112,
"avg_line_length": 30.840336134453782,
"alnum_prop": 0.5574931880108992,
"repo_name": "StellarCN/py-stellar-base",
"id": "0a9ef07b2b86292ffe59e3ab3d3be4dd659e7ed1",
"size": "3762",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "stellar_sdk/xdr/transaction_meta.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1737"
},
{
"name": "Makefile",
"bytes": "1085"
},
{
"name": "Python",
"bytes": "2044193"
},
{
"name": "RPC",
"bytes": "76503"
}
],
"symlink_target": ""
} |
from packages import peewee
metadata_db_proxy = peewee.Proxy()
class Scheme(peewee.Model):
item_id = peewee.CharField(index=True)
key = peewee.CharField(index=True)
value = peewee.CharField(index=True)
class Meta:
database = metadata_db_proxy
db_table = 'metadata'
indexes = (
# unique index for item_id, key, value
(('item_id', 'key', 'value'), True),
)
def as_dict(self):
return {'item_id': self.item_id, 'key': self.key, 'value': self.value}
class Metadata(object):
def __init__(self, path):
''' path - path to metadata file (sqlite)
'''
self._db = peewee.SqliteDatabase(path)
metadata_db_proxy.initialize(self._db)
if not Scheme.table_exists():
Scheme.create_table()
def put(self, item_id, metadata=list()):
''' store item_id metadata into database
'''
if not item_id:
raise RuntimeError('ItemId is not defined, %s' % item_id)
if not isinstance(metadata, list):
            raise RuntimeError('Metadata is not defined or is not a list of (key, value) pairs, %s' % metadata)
with self._db.transaction():
for k, v in metadata:
try:
Scheme.create(**{'item_id': item_id, 'key': k, 'value': v})
except peewee.IntegrityError:
# TODO update metadata
pass
return self.get(item_id)
def get(self, item_id=None):
''' get metadata
'''
if not item_id:
_metadata = list()
for item in Scheme.select(Scheme.item_id).group_by(Scheme.item_id):
_metadata.append((item.item_id, [(meta.key, meta.value) \
for meta in Scheme.select().where(Scheme.item_id == item.item_id)]))
else:
_metadata = (item_id, [(meta.key, meta.value) \
for meta in Scheme.select().where(Scheme.item_id == item_id)])
return _metadata
def delete(self, item_id):
''' delete item
'''
with self._db.transaction():
if isinstance(item_id, (list, tuple)):
for meta in Scheme.select().where(Scheme.item_id << item_id):
meta.delete_instance()
else:
for meta in Scheme.select().where(Scheme.item_id == item_id):
meta.delete_instance()
def search(self, k, v):
''' select item_ids where metadata key = k and metadata value = v
'''
return [f.item_id for f in Scheme.select(Scheme.item_id)
.where(Scheme.key == k, Scheme.value == v)
]
def count(self):
''' return count of records in database
'''
return Scheme.select().group_by(Scheme.item_id).count()
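# Hypothetical usage sketch: store a couple of key/value pairs for an item in
# an on-disk sqlite database, then query them back. The file path and the
# metadata values are illustrative only.
if __name__ == "__main__":
    _md = Metadata('/tmp/metadata-example.sqlite')
    _md.put('item-1', [('author', 'alice'), ('format', 'jpeg')])
    print(_md.get('item-1'))
    print(_md.search('format', 'jpeg'))
    print(_md.count())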
| {
"content_hash": "8b6ccc39838fde004950c73793410e69",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 112,
"avg_line_length": 32.37362637362637,
"alnum_prop": 0.5227427019687713,
"repo_name": "ownport/storages",
"id": "a2bb4051fe902d799542825335cd9b441f3a257c",
"size": "2946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storages/metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "78"
},
{
"name": "Python",
"bytes": "149106"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from datetime import timedelta
from core.analytics import ScheduledAnalytics
from mongoengine import Q
class ExpireTags(ScheduledAnalytics):
default_values = {
"frequency": timedelta(hours=12),
"name": "ExpireTags",
"description": "Expires tags in observables",
}
ACTS_ON = [] # act on all observables
# TODO Use server-side JS filter
CUSTOM_FILTER = Q(tags__not__size=0) # filter only tagged elements
EXPIRATION = timedelta(days=1)
def bulk(self, observables):
for o in observables:
self.each(o)
@staticmethod
def each(obj):
obj.expire_tags()
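# Minimal illustrative sketch of the each() contract: any object exposing an
# expire_tags() method can be processed. DummyObservable is hypothetical and
# stands in for a real yeti Observable document.
if __name__ == "__main__":
    class DummyObservable(object):
        def expire_tags(self):
            print("tags expired")

    ExpireTags.each(DummyObservable())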
| {
"content_hash": "0c7cb506b58bc6520947ce20a440dc72",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 71,
"avg_line_length": 23.551724137931036,
"alnum_prop": 0.6544655929721815,
"repo_name": "yeti-platform/yeti",
"id": "5fa3d331a1459d66c91cc7bf2c6ef15ce8c7ffda",
"size": "683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/analytics/public/expire_tags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18580"
},
{
"name": "Dockerfile",
"bytes": "1379"
},
{
"name": "HTML",
"bytes": "182623"
},
{
"name": "JavaScript",
"bytes": "79774"
},
{
"name": "Python",
"bytes": "586533"
},
{
"name": "Shell",
"bytes": "8495"
}
],
"symlink_target": ""
} |
"""ctexplain: how does configuration affect build graphs?
This is a swiss army knife tool that tries to explain why build graphs are the
size they are and how build flags, configuration transitions, and dependency
structures affect that.
This can help developers use flags and transitions with minimal memory and
maximum build speed.
Usage:
$ ctexplain [--analysis=...] -b "<targets_to_build> [build flags]"
Example:
$ ctexplain -b "//mypkg:mybinary --define MY_FEATURE=1"
Relevant terms in https://docs.bazel.build/versions/master/glossary.html:
"target", "configuration", "analysis phase", "configured target",
"configuration trimming", "transition"
TODO(gregce): link to proper documentation for full details.
"""
from typing import Callable
from typing import Tuple
# Do not edit this line. Copybara replaces it with PY2 migration helper.
from absl import app
from absl import flags
from dataclasses import dataclass
# Do not edit this line. Copybara replaces it with PY2 migration helper..third_party.bazel.tools.ctexplain.analyses.summary as summary
from tools.ctexplain.bazel_api import BazelApi
# Do not edit this line. Copybara replaces it with PY2 migration helper..third_party.bazel.tools.ctexplain.lib as lib
from tools.ctexplain.types import ConfiguredTarget
# Do not edit this line. Copybara replaces it with PY2 migration helper..third_party.bazel.tools.ctexplain.util as util
FLAGS = flags.FLAGS
@dataclass(frozen=True)
class Analysis():
"""Supported analysis type."""
# The value in --analysis=<value> that triggers this analysis.
key: str
# The function that invokes this analysis.
exec: Callable[[Tuple[ConfiguredTarget, ...]], None]
# User-friendly analysis description.
description: str
available_analyses = [
Analysis(
"summary",
lambda x: summary.report(summary.analyze(x)),
"summarizes build graph size and how trimming could help"
),
Analysis(
"culprits",
lambda x: print("this analysis not yet implemented"),
"shows which flags unnecessarily fork configured targets. These\n"
+ "are conceptually mergeable."
),
Analysis(
"forked_targets",
lambda x: print("this analysis not yet implemented"),
"ranks targets by how many configured targets they\n"
+ "create. These may be legitimate forks (because they behave "
+ "differently with\n different flags) or identical clones that are "
+ "conceptually mergeable."
),
Analysis(
"cloned_targets",
lambda x: print("this analysis not yet implemented"),
"ranks targets by how many behavior-identical configured\n targets "
+ "they produce. These are conceptually mergeable."
)
]
# Available analyses, keyed by --analysis=<value> triggers.
analyses = {analysis.key: analysis for analysis in available_analyses}
# Command-line flag registration:
def _render_analysis_help_text() -> str:
"""Pretty-prints help text for available analyses."""
return "\n".join(f'- "{name}": {analysis.description}'
for name, analysis in analyses.items())
flags.DEFINE_list("analysis", ["summary"], f"""
Analyses to run. May be any comma-separated combination of
{_render_analysis_help_text()}
""")
flags.register_validator(
"analysis",
lambda flag_value: all(name in analyses for name in flag_value),
message=f'available analyses: {", ".join(analyses.keys())}')
flags.DEFINE_multi_string(
"build", [],
"""command-line invocation of the build to analyze. For example:
"//foo --define a=b". If listed multiple times, this is a "multi-build
analysis" that measures how much distinct builds can share subgraphs""",
short_name="b")
# Core program logic:
def _get_build_flags(cmdline: str) -> Tuple[Tuple[str, ...], Tuple[str, ...]]:
"""Parses a build invocation command line.
Args:
cmdline: raw build invocation string. For example: "//foo --cpu=x86"
Returns:
Tuple of ((target labels to build), (build flags))
"""
cmdlist = cmdline.split()
labels = [arg for arg in cmdlist if arg.startswith("//")]
build_flags = [arg for arg in cmdlist if not arg.startswith("//")]
return (tuple(labels), tuple(build_flags))
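# Quick illustrative check of _get_build_flags(), using the example invocation
# from the module docstring. It is cheap, has no side effects, and only
# exercises pure string parsing.
assert _get_build_flags("//mypkg:mybinary --define MY_FEATURE=1") == (
    ("//mypkg:mybinary",), ("--define", "MY_FEATURE=1"))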
def main(argv):
del argv # Satisfy py linter's "unused" warning.
if not FLAGS.build:
exit("ctexplain: build efficiency measurement tool. Add --help "
+ "for usage.")
elif len(FLAGS.build) > 1:
exit("TODO(gregce): support multi-build shareability analysis")
(labels, build_flags) = _get_build_flags(FLAGS.build[0])
build_desc = ",".join(labels)
with util.ProgressStep(f"Collecting configured targets for {build_desc}"):
cts = lib.analyze_build(BazelApi(), labels, build_flags)
for analysis in FLAGS.analysis:
analyses[analysis].exec(cts)
if __name__ == "__main__":
app.run(main)
| {
"content_hash": "062378a776eaae9d38dfc90b6f536b82",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 134,
"avg_line_length": 33.5448275862069,
"alnum_prop": 0.7002467105263158,
"repo_name": "werkt/bazel",
"id": "cb300a45cd95164a1d7a355ffdc297f34850d417",
"size": "5485",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tools/ctexplain/ctexplain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2916"
},
{
"name": "C",
"bytes": "16022"
},
{
"name": "C++",
"bytes": "1171078"
},
{
"name": "HTML",
"bytes": "20211"
},
{
"name": "Java",
"bytes": "29573073"
},
{
"name": "Makefile",
"bytes": "248"
},
{
"name": "Objective-C",
"bytes": "8797"
},
{
"name": "Objective-C++",
"bytes": "1043"
},
{
"name": "PowerShell",
"bytes": "5536"
},
{
"name": "Python",
"bytes": "1400777"
},
{
"name": "Shell",
"bytes": "1169561"
},
{
"name": "Smarty",
"bytes": "487938"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
from datetime import datetime
import time
import array
import struct
import os
sys.path.append("build/lib/python/")
import vsimj1b
sys.path.append("../../shell")
import swapforth
class TetheredJ1b(swapforth.TetheredTarget):
cellsize = 4
def open_ser(self, port, speed):
self.ser = vsimj1b.vsimj1b()
def reset(self):
ser = self.ser
ser.reset()
for c in ' 1 tth !':
ser.write(c)
ser.write('\r')
while 1:
c = ser.read(1)
# print(repr(c))
if c == b'\x1e':
break
def boot(self, bootfile = None):
sys.stdout.write('Contacting... ')
self.reset()
print('established')
def interrupt(self):
self.reset()
def serialize(self):
l = self.command_response('0 here dump')
lines = l.strip().replace('\r', '').split('\n')
s = []
for l in lines:
l = l.split()
s += [int(b, 16) for b in l[1:17]]
s = array.array('B', s).tobytes().ljust(32768, bytearray((0xff,)))
return array.array('i', s)
if __name__ == '__main__':
swapforth.main(TetheredJ1b)
| {
"content_hash": "b0ccd267b9314baf15f2c3ca2b516058",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 74,
"avg_line_length": 23.22641509433962,
"alnum_prop": 0.5385865150284321,
"repo_name": "jamesbowman/swapforth",
"id": "a1065802b69bea6c34cdc08331f19dbc9b3d3985",
"size": "1231",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "j1b/verilator/shell.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "180625"
},
{
"name": "C",
"bytes": "11847"
},
{
"name": "C++",
"bytes": "21706"
},
{
"name": "F#",
"bytes": "16205"
},
{
"name": "Forth",
"bytes": "379160"
},
{
"name": "Makefile",
"bytes": "13630"
},
{
"name": "Python",
"bytes": "114933"
},
{
"name": "Shell",
"bytes": "720"
},
{
"name": "TeX",
"bytes": "67088"
},
{
"name": "Verilog",
"bytes": "145950"
}
],
"symlink_target": ""
} |
from Container import Container
from JumpScale import j
import os
import docker
import time
from urllib import parse
class Docker:
def __init__(self):
self.__jslocation__ = "j.sal.docker"
self.logger = j.logger.get('j.sal.docker')
self._basepath = "/mnt/vmstor/docker"
self._weaveSocket = None
self._prefix = ""
self._containers = {}
self._names = []
if 'DOCKER_HOST' not in os.environ or os.environ['DOCKER_HOST'] == "":
self.base_url = 'unix://var/run/docker.sock'
elif self.weaveIsActive:
self.base_url = self.weavesocket
else:
self.base_url = os.environ['DOCKER_HOST']
self.client = docker.Client(base_url=self.base_url, timeout=120)
def init(self):
j.do.execute("systemctl stop docker")
d = j.sal.disklayout.findDisk(mountpoint="/storage")
if d is not None:
# we found a disk, lets make sure its in fstab
d.setAutoMount()
dockerpath = "%s/docker" % d.mountpoint
dockerpath = dockerpath.replace("//", '/')
if dockerpath not in j.sal.btrfs.subvolumeList(d.mountpoint):
# have to create the dockerpath
j.sal.btrfs.subvolumeCreate(dockerpath)
# j.sal.fs.createDir("/storage/docker")
j.sal.fs.copyDirTree("/var/lib/docker", dockerpath)
j.sal.fs.symlink("/storage/docker", "/var/lib/docker",
overwriteTarget=True)
j.do.execute("systemctl start docker")
@property
def weaveIsActive(self):
return bool(self.weavesocket)
@property
def weavesocket(self):
if self._weaveSocket is None:
if not j.tools.cuisine.local.core.command_check('weave'):
self.logger.warning("weave not found, do not forget to start if installed.")
self._weaveSocket = ""
else:
rc, self._weaveSocket = j.sal.process.execute("eval $(weave env) && echo $DOCKER_HOST", die=False)
if rc > 0:
self.logger.warning("weave not found, do not forget to start if installed.")
self._weaveSocket = ""
self._weaveSocket = self._weaveSocket.strip()
return self._weaveSocket
def weaveInstall(self, ufw=False):
j.tools.cuisine.local.systemservices.weave.install(start=True)
if ufw:
j.tools.cuisine.local.systemservices.ufw.allowIncoming(6783)
j.tools.cuisine.local.systemservices.ufw.allowIncoming(6783, protocol="udp")
# def connectRemoteTCP(self, base_url):
# self.base_url = base_url
# self.client = docker.Client(base_url=weavesocket)
@property
def docker_host(self):
"""
Get the docker hostname.
"""
u = parse.urlparse(self.base_url)
if u.scheme == 'unix':
return 'localhost'
else:
return u.hostname
def _execute(self, command):
env = os.environ.copy()
env.pop('PYTHONPATH', None)
(exitcode, stdout, stderr) = j.sal.process.run(
command, showOutput=False, captureOutput=True, stopOnError=False, env=env)
if exitcode != 0:
raise j.exceptions.RuntimeError(
"Failed to execute %s: Error: %s, %s" % (command, stdout, stderr))
return stdout
#
# def copy(self, name, src, dest):
# rndd = j.data.idgenerator.generateRandomInt(10, 1000000)
# temp = "/var/docker/%s/%s" % (name, rndd)
# j.sal.fs.createDir(temp)
# source_name = j.sal.fs.getBaseName(src)
# if j.sal.fs.isDir(src):
# j.sal.fs.copyDirTree(src, j.sal.fs.joinPaths(temp, source_name))
# else:
# j.sal.fs.copyFile(src, j.sal.fs.joinPaths(temp, source_name))
#
# ddir = j.sal.fs.getDirName(dest)
# cmd = "mkdir -p %s" % (ddir)
# self.run(name, cmd)
#
# cmd = "cp -r /var/jumpscale/%s/%s %s" % (rndd, source_name, dest)
# self.run(name, cmd)
# j.sal.fs.remove(temp)
@property
def containers(self):
if self._containers == {}:
for item in self.client.containers(all=all):
try:
name = str(item["Names"][0].strip("/").strip())
except:
continue
id = str(item["Id"].strip())
self._containers[id] = Container(name, id, self.client)
return list(self._containers.values())
@property
def containerNamesRunning(self):
"""
List all running containers names
"""
res = []
for container in self.containers:
if container.isRunning():
res.append(container.name)
return res
@property
def containerNames(self):
"""
List all containers names
"""
res = []
for container in self.containers:
res.append(container.name)
return res
@property
def containersRunning(self):
"""
List of all running container objects
"""
res = []
for container in self.containers:
if container.isRunning():
res.append(container)
return res
def exists(self, name):
return name in self.containerNames
@property
def basepath(self):
self._basepath = '/mnt/data/docker'
# TODO: needs to fetch values out of hrd
# if not self._basepath:
# if j.application.config.exists('docker.basepath'):
# self._basepath = j.application.config.get('docker.basepath')
# else:
# self._basepath="/mnt/vmstor/docker" #btrfs subvol create
return self._basepath
def _getChildren(self, pid, children):
process = j.sal.process.getProcessObject(pid)
children.append(process)
for child in process.get_children():
children = self._getChildren(child.pid, children)
return children
def _get_rootpath(self, name):
rootpath = j.sal.fs.joinPaths(
self.basepath, '%s%s' % (self._prefix, name), 'rootfs')
return rootpath
def _getMachinePath(self, machinename, append=""):
if machinename == "":
raise j.exceptions.RuntimeError("Cannot be empty")
base = j.sal.fs.joinPaths(self.basepath, '%s%s' %
(self._prefix, machinename))
if append != "":
base = j.sal.fs.joinPaths(base, append)
return base
def status(self):
"""
return list docker with some info
@return list of dicts
"""
self.weavesocket
res = []
for item in self.client.containers():
name = item["Names"][0].strip(" /")
sshport = ""
for port in item["Ports"]:
if port["PrivatePort"] == 22:
if "PublicPort" in port:
sshport = port["PublicPort"]
else:
sshport = None
res.append([name, item["Image"], self.docker_host,
sshport, item["Status"]])
return res
def ps(self):
"""
return detailed info
"""
self.weavesocket
return self.client.containers()
def get(self, name, die=True):
"""
Get a container object by name
@param name string: container name
"""
for container in self.containers:
if container.name == name:
return container
if die:
raise j.exceptions.RuntimeError(
"Container with name %s doesn't exists" % name)
else:
return None
def exportRsync(self, name, backupname, key="pub"):
raise j.exceptions.RuntimeError("not implemented")
self.removeRedundantFiles(name)
ipaddr = j.application.config.get("jssync.addr")
path = self._getMachinePath(name)
if not j.sal.fs.exists(path):
raise j.exceptions.RuntimeError("cannot find machine:%s" % path)
if backupname[-1] != "/":
backupname += "/"
if path[-1] != "/":
path += "/"
cmd = "rsync -a %s %s::upload/%s/images/%s --delete-after --modify-window=60 --compress --stats --progress --exclude '.Trash*'" % (
path, ipaddr, key, backupname)
j.sal.process.executeWithoutPipe(cmd)
# def removeRedundantFiles(self,name):
# raise j.exceptions.RuntimeError("not implemented")
# basepath=self._getMachinePath(name)
# j.sal.fs.removeIrrelevantFiles(basepath,followSymlinks=False)
# toremove="%s/rootfs/var/cache/apt/archives/"%basepath
# j.sal.fs.removeDirTree(toremove)
def importRsync(self, backupname, name, basename="", key="pub"):
"""
        @param basename: name of an existing local machine to use as a base; the source will be synced over it
"""
raise j.exceptions.RuntimeError("not implemented")
ipaddr = j.application.config.get("jssync.addr")
path = self._getMachinePath(name)
self.btrfsSubvolNew(name)
# j.sal.fs.createDir(path)
if backupname[-1] != "/":
backupname += "/"
if path[-1] != "/":
path += "/"
if basename != "":
basepath = self._getMachinePath(basename)
if basepath[-1] != "/":
basepath += "/"
if not j.sal.fs.exists(basepath):
raise j.exceptions.RuntimeError(
"cannot find base machine:%s" % basepath)
cmd = "rsync -av -v %s %s --delete-after --modify-window=60 --size-only --compress --stats --progress" % (
basepath, path)
self.logger.info(cmd)
j.sal.process.executeWithoutPipe(cmd)
cmd = "rsync -av -v %s::download/%s/images/%s %s --delete-after --modify-window=60 --compress --stats --progress" % (
ipaddr, key, backupname, path)
self.logger.info(cmd)
j.sal.process.executeWithoutPipe(cmd)
def exportTgz(self, name, backupname):
raise j.exceptions.RuntimeError("not implemented")
self.removeRedundantFiles(name)
path = self._getMachinePath(name)
bpath = j.sal.fs.joinPaths(self.basepath, "backups")
if not j.sal.fs.exists(path):
raise j.exceptions.RuntimeError("cannot find machine:%s" % path)
j.sal.fs.createDir(bpath)
bpath = j.sal.fs.joinPaths(bpath, "%s.tgz" % backupname)
cmd = "cd %s;tar Szcf %s ." % (path, bpath)
j.sal.process.executeWithoutPipe(cmd)
return bpath
def importTgz(self, backupname, name):
raise j.exceptions.RuntimeError("not implemented")
path = self._getMachinePath(name)
bpath = j.sal.fs.joinPaths(
self.basepath, "backups", "%s.tgz" % backupname)
if not j.sal.fs.exists(bpath):
raise j.exceptions.RuntimeError(
"cannot find import path:%s" % bpath)
j.sal.fs.createDir(path)
cmd = "cd %s;tar xzvf %s -C ." % (path, bpath)
j.sal.process.executeWithoutPipe(cmd)
def _init_aysfs(self, fs, dockname):
if fs.isUnique():
if not fs.isRunning():
self.logger.info('starting unique aysfs: %s' % fs.getName())
fs.start()
else:
self.logger.info(
'skipping aysfs: %s (unique running)' % fs.getName())
else:
fs.setName('%s-%s' % (dockname, fs.getName()))
if fs.isRunning():
fs.stop()
self.logger.info('starting aysfs: %s' % fs.getName())
fs.start()
def create(self, name="", ports="", vols="", volsro="", stdout=True, base="jumpscale/ubuntu1604", nameserver=["8.8.8.8"],
replace=True, cpu=None, mem=0, ssh=True, myinit=True, sharecode=False, sshkeyname="", sshpubkey="",
setrootrndpasswd=True, rootpasswd="", jumpscalebranch="master", aysfs=[], detach=False, privileged=False, getIfExists=True, weavenet=False):
"""
Creates a new container.
        @param ports: in the format "22:8022 80:8080"; the first number (e.g. 22) is the port inside the container, the second the port on the host
        @param vols: in the format "/var/insidemachine:/var/inhost # /var/1:/var/1 # ..."; '#' is the separator between volume mappings
        @param sshkeyname: use ssh-agent (works remotely through ssh -A as well) and specify the name of the key you want to push into the docker
"""
if ssh is True and myinit is False:
raise ValueError("SSH can't be enabled without myinit.")
# check there is weave
self.weavesocket
name = name.lower().strip()
self.logger.info(("create:%s" % name))
running = [item.name for item in self.containersRunning]
if not replace:
if name in self.containerNamesRunning:
if getIfExists:
return self.get(name=name)
else:
j.events.opserror_critical(
"Cannot create machine with name %s, because it does already exists.")
else:
if self.exists(name):
self.logger.info("remove existing container %s" % name)
container = self.get(name)
if container:
container.destroy()
if vols is None:
vols = ""
if volsro is None:
volsro = ""
if ports is None:
ports = ""
if mem is not None:
if mem > 0:
mem = int(mem) * 1024
elif mem <= 0:
mem = None
portsdict = {}
if len(ports) > 0:
items = ports.split(" ")
for item in items:
key, val = item.split(":", 1)
ss = key.split("/")
if len(ss) == 2:
portsdict[tuple(ss)] = val
else:
portsdict[int(key)] = val
if ssh:
if 22 not in portsdict:
for port in range(9022, 9190):
if not j.sal.nettools.tcpPortConnectionTest(self.docker_host, port):
portsdict[22] = port
self.logger.info(("ssh port will be on:%s" % port))
break
volsdict = {}
if len(vols) > 0:
items = vols.split("#")
for item in items:
key, val = item.split(":", 1)
volsdict[str(key).strip()] = str(val).strip()
if sharecode and j.sal.fs.exists(path="/opt/code"):
self.logger.info("share jumpscale code enable")
if "/opt/code" not in volsdict:
volsdict["/opt/code"] = "/opt/code"
for fs in aysfs:
self._init_aysfs(fs, name)
mounts = fs.getPrefixs()
for inp, out in mounts.items():
while not j.sal.fs.exists(inp):
time.sleep(0.1)
volsdict[out] = inp
volsdictro = {}
if len(volsro) > 0:
items = volsro.split("#")
for item in items:
key, val = item.split(":", 1)
volsdictro[str(key).strip()] = str(val).strip()
self.logger.info("Volumes map:")
for src1, dest1 in list(volsdict.items()):
self.logger.info(" %-20s %s" % (src1, dest1))
binds = {}
volskeys = [] # is location in docker
for key, path in list(volsdict.items()):
# j.sal.fs.createDir(path) # create the path on hostname
binds[path] = {"bind": key, "ro": False}
volskeys.append(key)
for key, path in list(volsdictro.items()):
# j.sal.fs.createDir(path) # create the path on hostname
binds[path] = {"bind": key, "ro": True}
volskeys.append(key)
if base not in self.getImages():
self.logger.info("download docker image %s" % base)
self.pull(base)
if base.startswith("jumpscale/ubuntu1604") or myinit is True:
cmd = "sh -c \"mkdir -p /var/run/screen;chmod 777 /var/run/screen; /var/run/screen;exec >/dev/tty 2>/dev/tty </dev/tty && /sbin/my_init -- /usr/bin/screen -s bash\""
cmd = "sh -c \" /sbin/my_init -- bash -l\""
else:
cmd = None
self.logger.info(("install docker with name '%s'" % name))
if vols != "":
self.logger.info("Volumes")
self.logger.info(volskeys)
self.logger.info(binds)
hostname = None if self.weaveIsActive else name.replace('_', '-')
host_config = self.client.create_host_config(
privileged=privileged) if privileged else None
res = self.client.create_container(image=base, command=cmd, hostname=hostname, user="root",
detach=detach, stdin_open=False, tty=True, mem_limit=mem, ports=list(portsdict.keys()), environment=None, volumes=volskeys,
network_disabled=False, name=name, entrypoint=None, cpu_shares=cpu, working_dir=None, domainname=None, memswap_limit=None, host_config=host_config)
if res["Warnings"] is not None:
raise j.exceptions.RuntimeError(
"Could not create docker, res:'%s'" % res)
id = res["Id"]
if self.weaveIsActive:
nameserver = None
for k, v in portsdict.items():
if type(k) == tuple and len(k) == 2:
portsdict["%s/%s" % (k[0], k[1])] = v
portsdict.pop(k)
res = self.client.start(container=id, binds=binds, port_bindings=portsdict, lxc_conf=None,
publish_all_ports=False, links=None, privileged=privileged, dns=nameserver, dns_search=None,
volumes_from=None, network_mode=None)
container = Container(name, id, self.client, host=self.docker_host)
self._containers[id] = container
if ssh:
container.pushSSHKey(keyname=sshkeyname, sshpubkey=sshpubkey)
# Make sure docker is ready for executor
end_time = time.time() + 60
while time.time() < end_time:
rc, _, _ = container.executor.execute('ls /', die=False, showout=False)
                if rc:
                    time.sleep(0.1)
                else:
                    break
if setrootrndpasswd:
if rootpasswd is None or rootpasswd == '':
self.logger.info("set default root passwd (gig1234)")
container.executor.execute(
"echo \"root:gig1234\"|chpasswd", showout=False)
else:
self.logger.info("set root passwd to %s" % rootpasswd)
container.executor.execute(
"echo \"root:%s\"|chpasswd" % rootpasswd, showout=False)
if not self.weaveIsActive:
container.setHostName(name)
return container
def getImages(self):
images = []
for item in self.client.images():
if item['RepoTags'] is None:
continue
tags = str(item['RepoTags'][0])
tags = tags.replace(":latest", "")
images.append(tags)
return images
def removeImages(self, tag="<none>:<none>"):
"""
Delete a certain Docker image using tag
"""
for item in self.client.images():
if tag in item["RepoTags"]:
self.client.remove_image(item["Id"])
def ping(self):
self.weavesocket
try:
self.client.ping()
except Exception as e:
return False
return True
def destroyAll(self, removeimages=False):
"""
Destroy all containers.
@param removeimages bool: to remove all images.
"""
for container in self.containers:
if "weave" in container.name:
continue
container.destroy()
if removeimages:
self.removeImages()
def _destroyAllKill(self):
if self.ping():
for container in self.containers:
container.destroy()
self.removeImages()
j.do.execute("systemctl stop docker")
if j.sal.fs.exists(path="/var/lib/docker/btrfs/subvolumes"):
j.sal.btrfs.subvolumesDelete('/var/lib/docker/btrfs/subvolumes')
if j.sal.fs.exists(path="/var/lib/docker/volumes"):
for item in j.sal.fs.listDirsInDir("/var/lib/docker/volumes"):
j.sal.fs.removeDirTree(item)
def removeDocker(self):
self._destroyAllKill()
rc, out = j.sal.process.execute("mount")
mountpoints = []
for line in out.split("\n"):
if line.find("type btrfs") != -1:
mountpoint = line.split("on ")[1].split("type")[0].strip()
mountpoints.append(mountpoint)
for mountpoint in mountpoints:
j.sal.btrfs.subvolumesDelete(mountpoint, "/docker/")
j.sal.btrfs.subvolumesDelete("/storage", "docker")
j.sal.process.execute("apt-get remove docker-engine -y")
# j.sal.process.execute("rm -rf /var/lib/docker")
j.sal.fs.removeDirTree("/var/lib/docker")
def reInstallDocker(self):
"""
ReInstall docker on your system
"""
self.removeDocker()
j.tools.cuisine.local.docker.install(force=True)
self.init()
def pull(self, imagename):
"""
pull a certain image.
@param imagename string: image
"""
self.client.import_image_from_image(imagename)
def push(self, image, output=True):
"""
image: str, name of the image
output: print progress as it pushes
"""
client = self.client
previous_timeout = client.timeout
client.timeout = 36000
out = []
for l in client.push(image, stream=True):
line = j.data.serializer.json.loads(l)
id = line['id'] if 'id' in line else ''
s = "%s " % id
if 'status' in line:
s += line['status']
if 'progress' in line:
detail = line['progressDetail']
progress = line['progress']
s += " %50s " % progress
if 'error' in line:
message = line['errorDetail']['message']
raise j.exceptions.RuntimeError(message)
if output:
self.logger.info(s)
out.append(s)
client.timeout = previous_timeout
return "\n".join(out)
def build(self, path, tag, output=True, force=False):
"""
path: path of the directory that contains the docker file
tag: tag to give to the image. e.g: 'jumpscale/myimage'
output: print output as it builds
return: strint containing the stdout
"""
# TODO: implement force
out = []
if force:
nocache = True
for l in self.client.build(path=path, tag=tag, nocache=nocache):
line = j.data.serializer.json.loads(l)
if 'stream' in line:
line = line['stream'].strip()
if output:
self.logger.info(line)
out.append(line)
return "\n".join(out)
class DockerExecObj:
def __init__(self, name):
self.name = name
self.id = "docker:%s" % name
def execute(self, cmds, die=True, checkok=None, async=False, showout=True, timeout=0, env={}):
return self._cuisineDockerHost.core.run("docker exec %s %s" % (self.name, cmds))
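# Hypothetical usage sketch: create a container that exposes SSH on host port
# 9022 and shares /opt/code, then list container names and clean up. Requires
# a running docker daemon and the JumpScale sandbox object `j`; the container
# name and paths are illustrative only.
if __name__ == "__main__":
    _docker = j.sal.docker
    _c = _docker.create(name="demo", ports="22:9022",
                        vols="/opt/code:/opt/code",
                        base="jumpscale/ubuntu1604")
    print(_docker.containerNames)
    _c.destroy()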
| {
"content_hash": "a04b56173e51be091602d8d8e0c9630d",
"timestamp": "",
"source": "github",
"line_count": 681,
"max_line_length": 190,
"avg_line_length": 35.34214390602056,
"alnum_prop": 0.5397623400365631,
"repo_name": "Jumpscale/jumpscale_core8",
"id": "955f76fc66bdf4ce50e3a4721988dde6fe2040dc",
"size": "24090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/sal/docker/Docker.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1113"
},
{
"name": "Cap'n Proto",
"bytes": "9033"
},
{
"name": "Lua",
"bytes": "12538"
},
{
"name": "Python",
"bytes": "4343122"
},
{
"name": "Shell",
"bytes": "7091"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.md')) as f:
CHANGES = f.read()
long_description = README + "\n\n" + CHANGES
try:
import pypandoc
long_description = pypandoc.convert(long_description, 'rst', format='md')
except(IOError, ImportError):
pass
requires = [
'lxml',
'requests',
]
testing_extras = requires + [
'nose',
'coverage',
]
setup(name='LstGen',
version='0.6.0',
description='LstGen',
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Code Generators"
],
author='Igor Stroh',
author_email='[email protected]',
url='http://github.com/jenner/LstGen',
keywords='lohnsteuer code generator cli',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
extras_require = {
'testing': testing_extras,
},
entry_points="""\
[console_scripts]
lstgen = lstgen.cli:main
"""
)
| {
"content_hash": "996d0150fdb139655332ede828febbd0",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 27.875,
"alnum_prop": 0.6258808456117874,
"repo_name": "jenner/LstGen",
"id": "838c3b6f28b846e9fbc7da5c7cb43a407a776a55",
"size": "1561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "2113"
},
{
"name": "PHP",
"bytes": "1801"
},
{
"name": "Python",
"bytes": "72278"
}
],
"symlink_target": ""
} |
"""
homeassistant.bootstrap
~~~~~~~~~~~~~~~~~~~~~~~
Provides methods to bootstrap a Home Assistant instance.
Each bootstrap method returns a configured core.HomeAssistant instance.
After bootstrapping you can add your own components or
start the returned Home Assistant instance.
"""
import os
import sys
import logging
import logging.handlers
from collections import defaultdict
import homeassistant.core as core
import homeassistant.util.dt as date_util
import homeassistant.util.package as pkg_util
import homeassistant.util.location as loc_util
import homeassistant.config as config_util
import homeassistant.loader as loader
import homeassistant.components as core_components
import homeassistant.components.group as group
from homeassistant.helpers.entity import Entity
from homeassistant.const import (
EVENT_COMPONENT_LOADED, CONF_LATITUDE, CONF_LONGITUDE,
CONF_TEMPERATURE_UNIT, CONF_NAME, CONF_TIME_ZONE, CONF_CUSTOMIZE,
TEMP_CELCIUS, TEMP_FAHRENHEIT)
_LOGGER = logging.getLogger(__name__)
ATTR_COMPONENT = 'component'
PLATFORM_FORMAT = '{}.{}'
def setup_component(hass, domain, config=None):
""" Setup a component and all its dependencies. """
if domain in hass.config.components:
return True
_ensure_loader_prepared(hass)
if config is None:
config = defaultdict(dict)
components = loader.load_order_component(domain)
# OrderedSet is empty if component or dependencies could not be resolved
if not components:
return False
for component in components:
if not _setup_component(hass, component, config):
return False
return True
def _handle_requirements(hass, component, name):
""" Installs requirements for component. """
if hass.config.skip_pip or not hasattr(component, 'REQUIREMENTS'):
return True
for req in component.REQUIREMENTS:
if not pkg_util.install_package(req, target=hass.config.path('lib')):
_LOGGER.error('Not initializing %s because could not install '
'dependency %s', name, req)
return False
return True
def _setup_component(hass, domain, config):
""" Setup a component for Home Assistant. """
if domain in hass.config.components:
return True
component = loader.get_component(domain)
missing_deps = [dep for dep in component.DEPENDENCIES
if dep not in hass.config.components]
if missing_deps:
_LOGGER.error(
'Not initializing %s because not all dependencies loaded: %s',
domain, ", ".join(missing_deps))
return False
if not _handle_requirements(hass, component, domain):
return False
try:
if not component.setup(hass, config):
_LOGGER.error('component %s failed to initialize', domain)
return False
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Error during setup of component %s', domain)
return False
hass.config.components.append(component.DOMAIN)
# Assumption: if a component does not depend on groups
# it communicates with devices
if group.DOMAIN not in component.DEPENDENCIES:
hass.pool.add_worker()
hass.bus.fire(
EVENT_COMPONENT_LOADED, {ATTR_COMPONENT: component.DOMAIN})
return True
def prepare_setup_platform(hass, config, domain, platform_name):
""" Loads a platform and makes sure dependencies are setup. """
_ensure_loader_prepared(hass)
platform_path = PLATFORM_FORMAT.format(domain, platform_name)
platform = loader.get_component(platform_path)
# Not found
if platform is None:
_LOGGER.error('Unable to find platform %s', platform_path)
return None
# Already loaded
elif platform_path in hass.config.components:
return platform
# Load dependencies
if hasattr(platform, 'DEPENDENCIES'):
for component in platform.DEPENDENCIES:
if not setup_component(hass, component, config):
_LOGGER.error(
'Unable to prepare setup for platform %s because '
'dependency %s could not be initialized', platform_path,
component)
return None
if not _handle_requirements(hass, platform, platform_path):
return None
return platform
def mount_local_lib_path(config_dir):
""" Add local library to Python Path """
sys.path.insert(0, os.path.join(config_dir, 'lib'))
# pylint: disable=too-many-branches, too-many-statements, too-many-arguments
def from_config_dict(config, hass=None, config_dir=None, enable_log=True,
verbose=False, daemon=False, skip_pip=False,
log_rotate_days=None):
"""
Tries to configure Home Assistant from a config dict.
Dynamically loads required components and its dependencies.
"""
if hass is None:
hass = core.HomeAssistant()
if config_dir is not None:
config_dir = os.path.abspath(config_dir)
hass.config.config_dir = config_dir
mount_local_lib_path(config_dir)
process_ha_core_config(hass, config.get(core.DOMAIN, {}))
if enable_log:
enable_logging(hass, verbose, daemon, log_rotate_days)
hass.config.skip_pip = skip_pip
if skip_pip:
_LOGGER.warning('Skipping pip installation of required modules. '
'This may cause issues.')
_ensure_loader_prepared(hass)
# Make a copy because we are mutating it.
# Convert it to defaultdict so components can always have config dict
# Convert values to dictionaries if they are None
config = defaultdict(
dict, {key: value or {} for key, value in config.items()})
# Filter out the repeating and common config section [homeassistant]
components = (key for key in config.keys()
if ' ' not in key and key != core.DOMAIN)
if not core_components.setup(hass, config):
_LOGGER.error('Home Assistant core failed to initialize. '
'Further initialization aborted.')
return hass
_LOGGER.info('Home Assistant core initialized')
# Setup the components
for domain in loader.load_order_components(components):
_setup_component(hass, domain, config)
return hass
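# Hypothetical minimal use of from_config_dict(): the options below are
# illustrative only, and auto-detection of missing location settings may
# require network access.
if __name__ == "__main__":
    _example_hass = from_config_dict({
        'homeassistant': {'name': 'Home', 'temperature_unit': 'C'},
    })
    print(_example_hass.config.components)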
def from_config_file(config_path, hass=None, verbose=False, daemon=False,
skip_pip=True, log_rotate_days=None):
"""
Reads the configuration file and tries to start all the required
functionality. Will add functionality to 'hass' parameter if given,
instantiates a new Home Assistant object if 'hass' is not given.
"""
if hass is None:
hass = core.HomeAssistant()
# Set config dir to directory holding config file
config_dir = os.path.abspath(os.path.dirname(config_path))
hass.config.config_dir = config_dir
mount_local_lib_path(config_dir)
enable_logging(hass, verbose, daemon, log_rotate_days)
config_dict = config_util.load_config_file(config_path)
return from_config_dict(config_dict, hass, enable_log=False,
skip_pip=skip_pip)
def enable_logging(hass, verbose=False, daemon=False, log_rotate_days=None):
""" Setup the logging for home assistant. """
if not daemon:
logging.basicConfig(level=logging.INFO)
fmt = ("%(log_color)s%(asctime)s %(levelname)s (%(threadName)s) "
"[%(name)s] %(message)s%(reset)s")
try:
from colorlog import ColoredFormatter
logging.getLogger().handlers[0].setFormatter(ColoredFormatter(
fmt,
datefmt='%y-%m-%d %H:%M:%S',
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
))
except ImportError:
_LOGGER.warning(
"Colorlog package not found, console coloring disabled")
# Log errors to a file if we have write access to file or config dir
err_log_path = hass.config.path('home-assistant.log')
err_path_exists = os.path.isfile(err_log_path)
# Check if we can write to the error log if it exists or that
# we can create files in the containing directory if not.
if (err_path_exists and os.access(err_log_path, os.W_OK)) or \
(not err_path_exists and os.access(hass.config.config_dir, os.W_OK)):
if log_rotate_days:
err_handler = logging.handlers.TimedRotatingFileHandler(
err_log_path, when='midnight', backupCount=log_rotate_days)
else:
err_handler = logging.FileHandler(
err_log_path, mode='w', delay=True)
err_handler.setLevel(logging.INFO if verbose else logging.WARNING)
err_handler.setFormatter(
logging.Formatter('%(asctime)s %(name)s: %(message)s',
datefmt='%y-%m-%d %H:%M:%S'))
logger = logging.getLogger('')
logger.addHandler(err_handler)
logger.setLevel(logging.INFO) # this sets the minimum log level
else:
_LOGGER.error(
'Unable to setup error log %s (access denied)', err_log_path)
def process_ha_core_config(hass, config):
""" Processes the [homeassistant] section from the config. """
hac = hass.config
def set_time_zone(time_zone_str):
""" Helper method to set time zone in HA. """
if time_zone_str is None:
return
time_zone = date_util.get_time_zone(time_zone_str)
if time_zone:
hac.time_zone = time_zone
date_util.set_default_time_zone(time_zone)
else:
_LOGGER.error('Received invalid time zone %s', time_zone_str)
for key, attr in ((CONF_LATITUDE, 'latitude'),
(CONF_LONGITUDE, 'longitude'),
(CONF_NAME, 'location_name')):
if key in config:
setattr(hac, attr, config[key])
set_time_zone(config.get(CONF_TIME_ZONE))
customize = config.get(CONF_CUSTOMIZE)
if isinstance(customize, dict):
for entity_id, attrs in config.get(CONF_CUSTOMIZE, {}).items():
if not isinstance(attrs, dict):
continue
Entity.overwrite_attribute(entity_id, attrs.keys(), attrs.values())
if CONF_TEMPERATURE_UNIT in config:
unit = config[CONF_TEMPERATURE_UNIT]
if unit == 'C':
hac.temperature_unit = TEMP_CELCIUS
elif unit == 'F':
hac.temperature_unit = TEMP_FAHRENHEIT
# If we miss some of the needed values, auto detect them
if None not in (
hac.latitude, hac.longitude, hac.temperature_unit, hac.time_zone):
return
_LOGGER.info('Auto detecting location and temperature unit')
info = loc_util.detect_location_info()
if info is None:
_LOGGER.error('Could not detect location information')
return
if hac.latitude is None and hac.longitude is None:
hac.latitude = info.latitude
hac.longitude = info.longitude
if hac.temperature_unit is None:
if info.use_fahrenheit:
hac.temperature_unit = TEMP_FAHRENHEIT
else:
hac.temperature_unit = TEMP_CELCIUS
if hac.location_name is None:
hac.location_name = info.city
if hac.time_zone is None:
set_time_zone(info.time_zone)
def _ensure_loader_prepared(hass):
""" Ensure Home Assistant loader is prepared. """
if not loader.PREPARED:
loader.prepare(hass)
| {
"content_hash": "9469a46a91df614ea49337502b88759e",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 79,
"avg_line_length": 32.851540616246496,
"alnum_prop": 0.6339529331514324,
"repo_name": "alexkolar/home-assistant",
"id": "a7e4dbfdc14e4b78a99150bf1fde698096dee384",
"size": "11728",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/bootstrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "815172"
},
{
"name": "Python",
"bytes": "942063"
},
{
"name": "Shell",
"bytes": "3887"
}
],
"symlink_target": ""
} |
"""
A file full of bit twidling helpers
"""
import struct
MAX_WORD = 32 # usually no more than 8, 16 is for SIMD register support
# Masks to use for unsigned anding to size
u_maxes = [(2 ** (8*i)) - 1 for i in range(MAX_WORD+1)]
u_maxes[0] = 0 # powers of 0 are 1, but we need 0
bu_maxes = [(2 ** (i)) - 1 for i in range(8*MAX_WORD+1)]
# Masks of just the sign bit for different sizes
sign_bits = [(2 ** (8*i)) >> 1 for i in range(MAX_WORD+1)]
sign_bits[0] = 0 # powers of 0 are 1, but we need 0
bsign_bits = [(2 ** i) >> 1 for i in range(8*MAX_WORD+1)]
# Max *signed* masks (all but top bit )
s_maxes = [u_maxes[i] ^ sign_bits[i] for i in range(len(u_maxes))]
s_maxes[0] = 0
# bit width masks
b_masks = [(2**i)-1 for i in range(MAX_WORD*8)]
b_masks[0] = 0
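# Added illustration (not in the original module): the tables above are indexed
# by width in bytes (bits for the bu_maxes/bsign_bits/b_masks variants), e.g.:
#   u_maxes[1] == 0xff         u_maxes[2] == 0xffff
#   sign_bits[2] == 0x8000     s_maxes[2] == 0x7fff
#   bu_maxes[12] == 0xfff      b_masks[3] == 0b111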
def unsigned(value, size):
"""
Make a value unsigned based on its size.
"""
return value & u_maxes[size]
def signed(value, size):
"""
Make a value signed based on its size.
"""
x = unsigned(value, size)
if x & sign_bits[size]:
x = (x - u_maxes[size]) - 1
return x
def bsigned(value, size):
"""
Make a value signed based on its size.
"""
if value & bsign_bits[size]:
value = (value - bu_maxes[size]) - 1
return value
def is_signed(value, size):
x = unsigned(value, size)
return bool(x & sign_bits[size])
def sign_extend(value, cursize, newsize):
"""
Take a value and extend its size, filling
the new space with the value of the
high-order (sign) bit.
"""
x = unsigned(value, cursize)
if cursize != newsize:
# Test for signed w/o the call
if x & sign_bits[cursize]:
delta = newsize - cursize
highbits = u_maxes[delta]
x |= highbits << (8*cursize)
return x
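# Added illustration (not in the original module):
#   signed(0xff, 1) == -1
#   unsigned(-1, 1) == 0xff
#   sign_extend(0xff, 1, 2) == 0xffff   (sign bit set, high byte filled with 1s)
#   sign_extend(0x7f, 1, 2) == 0x7f     (sign bit clear, value unchanged)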
def bsign_extend(value, cursize, newsize):
x = value
if cursize != newsize:
if x & bsign_bits[cursize]:
delta = newsize - cursize
highbits = bu_maxes[delta]
x |= highbits << (cursize)
return x
def is_parity(val):
s = 0
while val:
s ^= val & 1
val = val >> 1
return (not s)
parity_table = []
for i in range(256):
parity_table.append(is_parity(i))
def is_parity_byte(bval):
"""
An "optimized" parity checker that looks up the index.
"""
return parity_table[bval & 0xff]
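# Added illustration (not in the original module): parity here follows the x86
# PF convention -- True when the byte has an even number of set bits.
#   is_parity(0b0011) == True
#   is_parity(0b0111) == False
#   is_parity_byte(0x1ff) == True   (only the low byte, 0xff, is considered)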
def lsb(value):
return value & 0x1
def msb(value, size):
if value & sign_bits[size]:
return 1
return 0
def is_signed_half_carry(value, size, src):
'''
BCD carry/borrow in the second most significant nibble:
32bit - bit 27
16bit - bit 11
8bit - bit 3
'''
bitsize = (size << 3) - 5
mask = 1<<bitsize
p1 = value & mask
p2 = src & mask
return ((p1 ^ p2) != 0)
def is_signed_carry(value, size, src):
smax = s_maxes[size]
if value > smax > src:
return True
if value < -smax < -src:
return True
return False
def is_signed_overflow(value, size):
smax = s_maxes[size]
if value > smax:
return True
if value < -smax:
return True
return False
def is_unsigned_carry(value, size):
umax = u_maxes[size]
if value > umax:
return True
elif value < 0:
return True
return False
def is_aux_carry(src, dst):
return (dst & 0xf) + (src & 0xf) > 15
def is_aux_carry_sub(src, dst):
return src & 0xf > dst & 0xf
# set of format lists which make size, endianness, and signedness fast and easy
le_fmt_chars = (None, "B", "<H", None, "<I", None, None, None, "<Q")
be_fmt_chars = (None, "B", ">H", None, ">I", None, None, None, ">Q")
fmt_chars = (le_fmt_chars, be_fmt_chars)
le_fmt_schars = (None, "b", "<h", None, "<i", None, None, None, "<q")
be_fmt_schars = (None, "b", ">h", None, ">i", None, None, None, ">q")
fmt_schars = (le_fmt_schars, be_fmt_schars)
master_fmts = (fmt_chars, fmt_schars)
fmt_sizes = (None,1,2,4,4,8,8,8,8)
le_fmt_float = (None, None, None, None, '<f', None, None, None, '<d')
be_fmt_float = (None, None, None, None, '>f', None, None, None, '>d')
fmt_floats = (le_fmt_float, be_fmt_float)
def getFormat(size, big_endian=False, signed=False):
'''
Returns the proper struct format for numbers up to 8 bytes in length
Endianness and Signedness aware.
Only useful for *full individual* numbers... ie. 1, 2, 4, 8. Numbers
of 24-bits (3), 40-bit (5), 48-bits (6) or 56-bits (7) are not accounted
for here and will return None.
'''
return master_fmts[signed][big_endian][size]
def getFloatFormat(size, big_endian=False):
'''
Returns the proper struct format for numbers up to 8 bytes in length
Endianness and Signedness aware.
Only useful for *full individual* numbers... ie. 1, 2, 4, 8. Numbers
of 24-bits (3), 40-bit (5), 48-bits (6) or 56-bits (7) are not accounted
for here and will return None.
'''
return fmt_floats[big_endian][size]
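# Added illustration (not in the original module): the returned format string
# plugs straight into struct.pack/struct.unpack.
#   getFormat(4)                      == '<I'   (little endian, unsigned)
#   getFormat(4, signed=True)         == '<i'
#   getFormat(2, big_endian=True)     == '>H'
#   getFloatFormat(8)                 == '<d'
#   struct.pack(getFormat(2), 0x1234) == b'\x34\x12'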
def parsebytes(bytes, offset, size, sign=False, bigend=False):
"""
Mostly for pulling immediates out of strings...
"""
if size > 8:
return slowparsebytes(bytes, offset, size, sign=sign, bigend=bigend)
if bigend:
f = be_fmt_chars[size]
else:
f = le_fmt_chars[size]
if f is None:
return slowparsebytes(bytes, offset, size, sign=sign, bigend=bigend)
d = bytes[offset:offset+size]
x = struct.unpack(f, d)[0]
if sign:
x = signed(x, size)
return x
def slowparsebytes(bytes, offset, size, sign=False, bigend=False):
if bigend:
begin = offset
inc = 1
else:
begin = offset + (size-1)
inc = -1
ret = 0
ioff = 0
for x in range(size):
ret = ret << 8
ret |= bytes[begin+ioff]
ioff += inc
if sign:
ret = signed(ret, size)
return ret
def buildbytes(value, size, bigend=False):
value = unsigned(value, size)
if bigend:
f = be_fmt_chars[size]
else:
f = le_fmt_chars[size]
if f is None:
raise Exception("envi.bits.buildbytes needs slowbuildbytes")
return struct.pack(f, value)
def byteswap(value, size):
ret = 0
for i in range(size):
ret = ret << 8
ret |= (value >> (8*i)) & 0xff
return ret
hex_fmt = {
0:'0x%.1x',
1:"0x%.2x",
2:"0x%.4x",
4:"0x%.8x",
8:"0x%.16x",
}
def intwidth(val):
if val < 0:
val = abs(val)
ret = 0
while val:
ret += 1
val = val >> 8
return ret
def hex(value, size=None):
if size is None:
size = intwidth(value)
fmt = hex_fmt.get(size)
if fmt is not None:
return fmt % value
x = []
while value:
x.append('%.2x' % (value & 0xff))
value = value >> 8
x.reverse()
return '0x%s' % ''.join(x)
def binrepr(intval, bitwidth=None):
'''
Return a string of one's and zero's for the given value.
'''
ret = []
while intval:
ret.append(str(intval & 0x1))
intval >>= 1
ret.reverse()
binstr = ''.join(ret)
if bitwidth is not None:
binstr = binstr.rjust(bitwidth, '0')
return binstr
def binary(binstr):
'''
Decode a binary string of 1/0's into a python number
'''
return int(binstr, 2)
def binbytes(binstr):
'''
Decode a binary string of 1/0's into a python binary
string.
'''
if len(binstr) % 8 != 0:
raise Exception('Byte padded binary strings only for now!')
bytez = ''
while binstr:
bytez += chr(binary(binstr[:8]))
binstr = binstr[8:]
return bytez
def parsebits(bytes, offset, bitoff, bitsize):
'''
Parse bitsize bits, starting bitoff bits past byte offset
within bytes, and return them as an integer (MSB first).
'''
val = 0
cnt = 0
while cnt < bitsize:
addbit = bitoff + cnt
addoff = offset + (addbit >> 3)
modoff = addbit % 8
o = bytes[addoff]
val = (val << 1) + ((o >> (7 - modoff)) & 1)
cnt += 1
return val
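# Added illustration (not in the original module): pull a nibble out of a byte.
#   parsebits(b'\xf0', 0, 0, 4) == 0b1111   (high nibble)
#   parsebits(b'\x0f', 0, 4, 4) == 0b1111   (low nibble)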
def masktest(s):
'''
Specify a bit mask with the following syntax:
'110100xxx00xx' to return a tester callback which will
determine if an integer value meets the mask.
example:
opcode = 0x4388e234
if masktest('1011xxxx0000')(opcode):
print('MATCHED!')
NOTE: For performance reasons, it is recommended that
masktest be used to initialize a static list of tests
that are re-used rather than reconstructed.
'''
maskin = binary(s.replace('0', '1').replace('x', '0'))
matchval = binary(s.replace('x', '0'))
def domask(testval):
return testval & maskin == matchval
return domask
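# Added illustration (not in the original module): build the testers once and
# reuse them, as the docstring recommends. The mask below is hypothetical.
#   matches_prefix = masktest('1001xxxx')
#   matches_prefix(0x90) == True    (0x90 & 0b11110000 == 0b10010000)
#   matches_prefix(0xa0) == False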
| {
"content_hash": "19c03d7f64a7188feddcae54a56ebecf",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 79,
"avg_line_length": 24.725212464589234,
"alnum_prop": 0.5783684692942255,
"repo_name": "atlas0fd00m/CanCat",
"id": "4ccb5ab883712c0eb76a640bb61b8b332281dfa6",
"size": "8728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cancatlib/envi/bits.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "35476"
},
{
"name": "C++",
"bytes": "247130"
},
{
"name": "Makefile",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "10130734"
}
],
"symlink_target": ""
} |
"""Changes the description, promo text and title of an app in en-US and en-GB.
"""
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
TRACK = 'alpha' # Can be 'alpha', 'beta', 'production' or 'rollout'
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('package_name',
help='The package name. Example: com.android.sample')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv,
'androidpublisher',
'v2',
__doc__,
__file__, parents=[argparser],
scope='https://www.googleapis.com/auth/androidpublisher')
# Process flags and read their values.
package_name = flags.package_name
try:
edit_request = service.edits().insert(body={}, packageName=package_name)
result = edit_request.execute()
edit_id = result['id']
listing_response_us = service.edits().listings().update(
editId=edit_id, packageName=package_name, language='en-US',
body={'fullDescription': 'Dessert trunk truck',
'shortDescription': 'Bacon ipsum',
'title': 'App Title US'}).execute()
print ('Listing for language %s was updated.'
% listing_response_us['language'])
listing_response_gb = service.edits().listings().update(
editId=edit_id, packageName=package_name, language='en-GB',
body={'fullDescription': 'Pudding boot lorry',
'shortDescription': 'Pancetta ipsum',
'title': 'App Title UK'}).execute()
print ('Listing for language %s was updated.'
% listing_response_gb['language'])
commit_request = service.edits().commit(
editId=edit_id, packageName=package_name).execute()
print ('Edit "%s" has been committed' % (commit_request['id']))
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
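# Example invocation (illustrative; the package name is hypothetical):
#   python update_listings.py com.example.sampleapp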
| {
"content_hash": "667eff7ffa455f09b04c0d0218fc468e",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 78,
"avg_line_length": 33.01587301587302,
"alnum_prop": 0.6485576923076923,
"repo_name": "hyogij/android-play-publisher-api",
"id": "7005a8bfe0a8dd9dd35b5c27dd18b868662c2953",
"size": "2698",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "v2/python/update_listings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "29470"
},
{
"name": "Python",
"bytes": "15532"
}
],
"symlink_target": ""
} |
import MySQLdb
import logging
from mysql_basic_c import mysql_basic_c as database
logger = logging.getLogger(__name__)
class tab_history_tickets():
'''
Manage tab_history_tickets table
'''
def __init__(self, host, user, passwd, db):
'''
Initialize the class.
Args:
host: MySQL server host.
user: MySQL user.
passwd: MySQL password.
db: database to use.
Return:
Raise:
'''
self.db = database(host, user, passwd, db)
def insert_one_record_from_ticket_record(self, ticket_record):
'''
Insert one record copied from a tab_tickets record.
Args:
ticket_record: a tab_tickets row (tuple of 23 fields).
Return:
True/False
Raise:
'''
if None == ticket_record[0]:
ticket_id = 'NULL'
else:
ticket_id = '%s' % ticket_record[0]
if None == ticket_record[1]:
applicant_id = 'NULL'
else:
applicant_id = '%s' % ticket_record[1]
if None == ticket_record[2]:
applicant_name = 'NULL'
else:
applicant_name = "'%s'" % ticket_record[2]
if None == ticket_record[3]:
application_time = 'NULL'
else:
application_time = "'%s'" % ticket_record[3]
if None == ticket_record[4]:
application_method = 'NULL'
else:
application_method = '%s' % ticket_record[4]
if None == ticket_record[5]:
type = 'NULL'
else:
type = '%s' % ticket_record[5]
if None == ticket_record[6]:
content = 'NULL'
else:
content = "'%s'" % ticket_record[6]
if None == ticket_record[7]:
begin_time = 'NULL'
else:
begin_time = "'%s'" % ticket_record[7]
if None == ticket_record[8]:
end_time = 'NULL'
else:
end_time = "'%s'" % ticket_record[8]
if None == ticket_record[9]:
state = 'NULL'
else:
state = '%s' % ticket_record[9]
if None == ticket_record[10]:
inner_state = 'NULL'
else:
inner_state = '%s' % ticket_record[10]
if None == ticket_record[11]:
approver_id = 'NULL'
else:
approver_id = '%s' % ticket_record[11]
if None == ticket_record[12]:
approver_name = 'NULL'
else:
approver_name = "'%s'" % ticket_record[12]
if None == ticket_record[13]:
approval_time = 'NULL'
else:
approval_time = "'%s'" % ticket_record[13]
if None == ticket_record[14]:
approval_description = 'NULL'
else:
approval_description = "'%s'" % ticket_record[14]
if None == ticket_record[15]:
param_1 = 'NULL'
else:
param_1 = "%s" % ticket_record[15]
if None == ticket_record[16]:
param_2 = 'NULL'
else:
param_2 = "%s" % ticket_record[16]
if None == ticket_record[17]:
param_3 = 'NULL'
else:
param_3 = "'%s'" % ticket_record[17]
if None == ticket_record[18]:
param_4 = 'NULL'
else:
param_4 = "%s" % ticket_record[18]
if None == ticket_record[19]:
param_5 = 'NULL'
else:
param_5 = "%s" % ticket_record[19]
if None == ticket_record[20]:
param_6 = 'NULL'
else:
param_6 = "'%s'" % ticket_record[20]
if None == ticket_record[21]:
last_operation_time = 'NULL'
else:
last_operation_time = "'%s'" % ticket_record[21]
if None == ticket_record[22]:
operation_record = 'NULL'
else:
operation_record = "'%s'" % ticket_record[22]
sql = "insert into tab_history_tickets(ticket_id,applicant_id,applicant_name,application_time,application_method,type,content,begin_time,end_time,state,inner_state,approver_id,approver_name,approval_time,approval_description,param_1,param_2,param_3,param_4,param_5,param_6,last_operation_time,operation_record) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)" % (ticket_id, applicant_id, applicant_name, application_time, application_method, type, content, begin_time, end_time, state, inner_state, approver_id, approver_name, approval_time, approval_description, param_1, param_2, param_3, param_4, param_5, param_6, last_operation_time, operation_record)
return self.db.execute(sql)
def get_sum_files_num_about_user_one(self,user_id,type,source_zone_id,dest_zone_id):
sql = ''
if 11 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d " %(user_id,source_zone_id,dest_zone_id,type)
elif 1 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d " %(user_id,dest_zone_id,type)
elif 2 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d " %(user_id,source_zone_id,type)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
def get_sum_files_num_about_user_two(self,user_id,type,source_zone_id,dest_zone_id,end_time):
sql = ''
if 11 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d and \
application_time <= '%s' " %(user_id,source_zone_id,dest_zone_id,type,end_time)
elif 1 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d and \
application_time <= '%s' " %(user_id,dest_zone_id,type,end_time)
elif 2 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d and \
application_time <= '%s' " %(user_id,source_zone_id,type,end_time)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
def get_sum_files_num_about_user_three(self,user_id,type,source_zone_id,dest_zone_id,begin_time):
sql = ''
if 11 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d and \
application_time >= '%s' " %(user_id,source_zone_id,dest_zone_id,type,begin_time)
elif 1 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d and \
application_time >= '%s' " %(user_id,dest_zone_id,type,begin_time)
elif 2 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d and \
application_time >= '%s' " %(user_id,source_zone_id,type,begin_time)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
def get_sum_files_num_about_user_four(self,user_id,type,source_zone_id,dest_zone_id,begin_time,end_time):
sql = ''
if 11 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d and application_time>='%s' and \
application_time<='%s' " %(user_id,source_zone_id,dest_zone_id,type,begin_time,end_time)
elif 1 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d and application_time>='%s' and \
application_time<='%s' " %(user_id,dest_zone_id,type,begin_time,end_time)
elif 2 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d and application_time>='%s' and \
application_time<='%s' " %(user_id,source_zone_id,type,begin_time,end_time)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
def get_sum_files_num_about_zone_one(self,type,source_zone_id,dest_zone_id):
sql = ''
if 11 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type = %d " %(source_zone_id,dest_zone_id,type)
elif 1 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and param_2=%s and type = %d " %(dest_zone_id,type)
elif 2 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and type = %d " %(source_zone_id,type)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
def get_sum_files_num_about_zone_two(self,type,source_zone_id,dest_zone_id,end_time):
sql = ''
if 11 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type=%d and \
application_time <= '%s' " %(source_zone_id,dest_zone_id,type,end_time)
elif 1 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and param_2=%s and type=%d and \
application_time <= '%s' " %(dest_zone_id,type,end_time)
elif 2 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and type=%d and \
application_time <= '%s' " %(source_zone_id,type,end_time)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
def get_sum_files_num_about_zone_three(self,type,source_zone_id,dest_zone_id,begin_time):
sql = ''
if 11 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type=%d and \
application_time >= '%s' " %(source_zone_id,dest_zone_id,type,begin_time)
elif 1 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and param_2=%s and type=%d and \
application_time >= '%s' " %(dest_zone_id,type,begin_time)
elif 2 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and type=%d and \
application_time >= '%s' " %(source_zone_id,type,begin_time)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
def get_sum_files_num_about_zone_four(self,type,source_zone_id,dest_zone_id,begin_time,end_time):
sql = ''
if 11 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type=%d and application_time>='%s' and \
application_time<='%s' " %(source_zone_id,dest_zone_id,type,begin_time,end_time)
elif 1 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and param_2=%s and type=%d and application_time>='%s' and \
application_time<='%s' " %(dest_zone_id,type,begin_time,end_time)
elif 2 == type :
sql = "select sum(param_4) from tab_history_tickets where state=90 and param_1=%s and type=%d and application_time>='%s' and \
application_time<='%s' " %(source_zone_id,type,begin_time,end_time)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
def get_sum_files_size_about_user_one(self,user_id,type,source_zone_id,dest_zone_id):
sql = ''
if 11 == type :
sql = sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d " \
%(user_id,source_zone_id,dest_zone_id,type)
elif 1 == type :
sql = sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d " \
%(user_id,dest_zone_id,type)
elif 2 == type :
sql = sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d " \
%(user_id,source_zone_id,type)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
def get_sum_files_size_about_user_two(self,user_id,type,source_zone_id,dest_zone_id,end_time):
sql = ''
if 11 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d and \
application_time <= '%s' " %(user_id,source_zone_id,dest_zone_id,type,end_time)
elif 1 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d and \
application_time <= '%s' " %(user_id,dest_zone_id,type,end_time)
elif 2 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d and \
application_time <= '%s' " %(user_id,source_zone_id,type,end_time)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
def get_sum_files_size_about_user_three(self,user_id,type,source_zone_id,dest_zone_id,begin_time):
sql = ''
if 11 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d and \
application_time >= '%s' " %(user_id,source_zone_id,dest_zone_id,type,begin_time)
elif 1 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d and \
application_time >= '%s' " %(user_id,dest_zone_id,type,begin_time)
elif 2 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d and \
application_time >= '%s' " %(user_id,source_zone_id,type,begin_time)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
def get_sum_files_size_about_user_four(self,user_id,type,source_zone_id,dest_zone_id,begin_time,end_time):
sql = ''
if 11 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and param_2=%s and type=%d and application_time>='%s' and \
application_time<='%s' " %(user_id,source_zone_id,dest_zone_id,type,begin_time,end_time)
elif 1 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_2=%s and type=%d and application_time>='%s' and \
application_time<='%s' " %(user_id,dest_zone_id,type,begin_time,end_time)
elif 2 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and applicant_id=%d and param_1=%s and type=%d and application_time>='%s' and \
application_time<='%s' " %(user_id,source_zone_id,type,begin_time,end_time)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
def get_sum_files_size_about_zone_one(self,type,source_zone_id,dest_zone_id):
sql = ''
if 11 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type = %d " %(source_zone_id,dest_zone_id,type)
elif 1 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and param_2=%s and type = %d " %(dest_zone_id,type)
elif 2 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and type = %d " %(source_zone_id,type)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
def get_sum_files_size_about_zone_two(self,type,source_zone_id,dest_zone_id,end_time):
sql = ''
if 11 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type=%d and \
application_time <= '%s' " %(source_zone_id,dest_zone_id,type,end_time)
elif 1 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and param_2=%s and type=%d and \
application_time <= '%s' " %(dest_zone_id,type,end_time)
elif 2 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and type=%d and \
application_time <= '%s' " %(source_zone_id,type,end_time)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
def get_sum_files_size_about_zone_three(self,type,source_zone_id,dest_zone_id,begin_time):
sql = ''
if 11 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type=%d and \
application_time >= '%s' " %(source_zone_id,dest_zone_id,type,begin_time)
elif 1 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and param_2=%s and type=%d and \
application_time >= '%s' " %(dest_zone_id,type,begin_time)
elif 2 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and type=%d and \
application_time >= '%s' " %(source_zone_id,type,begin_time)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
def get_sum_files_size_about_zone_four(self,type,source_zone_id,dest_zone_id,begin_time,end_time):
sql = ''
if 11 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and param_2=%s and type=%d and application_time>='%s' and \
application_time<='%s' " %(source_zone_id,dest_zone_id,type,begin_time,end_time)
elif 1 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and param_2=%s and type=%d and application_time>='%s' and \
application_time<='%s' " %(dest_zone_id,type,begin_time,end_time)
elif 2 == type :
sql = "select sum(param_5) from tab_history_tickets where state=90 and param_1=%s and type=%d and application_time>='%s' and \
application_time<='%s' " %(source_zone_id,type,begin_time,end_time)
else :
logger.error('type:%d is error' %type)
return (False,[])
logger.info('sql:%s' %sql)
err,result = self.db.query(sql)
if False == err:
return (err,result)
result_list = []
for row in result:
result_list.append(row)
return (err, result_list)
if __name__ == '__main__':
db = tab_history_tickets('192.168.1.18','sboxweb','Sbox123456xZ','sbox_db')
'''
print 'get_sum_files_num_about_user_one'
print db.get_sum_files_num_about_user_one(112,1,'',11)
print db.get_sum_files_num_about_user_one(112,2,11,'')
print db.get_sum_files_num_about_user_one(112,3,11,12)
print db.get_sum_files_num_about_user_one(112,11,11,12)
print 'get_sum_files_num_about_zone_one'
print db.get_sum_files_num_about_zone_one(1,None,11)
print db.get_sum_files_num_about_zone_one(2,11,None)
print db.get_sum_files_num_about_zone_one(3,12,11)
print db.get_sum_files_num_about_zone_one(11,11,12)
print 'get_sum_files_num_about_user_two'
print db.get_sum_files_num_about_user_two(112,1,None,11,'2015-02-13 10:10:10')
print db.get_sum_files_num_about_user_two(112,2,11,None,'2015-02-13 10:10:10')
print db.get_sum_files_num_about_user_two(112,3,11,12,'2015-02-13 10:10:10')
print db.get_sum_files_num_about_user_two(112,11,11,12,'2015-02-13 10:10:10')
print 'get_sum_files_num_about_zone_two'
print db.get_sum_files_num_about_zone_two(1,None,11,'2015-02-13 10:10:10')
print db.get_sum_files_num_about_zone_two(2,11,None,'2015-02-13 10:10:10')
print db.get_sum_files_num_about_zone_two(3,11,12,'2015-02-13 10:10:10')
print db.get_sum_files_num_about_zone_two(11,11,12,'2015-02-13 10:10:10')
print 'get_sum_files_num_about_user_three'
print db.get_sum_files_num_about_user_three(112,1,None,11,'2015-02-13 10:10:10')
print db.get_sum_files_num_about_user_three(112,2,11,None,'2015-02-13 10:10:10')
print db.get_sum_files_num_about_user_three(112,3,11,12,'2015-02-13 10:10:10')
print db.get_sum_files_num_about_user_three(112,11,11,12,'2015-02-13 10:10:10')
print 'get_sum_files_num_about_zone_three'
print db.get_sum_files_num_about_zone_three(1,None,11,'2015-02-13 10:10:10')
print db.get_sum_files_num_about_zone_three(2,11,None,'2015-02-13 10:10:10')
print db.get_sum_files_num_about_zone_three(3,11,12,'2015-02-13 10:10:10')
print db.get_sum_files_num_about_zone_three(11,11,12,'2015-02-13 10:10:10')
'''
print 'get_sum_files_num_about_user_four'
print db.get_sum_files_num_about_user_four(2,1,None,11,'20150212 00:00:00','20150214 00:00:00')
#print db.get_sum_files_num_about_user_four(112,2,11,None,'2015-02-13 10:10:10','2015-02-15 10:10:10')
#print db.get_sum_files_num_about_user_four(112,3,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
#print db.get_sum_files_num_about_user_four(112,11,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
'''
print 'get_sum_files_num_about_zone_four'
print db.get_sum_files_num_about_zone_four(1,None,11,'2015-02-13 10:10:10','2015-02-15 10:10:10')
print db.get_sum_files_num_about_zone_four(2,11,None,'2015-02-13 10:10:10','2015-02-15 10:10:10')
print db.get_sum_files_num_about_zone_four(3,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
print db.get_sum_files_num_about_zone_four(11,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
print 'get_sum_files_size_about_user_one'
print db.get_sum_files_size_about_user_one(112,1,'',11)
print db.get_sum_files_size_about_user_one(112,2,11,'')
print db.get_sum_files_size_about_user_one(112,3,11,12)
print db.get_sum_files_size_about_user_one(112,11,11,12)
print 'get_sum_files_size_about_zone_one'
print db.get_sum_files_size_about_zone_one(1,None,11)
print db.get_sum_files_size_about_zone_one(2,11,None)
print db.get_sum_files_size_about_zone_one(3,12,11)
print db.get_sum_files_size_about_zone_one(11,11,12)
print 'get_sum_files_size_about_user_two'
print db.get_sum_files_size_about_user_two(112,1,None,11,'2015-02-13 10:10:10')
print db.get_sum_files_size_about_user_two(112,2,11,None,'2015-02-13 10:10:10')
print db.get_sum_files_size_about_user_two(112,3,11,12,'2015-02-13 10:10:10')
print db.get_sum_files_size_about_user_two(112,11,11,12,'2015-02-13 10:10:10')
print 'get_sum_files_size_about_zone_two'
print db.get_sum_files_size_about_zone_two(1,None,11,'2015-02-13 10:10:10')
print db.get_sum_files_size_about_zone_two(2,11,None,'2015-02-13 10:10:10')
print db.get_sum_files_size_about_zone_two(3,11,12,'2015-02-13 10:10:10')
print db.get_sum_files_size_about_zone_two(11,11,12,'2015-02-13 10:10:10')
print 'get_sum_files_size_about_user_three'
print db.get_sum_files_size_about_user_three(112,1,None,11,'2015-02-13 10:10:10')
print db.get_sum_files_size_about_user_three(112,2,11,None,'2015-02-13 10:10:10')
print db.get_sum_files_size_about_user_three(112,3,11,12,'2015-02-13 10:10:10')
print db.get_sum_files_size_about_user_three(112,11,11,12,'2015-02-13 10:10:10')
print 'get_sum_files_size_about_zone_three'
print db.get_sum_files_size_about_zone_three(1,None,11,'2015-02-13 10:10:10')
print db.get_sum_files_size_about_zone_three(2,11,None,'2015-02-13 10:10:10')
print db.get_sum_files_size_about_zone_three(3,11,12,'2015-02-13 10:10:10')
print db.get_sum_files_size_about_zone_three(11,11,12,'2015-02-13 10:10:10')
'''
print 'get_sum_files_size_about_user_four'
print db.get_sum_files_size_about_user_four(112,1,None,11,'2015-02-13 10:10:10','2015-02-15 10:10:10')
print db.get_sum_files_size_about_user_four(112,2,11,None,'2015-02-13 10:10:10','2015-02-15 10:10:10')
print db.get_sum_files_size_about_user_four(112,3,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
print db.get_sum_files_size_about_user_four(112,11,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
print 'get_sum_files_size_about_zone_four'
print db.get_sum_files_size_about_zone_four(1,None,11,'2015-02-13 10:10:10','2015-02-15 10:10:10')
print db.get_sum_files_size_about_zone_four(2,11,None,'2015-02-13 10:10:10','2015-02-15 10:10:10')
print db.get_sum_files_size_about_zone_four(3,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
print db.get_sum_files_size_about_zone_four(11,11,12,'2015-02-13 10:10:10','2015-02-15 10:10:10')
| {
"content_hash": "88fc0701f6a18eb8b056976574aafab1",
"timestamp": "",
"source": "github",
"line_count": 628,
"max_line_length": 715,
"avg_line_length": 46.738853503184714,
"alnum_prop": 0.5797901335513764,
"repo_name": "Svolcano/python_exercise",
"id": "3e24378f1f3b09285002c85d10ff3fa2f8b2de30",
"size": "29352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WinLsLoad/lib/db/tab_history_tickets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "732"
},
{
"name": "JavaScript",
"bytes": "157614"
},
{
"name": "Python",
"bytes": "3292561"
},
{
"name": "Shell",
"bytes": "1417"
}
],
"symlink_target": ""
} |
import paddle.fluid.core as core
import unittest
import numpy as np
class TestSelectedRows(unittest.TestCase):
def test_selected_rows(self):
place = core.CPUPlace()
height = 10
rows = [0, 4, 7]
row_numel = 12
selected_rows = core.SelectedRows(rows, height)
np_array = np.ones((len(rows), row_numel)).astype("float32")
np_array[0, 0] = 2.0
np_array[2, 8] = 4.0
tensor = selected_rows.get_tensor()
tensor.set(np_array, place)
# compare rows
self.assertEqual(0, selected_rows.rows()[0])
self.assertEqual(4, selected_rows.rows()[1])
self.assertEqual(7, selected_rows.rows()[2])
# compare height
self.assertEqual(10, selected_rows.height())
# compare tensor
self.assertAlmostEqual(2.0,
selected_rows.get_tensor().get_float_element(0))
self.assertAlmostEqual(1.0,
selected_rows.get_tensor().get_float_element(1))
self.assertAlmostEqual(
4.0,
selected_rows.get_tensor().get_float_element(2 * row_numel + 8))
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "00f59ca3f38ae8a647848093f1e08f2d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 79,
"avg_line_length": 31.710526315789473,
"alnum_prop": 0.5717842323651452,
"repo_name": "pkuyym/Paddle",
"id": "3d7b86787fbf0a855bcd86b8a873c9134cb1d5cc",
"size": "1818",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_selected_rows.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "272910"
},
{
"name": "C++",
"bytes": "7511205"
},
{
"name": "CMake",
"bytes": "270494"
},
{
"name": "Cuda",
"bytes": "1074033"
},
{
"name": "Go",
"bytes": "109501"
},
{
"name": "Perl",
"bytes": "11456"
},
{
"name": "Python",
"bytes": "3565922"
},
{
"name": "Shell",
"bytes": "172893"
}
],
"symlink_target": ""
} |
"""
Functions to aid writing python scripts that process the pandoc
AST serialized as JSON.
"""
import codecs
import hashlib
import io
import json
import os
import sys
# some utility-functions: make it easier to create your own filters
def get_filename4code(module, content, ext=None):
"""Generate filename based on content
The function ensures that the (temporary) directory exists, so that the
file can be written.
Example:
filename = get_filename4code("myfilter", code)
"""
imagedir = module + "-images"
fn = hashlib.sha1(content.encode(sys.getfilesystemencoding())).hexdigest()
try:
os.mkdir(imagedir)
sys.stderr.write('Created directory ' + imagedir + '\n')
except OSError:
pass
if ext:
fn += "." + ext
return os.path.join(imagedir, fn)
def get_value(kv, key, value = None):
"""get value from the keyvalues (options)"""
res = []
for k, v in kv:
if k == key:
value = v
else:
res.append([k, v])
return value, res
def get_caption(kv):
"""get caption from the keyvalues (options)
Example:
if key == 'CodeBlock':
[[ident, classes, keyvals], code] = value
caption, typef, keyvals = get_caption(keyvals)
...
return Para([Image([ident, [], keyvals], caption, [filename, typef])])
"""
caption = []
typef = ""
value, res = get_value(kv, u"caption")
if value is not None:
caption = [Str(value)]
typef = "fig:"
return caption, typef, res
def get_extension(format, default, **alternates):
"""get the extension for the result, needs a default and some specialisations
Example:
filetype = get_extension(format, "png", html="svg", latex="eps")
"""
try:
return alternates[format]
except KeyError:
return default
# end of utilities
def walk(x, action, format, meta):
"""Walk a tree, applying an action to every object.
Returns a modified tree. An action is a function of the form
`action(key, value, format, meta)`, where:
* `key` is the type of the pandoc object (e.g. 'Str', 'Para')
* `value` is the contents of the object (e.g. a string for 'Str',
a list of inline elements for 'Para')
* `format` is the target output format (as supplied by the
`format` argument of `walk`)
* `meta` is the document's metadata
The return of an action is either:
* `None`: this means that the object should remain unchanged
* a pandoc object: this will replace the original object
* a list of pandoc objects: these will replace the original object; the
list is merged with the neighbors of the original objects (spliced into
the list the original object belongs to); returning an empty list deletes
the object
"""
if isinstance(x, list):
array = []
for item in x:
if isinstance(item, dict) and 't' in item:
res = action(item['t'],
item['c'] if 'c' in item else None, format, meta)
if res is None:
array.append(walk(item, action, format, meta))
elif isinstance(res, list):
for z in res:
array.append(walk(z, action, format, meta))
else:
array.append(walk(res, action, format, meta))
else:
array.append(walk(item, action, format, meta))
return array
elif isinstance(x, dict):
for k in x:
x[k] = walk(x[k], action, format, meta)
return x
else:
return x
def toJSONFilter(action):
"""Like `toJSONFilters`, but takes a single action as argument.
"""
toJSONFilters([action])
def toJSONFilters(actions):
"""Generate a JSON-to-JSON filter from stdin to stdout
The filter:
* reads a JSON-formatted pandoc document from stdin
* transforms it by walking the tree and performing the actions
* returns a new JSON-formatted pandoc document to stdout
The argument `actions` is a list of functions of the form
`action(key, value, format, meta)`, as described in more
detail under `walk`.
This function calls `applyJSONFilters`, with the `format`
argument provided by the first command-line argument,
if present. (Pandoc sets this by default when calling
filters.)
"""
try:
input_stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
except AttributeError:
# Python 2 does not have sys.stdin.buffer.
# REF: https://stackoverflow.com/questions/2467928/python-unicodeencode
input_stream = codecs.getreader("utf-8")(sys.stdin)
source = input_stream.read()
if len(sys.argv) > 1:
format = sys.argv[1]
else:
format = ""
sys.stdout.write(applyJSONFilters(actions, source, format))
def applyJSONFilters(actions, source, format=""):
"""Walk through JSON structure and apply filters
This:
* reads a JSON-formatted pandoc document from a source string
* transforms it by walking the tree and performing the actions
* returns a new JSON-formatted pandoc document as a string
The `actions` argument is a list of functions (see `walk`
for a full description).
The argument `source` is a string encoded JSON object.
The argument `format` is a string describing the output format.
Returns the new JSON-formatted pandoc document.
"""
doc = json.loads(source)
if 'meta' in doc:
meta = doc['meta']
elif doc[0]: # old API
meta = doc[0]['unMeta']
else:
meta = {}
altered = doc
for action in actions:
altered = walk(altered, action, format, meta)
return json.dumps(altered)
def stringify(x):
"""Walks the tree x and returns concatenated string content,
leaving out all formatting.
"""
result = []
def go(key, val, format, meta):
if key in ['Str', 'MetaString']:
result.append(val)
elif key == 'Code':
result.append(val[1])
elif key == 'Math':
result.append(val[1])
elif key == 'LineBreak':
result.append(" ")
elif key == 'SoftBreak':
result.append(" ")
elif key == 'Space':
result.append(" ")
walk(x, go, "", {})
return ''.join(result)
def attributes(attrs):
"""Returns an attribute list, constructed from the
dictionary attrs.
"""
attrs = attrs or {}
ident = attrs.get("id", "")
classes = attrs.get("classes", [])
keyvals = [[x, attrs[x]] for x in attrs if (x != "classes" and x != "id")]
return [ident, classes, keyvals]
def elt(eltType, numargs):
def fun(*args):
lenargs = len(args)
if lenargs != numargs:
raise ValueError(eltType + ' expects ' + str(numargs) +
' arguments, but given ' + str(lenargs))
if numargs == 0:
xs = []
elif len(args) == 1:
xs = args[0]
else:
xs = args
return {'t': eltType, 'c': xs}
return fun
# Constructors for block elements
Plain = elt('Plain', 1)
Para = elt('Para', 1)
CodeBlock = elt('CodeBlock', 2)
RawBlock = elt('RawBlock', 2)
BlockQuote = elt('BlockQuote', 1)
OrderedList = elt('OrderedList', 2)
BulletList = elt('BulletList', 1)
DefinitionList = elt('DefinitionList', 1)
Header = elt('Header', 3)
HorizontalRule = elt('HorizontalRule', 0)
Table = elt('Table', 5)
Div = elt('Div', 2)
Null = elt('Null', 0)
# Constructors for inline elements
Str = elt('Str', 1)
Emph = elt('Emph', 1)
Strong = elt('Strong', 1)
Strikeout = elt('Strikeout', 1)
Superscript = elt('Superscript', 1)
Subscript = elt('Subscript', 1)
SmallCaps = elt('SmallCaps', 1)
Quoted = elt('Quoted', 2)
Cite = elt('Cite', 2)
Code = elt('Code', 2)
Space = elt('Space', 0)
LineBreak = elt('LineBreak', 0)
Math = elt('Math', 2)
RawInline = elt('RawInline', 2)
Link = elt('Link', 3)
Image = elt('Image', 3)
Note = elt('Note', 1)
SoftBreak = elt('SoftBreak', 0)
Span = elt('Span', 2)
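# Example (added for illustration; not part of the original module): a minimal
# action usable with walk/toJSONFilter that rewrites emphasis as strong
# emphasis. Returning None for every other key leaves those nodes unchanged.
def _example_emph_to_strong(key, value, format, meta):
    if key == 'Emph':
        return Strong(value)

# Typical use in a standalone filter script:
#   if __name__ == '__main__':
#       toJSONFilter(_example_emph_to_strong)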
| {
"content_hash": "38248a659d9065c975d35d65d1cf8d20",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 81,
"avg_line_length": 28.735915492957748,
"alnum_prop": 0.6040926357064085,
"repo_name": "lancezlin/ml_template_py",
"id": "0d9152d908168954b7fc3d3c212cbb319b4001db",
"size": "8261",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/pandocfilters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "326933"
},
{
"name": "C++",
"bytes": "14430"
},
{
"name": "CSS",
"bytes": "7806"
},
{
"name": "FORTRAN",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "596861"
},
{
"name": "JavaScript",
"bytes": "4020233"
},
{
"name": "Jupyter Notebook",
"bytes": "517957"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "41191064"
},
{
"name": "Shell",
"bytes": "3373"
},
{
"name": "Smarty",
"bytes": "26298"
}
],
"symlink_target": ""
} |
from copy import deepcopy
from threading import Lock, Event
from miniworld.Config import config
from miniworld.concurrency.ExceptionStopThread import ExceptionStopThread
from miniworld.errors import Unsupported
from miniworld.log import log
from miniworld.model.emulation.nodes.virtual.CentralNode import is_central_node_interface
from miniworld.model.network.backends import NetworkBackends
from miniworld.model.network.interface.Interface import HubWiFi
from miniworld.model.singletons.Singletons import singletons
from miniworld.util import ConcurrencyUtil
__author__ = 'Nils Schmidt'
TIME_NODE_STATUS_REFRESH = 5
# TODO: RENAME TO NODES?
# TODO: REFACTOR!
class NodeStarter:
"""
Starts the emulation nodes together with all their subcomponents: qemu, vde_switch
Attributes
----------
node_ids : list<int>
List of node IDs to start.
nodes_running : list<Node>
nodes : list<EmulationNode>
List of started nodes.
network_backend_name : str
event_nodes_started : Event
lock : Lock
"""
def __init__(self, node_ids, network_backend_name):
self.node_ids = node_ids
self.nodes_running = []
self.nodes = []
self.network_backend_name = network_backend_name
self.event_nodes_started = Event()
self.lock = Lock()
self.thread_check_nodes_started = None
#################################################
# Thread methods
#################################################
def print_overall_node_status(self):
""" Print the nodes not ready yet """
while not self.event_nodes_started.is_set():
nodes_not_ready = self.nodes_not_ready()
if nodes_not_ready:
log.info("waiting for nodes: %s ...", ', '.join(map(str, nodes_not_ready)))
self.event_nodes_started.wait(TIME_NODE_STATUS_REFRESH)
else:
break
# TODO: #51: supply node - interface - mapping
# TODO: #82: DOC
def start_nodes(self,
network_backend,
# node options
path_qemu_base_image, stringio_post_boot_script, interfaces=None,
# start options
parallel=False,
):
"""
Start the nodes (a)synchronously.
Parameters
----------
path_qemu_base_image: str
stringio_post_boot_script: StringIO, not file!
If `parallel` each thread gets a copy!
parallel: bool, optional (default is False)
Use threads to start the nodes concurrently.
interfaces: list<str>
NOTE: Influences the order of the network devices in the virtual machine!
network_backend
Returns
-------
list<EmulationNode>, ManagementNode
"""
if not self.node_ids:
log.info("there are no nodes to start!")
return [], None
self.assert_only_one_wifibridge_interface(interfaces)
# keep track of started nodes and print the missing ones each time unit ...
self.thread_check_nodes_started = ExceptionStopThread.run_fun_threaded_n_log_exception(target=self.print_overall_node_status, tkwargs=dict(name="Nodes Start Progress"))
self.thread_check_nodes_started.daemon = True
self.thread_check_nodes_started.start()
# NOTE: use same for sequential and parallel execution!
stringio_post_boot_script.seek(0)
# deepcopy StringIO (fails for file!)
# NOTE: copying the :py:class:`.NetworkBackend` did not work, therefore we create a new copy each time
try:
# prepare arguments
# NOTE: the creation of the network backend may raise an exception, therefore it's inside the try statement!
args = []
for i in self.node_ids:
args.append((i,
path_qemu_base_image,
deepcopy(stringio_post_boot_script)
)
)
# init events for first display
singletons.event_system.init_events_for_node(i)
# wait until all nodes have been started
with ConcurrencyUtil.node_start_parallel() as executor:
for node in executor.map(self._start_node, args):
self.nodes.append(node)
# TODO:
# for arg in args:
# future_list.append( executor.submit(self._start_node, arg) )
#
# # do not block the main thread too long -> enables listening to ctrl-c
# while 1:
#
# for f in future_list:
# if not f.done():
# sleep(1)
# break
# else:
# # raises the threads exception
# self.nodes.append(f.result())
# break
log.info("all qemu instances started ...")
# NOTE: create management switch after all nodes exist!
management_node = self.start_management_node()
return self.nodes, management_node
finally:
# stop thread
self.event_nodes_started.set()
self.thread_check_nodes_started.join()
@staticmethod
def assert_only_one_wifibridge_interface(interfaces):
if len(list(filter(lambda x: is_central_node_interface(x), interfaces))) > 1:
raise Unsupported("Multiple '%s' are not supported at the moment!" % HubWiFi)
# TODO: #82: DOC, maybe singleton ref?
def start_management_node(self):
"""
Start the management switch and connect all other nodes to it.
Also store a reference in the :py:class:`.NetworkManager`.
Returns
-------
ManagementNode
"""
network_backend_bootstrapper = NetworkBackends.get_current_network_backend_bootstrapper()
if config.is_management_switch_enabled():
# late import needed
from miniworld.management.network.manager import NetworkManager
log.info("creating management node/switch ...")
if network_backend_bootstrapper.management_node_type is None:
log.info("Network Backend has no management node")
return None
management_node = network_backend_bootstrapper.management_node_type(network_backend_bootstrapper)
management_node.start(switch=True, bridge_dev_name=config.get_bridge_tap_name())
for node in self.nodes:
management_node.connect_to_emu_node(singletons.network_backend, node)
NetworkManager.management_node = management_node
return management_node
def _start_node(self, *args):
"""
Start a node.
Returns
-------
EmulationNode
"""
args = args[0]
# TODO: #54,#55
node = NetworkBackends.get_network_backend_bootstrapper_for_string(self.network_backend_name).emulation_node_type.factory(*args[:1])
node.start(args[1], flo_post_boot_script=args[2])
with self.lock:
# keep track of started nodes
self.nodes_running.append(node)
return node
def nodes_not_ready(self):
"""
Get all nodes which have not started yet.
Remembers the last started node id.
Returns
-------
set<Node>
"""
all_node_ids = set(self.node_ids)
nodes_remaining = all_node_ids.difference(set(map(lambda node: node.id, self.nodes_running)))
# all nodes started :)
if not nodes_remaining:
# remember last node id
self.last_id = self.node_ids[-1]
return set()
return nodes_remaining
| {
"content_hash": "41872a0121d6ed4b3c5d956b3d2863bc",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 176,
"avg_line_length": 34.856521739130436,
"alnum_prop": 0.5724086316577274,
"repo_name": "miniworld-project/miniworld_core",
"id": "3f0bc55b17767cc9a7685b6812d19525b05ca5e4",
"size": "8017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "miniworld/management/emulation/NodeStarter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "696934"
},
{
"name": "Shell",
"bytes": "1770"
}
],
"symlink_target": ""
} |
import random
def typoglycemia(sentence):
transformed = []
for word in sentence.split():
if len(word) > 4:
head, middle, last = word[0], list(word[1:-1]), word[-1]
random.shuffle(middle)
word = head + ''.join(middle) + last
transformed.append(word)
return ' '.join(transformed)
if __name__ == '__main__':
sentence = "I couldn't believe that I could actually understand what I was reading : the phenomenal power of the human mind ."
print(typoglycemia(sentence))
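# Illustrative note (not in the original script): only words longer than four
# characters are scrambled, and the first and last letters always stay put, so
# "believe" may come out as e.g. "bleviee" while "that" is left untouched.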
| {
"content_hash": "f4262d6c1b3837a8bf6ec3a3b91a78db",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 130,
"avg_line_length": 38.214285714285715,
"alnum_prop": 0.6130841121495327,
"repo_name": "knuu/nlp100",
"id": "04d3ebd915f48e48b4d38bf956dd81710d6556c5",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chap01/09.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26034"
}
],
"symlink_target": ""
} |