from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from collections import Counter
import warnings
import numpy as np
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BaseDiscreteNB
from mipframework import Algorithm
from mipframework import AlgorithmResult
from mipframework import TabularDataResource
from mipframework.funclib.crossvalidation import kfold_split_design_matrices
from mipframework.funclib.crossvalidation import AdditiveMulticlassROCCurve
from mipframework.funclib.crossvalidation import AdditiveMulticlassClassificationReport
from mipframework.highcharts.user_defined import MultilabelConfisionMatrix
from mipframework.highcharts.user_defined import MulticlassROCCurve
class NaiveBayes(Algorithm):
def __init__(self, cli_args):
super(NaiveBayes, self).__init__(__file__, cli_args, intercept=False)
def local_init(self):
data = self.data.full
y, X = data[self.parameters.y], data[self.parameters.x]
categ_names = [k for k, v in self.metadata.is_categorical.items() if v == 1]
categ_names.remove(self.parameters.y[0])
numer_names = [k for k, v in self.metadata.is_categorical.items() if v == 0]
X_cat = np.array(X[categ_names]) if categ_names else None
X_num = np.array(X[numer_names]) if numer_names else None
if X_num is not None and X_cat is not None:
xtypes = "both"
elif X_num is not None:
xtypes = "numerical"
elif X_cat is not None:
xtypes = "categorical"
y = np.array(y)
n_splits = int(self.parameters.k)
matrices_to_split = [y]
if X_num is not None:
matrices_to_split.append(X_num)
if X_cat is not None:
matrices_to_split.append(X_cat)
train_sets, test_sets = kfold_split_design_matrices(
n_splits, *matrices_to_split
)
models = [
MixedAdditiveNB(float(self.parameters.alpha)) for _ in range(n_splits)
]
if xtypes == "numerical":
[m.fit(yt, X_num=Xt) for m, (yt, Xt) in zip(models, train_sets)]
elif xtypes == "categorical":
[m.fit(yt, X_cat=Xt) for m, (yt, Xt) in zip(models, train_sets)]
elif xtypes == "both":
[m.fit(yt, Xnt, Xct) for m, (yt, Xnt, Xct) in zip(models, train_sets)]
self.store(train_sets=train_sets)
self.store(test_sets=test_sets)
self.store(y=y)
self.store(xtypes=xtypes)
self.push_and_agree(n_splits=n_splits)
for k in range(n_splits):
self.push_and_add(**{"model" + str(k): models[k]})
def global_init(self):
n_splits = self.fetch("n_splits")
models = [self.fetch("model" + str(k)) for k in range(n_splits)]
if models[0].gnb:
classes = models[0].gnb.classes_
else:
classes = models[0].cnb.classes_
self.store(classes=classes)
for k in range(n_splits):
self.push_and_add(**{"model" + str(k): models[k]})
def local_final(self):
n_splits = int(self.parameters.k)
y = self.load("y")
n_obs = len(y)
test_sets = self.load("test_sets")
xtypes = self.load("xtypes")
models = [self.fetch("model" + str(k)) for k in range(n_splits)]
classes = models[0].classes_
n_classes = len(classes)
if xtypes == "numerical":
y_preds = [m.predict(X_num=Xt) for m, (_, Xt) in zip(models, test_sets)]
elif xtypes == "categorical":
y_preds = [m.predict(X_cat=Xt) for m, (_, Xt) in zip(models, test_sets)]
else:
y_preds = [
m.predict(Xnt, Xct) for m, (_, Xnt, Xct) in zip(models, test_sets)
]
y_pred = np.array(y).flatten()
idx = 0
for yp in y_preds:
y_pred[idx : idx + len(yp)] = yp
idx += len(yp)
if xtypes == "numerical":
y_pred_proba_per_class_kfold = [
m.predict_proba(X_num=Xt) for m, (_, Xt) in zip(models, test_sets)
]
elif xtypes == "categorical":
y_pred_proba_per_class_kfold = [
m.predict_proba(X_cat=Xt) for m, (_, Xt) in zip(models, test_sets)
]
else:
y_pred_proba_per_class_kfold = [
m.predict_proba(Xnt, Xct) for m, (_, Xnt, Xct) in zip(models, test_sets)
]
y_pred_proba_per_class = np.empty((n_obs, n_classes))
idx = 0
for yp in y_pred_proba_per_class_kfold:
y_pred_proba_per_class[idx : idx + len(yp)] = yp
idx += len(yp)
confusion_matrix = metrics.confusion_matrix(y, y_pred)
accuracy = metrics.accuracy_score(y, y_pred)
roc_curve = AdditiveMulticlassROCCurve(
y_true=y, y_pred_proba_per_class=y_pred_proba_per_class, classes=classes
)
classification_report = AdditiveMulticlassClassificationReport(
y_true=y, y_pred=y_pred, classes=classes
)
self.push_and_add(n_obs=n_obs)
self.push_and_add(confusion_matrix=confusion_matrix)
self.push_and_add(accuracy=Mediant(accuracy * n_obs, n_obs))
self.push_and_add(roc_curve=roc_curve)
self.push_and_add(classification_report=classification_report)
def global_final(self):
classes = self.load("classes")
confusion_matrix = self.fetch("confusion_matrix")
accuracy = self.fetch("accuracy").get_value()
n_obs = self.fetch("n_obs")
accuracy_ci = 1.96 * np.sqrt((accuracy * (1 - accuracy)) / n_obs)
roc_curves = self.fetch("roc_curve").get_curves()
(
precision,
recall,
specificity,
f_score,
precision_avgs,
recall_avgs,
specificity_avgs,
f_score_avgs,
) = self.fetch("classification_report").get_values()
precision = precision.tolist()
recall = recall.tolist()
specificity = specificity.tolist()
f_score = f_score.tolist()
cm_chart = MultilabelConfisionMatrix(
"Confusion Matrix", confusion_matrix, classes.tolist()
)
aucs = []
ginis = []
for fpr, tpr in roc_curves:
auc = np.trapz(tpr, fpr)
gini = 2 * auc - 1
aucs.append(auc)
ginis.append(gini)
roc_chart = MulticlassROCCurve("ROC", roc_curves, classes)
accuracy_report = TabularDataResource(
fields=["Statistic", "Value"],
data=list(
zip(
*[
["Accuracy", "Lower c.i.", "Upper c.i."],
[accuracy, accuracy - accuracy_ci, accuracy + accuracy_ci],
]
)
),
title="Overall classification statistics",
)
clf_report = TabularDataResource(
fields=["", "Precision", "Recall", "Specificity", "F score"],
data=list(
zip(
*[
classes.tolist() + ["micro avg", "macro avg", "weighted avg"],
precision + precision_avgs,
recall + recall_avgs,
specificity + specificity_avgs,
f_score + f_score_avgs,
]
)
),
title="Classification Report",
)
roc_report = TabularDataResource(
fields=["Class", "AUC", "Gini coefficient"],
data=list(zip(*[classes.tolist(), aucs, ginis])),
title="ROC report",
)
self.result = AlgorithmResult(
raw_data={
"accuracy": accuracy,
"confusion_matrix": confusion_matrix.tolist(),
"roc_curve": roc_curves,
"classes": classes.tolist(),
"precision": precision,
"recall": recall,
"f_score": f_score,
},
tables=[clf_report, roc_report, accuracy_report],
highcharts=[cm_chart, roc_chart],
)
class MixedAdditiveNB(object):
def __init__(self, alpha=1.0):
self.alpha = alpha
self.gnb = None
self.cnb = None
@property
def classes_(self):
if self.gnb:
return self.gnb.classes_
elif self.cnb:
return self.cnb.classes_
else:
raise ValueError("model hasn't been trained yet")
def fit(self, y, X_num=None, X_cat=None):
if X_num is not None:
self.gnb = AdditiveGaussianNB()
self.gnb.fit(X_num, y)
if X_cat is not None:
self.cnb = AdditiveCategoricalNB(alpha=self.alpha)
self.cnb.fit(X_cat, y)
def predict(self, X_num=None, X_cat=None):
if X_num is not None and X_cat is not None:
jll = (
self.gnb.predict_log_proba(X_num)
+ self.cnb.predict_log_proba(X_cat)
- self.gnb.class_log_prior_
)
return np.array([self.gnb.classes_[i] for i in jll.argmax(axis=1)])
elif X_num is not None:
return self.gnb.predict(X_num)
elif X_cat is not None:
return self.cnb.predict(X_cat)
def predict_proba(self, X_num=None, X_cat=None):
if X_num is not None and X_cat is not None:
probs_num = self.gnb.predict_proba(X_num)
probs_cat = self.cnb.predict_proba(X_cat)
normalizations = (probs_num * probs_cat).sum(axis=1)[:, np.newaxis]
return probs_num * probs_cat / normalizations
elif X_num is not None:
return self.gnb.predict_proba(X_num)
elif X_cat is not None:
return self.cnb.predict_proba(X_cat)
def __add__(self, other):
result = MixedAdditiveNB()
if self.gnb and other.gnb:
result.gnb = self.gnb + other.gnb
if self.cnb and other.cnb:
result.alpha = self.alpha
result.cnb = self.cnb + other.cnb
return result
# def __repr__(self):
# return repr({"gnb": self.gnb.__dict__, "cnb": self.cnb.__dict__})
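# Illustrative sketch (not part of the algorithm; hypothetical helper name,
# synthetic data): how two locally trained MixedAdditiveNB models are merged
# into a global one with "+", mirroring what local_init/global_init do across
# worker nodes. It assumes the scikit-learn version this module targets.
def _sketch_mixed_nb_merge():
    rng = np.random.RandomState(0)
    y = np.tile(["A", "B"], 50)                          # both classes on both "nodes"
    X_num = rng.normal(size=(100, 2))                    # numerical features
    X_cat = rng.choice(["x", "y", "z"], size=(100, 1))   # one categorical feature
    # Each node fits only on its own half of the data.
    node1, node2 = MixedAdditiveNB(alpha=1.0), MixedAdditiveNB(alpha=1.0)
    node1.fit(y[:50], X_num=X_num[:50], X_cat=X_cat[:50])
    node2.fit(y[50:], X_num=X_num[50:], X_cat=X_cat[50:])
    # The aggregator only ever sees the additive sufficient statistics.
    global_model = node1 + node2
    return global_model.predict(X_num=X_num, X_cat=X_cat)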
class AdditiveCategoricalNB(BaseDiscreteNB):
def __init__(self, alpha=1.0):
self.alpha = alpha
self._class_log_prior_ = np.array([])
self._feature_log_prob_ = []
def fit(self, X, y):
self.n_obs_, self.n_features_ = X.shape
self.classes_, self.class_count_ = np.unique(y, return_counts=True)
self.n_classes_ = len(self.classes_)
self.categories_, self.category_per_feat_count_ = list(
zip(*[np.unique(col, return_counts=True) for col in X.T])
)
self.n_categories_ = np.array([len(c) for c in self.categories_])
self.category_count_ = [
np.empty((self.n_classes_, self.n_categories_[f]))
            for f in range(self.n_features_)
]
for ci, c in enumerate(self.classes_):
X_where_x = X[np.where(y == c)[0]]
for fi, feature in enumerate(X_where_x.T):
counter = Counter(feature)
self.category_count_[fi][ci, :] = np.array(
[counter[cat] for cat in self.categories_[fi]]
)
def __add__(self, other):
def sum_elementwise(x, y):
return [xi + yi for xi, yi in zip(x, y)]
if self.alpha != other.alpha:
raise ValueError("alphas do not agree")
result = AdditiveCategoricalNB(alpha=self.alpha)
result.n_obs_ = self.n_obs_ + other.n_obs_
if self.n_features_ != other.n_features_:
raise ValueError("n_features_ do not agree")
result.n_features_ = self.n_features_
        if (self.classes_ != other.classes_).any():
raise ValueError("classes_ do not agree")
result.classes_ = self.classes_
result.class_count_ = self.class_count_ + other.class_count_
if self.n_classes_ != other.n_classes_:
raise ValueError("n_classes_ do not agree")
result.n_classes_ = self.n_classes_
result.category_per_feat_count_ = sum_elementwise(
self.category_per_feat_count_, other.category_per_feat_count_
)
if not all(
[(c1 == c2).all() for c1, c2 in zip(self.categories_, other.categories_)]
):
raise ValueError("categories_ do not agree")
result.categories_ = self.categories_
result.n_categories_ = sum_elementwise(self.n_categories_, other.n_categories_)
result.category_count_ = sum_elementwise(
self.category_count_, other.category_count_
)
return result
@property
def class_log_prior_(self):
if not self._class_log_prior_.any():
with warnings.catch_warnings():
# silence the warning when count is 0 because class was not yet
# observed
warnings.simplefilter("ignore", RuntimeWarning)
log_class_count = np.log(self.class_count_)
self._class_log_prior_ = log_class_count - np.log(self.class_count_.sum())
return self._class_log_prior_
@property
def feature_log_prob_(self):
if not self._feature_log_prob_:
feature_log_prob = []
for i in range(self.n_features_):
smoothed_cat_count = self.category_count_[i] + self.alpha
smoothed_class_count = smoothed_cat_count.sum(axis=1)
feature_log_prob.append(
np.log(smoothed_cat_count)
- np.log(smoothed_class_count.reshape(-1, 1))
)
self._feature_log_prob_ = feature_log_prob
return self._feature_log_prob_
def _joint_log_likelihood(self, X):
if not X.shape[1] == self.n_features_:
raise ValueError(
"Expected input with %d features, got %d instead"
% (self.n_features_, X.shape[1])
)
jll = np.zeros((X.shape[0], self.class_count_.shape[0]))
for i in range(self.n_features_):
categories = X[:, i]
indices = [np.where(self.categories_[i] == cat)[0][0] for cat in categories]
jll += self.feature_log_prob_[i][:, indices].T
total_ll = jll + self.class_log_prior_
return total_ll
def __eq__(self, other):
raise NotImplementedError
class AdditiveGaussianNB(GaussianNB):
def __init__(self, priors=None, var_smoothing=1e-9):
self._class_log_prior_ = np.array([])
super(AdditiveGaussianNB, self).__init__(priors, var_smoothing)
def fit(self, X, y):
self.n_obs_, self.n_feats_ = X.shape
super(AdditiveGaussianNB, self).fit(X, y)
@property
def class_log_prior_(self):
if not self._class_log_prior_.any():
with warnings.catch_warnings():
# silence the warning when count is 0 because class was not yet
# observed
warnings.simplefilter("ignore", RuntimeWarning)
log_class_count = np.log(self.class_count_)
self._class_log_prior_ = log_class_count - np.log(self.class_count_.sum())
return self._class_log_prior_
def __add__(self, other):
if self.var_smoothing != other.var_smoothing:
raise ValueError("var_smoothing values do not agree")
if self.priors != other.priors:
raise ValueError("priors do not agree")
        if (self.classes_ != other.classes_).any():
raise ValueError("classes_ do not agree")
class_count_1 = self.class_count_[:, np.newaxis]
class_count_2 = other.class_count_[:, np.newaxis]
n_obs_total = self.n_obs_ + other.n_obs_
class_count_total = class_count_1 + class_count_2
theta_total = (
class_count_1 * self.theta_ + class_count_2 * other.theta_
) / class_count_total
self.sigma_[:, :] -= self.epsilon_
other.sigma_[:, :] -= other.epsilon_
epsilon_total = max(self.epsilon_, other.epsilon_)
ssd_1 = class_count_1 * self.sigma_
ssd_2 = class_count_2 * other.sigma_
total_ssd = (
ssd_1
+ ssd_2
+ (class_count_1 * class_count_2 / class_count_total)
* (self.theta_ - other.theta_) ** 2
)
sigma_total = total_ssd / class_count_total
sigma_total += epsilon_total
result = AdditiveGaussianNB(self.priors, self.var_smoothing)
result.n_obs_ = n_obs_total
result.classes_ = self.classes_
result.sigma_ = sigma_total
result.theta_ = theta_total
result.epsilon_ = epsilon_total
result.class_count_ = class_count_total.flatten()
result.class_prior_ = result.class_count_ / n_obs_total
return result
def __eq__(self, other):
if self.var_smoothing != other.var_smoothing:
return False
if self.priors != other.priors:
return False
        if (self.classes_ != other.classes_).any():
            return False
        if not np.isclose(self.theta_, other.theta_).all():
            return False
        if not np.isclose(self.sigma_, other.sigma_).all():
            return False
        if (self.class_count_ != other.class_count_).any():
            return False
        if (self.class_prior_ != other.class_prior_).any():
            return False
if self.n_obs_ != other.n_obs_:
return False
if self.n_feats_ != other.n_feats_:
return False
return True
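# Illustrative sketch (not part of the algorithm; hypothetical helper name,
# synthetic data, assuming the scikit-learn version this module targets): "+"
# above implements the standard pooled mean/variance combination, so merging
# fits on two disjoint splits should reproduce a single fit on the full data,
# up to the tiny var_smoothing term.
def _sketch_gaussian_merge_consistency():
    rng = np.random.RandomState(1)
    X = rng.normal(size=(200, 3))
    y = np.tile([0, 1], 100)          # both classes present in both halves
    left, right, full = AdditiveGaussianNB(), AdditiveGaussianNB(), AdditiveGaussianNB()
    left.fit(X[:100], y[:100])
    right.fit(X[100:], y[100:])
    full.fit(X, y)
    merged = left + right
    assert np.allclose(merged.theta_, full.theta_)
    assert np.allclose(merged.sigma_, full.sigma_, atol=1e-6)
    return merged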
class Mediant(object):
def __init__(self, num, den):
self.num = num
self.den = den
def __add__(self, other):
return Mediant(self.num + other.num, self.den + other.den)
def __repr__(self):
return str(self.get_value())
def get_value(self):
return float(self.num) / float(self.den)
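# Illustrative sketch (not part of the algorithm; made-up numbers): Mediant lets
# the aggregator combine per-node accuracies into one weighted average, because
# (num1 + num2) / (den1 + den2) weights each node by its number of test rows.
def _sketch_mediant_accuracy():
    node1 = Mediant(0.90 * 80, 80)       # 90% accuracy on 80 rows
    node2 = Mediant(0.60 * 20, 20)       # 60% accuracy on 20 rows
    return (node1 + node2).get_value()   # (72 + 12) / 100 == 0.84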
if __name__ == "__main__":
import time
from mipframework import create_runner
algorithm_args = [
"-x",
# "lefthippocampus,righthippocampus,leftaccumbensarea",
# "gender,apoe4,agegroup",
"lefthippocampus,righthippocampus,leftaccumbensarea,gender,apoe4,agegroup",
"-y",
"alzheimerbroadcategory",
"-alpha",
"1",
"-k",
"10",
"-pathology",
"dementia",
"-dataset",
"adni",
"-filter",
"",
]
runner = create_runner(NaiveBayes, algorithm_args=algorithm_args, num_workers=3,)
start = time.time()
runner.run()
end = time.time()
# print("Completed in ", end - start)
# Copyright (c) 2013 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from webob import exc
from ironicclient import exc as ironic_exc
from nova.api.openstack.compute import baremetal_nodes \
as b_nodes_v21
from nova.api.openstack.compute.legacy_v2.contrib import baremetal_nodes \
as b_nodes_v2
from nova.api.openstack import extensions
from nova import context
from nova import test
from nova.tests.unit.virt.ironic import utils as ironic_utils
class FakeRequest(object):
def __init__(self, context):
self.environ = {"nova.context": context}
def fake_node(**updates):
node = {
'id': 1,
'service_host': "host",
'cpus': 8,
'memory_mb': 8192,
'local_gb': 128,
'pm_address': "10.1.2.3",
'pm_user': "pm_user",
'pm_password': "pm_pass",
'terminal_port': 8000,
'interfaces': [],
'instance_uuid': 'fake-instance-uuid',
}
if updates:
node.update(updates)
return node
def fake_node_ext_status(**updates):
node = fake_node(uuid='fake-uuid',
task_state='fake-task-state',
updated_at='fake-updated-at',
pxe_config_path='fake-pxe-config-path')
if updates:
node.update(updates)
return node
FAKE_IRONIC_CLIENT = ironic_utils.FakeClient()
@mock.patch.object(b_nodes_v21, '_get_ironic_client',
lambda *_: FAKE_IRONIC_CLIENT)
class BareMetalNodesTestV21(test.NoDBTestCase):
mod = b_nodes_v21
def setUp(self):
super(BareMetalNodesTestV21, self).setUp()
self._setup()
self.context = context.get_admin_context()
self.request = FakeRequest(self.context)
def _setup(self):
self.controller = b_nodes_v21.BareMetalNodeController()
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list')
def test_index_ironic(self, mock_list):
properties = {'cpus': 2, 'memory_mb': 1024, 'local_gb': 20}
node = ironic_utils.get_test_node(properties=properties)
mock_list.return_value = [node]
res_dict = self.controller.index(self.request)
expected_output = {'nodes':
[{'memory_mb': properties['memory_mb'],
'host': 'IRONIC MANAGED',
'disk_gb': properties['local_gb'],
'interfaces': [],
'task_state': None,
'id': node.uuid,
'cpus': properties['cpus']}]}
self.assertEqual(expected_output, res_dict)
mock_list.assert_called_once_with(detail=True)
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list')
def test_index_ironic_missing_properties(self, mock_list):
properties = {'cpus': 2}
node = ironic_utils.get_test_node(properties=properties)
mock_list.return_value = [node]
res_dict = self.controller.index(self.request)
expected_output = {'nodes':
[{'memory_mb': 0,
'host': 'IRONIC MANAGED',
'disk_gb': 0,
'interfaces': [],
'task_state': None,
'id': node.uuid,
'cpus': properties['cpus']}]}
self.assertEqual(expected_output, res_dict)
mock_list.assert_called_once_with(detail=True)
def test_index_ironic_not_implemented(self):
with mock.patch.object(self.mod, 'ironic_client', None):
self.assertRaises(exc.HTTPNotImplemented,
self.controller.index,
self.request)
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
def test_show_ironic(self, mock_get, mock_list_ports):
properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
node = ironic_utils.get_test_node(properties=properties)
port = ironic_utils.get_test_port()
mock_get.return_value = node
mock_list_ports.return_value = [port]
res_dict = self.controller.show(self.request, node.uuid)
expected_output = {'node':
{'memory_mb': properties['memory_mb'],
'instance_uuid': None,
'host': 'IRONIC MANAGED',
'disk_gb': properties['local_gb'],
'interfaces': [{'address': port.address}],
'task_state': None,
'id': node.uuid,
'cpus': properties['cpus']}}
self.assertEqual(expected_output, res_dict)
mock_get.assert_called_once_with(node.uuid)
mock_list_ports.assert_called_once_with(node.uuid)
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
def test_show_ironic_no_properties(self, mock_get, mock_list_ports):
properties = {}
node = ironic_utils.get_test_node(properties=properties)
port = ironic_utils.get_test_port()
mock_get.return_value = node
mock_list_ports.return_value = [port]
res_dict = self.controller.show(self.request, node.uuid)
expected_output = {'node':
{'memory_mb': 0,
'instance_uuid': None,
'host': 'IRONIC MANAGED',
'disk_gb': 0,
'interfaces': [{'address': port.address}],
'task_state': None,
'id': node.uuid,
'cpus': 0}}
self.assertEqual(expected_output, res_dict)
mock_get.assert_called_once_with(node.uuid)
mock_list_ports.assert_called_once_with(node.uuid)
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
def test_show_ironic_no_interfaces(self, mock_get, mock_list_ports):
properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
node = ironic_utils.get_test_node(properties=properties)
mock_get.return_value = node
mock_list_ports.return_value = []
res_dict = self.controller.show(self.request, node.uuid)
self.assertEqual([], res_dict['node']['interfaces'])
mock_get.assert_called_once_with(node.uuid)
mock_list_ports.assert_called_once_with(node.uuid)
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get',
side_effect=ironic_exc.NotFound())
def test_show_ironic_node_not_found(self, mock_get):
error = self.assertRaises(exc.HTTPNotFound, self.controller.show,
self.request, 'fake-uuid')
self.assertIn('fake-uuid', six.text_type(error))
def test_show_ironic_not_implemented(self):
with mock.patch.object(self.mod, 'ironic_client', None):
properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
node = ironic_utils.get_test_node(properties=properties)
self.assertRaises(exc.HTTPNotImplemented, self.controller.show,
self.request, node.uuid)
def test_create_ironic_not_supported(self):
self.assertRaises(exc.HTTPBadRequest,
self.controller.create,
self.request, {'node': object()})
def test_delete_ironic_not_supported(self):
self.assertRaises(exc.HTTPBadRequest,
self.controller.delete,
self.request, 'fake-id')
def test_add_interface_ironic_not_supported(self):
self.assertRaises(exc.HTTPBadRequest,
self.controller._add_interface,
self.request, 'fake-id', 'fake-body')
def test_remove_interface_ironic_not_supported(self):
self.assertRaises(exc.HTTPBadRequest,
self.controller._remove_interface,
self.request, 'fake-id', 'fake-body')
@mock.patch.object(b_nodes_v2, '_get_ironic_client',
lambda *_: FAKE_IRONIC_CLIENT)
class BareMetalNodesTestV2(BareMetalNodesTestV21):
mod = b_nodes_v2
def _setup(self):
self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
self.controller = b_nodes_v2.BareMetalNodeController(self.ext_mgr)
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from oslo.config import cfg
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.common import constants as q_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc as q_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import external_net_db
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_gwmode_db
from neutron.db import l3_rpc_base
from neutron.db import portbindings_db
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.openstack.common.rpc import proxy
from neutron.plugins.common import constants as svc_constants
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.linuxbridge.common import constants
from neutron.plugins.linuxbridge.db import l2network_db_v2 as db
LOG = logging.getLogger(__name__)
class LinuxBridgeRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
l3_rpc_base.L3RpcCallbackMixin,
sg_db_rpc.SecurityGroupServerRpcCallbackMixin
):
# history
# 1.1 Support Security Group RPC
RPC_API_VERSION = '1.1'
# Device names start with "tap"
TAP_PREFIX_LEN = 3
def create_rpc_dispatcher(self):
'''Get the rpc dispatcher for this manager.
If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
'''
return q_rpc.PluginRpcDispatcher([self,
agents_db.AgentExtRpcCallback()])
@classmethod
def get_port_from_device(cls, device):
port = db.get_port_from_device(device[cls.TAP_PREFIX_LEN:])
if port:
port['device'] = device
return port
def get_device_details(self, rpc_context, **kwargs):
"""Agent requests device details."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s details requested from %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port = self.get_port_from_device(device)
if port:
binding = db.get_network_binding(db_api.get_session(),
port['network_id'])
(network_type,
segmentation_id) = constants.interpret_vlan_id(binding.vlan_id)
entry = {'device': device,
'network_type': network_type,
'physical_network': binding.physical_network,
'segmentation_id': segmentation_id,
'network_id': port['network_id'],
'port_id': port['id'],
'admin_state_up': port['admin_state_up']}
if cfg.CONF.AGENT.rpc_support_old_agents:
entry['vlan_id'] = binding.vlan_id
new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up']
else q_const.PORT_STATUS_DOWN)
if port['status'] != new_status:
db.set_port_status(port['id'], new_status)
else:
entry = {'device': device}
LOG.debug(_("%s can not be found in database"), device)
return entry
def update_device_down(self, rpc_context, **kwargs):
"""Device no longer exists on agent."""
# TODO(garyk) - live migration and port status
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
port = self.get_port_from_device(device)
LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
plugin = manager.NeutronManager.get_plugin()
if port:
entry = {'device': device,
'exists': True}
if (host and not
plugin.get_port_host(rpc_context, port['id']) == host):
LOG.debug(_("Device %(device)s not bound to the"
" agent host %(host)s"),
{'device': device, 'host': host})
elif port['status'] != q_const.PORT_STATUS_DOWN:
# Set port status to DOWN
db.set_port_status(port['id'], q_const.PORT_STATUS_DOWN)
else:
entry = {'device': device,
'exists': False}
LOG.debug(_("%s can not be found in database"), device)
return entry
def update_device_up(self, rpc_context, **kwargs):
"""Device is up on agent."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
port = self.get_port_from_device(device)
LOG.debug(_("Device %(device)s up on %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
plugin = manager.NeutronManager.get_plugin()
if port:
if (host and
not plugin.get_port_host(rpc_context, port['id']) == host):
LOG.debug(_("Device %(device)s not bound to the"
" agent host %(host)s"),
{'device': device, 'host': host})
return
elif port['status'] != q_const.PORT_STATUS_ACTIVE:
db.set_port_status(port['id'],
q_const.PORT_STATUS_ACTIVE)
else:
LOG.debug(_("%s can not be found in database"), device)
class AgentNotifierApi(proxy.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin):
'''Agent side of the linux bridge rpc API.
API version history:
1.0 - Initial version.
1.1 - Added get_active_networks_info, create_dhcp_port,
and update_dhcp_port methods.
'''
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic = topic
self.topic_network_delete = topics.get_topic_name(topic,
topics.NETWORK,
topics.DELETE)
self.topic_port_update = topics.get_topic_name(topic,
topics.PORT,
topics.UPDATE)
def network_delete(self, context, network_id):
self.fanout_cast(context,
self.make_msg('network_delete',
network_id=network_id),
topic=self.topic_network_delete)
def port_update(self, context, port, physical_network, vlan_id):
network_type, segmentation_id = constants.interpret_vlan_id(vlan_id)
kwargs = {'port': port,
'network_type': network_type,
'physical_network': physical_network,
'segmentation_id': segmentation_id}
if cfg.CONF.AGENT.rpc_support_old_agents:
kwargs['vlan_id'] = vlan_id
msg = self.make_msg('port_update', **kwargs)
self.fanout_cast(context, msg,
topic=self.topic_port_update)
class LinuxBridgePluginV2(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
extraroute_db.ExtraRoute_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
portbindings_db.PortBindingMixin):
"""Implement the Neutron abstractions using Linux bridging.
A new VLAN is created for each network. An agent is relied upon
to perform the actual Linux bridge configuration on each host.
The provider extension is also supported. As discussed in
https://bugs.launchpad.net/neutron/+bug/1023156, this class could
be simplified, and filtering on extended attributes could be
handled, by adding support for extended attributes to the
NeutronDbPluginV2 base class. When that occurs, this class should
be updated to take advantage of it.
    The port binding extension enables an external application to relay
    information to and from the plugin.
"""
# This attribute specifies whether the plugin supports or not
# bulk/pagination/sorting operations. Name mangling is used in
# order to ensure it is qualified by class
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
_supported_extension_aliases = ["provider", "external-net", "router",
"ext-gw-mode", "binding", "quotas",
"security-group", "agent", "extraroute",
"l3_agent_scheduler",
"dhcp_agent_scheduler"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_by_config(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
super(LinuxBridgePluginV2, self).__init__()
self.base_binding_dict = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_BRIDGE,
portbindings.VIF_DETAILS: {
# TODO(rkukura): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}}
self._parse_network_vlan_ranges()
db.sync_network_states(self.network_vlan_ranges)
self.tenant_network_type = cfg.CONF.VLANS.tenant_network_type
if self.tenant_network_type not in [svc_constants.TYPE_LOCAL,
svc_constants.TYPE_VLAN,
svc_constants.TYPE_NONE]:
LOG.error(_("Invalid tenant_network_type: %s. "
"Service terminated!"),
self.tenant_network_type)
sys.exit(1)
self._setup_rpc()
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
self.router_scheduler = importutils.import_object(
cfg.CONF.router_scheduler_driver
)
LOG.debug(_("Linux Bridge Plugin initialization complete"))
def _setup_rpc(self):
# RPC support
self.service_topics = {svc_constants.CORE: topics.PLUGIN,
svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
self.conn = rpc.create_connection(new=True)
self.callbacks = LinuxBridgeRpcCallbacks()
self.dispatcher = self.callbacks.create_rpc_dispatcher()
for svc_topic in self.service_topics.values():
self.conn.create_consumer(svc_topic, self.dispatcher, fanout=False)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
self.notifier = AgentNotifierApi(topics.AGENT)
self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = (
dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
)
self.agent_notifiers[q_const.AGENT_TYPE_L3] = (
l3_rpc_agent_api.L3AgentNotify
)
def _parse_network_vlan_ranges(self):
try:
self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
cfg.CONF.VLANS.network_vlan_ranges)
except Exception as ex:
LOG.error(_("%s. Agent terminated!"), ex)
sys.exit(1)
LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges)
def _add_network_vlan_range(self, physical_network, vlan_min, vlan_max):
self._add_network(physical_network)
self.network_vlan_ranges[physical_network].append((vlan_min, vlan_max))
def _add_network(self, physical_network):
if physical_network not in self.network_vlan_ranges:
self.network_vlan_ranges[physical_network] = []
def _extend_network_dict_provider(self, context, network):
binding = db.get_network_binding(context.session, network['id'])
if binding.vlan_id == constants.FLAT_VLAN_ID:
network[provider.NETWORK_TYPE] = svc_constants.TYPE_FLAT
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = None
elif binding.vlan_id == constants.LOCAL_VLAN_ID:
network[provider.NETWORK_TYPE] = svc_constants.TYPE_LOCAL
network[provider.PHYSICAL_NETWORK] = None
network[provider.SEGMENTATION_ID] = None
else:
network[provider.NETWORK_TYPE] = svc_constants.TYPE_VLAN
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = binding.vlan_id
def _process_provider_create(self, context, attrs):
network_type = attrs.get(provider.NETWORK_TYPE)
physical_network = attrs.get(provider.PHYSICAL_NETWORK)
segmentation_id = attrs.get(provider.SEGMENTATION_ID)
network_type_set = attributes.is_attr_set(network_type)
physical_network_set = attributes.is_attr_set(physical_network)
segmentation_id_set = attributes.is_attr_set(segmentation_id)
if not (network_type_set or physical_network_set or
segmentation_id_set):
return (None, None, None)
if not network_type_set:
msg = _("provider:network_type required")
raise n_exc.InvalidInput(error_message=msg)
elif network_type == svc_constants.TYPE_FLAT:
if segmentation_id_set:
msg = _("provider:segmentation_id specified for flat network")
raise n_exc.InvalidInput(error_message=msg)
else:
segmentation_id = constants.FLAT_VLAN_ID
elif network_type == svc_constants.TYPE_VLAN:
if not segmentation_id_set:
msg = _("provider:segmentation_id required")
raise n_exc.InvalidInput(error_message=msg)
if not utils.is_valid_vlan_tag(segmentation_id):
msg = (_("provider:segmentation_id out of range "
"(%(min_id)s through %(max_id)s)") %
{'min_id': q_const.MIN_VLAN_TAG,
'max_id': q_const.MAX_VLAN_TAG})
raise n_exc.InvalidInput(error_message=msg)
elif network_type == svc_constants.TYPE_LOCAL:
if physical_network_set:
msg = _("provider:physical_network specified for local "
"network")
raise n_exc.InvalidInput(error_message=msg)
else:
physical_network = None
if segmentation_id_set:
msg = _("provider:segmentation_id specified for local "
"network")
raise n_exc.InvalidInput(error_message=msg)
else:
segmentation_id = constants.LOCAL_VLAN_ID
else:
msg = _("provider:network_type %s not supported") % network_type
raise n_exc.InvalidInput(error_message=msg)
if network_type in [svc_constants.TYPE_VLAN, svc_constants.TYPE_FLAT]:
if physical_network_set:
if physical_network not in self.network_vlan_ranges:
msg = (_("Unknown provider:physical_network %s") %
physical_network)
raise n_exc.InvalidInput(error_message=msg)
elif 'default' in self.network_vlan_ranges:
physical_network = 'default'
else:
msg = _("provider:physical_network required")
raise n_exc.InvalidInput(error_message=msg)
return (network_type, physical_network, segmentation_id)
def create_network(self, context, network):
(network_type, physical_network,
vlan_id) = self._process_provider_create(context,
network['network'])
session = context.session
with session.begin(subtransactions=True):
            # set up default security groups
tenant_id = self._get_tenant_id_for_create(
context, network['network'])
self._ensure_default_security_group(context, tenant_id)
if not network_type:
# tenant network
network_type = self.tenant_network_type
if network_type == svc_constants.TYPE_NONE:
raise n_exc.TenantNetworksDisabled()
elif network_type == svc_constants.TYPE_VLAN:
physical_network, vlan_id = db.reserve_network(session)
else: # TYPE_LOCAL
vlan_id = constants.LOCAL_VLAN_ID
else:
# provider network
if network_type in [svc_constants.TYPE_VLAN,
svc_constants.TYPE_FLAT]:
db.reserve_specific_network(session, physical_network,
vlan_id)
# no reservation needed for TYPE_LOCAL
net = super(LinuxBridgePluginV2, self).create_network(context,
network)
db.add_network_binding(session, net['id'],
physical_network, vlan_id)
self._process_l3_create(context, net, network['network'])
self._extend_network_dict_provider(context, net)
# note - exception will rollback entire transaction
return net
def update_network(self, context, id, network):
provider._raise_if_updates_provider_attributes(network['network'])
session = context.session
with session.begin(subtransactions=True):
net = super(LinuxBridgePluginV2, self).update_network(context, id,
network)
self._process_l3_update(context, net, network['network'])
self._extend_network_dict_provider(context, net)
return net
def delete_network(self, context, id):
session = context.session
with session.begin(subtransactions=True):
binding = db.get_network_binding(session, id)
self._process_l3_delete(context, id)
super(LinuxBridgePluginV2, self).delete_network(context, id)
if binding.vlan_id != constants.LOCAL_VLAN_ID:
db.release_network(session, binding.physical_network,
binding.vlan_id, self.network_vlan_ranges)
# the network_binding record is deleted via cascade from
# the network record, so explicit removal is not necessary
self.notifier.network_delete(context, id)
def get_network(self, context, id, fields=None):
session = context.session
with session.begin(subtransactions=True):
net = super(LinuxBridgePluginV2, self).get_network(context,
id, None)
self._extend_network_dict_provider(context, net)
return self._fields(net, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
session = context.session
with session.begin(subtransactions=True):
nets = super(LinuxBridgePluginV2,
self).get_networks(context, filters, None, sorts,
limit, marker, page_reverse)
for net in nets:
self._extend_network_dict_provider(context, net)
return [self._fields(net, fields) for net in nets]
def create_port(self, context, port):
session = context.session
port_data = port['port']
with session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
# Set port status as 'DOWN'. This will be updated by agent
port['port']['status'] = q_const.PORT_STATUS_DOWN
port = super(LinuxBridgePluginV2,
self).create_port(context, port)
self._process_portbindings_create_and_update(context,
port_data,
port)
self._process_port_create_security_group(
context, port, sgids)
self.notify_security_groups_member_updated(context, port)
return port
def update_port(self, context, id, port):
original_port = self.get_port(context, id)
session = context.session
need_port_update_notify = False
with session.begin(subtransactions=True):
updated_port = super(LinuxBridgePluginV2, self).update_port(
context, id, port)
self._process_portbindings_create_and_update(context,
port['port'],
updated_port)
need_port_update_notify = self.update_security_group_on_port(
context, id, port, original_port, updated_port)
need_port_update_notify |= self.is_security_group_member_updated(
context, original_port, updated_port)
if original_port['admin_state_up'] != updated_port['admin_state_up']:
need_port_update_notify = True
if need_port_update_notify:
self._notify_port_updated(context, updated_port)
return updated_port
def delete_port(self, context, id, l3_port_check=True):
        # if needed, check to see if this is a port owned by
        # an l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
session = context.session
with session.begin(subtransactions=True):
self.disassociate_floatingips(context, id)
port = self.get_port(context, id)
self._delete_port_security_group_bindings(context, id)
super(LinuxBridgePluginV2, self).delete_port(context, id)
self.notify_security_groups_member_updated(context, port)
def _notify_port_updated(self, context, port):
binding = db.get_network_binding(context.session,
port['network_id'])
self.notifier.port_update(context, port,
binding.physical_network,
binding.vlan_id)
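# Illustrative sketch (not part of the plugin; hypothetical helper name): the
# stored vlan_id doubles as a type tag, as _extend_network_dict_provider()
# above shows. Assuming the FLAT_VLAN_ID/LOCAL_VLAN_ID sentinels from
# neutron.plugins.linuxbridge.common.constants, the mapping reduces to:
def _network_type_for_vlan_id(vlan_id):
    """Return the provider network type encoded by a binding's vlan_id."""
    if vlan_id == constants.FLAT_VLAN_ID:
        return svc_constants.TYPE_FLAT
    if vlan_id == constants.LOCAL_VLAN_ID:
        return svc_constants.TYPE_LOCAL
    return svc_constants.TYPE_VLAN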
from lib.hachoir_metadata.metadata import RootMetadata, registerExtractor
from lib.hachoir_metadata.image import computeComprRate
from lib.hachoir_parser.image.exif import ExifEntry
from lib.hachoir_parser.image.jpeg import (
JpegFile, JpegChunk,
QUALITY_HASH_COLOR, QUALITY_SUM_COLOR,
QUALITY_HASH_GRAY, QUALITY_SUM_GRAY)
from lib.hachoir_core.field import MissingField
from lib.hachoir_core.i18n import _
from lib.hachoir_core.tools import makeUnicode
from lib.hachoir_metadata.safe import fault_tolerant
from datetime import datetime
def deg2float(degree, minute, second):
return degree + (float(minute) + float(second) / 60.0) / 60.0
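# Illustrative sketch (not part of hachoir; hypothetical helper name):
# deg2float() turns the EXIF GPS degrees/minutes/seconds triple into decimal
# degrees, e.g. 48 deg 51' 29.6" is roughly 48.8582 (about the latitude of Paris).
def _example_deg2float():
    return deg2float(48, 51, 29.6)   # ~48.858222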
class JpegMetadata(RootMetadata):
EXIF_KEY = {
        # Exif metadata
ExifEntry.TAG_CAMERA_MANUFACTURER: "camera_manufacturer",
ExifEntry.TAG_CAMERA_MODEL: "camera_model",
ExifEntry.TAG_ORIENTATION: "image_orientation",
ExifEntry.TAG_EXPOSURE: "camera_exposure",
ExifEntry.TAG_FOCAL: "camera_focal",
ExifEntry.TAG_BRIGHTNESS: "camera_brightness",
ExifEntry.TAG_APERTURE: "camera_aperture",
        # Generic metadata
ExifEntry.TAG_IMG_TITLE: "title",
ExifEntry.TAG_SOFTWARE: "producer",
ExifEntry.TAG_FILE_TIMESTAMP: "creation_date",
ExifEntry.TAG_WIDTH: "width",
ExifEntry.TAG_HEIGHT: "height",
ExifEntry.TAG_USER_COMMENT: "comment",
}
IPTC_KEY = {
80: "author",
90: "city",
101: "country",
116: "copyright",
120: "title",
231: "comment",
}
orientation_name = {
1: _('Horizontal (normal)'),
2: _('Mirrored horizontal'),
3: _('Rotated 180'),
4: _('Mirrored vertical'),
5: _('Mirrored horizontal then rotated 90 counter-clock-wise'),
6: _('Rotated 90 clock-wise'),
7: _('Mirrored horizontal then rotated 90 clock-wise'),
8: _('Rotated 90 counter clock-wise'),
}
def extract(self, jpeg):
if "start_frame/content" in jpeg:
self.startOfFrame(jpeg["start_frame/content"])
elif "start_scan/content/nr_components" in jpeg:
self.bits_per_pixel = 8 * jpeg["start_scan/content/nr_components"].value
if "app0/content" in jpeg:
self.extractAPP0(jpeg["app0/content"])
if "exif/content" in jpeg:
for ifd in jpeg.array("exif/content/ifd"):
for entry in ifd.array("entry"):
self.processIfdEntry(ifd, entry)
self.readGPS(ifd)
if "photoshop/content" in jpeg:
psd = jpeg["photoshop/content"]
if "version/content/reader_name" in psd:
self.producer = psd["version/content/reader_name"].value
if "iptc/content" in psd:
self.parseIPTC(psd["iptc/content"])
for field in jpeg.array("comment"):
if "content/comment" in field:
self.comment = field["content/comment"].value
self.computeQuality(jpeg)
if "data" in jpeg:
computeComprRate(self, jpeg["data"].size)
if not self.has("producer") and "photoshop" in jpeg:
self.producer = u"Adobe Photoshop"
if self.has("compression"):
self.compression = "JPEG"
@fault_tolerant
def startOfFrame(self, sof):
# Set compression method
key = sof["../type"].value
self.compression = "JPEG (%s)" % JpegChunk.START_OF_FRAME[key]
# Read image size and bits/pixel
self.width = sof["width"].value
self.height = sof["height"].value
nb_components = sof["nr_components"].value
self.bits_per_pixel = 8 * nb_components
if nb_components == 3:
self.pixel_format = _("YCbCr")
elif nb_components == 1:
self.pixel_format = _("Grayscale")
self.nb_colors = 256
@fault_tolerant
def computeQuality(self, jpeg):
        # This function is an adaptation to Python of ImageMagick code
# to compute JPEG quality using quantization tables
# Read quantization tables
qtlist = []
for dqt in jpeg.array("quantization"):
for qt in dqt.array("content/qt"):
# TODO: Take care of qt["index"].value?
qtlist.append(qt)
if not qtlist:
return
# Compute sum of all coefficients
sumcoeff = 0
for qt in qtlist:
coeff = qt.array("coeff")
for index in xrange(64):
sumcoeff += coeff[index].value
# Choose the right quality table and compute hash value
try:
            hashval = qtlist[0]["coeff[2]"].value + qtlist[0]["coeff[53]"].value
if 2 <= len(qtlist):
hashval += qtlist[1]["coeff[0]"].value + qtlist[1]["coeff[63]"].value
hashtable = QUALITY_HASH_COLOR
sumtable = QUALITY_SUM_COLOR
else:
hashtable = QUALITY_HASH_GRAY
sumtable = QUALITY_SUM_GRAY
except (MissingField, IndexError):
# A coefficient is missing, so don't compute JPEG quality
return
# Find the JPEG quality
for index in xrange(100):
if (hashval >= hashtable[index]) or (sumcoeff >= sumtable[index]):
quality = "%s%%" % (index + 1)
if (hashval > hashtable[index]) or (sumcoeff > sumtable[index]):
quality += " " + _("(approximate)")
self.comment = "JPEG quality: %s" % quality
return
@fault_tolerant
def extractAPP0(self, app0):
self.format_version = u"JFIF %u.%02u" \
% (app0["ver_maj"].value, app0["ver_min"].value)
if "y_density" in app0:
self.width_dpi = app0["x_density"].value
self.height_dpi = app0["y_density"].value
@fault_tolerant
def processIfdEntry(self, ifd, entry):
# Skip unknown tags
tag = entry["tag"].value
if tag not in self.EXIF_KEY:
return
key = self.EXIF_KEY[tag]
if key in ("width", "height") and self.has(key):
# EXIF "valid size" are sometimes not updated when the image is scaled
# so we just ignore it
return
# Read value
if "value" in entry:
value = entry["value"].value
else:
value = ifd["value_%s" % entry.name].value
# Convert value to string
if tag == ExifEntry.TAG_ORIENTATION:
value = self.orientation_name.get(value, value)
elif tag == ExifEntry.TAG_EXPOSURE:
if not value:
return
if isinstance(value, float):
value = (value, u"1/%g" % (1/value))
elif entry["type"].value in (ExifEntry.TYPE_RATIONAL, ExifEntry.TYPE_SIGNED_RATIONAL):
value = (value, u"%.3g" % value)
# Store information
setattr(self, key, value)
@fault_tolerant
def readGPS(self, ifd):
# Read latitude and longitude
latitude_ref = None
longitude_ref = None
latitude = None
longitude = None
altitude_ref = 1
altitude = None
timestamp = None
datestamp = None
for entry in ifd.array("entry"):
tag = entry["tag"].value
if tag == ExifEntry.TAG_GPS_LATITUDE_REF:
if entry["value"].value == "N":
latitude_ref = 1
else:
latitude_ref = -1
elif tag == ExifEntry.TAG_GPS_LONGITUDE_REF:
if entry["value"].value == "E":
longitude_ref = 1
else:
longitude_ref = -1
elif tag == ExifEntry.TAG_GPS_ALTITUDE_REF:
if entry["value"].value == 1:
altitude_ref = -1
else:
altitude_ref = 1
elif tag == ExifEntry.TAG_GPS_LATITUDE:
latitude = [ifd["value_%s[%u]" % (entry.name, index)].value for index in xrange(3)]
elif tag == ExifEntry.TAG_GPS_LONGITUDE:
longitude = [ifd["value_%s[%u]" % (entry.name, index)].value for index in xrange(3)]
elif tag == ExifEntry.TAG_GPS_ALTITUDE:
altitude = ifd["value_%s" % entry.name].value
elif tag == ExifEntry.TAG_GPS_DATESTAMP:
datestamp = ifd["value_%s" % entry.name].value
elif tag == ExifEntry.TAG_GPS_TIMESTAMP:
items = [ifd["value_%s[%u]" % (entry.name, index)].value for index in xrange(3)]
items = map(int, items)
items = map(str, items)
timestamp = ":".join(items)
if latitude_ref and latitude:
value = deg2float(*latitude)
if latitude_ref < 0:
value = -value
self.latitude = value
if longitude and longitude_ref:
value = deg2float(*longitude)
if longitude_ref < 0:
value = -value
self.longitude = value
if altitude:
value = altitude
if altitude_ref < 0:
value = -value
self.altitude = value
if datestamp:
if timestamp:
datestamp += " " + timestamp
self.creation_date = datestamp
def parseIPTC(self, iptc):
datestr = hourstr = None
for field in iptc:
# Skip incomplete field
if "tag" not in field or "content" not in field:
continue
# Get value
value = field["content"].value
if isinstance(value, (str, unicode)):
value = value.replace("\r", " ")
value = value.replace("\n", " ")
# Skip unknown tag
tag = field["tag"].value
if tag == 55:
datestr = value
continue
if tag == 60:
hourstr = value
continue
if tag not in self.IPTC_KEY:
if tag != 0:
self.warning("Skip IPTC key %s: %s" % (
field["tag"].display, makeUnicode(value)))
continue
setattr(self, self.IPTC_KEY[tag], value)
if datestr and hourstr:
try:
year = int(datestr[0:4])
month = int(datestr[4:6])
day = int(datestr[6:8])
hour = int(hourstr[0:2])
min = int(hourstr[2:4])
sec = int(hourstr[4:6])
self.creation_date = datetime(year, month, day, hour, min, sec)
except ValueError:
pass
registerExtractor(JpegFile, JpegMetadata)
""" path.py - An object representing a path to a file or directory.
Example:
from path import path
d = path('/home/guido/bin')
for f in d.files('*.py'):
f.chmod(0755)
This module requires Python 2.2 or later.
URL: http://www.jorendorff.com/articles/python/path
Author: Jason Orendorff <jason.orendorff\x40gmail\x2ecom> (and others - see the url!)
Date: 9 Mar 2007
"""
# TODO
# - Tree-walking functions don't avoid symlink loops. Matt Harrison
# sent me a patch for this.
# - Bug in write_text(). It doesn't support Universal newline mode.
# - Better error message in listdir() when self isn't a
# directory. (On Windows, the error message really sucks.)
# - Make sure everything has a good docstring.
# - Add methods for regex find and replace.
# - guess_content_type() method?
# - Perhaps support arguments to touch().
from __future__ import generators
import sys, warnings, os, fnmatch, glob, shutil, codecs
__version__ = '2.2'
__all__ = ['path']
# Avoid the deprecation warning.
try:
import hashlib
md5 = hashlib.md5
except ImportError:
import md5
# Platform-specific support for path.owner
if os.name == 'nt':
try:
import win32security
except ImportError:
win32security = None
else:
try:
import pwd
except ImportError:
pwd = None
# Pre-2.3 support. Are unicode filenames supported?
_base = str
_getcwd = os.getcwd
try:
if os.path.supports_unicode_filenames:
_base = unicode
_getcwd = os.getcwdu
except AttributeError:
pass
# Pre-2.3 workaround for booleans
try:
True, False
except NameError:
True, False = 1, 0
# Pre-2.3 workaround for basestring.
try:
basestring
except NameError:
basestring = (str, unicode)
# Universal newline support
_textmode = 'r'
if hasattr(file, 'newlines'):
_textmode = 'U'
class TreeWalkWarning(Warning):
pass
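# Illustrative sketch (not part of path.py; hypothetical helper name): how
# splitall() and relpathto(), defined further down in the path class, behave on
# POSIX-style inputs; the exact results depend on os.sep and the platform.
def _example_split_and_relpath():
    p = path('/home/guido/bin')
    parts = p.splitall()                               # [path('/'), 'home', 'guido', 'bin']
    rel = p.relpathto('/home/guido/doc/notes.txt')     # path('../doc/notes.txt')
    return parts, rel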
class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def getcwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
getcwd = classmethod(getcwd)
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
r""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, '/' or 'C:\\'). The other items in
the list will be strings.
path.path.joinpath(*result) will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see path.walkdirs).
With the optional 'pattern' argument, this only lists
directories whose names match the given pattern. For
example, d.dirs('build-*').
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see path.walkfiles).
With the optional 'pattern' argument, this only lists files
whose names match the given pattern. For example,
d.files('*.pyc').
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The errors= keyword argument controls behavior when an
error occurs. The default is 'strict', which causes an
exception. The other allowed values are 'warn', which
reports the error via warnings.warn(), and 'ignore'.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item
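    # Illustrative sketch (not part of the original module): walk() yields
    # every matching child, each directory just before its own contents.
    # Assuming a directory named 'project' exists:
    #
    #     for item in path('project').walk('*.py', errors='warn'):
    #         print item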
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional 'pattern' argument, this yields only
directories whose names match the given pattern. For
example, mydir.walkdirs('*test') yields only directories
with names ending in 'test'.
The errors= keyword argument controls behavior when an
error occurs. The default is 'strict', which causes an
exception. The other allowed values are 'warn', which
reports the error via warnings.warn(), and 'ignore'.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, pattern, limits the results to files
with names that match the pattern. For example,
mydir.walkfiles('*.tmp') yields only files with the .tmp
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
            except Exception:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
                        % (child, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
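    # Illustrative sketch (not part of the original module): walkfiles()
    # limits the recursive traversal to regular files, so removing editor
    # backups under a tree might look like:
    #
    #     for f in path('src').walkfiles('*~'):
    #         f.remove()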
def fnmatch(self, pattern):
""" Return True if self.name matches the given pattern.
pattern - A filename pattern with wildcards,
for example '*.py'.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, path('/users').glob('*/bin/*') returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
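    # Illustrative sketch (not part of the original module): glob() resolves
    # the pattern relative to this directory and returns path objects:
    #
    #     path('/etc').glob('*.conf')        # e.g. [path('/etc/host.conf'), ...]
    #     path('.').glob('*/__init__.py')    # packages directly below the cwd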
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
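    # Illustrative sketch (not part of the original module): text() folds all
    # newline variants to '\n', so the result is independent of the platform
    # that wrote the file. Assuming 'notes.txt' exists:
    #
    #     content = path('notes.txt').text(encoding='utf-8')
    #     line_count = content.count(u'\n')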
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
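    # Illustrative sketch (not part of the original module): write_text()
    # normalizes newlines and, for unicode input, encodes before writing:
    #
    #     p = path('out.txt')
    #     p.write_text(u'line one\nline two\n', encoding='utf-8')
    #     p.write_text(u'appended\n', encoding='utf-8', append=True)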
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
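    # Illustrative sketch (not part of the original module): lines() and
    # write_lines() are symmetric, so a file can be rewritten with uniform
    # Unix line endings like this:
    #
    #     p = path('data.csv')
    #     p.write_lines(p.lines(retain=False), linesep='\n')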
def read_md5(self):
""" Calculate the md5 hash for this file.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = md5.new()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
return m.digest()
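    # Illustrative sketch (not part of the original module): read_md5() reads
    # the file in 8 KB chunks, so comparing two large files stays cheap on
    # memory:
    #
    #     if path('a.iso').read_md5() == path('b.iso').read_md5():
    #         print 'identical contents'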
# --- Methods for querying the filesystem.
exists = os.path.exists
isdir = os.path.isdir
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
getatime = os.path.getatime
atime = property(
getatime, None, None,
""" Last access time of the file. """)
getmtime = os.path.getmtime
mtime = property(
getmtime, None, None,
""" Last-modified time of the file. """)
if hasattr(os.path, 'getctime'):
getctime = os.path.getctime
ctime = property(
getctime, None, None,
""" Creation time of the file. """)
getsize = os.path.getsize
size = property(
getsize, None, None,
""" Size of the file, in bytes. """)
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def get_owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
owner = property(
get_owner, None, None,
""" Name of the owner of this file or directory. """)
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
def chmod(self, mode):
os.chmod(self, mode)
if hasattr(os, 'chown'):
def chown(self, uid, gid):
os.chown(self, uid, gid)
def rename(self, new):
os.rename(self, new)
def renames(self, new):
os.renames(self, new)
# --- Create/delete operations on directories
def mkdir(self, mode=0777):
os.mkdir(self, mode)
def makedirs(self, mode=0777):
os.makedirs(self, mode)
def rmdir(self):
os.rmdir(self)
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
def remove(self):
os.remove(self)
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = shutil.copyfile
copymode = shutil.copymode
copystat = shutil.copystat
copy = shutil.copy
copy2 = shutil.copy2
copytree = shutil.copytree
if hasattr(shutil, 'move'):
move = shutil.move
rmtree = shutil.rmtree
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Kyle Mestery, Cisco Systems, Inc.
from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy.orm import exc as sa_exc
from neutron.common import exceptions as exc
from neutron.db import api as db_api
from neutron.db import model_base
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_tunnel
LOG = log.getLogger(__name__)
VXLAN_UDP_PORT = 4789
MAX_VXLAN_VNI = 16777215
vxlan_opts = [
cfg.ListOpt('vni_ranges',
default=[],
help=_("Comma-separated list of <vni_min>:<vni_max> tuples "
"enumerating ranges of VXLAN VNI IDs that are "
"available for tenant network allocation")),
cfg.StrOpt('vxlan_group',
help=_("Multicast group for VXLAN. If unset, disables VXLAN "
"multicast mode.")),
]
cfg.CONF.register_opts(vxlan_opts, "ml2_type_vxlan")
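# Illustrative sketch (not part of the original driver): with the options
# registered above, the [ml2_type_vxlan] section of an ml2_conf.ini might
# look like the following (values are made up for illustration):
#
#     [ml2_type_vxlan]
#     vni_ranges = 1001:2000,3001:4000
#     vxlan_group = 239.1.1.1
#
# Each <vni_min>:<vni_max> tuple is parsed into vxlan_vni_ranges and expanded
# into individual VxlanAllocation rows by _sync_vxlan_allocations().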
class VxlanAllocation(model_base.BASEV2):
__tablename__ = 'ml2_vxlan_allocations'
vxlan_vni = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False, default=False)
class VxlanEndpoints(model_base.BASEV2):
"""Represents tunnel endpoint in RPC mode."""
__tablename__ = 'ml2_vxlan_endpoints'
ip_address = sa.Column(sa.String(64), primary_key=True)
udp_port = sa.Column(sa.Integer, primary_key=True, nullable=False,
autoincrement=False)
def __repr__(self):
return "<VxlanTunnelEndpoint(%s)>" % self.ip_address
class VxlanTypeDriver(type_tunnel.TunnelTypeDriver):
def get_type(self):
return p_const.TYPE_VXLAN
def initialize(self):
self.vxlan_vni_ranges = []
self._parse_tunnel_ranges(
cfg.CONF.ml2_type_vxlan.vni_ranges,
self.vxlan_vni_ranges,
p_const.TYPE_VXLAN
)
self._sync_vxlan_allocations()
def reserve_provider_segment(self, session, segment):
segmentation_id = segment.get(api.SEGMENTATION_ID)
with session.begin(subtransactions=True):
try:
alloc = (session.query(VxlanAllocation).
filter_by(vxlan_vni=segmentation_id).
with_lockmode('update').
one())
if alloc.allocated:
raise exc.TunnelIdInUse(tunnel_id=segmentation_id)
LOG.debug(_("Reserving specific vxlan tunnel %s from pool"),
segmentation_id)
alloc.allocated = True
except sa_exc.NoResultFound:
LOG.debug(_("Reserving specific vxlan tunnel %s outside pool"),
segmentation_id)
alloc = VxlanAllocation(vxlan_vni=segmentation_id)
alloc.allocated = True
session.add(alloc)
def allocate_tenant_segment(self, session):
with session.begin(subtransactions=True):
alloc = (session.query(VxlanAllocation).
filter_by(allocated=False).
with_lockmode('update').
first())
if alloc:
LOG.debug(_("Allocating vxlan tunnel vni %(vxlan_vni)s"),
{'vxlan_vni': alloc.vxlan_vni})
alloc.allocated = True
return {api.NETWORK_TYPE: p_const.TYPE_VXLAN,
api.PHYSICAL_NETWORK: None,
api.SEGMENTATION_ID: alloc.vxlan_vni}
def release_segment(self, session, segment):
vxlan_vni = segment[api.SEGMENTATION_ID]
with session.begin(subtransactions=True):
try:
alloc = (session.query(VxlanAllocation).
filter_by(vxlan_vni=vxlan_vni).
with_lockmode('update').
one())
alloc.allocated = False
for low, high in self.vxlan_vni_ranges:
if low <= vxlan_vni <= high:
LOG.debug(_("Releasing vxlan tunnel %s to pool"),
vxlan_vni)
break
else:
session.delete(alloc)
LOG.debug(_("Releasing vxlan tunnel %s outside pool"),
vxlan_vni)
except sa_exc.NoResultFound:
LOG.warning(_("vxlan_vni %s not found"), vxlan_vni)
def _sync_vxlan_allocations(self):
"""
Synchronize vxlan_allocations table with configured tunnel ranges.
"""
# determine current configured allocatable vnis
vxlan_vnis = set()
for tun_min, tun_max in self.vxlan_vni_ranges:
if tun_max + 1 - tun_min > MAX_VXLAN_VNI:
LOG.error(_("Skipping unreasonable VXLAN VNI range "
"%(tun_min)s:%(tun_max)s"),
{'tun_min': tun_min, 'tun_max': tun_max})
else:
vxlan_vnis |= set(xrange(tun_min, tun_max + 1))
session = db_api.get_session()
with session.begin(subtransactions=True):
            # remove unallocated tunnels that are no longer allocatable
allocs = session.query(VxlanAllocation).with_lockmode("update")
for alloc in allocs:
try:
# see if tunnel is allocatable
vxlan_vnis.remove(alloc.vxlan_vni)
except KeyError:
                    # it's not allocatable, so check if it's allocated
if not alloc.allocated:
# it's not, so remove it from table
LOG.debug(_("Removing tunnel %s from pool"),
alloc.vxlan_vni)
session.delete(alloc)
# add missing allocatable tunnels to table
for vxlan_vni in sorted(vxlan_vnis):
alloc = VxlanAllocation(vxlan_vni=vxlan_vni)
session.add(alloc)
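    # Illustrative sketch (not part of the original driver): with
    # vxlan_vni_ranges = [(100, 102)], the sync above boils down to:
    #
    #     vxlan_vnis = {100, 101, 102}
    #     # 1. existing rows whose VNI is in the set are kept (set entry removed)
    #     # 2. unallocated rows outside the set are deleted
    #     # 3. VNIs left in the set are inserted as new, unallocated rows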
def get_vxlan_allocation(self, session, vxlan_vni):
with session.begin(subtransactions=True):
return session.query(VxlanAllocation).filter_by(
vxlan_vni=vxlan_vni).first()
def get_endpoints(self):
"""Get every vxlan endpoints from database."""
LOG.debug(_("get_vxlan_endpoints() called"))
session = db_api.get_session()
with session.begin(subtransactions=True):
vxlan_endpoints = session.query(VxlanEndpoints)
return [{'ip_address': vxlan_endpoint.ip_address,
'udp_port': vxlan_endpoint.udp_port}
for vxlan_endpoint in vxlan_endpoints]
def add_endpoint(self, ip, udp_port=VXLAN_UDP_PORT):
LOG.debug(_("add_vxlan_endpoint() called for ip %s"), ip)
session = db_api.get_session()
with session.begin(subtransactions=True):
try:
vxlan_endpoint = (session.query(VxlanEndpoints).
filter_by(ip_address=ip).
with_lockmode('update').one())
except sa_exc.NoResultFound:
vxlan_endpoint = VxlanEndpoints(ip_address=ip,
udp_port=udp_port)
session.add(vxlan_endpoint)
return vxlan_endpoint
|
|
#!/usr/bin/env python
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import tempfile
import traceback
import unittest
import auto_push
from auto_push import CheckLastPush
from auto_push import SETTINGS_LOCATION
import auto_roll
from auto_roll import CLUSTERFUZZ_API_KEY_FILE
import common_includes
from common_includes import *
import merge_to_branch
from merge_to_branch import *
import push_to_trunk
from push_to_trunk import *
import chromium_roll
from chromium_roll import CHROMIUM
from chromium_roll import DEPS_FILE
from chromium_roll import ChromiumRoll
import releases
from releases import Releases
import bump_up_version
from bump_up_version import BumpUpVersion
from bump_up_version import LastChangeBailout
from bump_up_version import LKGRVersionUpToDateBailout
from auto_tag import AutoTag
TEST_CONFIG = {
BRANCHNAME: "test-prepare-push",
TRUNKBRANCH: "test-trunk-push",
PERSISTFILE_BASENAME: "/tmp/test-v8-push-to-trunk-tempfile",
DOT_GIT_LOCATION: None,
VERSION_FILE: None,
CHANGELOG_FILE: None,
CHANGELOG_ENTRY_FILE: "/tmp/test-v8-push-to-trunk-tempfile-changelog-entry",
PATCH_FILE: "/tmp/test-v8-push-to-trunk-tempfile-patch",
COMMITMSG_FILE: "/tmp/test-v8-push-to-trunk-tempfile-commitmsg",
CHROMIUM: "/tmp/test-v8-push-to-trunk-tempfile-chromium",
DEPS_FILE: "/tmp/test-v8-push-to-trunk-tempfile-chromium/DEPS",
SETTINGS_LOCATION: None,
ALREADY_MERGING_SENTINEL_FILE:
"/tmp/test-merge-to-branch-tempfile-already-merging",
COMMIT_HASHES_FILE: "/tmp/test-merge-to-branch-tempfile-PATCH_COMMIT_HASHES",
TEMPORARY_PATCH_FILE: "/tmp/test-merge-to-branch-tempfile-temporary-patch",
CLUSTERFUZZ_API_KEY_FILE: "/tmp/test-fake-cf-api-key",
}
AUTO_PUSH_ARGS = [
"-a", "[email protected]",
"-r", "[email protected]",
]
class ToplevelTest(unittest.TestCase):
def testSortBranches(self):
S = releases.SortBranches
self.assertEquals(["3.1", "2.25"], S(["2.25", "3.1"])[0:2])
self.assertEquals(["3.0", "2.25"], S(["2.25", "3.0", "2.24"])[0:2])
self.assertEquals(["3.11", "3.2"], S(["3.11", "3.2", "2.24"])[0:2])
def testFilterDuplicatesAndReverse(self):
F = releases.FilterDuplicatesAndReverse
self.assertEquals([], F([]))
self.assertEquals([["100", "10"]], F([["100", "10"]]))
self.assertEquals([["99", "9"], ["100", "10"]],
F([["100", "10"], ["99", "9"]]))
self.assertEquals([["98", "9"], ["100", "10"]],
F([["100", "10"], ["99", "9"], ["98", "9"]]))
self.assertEquals([["98", "9"], ["99", "10"]],
F([["100", "10"], ["99", "10"], ["98", "9"]]))
def testBuildRevisionRanges(self):
B = releases.BuildRevisionRanges
self.assertEquals({}, B([]))
self.assertEquals({"10": "100"}, B([["100", "10"]]))
self.assertEquals({"10": "100", "9": "99:99"},
B([["100", "10"], ["99", "9"]]))
self.assertEquals({"10": "100", "9": "97:99"},
B([["100", "10"], ["98", "9"], ["97", "9"]]))
self.assertEquals({"10": "100", "9": "99:99", "3": "91:98"},
B([["100", "10"], ["99", "9"], ["91", "3"]]))
self.assertEquals({"13": "101", "12": "100:100", "9": "94:97",
"3": "91:93, 98:99"},
B([["101", "13"], ["100", "12"], ["98", "3"],
["94", "9"], ["91", "3"]]))
def testMakeComment(self):
self.assertEquals("# Line 1\n# Line 2\n#",
MakeComment(" Line 1\n Line 2\n"))
self.assertEquals("#Line 1\n#Line 2",
MakeComment("Line 1\n Line 2"))
def testStripComments(self):
self.assertEquals(" Line 1\n Line 3\n",
StripComments(" Line 1\n# Line 2\n Line 3\n#\n"))
self.assertEquals("\nLine 2 ### Test\n #",
StripComments("###\n# \n\n# Line 1\nLine 2 ### Test\n #"))
def testMakeChangeLogBodySimple(self):
commits = [
["Title text 1",
"Title text 1\n\nBUG=\n",
"[email protected]"],
["Title text 2.",
"Title text 2\n\nBUG=1234\n",
"[email protected]"],
]
self.assertEquals(" Title text 1.\n"
" ([email protected])\n\n"
" Title text 2 (Chromium issue 1234).\n"
" ([email protected])\n\n",
MakeChangeLogBody(commits))
def testMakeChangeLogBodyEmpty(self):
self.assertEquals("", MakeChangeLogBody([]))
def testMakeChangeLogBodyAutoFormat(self):
commits = [
["Title text 1!",
"Title text 1\nLOG=y\nBUG=\n",
"[email protected]"],
["Title text 2",
"Title text 2\n\nBUG=1234\n",
"[email protected]"],
["Title text 3",
"Title text 3\n\nBUG=1234\nLOG = Yes\n",
"[email protected]"],
["Title text 3",
"Title text 4\n\nBUG=1234\nLOG=\n",
"[email protected]"],
]
self.assertEquals(" Title text 1.\n\n"
" Title text 3 (Chromium issue 1234).\n\n",
MakeChangeLogBody(commits, True))
def testRegressWrongLogEntryOnTrue(self):
body = """
Check elimination: Learn from if(CompareMap(x)) on true branch.
BUG=
[email protected]
Committed: https://code.google.com/p/v8/source/detail?r=18210
"""
self.assertEquals("", MakeChangeLogBody([["title", body, "author"]], True))
def testMakeChangeLogBugReferenceEmpty(self):
self.assertEquals("", MakeChangeLogBugReference(""))
self.assertEquals("", MakeChangeLogBugReference("LOG="))
self.assertEquals("", MakeChangeLogBugReference(" BUG ="))
self.assertEquals("", MakeChangeLogBugReference("BUG=none\t"))
def testMakeChangeLogBugReferenceSimple(self):
self.assertEquals("(issue 987654)",
MakeChangeLogBugReference("BUG = v8:987654"))
self.assertEquals("(Chromium issue 987654)",
MakeChangeLogBugReference("BUG=987654 "))
def testMakeChangeLogBugReferenceFromBody(self):
self.assertEquals("(Chromium issue 1234567)",
MakeChangeLogBugReference("Title\n\nTBR=\nBUG=\n"
" BUG=\tchromium:1234567\t\n"
"R=somebody\n"))
def testMakeChangeLogBugReferenceMultiple(self):
# All issues should be sorted and grouped. Multiple references to the same
# issue should be filtered.
self.assertEquals("(issues 123, 234, Chromium issue 345)",
MakeChangeLogBugReference("Title\n\n"
"BUG=v8:234\n"
" BUG\t= 345, \tv8:234,\n"
"BUG=v8:123\n"
"R=somebody\n"))
self.assertEquals("(Chromium issues 123, 234)",
MakeChangeLogBugReference("Title\n\n"
"BUG=234,,chromium:123 \n"
"R=somebody\n"))
self.assertEquals("(Chromium issues 123, 234)",
MakeChangeLogBugReference("Title\n\n"
"BUG=chromium:234, , 123\n"
"R=somebody\n"))
self.assertEquals("(issues 345, 456)",
MakeChangeLogBugReference("Title\n\n"
"\t\tBUG=v8:345,v8:456\n"
"R=somebody\n"))
self.assertEquals("(issue 123, Chromium issues 345, 456)",
MakeChangeLogBugReference("Title\n\n"
"BUG=chromium:456\n"
"BUG = none\n"
"R=somebody\n"
"BUG=456,v8:123, 345"))
  # TODO(machenbach): These tests don't make much sense when the formatting is
  # done later.
def testMakeChangeLogBugReferenceLong(self):
# -----------------00--------10--------20--------30--------
self.assertEquals("(issues 234, 1234567890, 1234567"
"8901234567890, Chromium issues 12345678,"
" 123456789)",
MakeChangeLogBugReference("BUG=v8:234\n"
"BUG=v8:1234567890\n"
"BUG=v8:12345678901234567890\n"
"BUG=123456789\n"
"BUG=12345678\n"))
# -----------------00--------10--------20--------30--------
self.assertEquals("(issues 234, 1234567890, 1234567"
"8901234567890, Chromium issues"
" 123456789, 1234567890)",
MakeChangeLogBugReference("BUG=v8:234\n"
"BUG=v8:12345678901234567890\n"
"BUG=v8:1234567890\n"
"BUG=123456789\n"
"BUG=1234567890\n"))
# -----------------00--------10--------20--------30--------
self.assertEquals("(Chromium issues 234, 1234567890"
", 12345678901234567, "
"1234567890123456789)",
MakeChangeLogBugReference("BUG=234\n"
"BUG=12345678901234567\n"
"BUG=1234567890123456789\n"
"BUG=1234567890\n"))
def Git(*args, **kwargs):
"""Convenience function returning a git test expectation."""
return {
"name": "git",
"args": args[:-1],
"ret": args[-1],
"cb": kwargs.get("cb"),
}
def RL(text, cb=None):
"""Convenience function returning a readline test expectation."""
return {"name": "readline", "args": [], "ret": text, "cb": cb}
def URL(*args, **kwargs):
"""Convenience function returning a readurl test expectation."""
return {
"name": "readurl",
"args": args[:-1],
"ret": args[-1],
"cb": kwargs.get("cb"),
}
class SimpleMock(object):
def __init__(self, name):
self._name = name
self._recipe = []
self._index = -1
def Expect(self, recipe):
self._recipe = recipe
def Call(self, name, *args): # pragma: no cover
self._index += 1
try:
expected_call = self._recipe[self._index]
except IndexError:
raise NoRetryException("Calling %s %s" % (name, " ".join(args)))
if not isinstance(expected_call, dict):
raise NoRetryException("Found wrong expectation type for %s %s"
% (name, " ".join(args)))
    # The expectation must list at least as many arguments as the actual call
    # passes in.
if len(args) > len(expected_call['args']):
raise NoRetryException("When calling %s with arguments, the "
"expectations must consist of at least as many arguments." % name)
# Compare expected and actual arguments.
for (expected_arg, actual_arg) in zip(expected_call['args'], args):
if expected_arg != actual_arg:
raise NoRetryException("Expected: %s - Actual: %s"
% (expected_arg, actual_arg))
# The expected call contains an optional callback for checking the context
# at the time of the call.
if expected_call['cb']:
try:
expected_call['cb']()
except:
tb = traceback.format_exc()
raise NoRetryException("Caught exception from callback: %s" % tb)
# If the return value is an exception, raise it instead of returning.
if isinstance(expected_call['ret'], Exception):
raise expected_call['ret']
return expected_call['ret']
def AssertFinished(self): # pragma: no cover
if self._index < len(self._recipe) -1:
raise NoRetryException("Called %s too seldom: %d vs. %d"
% (self._name, self._index, len(self._recipe)))
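# Illustrative sketch (not part of the original script): SimpleMock replays a
# scripted list of expectations in order. A hypothetical stand-alone use:
#
#     mock = SimpleMock("git")
#     mock.Expect([Git("status -s -uno", ""), Git("svn fetch", "done")])
#     mock.Call("git", "status -s -uno")   # -> ""
#     mock.Call("git", "svn fetch")        # -> "done"
#     mock.AssertFinished()                # raises if expectations remain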
class ScriptTest(unittest.TestCase):
def MakeEmptyTempFile(self):
handle, name = tempfile.mkstemp()
os.close(handle)
self._tmp_files.append(name)
return name
def WriteFakeVersionFile(self, minor=22, build=4, patch=0):
with open(TEST_CONFIG[VERSION_FILE], "w") as f:
f.write(" // Some line...\n")
f.write("\n")
f.write("#define MAJOR_VERSION 3\n")
f.write("#define MINOR_VERSION %s\n" % minor)
f.write("#define BUILD_NUMBER %s\n" % build)
f.write("#define PATCH_LEVEL %s\n" % patch)
f.write(" // Some line...\n")
f.write("#define IS_CANDIDATE_VERSION 0\n")
def MakeStep(self):
"""Convenience wrapper."""
options = ScriptsBase(TEST_CONFIG, self, self._state).MakeOptions([])
return MakeStep(step_class=Step, state=self._state,
config=TEST_CONFIG, side_effect_handler=self,
options=options)
def RunStep(self, script=PushToTrunk, step_class=Step, args=None):
"""Convenience wrapper."""
args = args if args is not None else ["-m"]
return script(TEST_CONFIG, self, self._state).RunSteps([step_class], args)
def GitMock(self, cmd, args="", pipe=True):
print "%s %s" % (cmd, args)
return self._git_mock.Call("git", args)
def LogMock(self, cmd, args=""):
print "Log: %s %s" % (cmd, args)
MOCKS = {
"git": GitMock,
# TODO(machenbach): Little hack to reuse the git mock for the one svn call
# in merge-to-branch. The command should be made explicit in the test
# expectations.
"svn": GitMock,
"vi": LogMock,
}
def Call(self, fun, *args, **kwargs):
print "Calling %s with %s and %s" % (str(fun), str(args), str(kwargs))
def Command(self, cmd, args="", prefix="", pipe=True):
return ScriptTest.MOCKS[cmd](self, cmd, args)
def ReadLine(self):
return self._rl_mock.Call("readline")
def ReadURL(self, url, params):
if params is not None:
return self._url_mock.Call("readurl", url, params)
else:
return self._url_mock.Call("readurl", url)
def ReadClusterFuzzAPI(self, api_key, **params):
    # TODO(machenbach): Use a mock for this and add a test that stops rolling
    # due to clusterfuzz results.
return []
def Sleep(self, seconds):
pass
def GetDate(self):
return "1999-07-31"
def GetUTCStamp(self):
return "100000"
def ExpectGit(self, *args):
"""Convenience wrapper."""
self._git_mock.Expect(*args)
def ExpectReadline(self, *args):
"""Convenience wrapper."""
self._rl_mock.Expect(*args)
def ExpectReadURL(self, *args):
"""Convenience wrapper."""
self._url_mock.Expect(*args)
def setUp(self):
self._git_mock = SimpleMock("git")
self._rl_mock = SimpleMock("readline")
self._url_mock = SimpleMock("readurl")
self._tmp_files = []
self._state = {}
def tearDown(self):
Command("rm", "-rf %s*" % TEST_CONFIG[PERSISTFILE_BASENAME])
# Clean up temps. Doesn't work automatically.
for name in self._tmp_files:
if os.path.exists(name):
os.remove(name)
self._git_mock.AssertFinished()
self._rl_mock.AssertFinished()
self._url_mock.AssertFinished()
def testGitOrig(self):
self.assertTrue(Command("git", "--version").startswith("git version"))
def testGitMock(self):
self.ExpectGit([Git("--version", "git version 1.2.3"), Git("dummy", "")])
self.assertEquals("git version 1.2.3", self.MakeStep().Git("--version"))
self.assertEquals("", self.MakeStep().Git("dummy"))
def testCommonPrepareDefault(self):
self.ExpectGit([
Git("status -s -uno", ""),
Git("status -s -b -uno", "## some_branch"),
Git("svn fetch", ""),
Git("branch", " branch1\n* %s" % TEST_CONFIG[BRANCHNAME]),
Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
])
self.ExpectReadline([RL("Y")])
self.MakeStep().CommonPrepare()
self.MakeStep().PrepareBranch()
self.assertEquals("some_branch", self._state["current_branch"])
def testCommonPrepareNoConfirm(self):
self.ExpectGit([
Git("status -s -uno", ""),
Git("status -s -b -uno", "## some_branch"),
Git("svn fetch", ""),
Git("branch", " branch1\n* %s" % TEST_CONFIG[BRANCHNAME]),
])
self.ExpectReadline([RL("n")])
self.MakeStep().CommonPrepare()
self.assertRaises(Exception, self.MakeStep().PrepareBranch)
self.assertEquals("some_branch", self._state["current_branch"])
def testCommonPrepareDeleteBranchFailure(self):
self.ExpectGit([
Git("status -s -uno", ""),
Git("status -s -b -uno", "## some_branch"),
Git("svn fetch", ""),
Git("branch", " branch1\n* %s" % TEST_CONFIG[BRANCHNAME]),
Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], None),
])
self.ExpectReadline([RL("Y")])
self.MakeStep().CommonPrepare()
self.assertRaises(Exception, self.MakeStep().PrepareBranch)
self.assertEquals("some_branch", self._state["current_branch"])
def testInitialEnvironmentChecks(self):
TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
os.environ["EDITOR"] = "vi"
self.MakeStep().InitialEnvironmentChecks()
def testReadAndPersistVersion(self):
TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self.WriteFakeVersionFile(build=5)
step = self.MakeStep()
step.ReadAndPersistVersion()
self.assertEquals("3", step["major"])
self.assertEquals("22", step["minor"])
self.assertEquals("5", step["build"])
self.assertEquals("0", step["patch"])
def testRegex(self):
self.assertEqual("(issue 321)",
re.sub(r"BUG=v8:(.*)$", r"(issue \1)", "BUG=v8:321"))
self.assertEqual("(Chromium issue 321)",
re.sub(r"BUG=(.*)$", r"(Chromium issue \1)", "BUG=321"))
cl = " too little\n\ttab\ttab\n too much\n trailing "
cl = MSub(r"\t", r" ", cl)
cl = MSub(r"^ {1,7}([^ ])", r" \1", cl)
cl = MSub(r"^ {9,80}([^ ])", r" \1", cl)
cl = MSub(r" +$", r"", cl)
self.assertEqual(" too little\n"
" tab tab\n"
" too much\n"
" trailing", cl)
self.assertEqual("//\n#define BUILD_NUMBER 3\n",
MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
r"\g<space>3",
"//\n#define BUILD_NUMBER 321\n"))
def testPreparePushRevision(self):
# Tests the default push hash used when the --revision option is not set.
self.ExpectGit([
Git("log -1 --format=%H HEAD", "push_hash")
])
self.RunStep(PushToTrunk, PreparePushRevision)
self.assertEquals("push_hash", self._state["push_hash"])
def testPrepareChangeLog(self):
TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self.WriteFakeVersionFile()
TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
self.ExpectGit([
Git("log --format=%H 1234..push_hash", "rev1\nrev2\nrev3\nrev4"),
Git("log -1 --format=%s rev1", "Title text 1"),
Git("log -1 --format=%B rev1", "Title\n\nBUG=\nLOG=y\n"),
Git("log -1 --format=%an rev1", "[email protected]"),
Git("log -1 --format=%s rev2", "Title text 2."),
Git("log -1 --format=%B rev2", "Title\n\nBUG=123\nLOG= \n"),
Git("log -1 --format=%an rev2", "[email protected]"),
Git("log -1 --format=%s rev3", "Title text 3"),
Git("log -1 --format=%B rev3", "Title\n\nBUG=321\nLOG=true\n"),
Git("log -1 --format=%an rev3", "[email protected]"),
Git("log -1 --format=%s rev4", "Title text 4"),
Git("log -1 --format=%B rev4",
("Title\n\nBUG=456\nLOG=Y\n\n"
"Review URL: https://codereview.chromium.org/9876543210\n")),
Git("log -1 --format=%an rev4", "[email protected]"),
])
    # The CL for rev4 on Rietveld has an updated LOG flag.
self.ExpectReadURL([
URL("https://codereview.chromium.org/9876543210/description",
"Title\n\nBUG=456\nLOG=N\n\n"),
])
self._state["last_push_bleeding_edge"] = "1234"
self._state["push_hash"] = "push_hash"
self._state["version"] = "3.22.5"
self.RunStep(PushToTrunk, PrepareChangeLog)
actual_cl = FileToText(TEST_CONFIG[CHANGELOG_ENTRY_FILE])
expected_cl = """1999-07-31: Version 3.22.5
Title text 1.
Title text 3 (Chromium issue 321).
Performance and stability improvements on all platforms.
#
# The change log above is auto-generated. Please review if all relevant
# commit messages from the list below are included.
# All lines starting with # will be stripped.
#
# Title text 1.
# ([email protected])
#
# Title text 2 (Chromium issue 123).
# ([email protected])
#
# Title text 3 (Chromium issue 321).
# ([email protected])
#
# Title text 4 (Chromium issue 456).
# ([email protected])
#
#"""
self.assertEquals(expected_cl, actual_cl)
def testEditChangeLog(self):
TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
TextToFile(" New \n\tLines \n", TEST_CONFIG[CHANGELOG_ENTRY_FILE])
os.environ["EDITOR"] = "vi"
self.ExpectReadline([
RL(""), # Open editor.
])
self.RunStep(PushToTrunk, EditChangeLog)
self.assertEquals("New\n Lines",
FileToText(TEST_CONFIG[CHANGELOG_ENTRY_FILE]))
# Version on trunk: 3.22.4.0. Version on master (bleeding_edge): 3.22.6.
# Make sure that the increment is 3.22.7.0.
def testIncrementVersion(self):
TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self.WriteFakeVersionFile()
self._state["last_push_trunk"] = "hash1"
self._state["latest_build"] = "6"
self._state["latest_version"] = "3.22.6.0"
self.ExpectGit([
Git("checkout -f hash1 -- %s" % TEST_CONFIG[VERSION_FILE], ""),
Git("checkout -f svn/bleeding_edge -- %s" % TEST_CONFIG[VERSION_FILE],
"", cb=lambda: self.WriteFakeVersionFile(22, 6)),
])
self.ExpectReadline([
RL("Y"), # Increment build number.
])
self.RunStep(PushToTrunk, IncrementVersion)
self.assertEquals("3", self._state["new_major"])
self.assertEquals("22", self._state["new_minor"])
self.assertEquals("7", self._state["new_build"])
self.assertEquals("0", self._state["new_patch"])
def _TestSquashCommits(self, change_log, expected_msg):
TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
with open(TEST_CONFIG[CHANGELOG_ENTRY_FILE], "w") as f:
f.write(change_log)
self.ExpectGit([
Git("diff svn/trunk hash1", "patch content"),
Git("svn find-rev hash1", "123455\n"),
])
self._state["push_hash"] = "hash1"
self._state["date"] = "1999-11-11"
self.RunStep(PushToTrunk, SquashCommits)
self.assertEquals(FileToText(TEST_CONFIG[COMMITMSG_FILE]), expected_msg)
patch = FileToText(TEST_CONFIG[ PATCH_FILE])
self.assertTrue(re.search(r"patch content", patch))
def testSquashCommitsUnformatted(self):
change_log = """1999-11-11: Version 3.22.5
Log text 1.
Chromium issue 12345
Performance and stability improvements on all platforms.\n"""
commit_msg = """Version 3.22.5 (based on bleeding_edge revision r123455)
Log text 1. Chromium issue 12345
Performance and stability improvements on all platforms."""
self._TestSquashCommits(change_log, commit_msg)
def testSquashCommitsFormatted(self):
change_log = """1999-11-11: Version 3.22.5
Long commit message that fills more than 80 characters (Chromium issue
12345).
Performance and stability improvements on all platforms.\n"""
commit_msg = """Version 3.22.5 (based on bleeding_edge revision r123455)
Long commit message that fills more than 80 characters (Chromium issue 12345).
Performance and stability improvements on all platforms."""
self._TestSquashCommits(change_log, commit_msg)
def testSquashCommitsQuotationMarks(self):
change_log = """Line with "quotation marks".\n"""
commit_msg = """Line with "quotation marks"."""
self._TestSquashCommits(change_log, commit_msg)
def _PushToTrunk(self, force=False, manual=False):
TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
# The version file on bleeding edge has build level 5, while the version
# file from trunk has build level 4.
TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self.WriteFakeVersionFile(build=5)
TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
TEST_CONFIG[CHANGELOG_FILE] = self.MakeEmptyTempFile()
bleeding_edge_change_log = "2014-03-17: Sentinel\n"
TextToFile(bleeding_edge_change_log, TEST_CONFIG[CHANGELOG_FILE])
os.environ["EDITOR"] = "vi"
def ResetChangeLog():
"""On 'git co -b new_branch svn/trunk', and 'git checkout -- ChangeLog',
the ChangLog will be reset to its content on trunk."""
trunk_change_log = """1999-04-05: Version 3.22.4
Performance and stability improvements on all platforms.\n"""
TextToFile(trunk_change_log, TEST_CONFIG[CHANGELOG_FILE])
def ResetToTrunk():
ResetChangeLog()
self.WriteFakeVersionFile()
def CheckSVNCommit():
commit = FileToText(TEST_CONFIG[COMMITMSG_FILE])
self.assertEquals(
"""Version 3.22.5 (based on bleeding_edge revision r123455)
Log text 1 (issue 321).
Performance and stability improvements on all platforms.""", commit)
version = FileToText(TEST_CONFIG[VERSION_FILE])
self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
self.assertFalse(re.search(r"#define BUILD_NUMBER\s+6", version))
self.assertTrue(re.search(r"#define PATCH_LEVEL\s+0", version))
self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
# Check that the change log on the trunk branch got correctly modified.
change_log = FileToText(TEST_CONFIG[CHANGELOG_FILE])
self.assertEquals(
"""1999-07-31: Version 3.22.5
Log text 1 (issue 321).
Performance and stability improvements on all platforms.
1999-04-05: Version 3.22.4
Performance and stability improvements on all platforms.\n""",
change_log)
force_flag = " -f" if not manual else ""
self.ExpectGit([
Git("status -s -uno", ""),
Git("status -s -b -uno", "## some_branch\n"),
Git("svn fetch", ""),
Git("branch", " branch1\n* branch2\n"),
Git("branch", " branch1\n* branch2\n"),
Git("checkout -b %s svn/bleeding_edge" % TEST_CONFIG[BRANCHNAME], ""),
Git("svn find-rev r123455", "push_hash\n"),
Git(("log -1 --format=%H --grep="
"\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\" "
"svn/trunk"), "hash2\n"),
Git("log -1 hash2", "Log message\n"),
Git("log -1 --format=%s hash2",
"Version 3.4.5 (based on bleeding_edge revision r1234)\n"),
Git("svn find-rev r1234", "hash3\n"),
Git("checkout -f svn/bleeding_edge -- %s" % TEST_CONFIG[VERSION_FILE],
"", cb=self.WriteFakeVersionFile),
Git("checkout -f hash2 -- %s" % TEST_CONFIG[VERSION_FILE], "",
cb=self.WriteFakeVersionFile),
Git("log --format=%H hash3..push_hash", "rev1\n"),
Git("log -1 --format=%s rev1", "Log text 1.\n"),
Git("log -1 --format=%B rev1", "Text\nLOG=YES\nBUG=v8:321\nText\n"),
Git("log -1 --format=%an rev1", "[email protected]\n"),
Git("svn fetch", "fetch result\n"),
Git("checkout -f svn/bleeding_edge", ""),
Git("diff svn/trunk push_hash", "patch content\n"),
Git("svn find-rev push_hash", "123455\n"),
Git("checkout -b %s svn/trunk" % TEST_CONFIG[TRUNKBRANCH], "",
cb=ResetToTrunk),
Git("apply --index --reject \"%s\"" % TEST_CONFIG[PATCH_FILE], ""),
Git("checkout -f svn/trunk -- %s" % TEST_CONFIG[CHANGELOG_FILE], "",
cb=ResetChangeLog),
Git("checkout -f svn/trunk -- %s" % TEST_CONFIG[VERSION_FILE], "",
cb=self.WriteFakeVersionFile),
Git("commit -aF \"%s\"" % TEST_CONFIG[COMMITMSG_FILE], "",
cb=CheckSVNCommit),
Git("svn dcommit 2>&1", "Some output\nCommitted r123456\nSome output\n"),
Git("svn tag 3.22.5 -m \"Tagging version 3.22.5\"", ""),
Git("checkout -f some_branch", ""),
Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
Git("branch -D %s" % TEST_CONFIG[TRUNKBRANCH], ""),
])
# Expected keyboard input in manual mode:
if manual:
self.ExpectReadline([
RL("Y"), # Confirm last push.
RL(""), # Open editor.
RL("Y"), # Increment build number.
RL("Y"), # Sanity check.
])
# Expected keyboard input in semi-automatic mode and forced mode:
if not manual:
self.ExpectReadline([])
args = ["-a", "[email protected]", "--revision", "123455"]
if force: args.append("-f")
if manual: args.append("-m")
else: args += ["-r", "[email protected]"]
PushToTrunk(TEST_CONFIG, self).Run(args)
cl = FileToText(TEST_CONFIG[CHANGELOG_FILE])
self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
self.assertTrue(re.search(r" Log text 1 \(issue 321\).", cl))
self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
    # Note: The version file is on build number 5 again at the end of this
    # test since the git command that merges to the bleeding edge branch is
    # mocked out.
def testPushToTrunkManual(self):
self._PushToTrunk(manual=True)
def testPushToTrunkSemiAutomatic(self):
self._PushToTrunk()
def testPushToTrunkForced(self):
self._PushToTrunk(force=True)
def _ChromiumRoll(self, force=False, manual=False):
googlers_mapping_py = "%s-mapping.py" % TEST_CONFIG[PERSISTFILE_BASENAME]
with open(googlers_mapping_py, "w") as f:
f.write("""
def list_to_dict(entries):
return {"[email protected]": "[email protected]"}
def get_list():
pass""")
TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
if not os.path.exists(TEST_CONFIG[CHROMIUM]):
os.makedirs(TEST_CONFIG[CHROMIUM])
TextToFile("Some line\n \"v8_revision\": \"123444\",\n some line",
TEST_CONFIG[DEPS_FILE])
os.environ["EDITOR"] = "vi"
force_flag = " -f" if not manual else ""
self.ExpectGit([
Git("status -s -uno", ""),
Git("status -s -b -uno", "## some_branch\n"),
Git("svn fetch", ""),
Git(("log -1 --format=%H --grep="
"\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
"svn/trunk"), "push_hash\n"),
Git("svn find-rev push_hash", "123455\n"),
Git("log -1 --format=%s push_hash",
"Version 3.22.5 (based on bleeding_edge revision r123454)\n"),
Git("status -s -uno", ""),
Git("checkout -f master", ""),
Git("pull", ""),
Git("checkout -b v8-roll-123455", ""),
Git(("commit -am \"Update V8 to version 3.22.5 "
"(based on bleeding_edge revision r123454).\n\n"
"Please reply to the V8 sheriff [email protected] in "
"case of problems.\n\[email protected]\""),
""),
Git(("cl upload --send-mail --email \"[email protected]\"%s"
% force_flag), ""),
])
self.ExpectReadURL([
URL("https://chromium-build.appspot.com/p/chromium/sheriff_v8.js",
"document.write('g_name')"),
])
# Expected keyboard input in manual mode:
if manual:
self.ExpectReadline([
RL("[email protected]"), # Chromium reviewer.
])
# Expected keyboard input in semi-automatic mode and forced mode:
if not manual:
self.ExpectReadline([])
args = ["-a", "[email protected]", "-c", TEST_CONFIG[CHROMIUM],
"--sheriff", "--googlers-mapping", googlers_mapping_py]
if force: args.append("-f")
if manual: args.append("-m")
else: args += ["-r", "[email protected]"]
ChromiumRoll(TEST_CONFIG, self).Run(args)
deps = FileToText(TEST_CONFIG[DEPS_FILE])
self.assertTrue(re.search("\"v8_revision\": \"123455\"", deps))
def testChromiumRollManual(self):
self._ChromiumRoll(manual=True)
def testChromiumRollSemiAutomatic(self):
self._ChromiumRoll()
def testChromiumRollForced(self):
self._ChromiumRoll(force=True)
def testCheckLastPushRecently(self):
self.ExpectGit([
Git(("log -1 --format=%H --grep="
"\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\" "
"svn/trunk"), "hash2\n"),
Git("log -1 --format=%s hash2",
"Version 3.4.5 (based on bleeding_edge revision r99)\n"),
])
self._state["lkgr"] = "101"
self.assertRaises(Exception, lambda: self.RunStep(auto_push.AutoPush,
CheckLastPush,
AUTO_PUSH_ARGS))
def testAutoPush(self):
TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
TEST_CONFIG[SETTINGS_LOCATION] = "~/.doesnotexist"
self.ExpectReadURL([
URL("https://v8-status.appspot.com/current?format=json",
"{\"message\": \"Tree is throttled\"}"),
URL("https://v8-status.appspot.com/lkgr", Exception("Network problem")),
URL("https://v8-status.appspot.com/lkgr", "100"),
])
self.ExpectGit([
Git("status -s -uno", ""),
Git("status -s -b -uno", "## some_branch\n"),
Git("svn fetch", ""),
Git(("log -1 --format=%H --grep=\""
"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\""
" svn/trunk"), "push_hash\n"),
Git("log -1 --format=%s push_hash",
"Version 3.4.5 (based on bleeding_edge revision r79)\n"),
])
auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS + ["--push"])
state = json.loads(FileToText("%s-state.json"
% TEST_CONFIG[PERSISTFILE_BASENAME]))
self.assertEquals("100", state["lkgr"])
def testAutoPushStoppedBySettings(self):
TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
TEST_CONFIG[SETTINGS_LOCATION] = self.MakeEmptyTempFile()
TextToFile("{\"enable_auto_push\": false}", TEST_CONFIG[SETTINGS_LOCATION])
self.ExpectReadURL([])
self.ExpectGit([
Git("status -s -uno", ""),
Git("status -s -b -uno", "## some_branch\n"),
Git("svn fetch", ""),
])
def RunAutoPush():
auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS)
self.assertRaises(Exception, RunAutoPush)
def testAutoPushStoppedByTreeStatus(self):
TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
TEST_CONFIG[SETTINGS_LOCATION] = "~/.doesnotexist"
self.ExpectReadURL([
URL("https://v8-status.appspot.com/current?format=json",
"{\"message\": \"Tree is throttled (no push)\"}"),
])
self.ExpectGit([
Git("status -s -uno", ""),
Git("status -s -b -uno", "## some_branch\n"),
Git("svn fetch", ""),
])
def RunAutoPush():
auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS)
self.assertRaises(Exception, RunAutoPush)
def testAutoRollExistingRoll(self):
self.ExpectReadURL([
URL("https://codereview.chromium.org/search",
"owner=author%40chromium.org&limit=30&closed=3&format=json",
("{\"results\": [{\"subject\": \"different\"},"
"{\"subject\": \"Update V8 to Version...\"}]}")),
])
result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
AUTO_PUSH_ARGS + ["-c", TEST_CONFIG[CHROMIUM]])
self.assertEquals(1, result)
# Snippet from the original DEPS file.
FAKE_DEPS = """
vars = {
"v8_revision": "123455",
}
deps = {
"src/v8":
(Var("googlecode_url") % "v8") + "/" + Var("v8_branch") + "@" +
Var("v8_revision"),
}
"""
def testAutoRollUpToDate(self):
self.ExpectReadURL([
URL("https://codereview.chromium.org/search",
"owner=author%40chromium.org&limit=30&closed=3&format=json",
("{\"results\": [{\"subject\": \"different\"}]}")),
URL("http://src.chromium.org/svn/trunk/src/DEPS",
self.FAKE_DEPS),
])
self.ExpectGit([
Git(("log -1 --format=%H --grep="
"\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
"svn/trunk"), "push_hash\n"),
Git("svn find-rev push_hash", "123455\n"),
])
result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
AUTO_PUSH_ARGS + ["-c", TEST_CONFIG[CHROMIUM]])
self.assertEquals(1, result)
def testAutoRoll(self):
TEST_CONFIG[CLUSTERFUZZ_API_KEY_FILE] = self.MakeEmptyTempFile()
TextToFile("fake key", TEST_CONFIG[CLUSTERFUZZ_API_KEY_FILE])
self.ExpectReadURL([
URL("https://codereview.chromium.org/search",
"owner=author%40chromium.org&limit=30&closed=3&format=json",
("{\"results\": [{\"subject\": \"different\"}]}")),
URL("http://src.chromium.org/svn/trunk/src/DEPS",
self.FAKE_DEPS),
])
self.ExpectGit([
Git(("log -1 --format=%H --grep="
"\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
"svn/trunk"), "push_hash\n"),
Git("svn find-rev push_hash", "123456\n"),
])
result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
AUTO_PUSH_ARGS + ["-c", TEST_CONFIG[CHROMIUM], "--roll"])
self.assertEquals(0, result)
def testMergeToBranch(self):
TEST_CONFIG[ALREADY_MERGING_SENTINEL_FILE] = self.MakeEmptyTempFile()
TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self.WriteFakeVersionFile(build=5)
os.environ["EDITOR"] = "vi"
extra_patch = self.MakeEmptyTempFile()
def VerifyPatch(patch):
return lambda: self.assertEquals(patch,
FileToText(TEST_CONFIG[TEMPORARY_PATCH_FILE]))
msg = """Version 3.22.5.1 (merged r12345, r23456, r34567, r45678, r56789)
Title4
Title2
Title3
Title1
Revert "Something"
BUG=123,234,345,456,567,v8:123
LOG=N
"""
def VerifySVNCommit():
commit = FileToText(TEST_CONFIG[COMMITMSG_FILE])
self.assertEquals(msg, commit)
version = FileToText(TEST_CONFIG[VERSION_FILE])
self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
self.assertTrue(re.search(r"#define PATCH_LEVEL\s+1", version))
self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
self.ExpectGit([
Git("status -s -uno", ""),
Git("status -s -b -uno", "## some_branch\n"),
Git("svn fetch", ""),
Git("branch", " branch1\n* branch2\n"),
Git("checkout -b %s svn/trunk" % TEST_CONFIG[BRANCHNAME], ""),
Git("log --format=%H --grep=\"Port r12345\" --reverse svn/bleeding_edge",
"hash1\nhash2"),
Git("svn find-rev hash1 svn/bleeding_edge", "45678"),
Git("log -1 --format=%s hash1", "Title1"),
Git("svn find-rev hash2 svn/bleeding_edge", "23456"),
Git("log -1 --format=%s hash2", "Title2"),
Git("log --format=%H --grep=\"Port r23456\" --reverse svn/bleeding_edge",
""),
Git("log --format=%H --grep=\"Port r34567\" --reverse svn/bleeding_edge",
"hash3"),
Git("svn find-rev hash3 svn/bleeding_edge", "56789"),
Git("log -1 --format=%s hash3", "Title3"),
Git("svn find-rev r12345 svn/bleeding_edge", "hash4"),
# Simulate svn being down which stops the script.
Git("svn find-rev r23456 svn/bleeding_edge", None),
# Restart script in the failing step.
Git("svn find-rev r12345 svn/bleeding_edge", "hash4"),
Git("svn find-rev r23456 svn/bleeding_edge", "hash2"),
Git("svn find-rev r34567 svn/bleeding_edge", "hash3"),
Git("svn find-rev r45678 svn/bleeding_edge", "hash1"),
Git("svn find-rev r56789 svn/bleeding_edge", "hash5"),
Git("log -1 --format=%s hash4", "Title4"),
Git("log -1 --format=%s hash2", "Title2"),
Git("log -1 --format=%s hash3", "Title3"),
Git("log -1 --format=%s hash1", "Title1"),
Git("log -1 --format=%s hash5", "Revert \"Something\""),
Git("log -1 hash4", "Title4\nBUG=123\nBUG=234"),
Git("log -1 hash2", "Title2\n BUG = v8:123,345"),
Git("log -1 hash3", "Title3\nLOG=n\nBUG=567, 456"),
Git("log -1 hash1", "Title1\nBUG="),
Git("log -1 hash5", "Revert \"Something\"\nBUG=none"),
Git("log -1 -p hash4", "patch4"),
Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
"", cb=VerifyPatch("patch4")),
Git("log -1 -p hash2", "patch2"),
Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
"", cb=VerifyPatch("patch2")),
Git("log -1 -p hash3", "patch3"),
Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
"", cb=VerifyPatch("patch3")),
Git("log -1 -p hash1", "patch1"),
Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
"", cb=VerifyPatch("patch1")),
Git("log -1 -p hash5", "patch5\n"),
Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
"", cb=VerifyPatch("patch5\n")),
Git("apply --index --reject \"%s\"" % extra_patch, ""),
Git("commit -aF \"%s\"" % TEST_CONFIG[COMMITMSG_FILE], ""),
Git("cl upload --send-mail -r \"[email protected]\"", ""),
Git("checkout -f %s" % TEST_CONFIG[BRANCHNAME], ""),
Git("cl presubmit", "Presubmit successfull\n"),
Git("cl dcommit -f --bypass-hooks", "Closing issue\n", cb=VerifySVNCommit),
Git("svn fetch", ""),
Git(("log -1 --format=%%H --grep=\"%s\" svn/trunk"
% msg.replace("\"", "\\\"")), "hash6"),
Git("svn find-rev hash6", "1324"),
Git(("copy -r 1324 https://v8.googlecode.com/svn/trunk "
"https://v8.googlecode.com/svn/tags/3.22.5.1 -m "
"\"Tagging version 3.22.5.1\""), ""),
Git("checkout -f some_branch", ""),
Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
])
self.ExpectReadline([
RL("Y"), # Automatically add corresponding ports (34567, 56789)?
RL("Y"), # Automatically increment patch level?
RL("[email protected]"), # V8 reviewer.
RL("LGTM"), # Enter LGTM for V8 CL.
])
# r12345 and r34567 are patches. r23456 (included) and r45678 are the MIPS
# ports of r12345. r56789 is the MIPS port of r34567.
args = ["-f", "-p", extra_patch, "--branch", "trunk", "12345", "23456",
"34567"]
# The first run of the script stops because of the svn being down.
self.assertRaises(GitFailedException,
lambda: MergeToBranch(TEST_CONFIG, self).Run(args))
# Test that state recovery after restarting the script works.
args += ["-s", "3"]
MergeToBranch(TEST_CONFIG, self).Run(args)
def testReleases(self):
tag_response_text = """
------------------------------------------------------------------------
r22631 | [email protected] | 2014-07-28 02:05:29 +0200 (Mon, 28 Jul 2014)
Changed paths:
A /tags/3.28.43 (from /trunk:22630)
Tagging version 3.28.43
------------------------------------------------------------------------
r22629 | [email protected] | 2014-07-26 05:09:29 +0200 (Sat, 26 Jul 2014)
Changed paths:
A /tags/3.28.41 (from /branches/bleeding_edge:22626)
Tagging version 3.28.41
------------------------------------------------------------------------
r22556 | [email protected] | 2014-07-23 13:31:59 +0200 (Wed, 23 Jul 2014)
Changed paths:
A /tags/3.27.34.7 (from /branches/3.27:22555)
Tagging version 3.27.34.7
------------------------------------------------------------------------
r22627 | [email protected] | 2014-07-26 01:39:15 +0200 (Sat, 26 Jul 2014)
Changed paths:
A /tags/3.28.40 (from /branches/bleeding_edge:22624)
Tagging version 3.28.40
------------------------------------------------------------------------
"""
json_output = self.MakeEmptyTempFile()
csv_output = self.MakeEmptyTempFile()
TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self.WriteFakeVersionFile()
TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
if not os.path.exists(TEST_CONFIG[CHROMIUM]):
os.makedirs(TEST_CONFIG[CHROMIUM])
def WriteDEPS(revision):
TextToFile("Line\n \"v8_revision\": \"%s\",\n line\n" % revision,
TEST_CONFIG[DEPS_FILE])
WriteDEPS(567)
def ResetVersion(minor, build, patch=0):
return lambda: self.WriteFakeVersionFile(minor=minor,
build=build,
patch=patch)
def ResetDEPS(revision):
return lambda: WriteDEPS(revision)
self.ExpectGit([
Git("status -s -uno", ""),
Git("status -s -b -uno", "## some_branch\n"),
Git("svn fetch", ""),
Git("branch", " branch1\n* branch2\n"),
Git("checkout -b %s" % TEST_CONFIG[BRANCHNAME], ""),
Git("branch -r", " svn/3.21\n svn/3.3\n"),
Git("reset --hard svn/3.3", ""),
Git("log --format=%H", "hash1\nhash2"),
Git("diff --name-only hash1 hash1^", ""),
Git("diff --name-only hash2 hash2^", TEST_CONFIG[VERSION_FILE]),
Git("checkout -f hash2 -- %s" % TEST_CONFIG[VERSION_FILE], "",
cb=ResetVersion(3, 1, 1)),
Git("log -1 --format=%B hash2",
"Version 3.3.1.1 (merged 12)\n\nReview URL: fake.com\n"),
Git("log -1 --format=%s hash2", ""),
Git("svn find-rev hash2", "234"),
Git("log -1 --format=%ci hash2", "18:15"),
Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
cb=ResetVersion(22, 5)),
Git("reset --hard svn/3.21", ""),
Git("log --format=%H", "hash3\nhash4\nhash5\n"),
Git("diff --name-only hash3 hash3^", TEST_CONFIG[VERSION_FILE]),
Git("checkout -f hash3 -- %s" % TEST_CONFIG[VERSION_FILE], "",
cb=ResetVersion(21, 2)),
Git("log -1 --format=%B hash3", ""),
Git("log -1 --format=%s hash3", ""),
Git("svn find-rev hash3", "123"),
Git("log -1 --format=%ci hash3", "03:15"),
Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
cb=ResetVersion(22, 5)),
Git("reset --hard svn/trunk", ""),
Git("log --format=%H", "hash6\n"),
Git("diff --name-only hash6 hash6^", TEST_CONFIG[VERSION_FILE]),
Git("checkout -f hash6 -- %s" % TEST_CONFIG[VERSION_FILE], "",
cb=ResetVersion(22, 3)),
Git("log -1 --format=%B hash6", ""),
Git("log -1 --format=%s hash6", ""),
Git("svn find-rev hash6", "345"),
Git("log -1 --format=%ci hash6", ""),
Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
cb=ResetVersion(22, 5)),
Git("reset --hard svn/bleeding_edge", ""),
Git("log https://v8.googlecode.com/svn/tags -v --limit 20",
tag_response_text),
Git("svn find-rev r22626", "hash_22626"),
Git("svn find-rev hash_22626", "22626"),
Git("log -1 --format=%ci hash_22626", "01:23"),
Git("svn find-rev r22624", "hash_22624"),
Git("svn find-rev hash_22624", "22624"),
Git("log -1 --format=%ci hash_22624", "02:34"),
Git("status -s -uno", ""),
Git("checkout -f master", ""),
Git("pull", ""),
Git("checkout -b %s" % TEST_CONFIG[BRANCHNAME], ""),
Git("log --format=%H --grep=\"V8\"", "c_hash1\nc_hash2\n"),
Git("diff --name-only c_hash1 c_hash1^", ""),
Git("diff --name-only c_hash2 c_hash2^", TEST_CONFIG[DEPS_FILE]),
Git("checkout -f c_hash2 -- %s" % TEST_CONFIG[DEPS_FILE], "",
cb=ResetDEPS(345)),
Git("svn find-rev c_hash2", "4567"),
Git("checkout -f HEAD -- %s" % TEST_CONFIG[DEPS_FILE], "",
cb=ResetDEPS(567)),
Git("branch -r", " weird/123\n branch-heads/7\n"),
Git("checkout -f branch-heads/7 -- %s" % TEST_CONFIG[DEPS_FILE], "",
cb=ResetDEPS(345)),
Git("checkout -f HEAD -- %s" % TEST_CONFIG[DEPS_FILE], "",
cb=ResetDEPS(567)),
Git("checkout -f master", ""),
Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
Git("checkout -f some_branch", ""),
Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
])
args = ["-c", TEST_CONFIG[CHROMIUM],
"--json", json_output,
"--csv", csv_output,
"--max-releases", "1"]
Releases(TEST_CONFIG, self).Run(args)
# Check expected output.
csv = ("3.28.41,bleeding_edge,22626,,\r\n"
"3.28.40,bleeding_edge,22624,,\r\n"
"3.22.3,trunk,345,4567,\r\n"
"3.21.2,3.21,123,,\r\n"
"3.3.1.1,3.3,234,,12\r\n")
self.assertEquals(csv, FileToText(csv_output))
expected_json = [
{"bleeding_edge": "22626", "patches_merged": "", "version": "3.28.41",
"chromium_revision": "", "branch": "bleeding_edge", "revision": "22626",
"review_link": "", "date": "01:23", "chromium_branch": "",
"revision_link": "https://code.google.com/p/v8/source/detail?r=22626"},
{"bleeding_edge": "22624", "patches_merged": "", "version": "3.28.40",
"chromium_revision": "", "branch": "bleeding_edge", "revision": "22624",
"review_link": "", "date": "02:34", "chromium_branch": "",
"revision_link": "https://code.google.com/p/v8/source/detail?r=22624"},
{"bleeding_edge": "", "patches_merged": "", "version": "3.22.3",
"chromium_revision": "4567", "branch": "trunk", "revision": "345",
"review_link": "", "date": "", "chromium_branch": "7",
"revision_link": "https://code.google.com/p/v8/source/detail?r=345"},
{"patches_merged": "", "bleeding_edge": "", "version": "3.21.2",
"chromium_revision": "", "branch": "3.21", "revision": "123",
"review_link": "", "date": "03:15", "chromium_branch": "",
"revision_link": "https://code.google.com/p/v8/source/detail?r=123"},
{"patches_merged": "12", "bleeding_edge": "", "version": "3.3.1.1",
"chromium_revision": "", "branch": "3.3", "revision": "234",
"review_link": "fake.com", "date": "18:15", "chromium_branch": "",
"revision_link": "https://code.google.com/p/v8/source/detail?r=234"},
]
self.assertEquals(expected_json, json.loads(FileToText(json_output)))
def testBumpUpVersion(self):
TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self.WriteFakeVersionFile()
def ResetVersion(minor, build, patch=0):
return lambda: self.WriteFakeVersionFile(minor=minor,
build=build,
patch=patch)
self.ExpectGit([
Git("status -s -uno", ""),
Git("checkout -f bleeding_edge", "", cb=ResetVersion(11, 4)),
Git("pull", ""),
Git("branch", ""),
Git("checkout -f bleeding_edge", ""),
Git("log -1 --format=%H", "latest_hash"),
Git("diff --name-only latest_hash latest_hash^", ""),
Git("checkout -f bleeding_edge", ""),
Git("log --format=%H --grep=\"^git-svn-id: [^@]*@12345 [A-Za-z0-9-]*$\"",
"lkgr_hash"),
Git("checkout -b auto-bump-up-version lkgr_hash", ""),
Git("checkout -f bleeding_edge", ""),
Git("branch", ""),
Git("diff --name-only lkgr_hash lkgr_hash^", ""),
Git("checkout -f master", "", cb=ResetVersion(11, 5)),
Git("pull", ""),
Git("checkout -b auto-bump-up-version bleeding_edge", "",
cb=ResetVersion(11, 4)),
Git("commit -am \"[Auto-roll] Bump up version to 3.11.6.0\n\n"
"[email protected]\"", ""),
Git("cl upload --send-mail --email \"[email protected]\" -f "
"--bypass-hooks", ""),
Git("cl dcommit -f --bypass-hooks", ""),
Git("checkout -f bleeding_edge", ""),
Git("branch", "auto-bump-up-version\n* bleeding_edge"),
Git("branch -D auto-bump-up-version", ""),
])
self.ExpectReadURL([
URL("https://v8-status.appspot.com/lkgr", "12345"),
URL("https://v8-status.appspot.com/current?format=json",
"{\"message\": \"Tree is open\"}"),
])
BumpUpVersion(TEST_CONFIG, self).Run(["-a", "[email protected]"])
def testAutoTag(self):
TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self.WriteFakeVersionFile()
def ResetVersion(minor, build, patch=0):
return lambda: self.WriteFakeVersionFile(minor=minor,
build=build,
patch=patch)
self.ExpectGit([
Git("status -s -uno", ""),
Git("status -s -b -uno", "## some_branch\n"),
Git("svn fetch", ""),
Git("branch", " branch1\n* branch2\n"),
Git("checkout -f master", ""),
Git("svn rebase", ""),
Git("checkout -b %s" % TEST_CONFIG[BRANCHNAME], "",
cb=ResetVersion(4, 5)),
Git("branch -r", "svn/tags/3.4.2\nsvn/tags/3.2.1.0\nsvn/branches/3.4"),
Git("log --format=%H --grep=\"\\[Auto\\-roll\\] Bump up version to\"",
"hash125\nhash118\nhash111\nhash101"),
Git("checkout -f hash125 -- %s" % TEST_CONFIG[VERSION_FILE], "",
cb=ResetVersion(4, 4)),
Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
cb=ResetVersion(4, 5)),
Git("checkout -f hash118 -- %s" % TEST_CONFIG[VERSION_FILE], "",
cb=ResetVersion(4, 3)),
Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
cb=ResetVersion(4, 5)),
Git("checkout -f hash111 -- %s" % TEST_CONFIG[VERSION_FILE], "",
cb=ResetVersion(4, 2)),
Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
cb=ResetVersion(4, 5)),
Git("svn find-rev hash118", "118"),
Git("svn find-rev hash125", "125"),
Git("svn find-rev r123", "hash123"),
Git("log -1 --format=%at hash123", "1"),
Git("reset --hard hash123", ""),
Git("svn tag 3.4.3 -m \"Tagging version 3.4.3\"", ""),
Git("checkout -f some_branch", ""),
Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
])
self.ExpectReadURL([
URL("https://v8-status.appspot.com/revisions?format=json",
"[{\"revision\": \"126\", \"status\": true},"
"{\"revision\": \"123\", \"status\": true},"
"{\"revision\": \"112\", \"status\": true}]"),
])
AutoTag(TEST_CONFIG, self).Run(["-a", "[email protected]"])
# Test that we bail out if the last change was a version change.
def testBumpUpVersionBailout1(self):
TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self._state["latest"] = "latest_hash"
self.ExpectGit([
Git("diff --name-only latest_hash latest_hash^",
TEST_CONFIG[VERSION_FILE]),
])
self.assertEquals(1,
self.RunStep(BumpUpVersion, LastChangeBailout, ["--dry_run"]))
# Test that we bail out if the lkgr was a version change.
def testBumpUpVersionBailout2(self):
TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self._state["lkgr"] = "lkgr_hash"
self.ExpectGit([
Git("diff --name-only lkgr_hash lkgr_hash^", TEST_CONFIG[VERSION_FILE]),
])
self.assertEquals(1,
self.RunStep(BumpUpVersion, LKGRVersionUpToDateBailout, ["--dry_run"]))
# Test that we bail out if the last version is already newer than the lkgr's
# version.
def testBumpUpVersionBailout3(self):
TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
self._state["lkgr"] = "lkgr_hash"
self._state["lkgr_version"] = "3.22.4.0"
self._state["latest_version"] = "3.22.5.0"
self.ExpectGit([
Git("diff --name-only lkgr_hash lkgr_hash^", ""),
])
self.assertEquals(1,
self.RunStep(BumpUpVersion, LKGRVersionUpToDateBailout, ["--dry_run"]))
class SystemTest(unittest.TestCase):
def testReload(self):
step = MakeStep(step_class=PrepareChangeLog, number=0, state={}, config={},
side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER)
body = step.Reload(
"""------------------------------------------------------------------------
r17997 | [email protected] | 2013-11-22 11:04:04 +0100 (...) | 6 lines
Prepare push to trunk. Now working on version 3.23.11.
[email protected]
Review URL: https://codereview.chromium.org/83173002
------------------------------------------------------------------------""")
self.assertEquals(
"""Prepare push to trunk. Now working on version 3.23.11.
[email protected]
Committed: https://code.google.com/p/v8/source/detail?r=17997""", body)
|
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_custom_language
short_description: Configure custom languages in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and custom_language category.
      Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_custom_language:
description:
- Configure custom languages.
default: null
type: dict
suboptions:
comments:
description:
- Comment.
type: str
filename:
description:
- Custom language file path.
type: str
name:
description:
- Name.
required: true
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure custom languages.
fortios_system_custom_language:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_custom_language:
comments: "<your_own_value>"
filename: "<your_own_value>"
name: "default_name_5"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_custom_language_data(json):
option_list = ['comments', 'filename', 'name']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
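# Added illustration (hedged; not part of the original module): a sketch of how
# the two helpers above shape the playbook parameters before they reach the
# FortiOS API. The values are made up for demonstration only and this function
# is never called anywhere in the module.
def _example_payload():
    raw = {'comments': 'demo', 'filename': 'lang.file', 'name': 'fr',
           'not_an_option': None}
    # Keeps only the keys listed in option_list that have non-None values.
    filtered = filter_system_custom_language_data(raw)
    # Converts underscore-style keys to the hyphenated names FortiOS expects,
    # e.g. 'ssl_verify' -> 'ssl-verify'; nested dicts are handled recursively.
    return underscore_to_hyphen({'custom-language': filtered})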
def system_custom_language(data, fos):
vdom = data['vdom']
state = data['state']
system_custom_language_data = data['system_custom_language']
filtered_data = underscore_to_hyphen(filter_system_custom_language_data(system_custom_language_data))
if state == "present":
return fos.set('system',
'custom-language',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system',
'custom-language',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
if data['system_custom_language']:
resp = system_custom_language(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"system_custom_language": {
"required": False, "type": "dict", "default": None,
"options": {
"comments": {"required": False, "type": "str"},
"filename": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
|
"""
This module provides Trainer classes that, given a set of flags, create,
initialize and train a model. These classes use Runner objects to handle
multigpu/singlegpu training.
"""
import six
import math
import time
import os
import numpy as np
import tensorflow as tf
from cleverhans.utils_tf import batch_indices
from cleverhans.utils_mnist import data_mnist
import utils_cifar as cifar_input
import utils_svhn as svhn_input
from utils import preprocess_batch
from make_model import make_model
from evaluator import Evaluator
from cleverhans.utils_tf import model_loss
import logging
from collections import OrderedDict
from model import clone_variable
from evaluator import create_adv_by_name
class TrainManager(object):
"""
The base trainer class. Given an object of `hparams`, a trainer
creates and initializes a model. After initialization, the method
`model_train` can be used to train the model.
"""
def __init__(self, hparams):
"""
:param hparams: An instance of collections.namedtuple specifying the
model type and training configs. The parameters are
documented in `run_multigpu.py`.
"""
self.hparams = hparams
self.batch_size = hparams.batch_size
self.evaluate = None
self.step_num = 0
self.report = None
self._init_session()
self._init_data()
self._init_inputs()
self._init_model()
self._create_train_graph()
self._init_eval()
self.runner = None
def _init_session(self):
# Set TF random seed to improve reproducibility
self.rng = np.random.RandomState([2017, 8, 30])
tf.set_random_seed(1234)
# Create TF session
self.sess = tf.Session(
config=tf.ConfigProto(allow_soft_placement=True))
# Object used to keep track of (and return) key accuracies
if self.hparams.save:
self.writer = tf.summary.FileWriter(self.hparams.save_dir,
flush_secs=10)
else:
self.writer = None
def _init_data(self):
hparams = self.hparams
batch_size = hparams.batch_size
if hparams.dataset == 'mnist':
# Get MNIST test data
X_train, Y_train, X_test, Y_test = data_mnist(
train_start=hparams.train_start,
train_end=hparams.train_end,
test_start=hparams.test_start,
test_end=hparams.test_end)
input_shape = (batch_size, 28, 28, 1)
preproc_func = None
elif hparams.dataset == 'cifar10':
X_train, Y_train, X_test, Y_test = cifar_input.read_CIFAR10(
os.path.join(hparams.data_path, hparams.dataset))
input_shape = (batch_size, 32, 32, 3)
preproc_func = cifar_input.cifar_tf_preprocess
elif hparams.dataset == 'svhn':
X_train, Y_train, X_test, Y_test = svhn_input.read_SVHN(
os.path.join(hparams.data_path, hparams.dataset))
input_shape = (batch_size, 32, 32, 3)
            preproc_func = svhn_input.svhn_tf_preprocess
        else:
            raise ValueError("Unknown dataset: %s" % hparams.dataset)
# Use label smoothing
assert Y_train.shape[1] == 10.
label_smooth = .1
Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)
self.X_train = X_train
self.Y_train = Y_train
self.X_test = X_test
self.Y_test = Y_test
self.data = (X_train, Y_train, X_test, Y_test)
self.input_shape = input_shape
self.preproc_func = preproc_func
def _init_inputs(self):
preproc_func = self.preproc_func
input_shape = self.input_shape
# Define input TF placeholder
with tf.device('/gpu:0'):
x_pre = tf.placeholder(tf.float32, shape=input_shape, name='x')
x = preprocess_batch(x_pre, preproc_func)
y = tf.placeholder(tf.float32, shape=(self.batch_size, 10),
name='y')
self.g0_inputs = {'x_pre': x_pre, 'x': x, 'y': y}
def _init_model(self):
flags = self.hparams.__dict__
# Define TF model graph
model = make_model(input_shape=self.input_shape, **flags)
model.set_device(None)
self.model = model
def _init_eval(self):
logging.info("Init eval")
x_pre, x, y = [self.g0_inputs[k] for k in ['x_pre', 'x', 'y']]
self.model.set_device('/gpu:0')
self.evaluate = Evaluator(self.sess, self.model, self.batch_size,
x_pre, x, y,
self.data,
self.writer,
self.hparams)
def eval(self, **kwargs):
if self.evaluate is not None:
self.report = self.evaluate.eval_multi()
def finish(self):
if self.writer:
self.writer.close()
return self.report
def _update_learning_params(self):
model = self.model
hparams = self.hparams
fd = self.runner.feed_dict
step_num = self.step_num
if hparams.model_type == 'resnet_tf':
if step_num < hparams.lrn_step:
lrn_rate = hparams.mom_lrn
elif step_num < 30000:
lrn_rate = hparams.mom_lrn/10
elif step_num < 35000:
lrn_rate = hparams.mom_lrn/100
else:
lrn_rate = hparams.mom_lrn/1000
fd[model.lrn_rate] = lrn_rate
def _build_train_op(self, predictions, y, predictions_adv):
model = self.model
hparams = self.hparams
if hparams.model_type == 'resnet_tf':
build_train_op = model.build_cost
else:
build_train_op = model_loss
# Define loss
with tf.variable_scope('train_loss'):
if predictions_adv is not None:
if hparams.only_adv_train:
loss = build_train_op(y, predictions_adv)
else:
loss = build_train_op(y, predictions)
adv_loss = build_train_op(y, predictions_adv)
loss = (loss + adv_loss) / 2
else:
loss = build_train_op(y, predictions)
if hparams.model_type == 'resnet_tf':
train_step = model.build_train_op_from_cost(loss)
else:
optim = tf.train.AdamOptimizer(learning_rate=hparams.adam_lrn)
train_step = optim.minimize(loss)
return train_step
def model_train(self):
"""
Train a TF graph
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param hparams.save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
"""
assert self.runner is not None, (
"""Runner is not initialized. TrainerSingleGPU or TrainerMultiGPU
instantiate a Runner object at initialization time.""")
hparams = self.hparams
batch_size = hparams.batch_size
nb_epochs = hparams.nb_epochs
train_dir = hparams.save_dir
filename = 'model.ckpt'
X_train = self.X_train
Y_train = self.Y_train
sess = self.sess
with sess.as_default():
X_batch = X_train[:batch_size]
Y_batch = Y_train[:batch_size]
self._init_tf(X_batch, Y_batch)
for epoch in six.moves.xrange(nb_epochs):
logging.info("Epoch " + str(epoch))
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / batch_size))
assert nb_batches * batch_size >= len(X_train)
# Indices to shuffle training set
index_shuf = list(range(len(X_train)))
self.rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
# Compute batch start and end indices
start, end = batch_indices(
batch, len(X_train), batch_size)
# Perform one training step
self._update_learning_params()
# Train step
X_batch = X_train[index_shuf[start:end]]
Y_batch = Y_train[index_shuf[start:end]]
self._run({'x_pre': X_batch, 'y': Y_batch})
self._sync_params()
# Clean up the queue
while not self.runner.is_finished():
self._run()
self._sync_params(forced=True)
assert end >= len(X_train), (
'Not all training examples are used.')
cur = time.time()
logging.info("\tEpoch took " + str(cur - prev) + " seconds")
prev = cur
self.eval()
# Save model
cond = ((epoch+1) % hparams.save_steps == 0
                        or epoch == nb_epochs - 1)
if hparams.save and cond:
save_path = os.path.join(train_dir, filename)
saver = tf.train.Saver()
saver.save(sess, save_path)
logging.info("Model saved at: " + str(save_path))
logging.info("Completed model training.")
def _init_tf(self, X_batch, Y_batch):
x_pre = self.g0_inputs['x_pre']
y = self.g0_inputs['y']
fd = {x_pre: X_batch, y: Y_batch}
init_op = tf.global_variables_initializer()
self.sess.run(init_op, feed_dict=fd)
def _run(self, X_batch=None):
last_fvals = self.runner.run(X_batch)
self.step_num += 1
return last_fvals
def _sync_params(self, forced=False):
        raise NotImplementedError('sync_params should be implemented.')
def _create_train_graph(self):
"""
The evaluation graph must be initialized after the train graph is
        fully initialized; otherwise some of the variables will be created
        as non-trainable.
"""
        assert self.evaluate is None, ("""Evaluation graph should be initialized
after the train graph""")
class TrainerMultiGPU(TrainManager):
"""
This class uses a `RunnerMultiGPU` object to train a model on multiple
GPUs. It mainly overrides the `_create_train_graph` to create a graph
for adversarial training on multiple GPUs.
"""
def __init__(self, *args, **kwargs):
super(TrainerMultiGPU, self).__init__(*args, **kwargs)
from runner import RunnerMultiGPU
self.runner = RunnerMultiGPU(self.inputs, self.outputs, sess=self.sess)
def clone_g0_inputs_on_ngpus(self, inputs, outputs, g0_inputs):
"""
Clone variables unused by the attack on all GPUs. Specifically, the
ground-truth label, y, has to be preserved until the training step.
:param inputs: A list of dictionaries as the inputs to each step.
:param outputs: A list of dictionaries as the outputs of each step.
:param g0_inputs: Initial variables to be cloned.
:return: Updated inputs and outputs.
"""
assert len(inputs) == len(outputs), (
'Inputs and outputs should have the same number of elements.')
inputs[0].update(g0_inputs)
outputs[0].update(g0_inputs)
# Copy g0_inputs forward
for i in range(1, len(inputs)):
# Create the graph for i'th step of attack
device_name = inputs[i]['x'].device
with tf.device(device_name):
with tf.variable_scope('step%d' % i):
                    for k, v in six.iteritems(g0_inputs):
if k not in inputs[i]:
v_copy = clone_variable(k, v)
inputs[i][k] = v_copy
outputs[i][k] = v_copy
return inputs, outputs
def _create_train_graph(self):
super(TrainerMultiGPU, self)._create_train_graph()
assert '_multigpu' in self.hparams.attack_type_train
hparams = self.hparams
model = self.model
sess = self.sess
# Create trainable variables on last gpu.
        # Variables are set to trainable or non-trainable the first time they
        # are created. This caused a bug when the last gpu is used both for attack
# generation and training. With this bug the result of naive training
# was affected by the length of the unused adversarial generation
# graph.
device_name = '/gpu:%d' % (hparams.ngpu-1)
model.set_device(device_name)
with tf.device(device_name):
x = clone_variable('x', self.g0_inputs['x'])
model.set_training(training=True)
preds = model.get_probs(x)
# Generates steps on gpus
model.set_training(training=False)
logging.info("Initializing train attack %s" %
hparams.attack_type_train)
inputs, outputs = create_adv_by_name(
model, self.g0_inputs['x'], hparams.attack_type_train,
sess, y=self.g0_inputs['y'], nb_iter=hparams.attack_nb_iter_train,
dataset=hparams.dataset, ngpu=hparams.ngpu)
inputs, outputs = self.clone_g0_inputs_on_ngpus(
inputs, outputs, self.g0_inputs)
# Train step on last gpu
device_name = '/gpu:%d' % (hparams.ngpu-1)
model.set_device(device_name)
with tf.device(device_name):
with tf.variable_scope('last'):
inputs += [OrderedDict()]
                for k, v in six.iteritems(outputs[-1]):
v_copy = clone_variable(k, v)
inputs[-1][k] = v_copy
x = inputs[-1]['x']
adv_x = inputs[-1]['adv_x']
y = inputs[-1]['y']
if not hparams.adv_train:
model.set_training(training=True)
preds = model.get_probs(x)
preds_adv = None
elif not hparams.only_adv_train:
model.set_training(training=True)
preds = model.get_probs(x)
model.set_training(training=True)
preds_adv = model.get_probs(adv_x)
else:
preds = None
model.set_training(training=True)
preds_adv = model.get_probs(adv_x)
train_fetches = self._build_train_op(preds, y, preds_adv)
outputs += [{'fetches': train_fetches}]
# Create the sync operation
device_name = '/gpu:%d' % (hparams.ngpu-1)
model.set_device(device_name)
with tf.device(device_name):
sync_ops = model.create_sync_ops(host_device=device_name)
self.inputs = inputs
self.outputs = outputs
self.sync_ops = sync_ops
def _sync_params(self, forced=False):
if forced or (self.step_num % self.hparams.sync_step == 0):
self.sess.run(self.sync_ops)
class TrainerSingleGPU(TrainManager):
"""
This class uses a `RunnerSingleGPU` object to train a model on a single
GPU.
"""
def __init__(self, *args, **kwargs):
super(TrainerSingleGPU, self).__init__(*args, **kwargs)
from runner import RunnerSingleGPU
self.runner = RunnerSingleGPU(self.inputs, self.outputs,
sess=self.sess)
def _create_train_graph(self):
super(TrainerSingleGPU, self)._create_train_graph()
self.model.set_device('/gpu:0')
hparams = self.hparams
model = self.model
x = self.g0_inputs['x']
y = self.g0_inputs['y']
sess = self.sess
# Create trainable variables.
model.set_training(training=True)
preds = model.get_probs(x)
if not hparams.adv_train:
logging.info("Naive training")
model.set_training(training=True)
preds = model.get_probs(x)
preds_adv = None
else:
logging.info("Adversarial training")
logging.info("Initializing train attack %s" %
hparams.attack_type_train)
model.set_training(training=False)
adv_x = create_adv_by_name(
model, x, hparams.attack_type_train, sess,
y=y, nb_iter=hparams.attack_nb_iter_train,
dataset=hparams.dataset)
if hparams.only_adv_train:
preds = None
model.set_training(training=True)
preds_adv = model.get_probs(adv_x)
else:
model.set_training(training=True)
preds = model.get_probs(x)
model.set_training(training=True)
preds_adv = model.get_probs(adv_x)
train_fetches = self._build_train_op(preds, y, preds_adv)
self.inputs = [self.g0_inputs]
self.outputs = [train_fetches]
def _sync_params(self, forced=False):
"""
Nothing to sync on single GPU.
"""
return True
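# Added usage sketch (hedged): the exact flag set is defined in
# `run_multigpu.py`; `parse_flags` below is a hypothetical stand-in for
# however those flags are parsed into an `hparams` namedtuple.
#
#     hparams = parse_flags()              # batch_size, dataset, save, ...
#     trainer = TrainerSingleGPU(hparams)  # builds session, data and graphs
#     trainer.model_train()                # runs the training loop
#     report = trainer.finish()            # closes the writer, returns report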
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
latex.py:
Classes to read and write LaTeX tables
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from . import core
latexdicts = {'AA': {'tabletype': 'table',
'header_start': r'\hline \hline', 'header_end': r'\hline',
'data_end': r'\hline'},
'doublelines': {'tabletype': 'table',
'header_start': r'\hline \hline', 'header_end': r'\hline\hline',
'data_end': r'\hline\hline'},
'template': {'tabletype': 'tabletype', 'caption': 'caption',
'tablealign': 'tablealign',
'col_align': 'col_align', 'preamble': 'preamble',
'header_start': 'header_start',
'header_end': 'header_end', 'data_start': 'data_start',
'data_end': 'data_end', 'tablefoot': 'tablefoot',
'units': {'col1': 'unit of col1', 'col2': 'unit of col2'}}
}
RE_COMMENT = re.compile(r'(?<!\\)%') # % character but not \%
def add_dictval_to_list(adict, key, alist):
'''
Add a value from a dictionary to a list
Parameters
----------
adict : dictionary
key : hashable
alist : list
List where value should be added
'''
if key in adict:
if isinstance(adict[key], str):
alist.append(adict[key])
else:
alist.extend(adict[key])
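# Added illustration (hedged): with adict = {'preamble': [r'\small', r'\centering']}
# the two strings are extended onto ``alist`` one by one, whereas a plain string
# value such as adict = {'caption': 'My table'} is appended as a single element.
# A missing key leaves ``alist`` untouched.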
def find_latex_line(lines, latex):
'''
    Find the first line which matches a pattern
Parameters
----------
lines : list
List of strings
latex : str
Search pattern
Returns
-------
line_num : int, None
        Line number. Returns None if no match was found
'''
re_string = re.compile(latex.replace('\\', '\\\\'))
for i, line in enumerate(lines):
if re_string.match(line):
return i
else:
return None
class LatexInputter(core.BaseInputter):
def process_lines(self, lines):
return [lin.strip() for lin in lines]
class LatexSplitter(core.BaseSplitter):
    '''Split LaTeX table data. Default delimiter is `&`.
'''
delimiter = '&'
def __call__(self, lines):
last_line = RE_COMMENT.split(lines[-1])[0].strip()
if not last_line.endswith(r'\\'):
lines[-1] = last_line + r'\\'
return super().__call__(lines)
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. Also remove
\\ at end of line"""
line = RE_COMMENT.split(line)[0]
line = line.strip()
if line.endswith(r'\\'):
line = line.rstrip(r'\\')
else:
raise core.InconsistentTableError(r'Lines in LaTeX table have to end with \\')
return line
def process_val(self, val):
"""Remove whitespace and {} at the beginning or end of value."""
val = val.strip()
if val and (val[0] == '{') and (val[-1] == '}'):
val = val[1:-1]
return val
def join(self, vals):
'''Join values together and add a few extra spaces for readability'''
delimiter = ' ' + self.delimiter + ' '
return delimiter.join(x.strip() for x in vals) + r' \\'
class LatexHeader(core.BaseHeader):
'''Class to read the header of Latex Tables'''
header_start = r'\begin{tabular}'
splitter_class = LatexSplitter
def start_line(self, lines):
line = find_latex_line(lines, self.header_start)
if line is not None:
return line + 1
else:
return None
def _get_units(self):
units = {}
col_units = [col.info.unit for col in self.cols]
for name, unit in zip(self.colnames, col_units):
if unit:
try:
units[name] = unit.to_string(format='latex_inline')
except AttributeError:
units[name] = unit
return units
def write(self, lines):
if 'col_align' not in self.latex:
self.latex['col_align'] = len(self.cols) * 'c'
if 'tablealign' in self.latex:
align = '[' + self.latex['tablealign'] + ']'
else:
align = ''
if self.latex['tabletype'] is not None:
lines.append(r'\begin{' + self.latex['tabletype'] + r'}' + align)
add_dictval_to_list(self.latex, 'preamble', lines)
if 'caption' in self.latex:
lines.append(r'\caption{' + self.latex['caption'] + '}')
lines.append(self.header_start + r'{' + self.latex['col_align'] + r'}')
add_dictval_to_list(self.latex, 'header_start', lines)
lines.append(self.splitter.join(self.colnames))
units = self._get_units()
if 'units' in self.latex:
units.update(self.latex['units'])
if units:
lines.append(self.splitter.join([units.get(name, ' ') for name in self.colnames]))
add_dictval_to_list(self.latex, 'header_end', lines)
class LatexData(core.BaseData):
'''Class to read the data in LaTeX tables'''
data_start = None
data_end = r'\end{tabular}'
splitter_class = LatexSplitter
def start_line(self, lines):
if self.data_start:
return find_latex_line(lines, self.data_start)
else:
start = self.header.start_line(lines)
if start is None:
raise core.InconsistentTableError(r'Could not find table start')
return start + 1
def end_line(self, lines):
if self.data_end:
return find_latex_line(lines, self.data_end)
else:
return None
def write(self, lines):
add_dictval_to_list(self.latex, 'data_start', lines)
core.BaseData.write(self, lines)
add_dictval_to_list(self.latex, 'data_end', lines)
lines.append(self.data_end)
add_dictval_to_list(self.latex, 'tablefoot', lines)
if self.latex['tabletype'] is not None:
lines.append(r'\end{' + self.latex['tabletype'] + '}')
class Latex(core.BaseReader):
r'''LaTeX format table.
This class implements some LaTeX specific commands. Its main
purpose is to write out a table in a form that LaTeX can compile. It
is beyond the scope of this class to implement every possible LaTeX
    command; instead the focus is to generate syntactically valid
    LaTeX tables.
This class can also read simple LaTeX tables (one line per table
row, no ``\multicolumn`` or similar constructs), specifically, it
can read the tables that it writes.
    When reading a LaTeX table, the following keywords are accepted:
**ignore_latex_commands** :
Lines starting with these LaTeX commands will be treated as comments (i.e. ignored).
    When writing a LaTeX table, some keywords can customize the
format. Care has to be taken here, because python interprets ``\\``
in a string as an escape character. In order to pass this to the
output either format your strings as raw strings with the ``r``
specifier or use a double ``\\\\``.
Examples::
caption = r'My table \label{mytable}'
caption = 'My table \\\\label{mytable}'
**latexdict** : Dictionary of extra parameters for the LaTeX output
* tabletype : used for first and last line of table.
The default is ``\\begin{table}``. The following would generate a table,
which spans the whole page in a two-column document::
ascii.write(data, sys.stdout, Writer = ascii.Latex,
latexdict = {'tabletype': 'table*'})
If ``None``, the table environment will be dropped, keeping only
the ``tabular`` environment.
* tablealign : positioning of table in text.
The default is not to specify a position preference in the text.
If, e.g. the alignment is ``ht``, then the LaTeX will be ``\\begin{table}[ht]``.
* col_align : Alignment of columns
If not present all columns will be centered.
* caption : Table caption (string or list of strings)
This will appear above the table as it is the standard in
many scientific publications. If you prefer a caption below
the table, just write the full LaTeX command as
``latexdict['tablefoot'] = r'\caption{My table}'``
* preamble, header_start, header_end, data_start, data_end, tablefoot: Pure LaTeX
Each one can be a string or a list of strings. These strings
will be inserted into the table without any further
processing. See the examples below.
* units : dictionary of strings
Keys in this dictionary should be names of columns. If
present, a line in the LaTeX table directly below the column
names is added, which contains the values of the
dictionary. Example::
from astropy.io import ascii
data = {'name': ['bike', 'car'], 'mass': [75,1200], 'speed': [10, 130]}
ascii.write(data, Writer=ascii.Latex,
latexdict = {'units': {'mass': 'kg', 'speed': 'km/h'}})
If the column has no entry in the ``units`` dictionary, it defaults
to the **unit** attribute of the column. If this attribute is not
specified (i.e. it is None), the unit will be written as ``' '``.
Run the following code to see where each element of the
dictionary is inserted in the LaTeX table::
from astropy.io import ascii
data = {'cola': [1,2], 'colb': [3,4]}
ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['template'])
Some table styles are predefined in the dictionary
    ``ascii.latex.latexdicts``. The following generates a table in the
    style preferred by A&A and some other journals::
ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['AA'])
As an example, this generates a table, which spans all columns
and is centered on the page::
ascii.write(data, Writer=ascii.Latex, col_align='|lr|',
latexdict={'preamble': r'\begin{center}',
'tablefoot': r'\end{center}',
'tabletype': 'table*'})
**caption** : Set table caption
Shorthand for::
latexdict['caption'] = caption
**col_align** : Set the column alignment.
If not present this will be auto-generated for centered
columns. Shorthand for::
latexdict['col_align'] = col_align
'''
_format_name = 'latex'
_io_registry_format_aliases = ['latex']
_io_registry_suffix = '.tex'
_description = 'LaTeX table'
header_class = LatexHeader
data_class = LatexData
inputter_class = LatexInputter
# Strictly speaking latex only supports 1-d columns so this should inherit
# the base max_ndim = 1. But as reported in #11695 this causes a strange
# problem with Jupyter notebook, which displays a table by first calling
# _repr_latex_. For a multidimensional table this issues a stack traceback
# before moving on to _repr_html_. Here we prioritize fixing the issue with
# Jupyter displaying a Table with multidimensional columns.
max_ndim = None
def __init__(self,
ignore_latex_commands=['hline', 'vspace', 'tableline',
'toprule', 'midrule', 'bottomrule'],
latexdict={}, caption='', col_align=None):
super().__init__()
self.latex = {}
# The latex dict drives the format of the table and needs to be shared
# with data and header
self.header.latex = self.latex
self.data.latex = self.latex
self.latex['tabletype'] = 'table'
self.latex.update(latexdict)
if caption:
self.latex['caption'] = caption
if col_align:
self.latex['col_align'] = col_align
self.ignore_latex_commands = ignore_latex_commands
self.header.comment = '%|' + '|'.join(
[r'\\' + command for command in self.ignore_latex_commands])
self.data.comment = self.header.comment
def write(self, table=None):
self.header.start_line = None
self.data.start_line = None
return core.BaseReader.write(self, table=table)
class AASTexHeaderSplitter(LatexSplitter):
r'''Extract column names from a `deluxetable`_.
This splitter expects the following LaTeX code **in a single line**:
\tablehead{\colhead{col1} & ... & \colhead{coln}}
'''
def __call__(self, lines):
return super(LatexSplitter, self).__call__(lines)
def process_line(self, line):
"""extract column names from tablehead
"""
line = line.split('%')[0]
line = line.replace(r'\tablehead', '')
line = line.strip()
if (line[0] == '{') and (line[-1] == '}'):
line = line[1:-1]
else:
raise core.InconsistentTableError(r'\tablehead is missing {}')
return line.replace(r'\colhead', '')
def join(self, vals):
return ' & '.join([r'\colhead{' + str(x) + '}' for x in vals])
class AASTexHeader(LatexHeader):
r'''In a `deluxetable
<http://fits.gsfc.nasa.gov/standard30/deluxetable.sty>`_ some header
keywords differ from standard LaTeX.
This header is modified to take that into account.
'''
header_start = r'\tablehead'
splitter_class = AASTexHeaderSplitter
def start_line(self, lines):
return find_latex_line(lines, r'\tablehead')
def write(self, lines):
if 'col_align' not in self.latex:
self.latex['col_align'] = len(self.cols) * 'c'
if 'tablealign' in self.latex:
align = '[' + self.latex['tablealign'] + ']'
else:
align = ''
lines.append(r'\begin{' + self.latex['tabletype'] + r'}{' + self.latex['col_align'] + r'}'
+ align)
add_dictval_to_list(self.latex, 'preamble', lines)
if 'caption' in self.latex:
lines.append(r'\tablecaption{' + self.latex['caption'] + '}')
tablehead = ' & '.join([r'\colhead{' + name + '}' for name in self.colnames])
units = self._get_units()
if 'units' in self.latex:
units.update(self.latex['units'])
if units:
tablehead += r'\\ ' + self.splitter.join([units.get(name, ' ')
for name in self.colnames])
lines.append(r'\tablehead{' + tablehead + '}')
class AASTexData(LatexData):
r'''In a `deluxetable`_ the data is enclosed in `\startdata` and `\enddata`
'''
data_start = r'\startdata'
data_end = r'\enddata'
def start_line(self, lines):
return find_latex_line(lines, self.data_start) + 1
def write(self, lines):
lines.append(self.data_start)
lines_length_initial = len(lines)
core.BaseData.write(self, lines)
        # Remove the extra space(s) and the trailing \\ appended by the base
        # writer, which would otherwise create an extra new line at the end.
if len(lines) > lines_length_initial:
# we compile separately because py2.6 doesn't have a flags keyword in re.sub
re_final_line = re.compile(r'\s* \\ \\ \s* $', flags=re.VERBOSE)
lines[-1] = re.sub(re_final_line, '', lines[-1])
lines.append(self.data_end)
add_dictval_to_list(self.latex, 'tablefoot', lines)
lines.append(r'\end{' + self.latex['tabletype'] + r'}')
class AASTex(Latex):
'''AASTeX format table.
This class implements some AASTeX specific commands.
AASTeX is used for the AAS (American Astronomical Society)
publications like ApJ, ApJL and AJ.
It derives from the ``Latex`` reader and accepts the same
keywords. However, the keywords ``header_start``, ``header_end``,
``data_start`` and ``data_end`` in ``latexdict`` have no effect.
'''
_format_name = 'aastex'
_io_registry_format_aliases = ['aastex']
_io_registry_suffix = '' # AASTex inherits from Latex, so override this class attr
_description = 'AASTeX deluxetable used for AAS journals'
header_class = AASTexHeader
data_class = AASTexData
def __init__(self, **kwargs):
super().__init__(**kwargs)
# check if tabletype was explicitly set by the user
if not (('latexdict' in kwargs) and ('tabletype' in kwargs['latexdict'])):
self.latex['tabletype'] = 'deluxetable'
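# Added usage sketch (hedged, mirroring the examples in the ``Latex`` docstring
# above): writing the same kind of data with the AASTeX deluxetable writer.
#
#     from astropy.io import ascii
#     data = {'cola': [1, 2], 'colb': [3, 4]}
#     ascii.write(data, Writer=ascii.AASTex,
#                 latexdict={'units': {'cola': 'km', 'colb': 's'}})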
|
|
"""Test the FissionYieldHelpers"""
import os
from collections import namedtuple
from unittest.mock import Mock
import bisect
import pytest
import numpy as np
import openmc
from openmc import lib
from openmc.deplete.nuclide import Nuclide, FissionYieldDistribution
from openmc.deplete.helpers import (
FissionYieldCutoffHelper, ConstantFissionYieldHelper,
AveragedFissionYieldHelper)
@pytest.fixture(scope="module")
def materials(tmpdir_factory):
"""Use C API to construct realistic materials for testing tallies"""
tmpdir = tmpdir_factory.mktemp("lib")
orig = tmpdir.chdir()
# Create proxy problem to please openmc
mfuel = openmc.Material(name="test_fuel")
mfuel.volume = 1.0
for nuclide in ["U235", "U238", "Xe135", "Pu239"]:
mfuel.add_nuclide(nuclide, 1.0)
openmc.Materials([mfuel]).export_to_xml()
# Geometry
box = openmc.rectangular_prism(1.0, 1.0, boundary_type="reflective")
cell = openmc.Cell(fill=mfuel, region=box)
root = openmc.Universe(cells=[cell])
openmc.Geometry(root).export_to_xml()
# settings
settings = openmc.Settings()
settings.particles = 100
settings.inactive = 0
settings.batches = 10
settings.verbosity = 1
settings.export_to_xml()
try:
with lib.run_in_memory():
yield [lib.Material(), lib.Material()]
finally:
# Convert to strings as os.remove in py 3.5 doesn't support Paths
for file_path in ("settings.xml", "geometry.xml", "materials.xml",
"summary.h5"):
os.remove(str(tmpdir / file_path))
orig.chdir()
os.rmdir(str(tmpdir))
def proxy_tally_data(tally, fill=None):
"""Construct an empty matrix built from a C tally
The shape of tally.mean will be ``(n_bins, n_nuc * n_scores)``
"""
n_nucs = max(len(tally.nuclides), 1)
n_scores = max(len(tally.scores), 1)
n_bins = 1
for tfilter in tally.filters:
if not hasattr(tfilter, "bins"):
continue
this_bins = len(tfilter.bins)
if isinstance(tfilter, lib.EnergyFilter):
this_bins -= 1
n_bins *= max(this_bins, 1)
data = np.empty((n_bins, n_nucs * n_scores))
if fill is not None:
data.fill(fill)
return data
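# Added illustration (hedged): a tally with a 2-bin MaterialFilter, an
# EnergyFilter with 3 bin edges (i.e. 2 energy bins), 2 nuclides and 1 score
# gives n_bins = 2 * 2 = 4 and n_nucs * n_scores = 2, so the returned matrix
# has shape (4, 2); passing ``fill`` pre-populates every entry with that value.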
@pytest.fixture(scope="module")
def nuclide_bundle():
u5yield_dict = {
0.0253: {"Xe135": 7.85e-4, "Gd155": 4.08e-12, "Sm149": 1.71e-12},
5.0e5: {"Xe135": 7.85e-4, "Sm149": 1.71e-12},
1.40e7: {"Xe135": 4.54e-3, "Gd155": 5.83e-8}}
u235 = Nuclide("U235")
u235.yield_data = FissionYieldDistribution(u5yield_dict)
u8yield_dict = {5.00e5: {"Xe135": 1.12e-3, "Gd155": 1.32e-12}}
u238 = Nuclide("U238")
u238.yield_data = FissionYieldDistribution(u8yield_dict)
xe135 = Nuclide("Xe135")
pu239 = Nuclide("Pu239")
pu239.yield_data = FissionYieldDistribution({
5.0e5: {"Xe135": 6.14e-3, "Sm149": 9.429e-10, "Gd155": 5.24e-9},
2e6: {"Xe135": 6.15e-3, "Sm149": 9.42e-10, "Gd155": 5.29e-9}})
NuclideBundle = namedtuple("NuclideBundle", "u235 u238 xe135 pu239")
return NuclideBundle(u235, u238, xe135, pu239)
@pytest.mark.parametrize(
"input_energy, yield_energy",
((0.0253, 0.0253), (0.01, 0.0253), (4e5, 5e5)))
def test_constant_helper(nuclide_bundle, input_energy, yield_energy):
helper = ConstantFissionYieldHelper(nuclide_bundle, energy=input_energy)
assert helper.energy == input_energy
assert helper.constant_yields == {
"U235": nuclide_bundle.u235.yield_data[yield_energy],
"U238": nuclide_bundle.u238.yield_data[5.00e5],
"Pu239": nuclide_bundle.pu239.yield_data[5e5]}
assert helper.constant_yields == helper.weighted_yields(1)
def test_cutoff_construction(nuclide_bundle):
u235 = nuclide_bundle.u235
u238 = nuclide_bundle.u238
pu239 = nuclide_bundle.pu239
# defaults
helper = FissionYieldCutoffHelper(nuclide_bundle, 1)
assert helper.constant_yields == {
"U238": u238.yield_data[5.0e5],
"Pu239": pu239.yield_data[5e5]}
assert helper.thermal_yields == {"U235": u235.yield_data[0.0253]}
assert helper.fast_yields == {"U235": u235.yield_data[5e5]}
# use 14 MeV yields
helper = FissionYieldCutoffHelper(nuclide_bundle, 1, fast_energy=14e6)
assert helper.constant_yields == {
"U238": u238.yield_data[5.0e5],
"Pu239": pu239.yield_data[5e5]}
assert helper.thermal_yields == {"U235": u235.yield_data[0.0253]}
assert helper.fast_yields == {"U235": u235.yield_data[14e6]}
# specify missing thermal yields -> use 0.0253
helper = FissionYieldCutoffHelper(nuclide_bundle, 1, thermal_energy=1)
assert helper.thermal_yields == {"U235": u235.yield_data[0.0253]}
assert helper.fast_yields == {"U235": u235.yield_data[5e5]}
# request missing fast yields -> use epithermal
helper = FissionYieldCutoffHelper(nuclide_bundle, 1, fast_energy=1e4)
assert helper.thermal_yields == {"U235": u235.yield_data[0.0253]}
assert helper.fast_yields == {"U235": u235.yield_data[5e5]}
# higher cutoff energy -> obtain fast and "faster" yields
helper = FissionYieldCutoffHelper(nuclide_bundle, 1, cutoff=1e6,
thermal_energy=5e5, fast_energy=14e6)
assert helper.constant_yields == {"U238": u238.yield_data[5e5]}
assert helper.thermal_yields == {
"U235": u235.yield_data[5e5], "Pu239": pu239.yield_data[5e5]}
assert helper.fast_yields == {
"U235": u235.yield_data[14e6], "Pu239": pu239.yield_data[2e6]}
# test super low and super high cutoff energies
helper = FissionYieldCutoffHelper(
nuclide_bundle, 1, thermal_energy=0.001, cutoff=0.002)
assert helper.fast_yields == {}
assert helper.thermal_yields == {}
assert helper.constant_yields == {
"U235": u235.yield_data[0.0253], "U238": u238.yield_data[5e5],
"Pu239": pu239.yield_data[5e5]}
helper = FissionYieldCutoffHelper(
nuclide_bundle, 1, cutoff=15e6, fast_energy=17e6)
assert helper.thermal_yields == {}
assert helper.fast_yields == {}
assert helper.constant_yields == {
"U235": u235.yield_data[14e6], "U238": u238.yield_data[5e5],
"Pu239": pu239.yield_data[2e6]}
@pytest.mark.parametrize("key", ("cutoff", "thermal_energy", "fast_energy"))
def test_cutoff_failure(key):
with pytest.raises(TypeError, match=key):
FissionYieldCutoffHelper(None, None, **{key: None})
with pytest.raises(ValueError, match=key):
FissionYieldCutoffHelper(None, None, **{key: -1})
# emulate some split between fast and thermal U235 fissions
@pytest.mark.parametrize("therm_frac", (0.5, 0.2, 0.8))
def test_cutoff_helper(materials, nuclide_bundle, therm_frac):
helper = FissionYieldCutoffHelper(nuclide_bundle, len(materials),
cutoff=1e6, fast_energy=14e6)
helper.generate_tallies(materials, [0])
non_zero_nucs = [n.name for n in nuclide_bundle]
tally_nucs = helper.update_tally_nuclides(non_zero_nucs)
assert tally_nucs == ["Pu239", "U235"]
# Check tallies
fission_tally = helper._fission_rate_tally
assert fission_tally is not None
filters = fission_tally.filters
assert len(filters) == 2
assert isinstance(filters[0], lib.MaterialFilter)
assert len(filters[0].bins) == len(materials)
assert isinstance(filters[1], lib.EnergyFilter)
# lower, cutoff, and upper energy
assert len(filters[1].bins) == 3
# Emulate building tallies
# material x energy, tallied_nuclides, 3
tally_data = proxy_tally_data(fission_tally)
helper._fission_rate_tally = Mock()
helper_flux = 1e6
tally_data[0] = therm_frac * helper_flux
tally_data[1] = (1 - therm_frac) * helper_flux
helper._fission_rate_tally.mean = tally_data
helper.unpack()
# expected results of shape (n_mats, 2, n_tnucs)
expected_results = np.empty((1, 2, len(tally_nucs)))
expected_results[:, 0] = therm_frac
expected_results[:, 1] = 1 - therm_frac
assert helper.results == pytest.approx(expected_results)
actual_yields = helper.weighted_yields(0)
assert actual_yields["U238"] == nuclide_bundle.u238.yield_data[5e5]
for nuc in tally_nucs:
assert actual_yields[nuc] == (
helper.thermal_yields[nuc] * therm_frac
+ helper.fast_yields[nuc] * (1 - therm_frac))
@pytest.mark.parametrize("avg_energy", (0.01, 6e5, 15e6))
def test_averaged_helper(materials, nuclide_bundle, avg_energy):
helper = AveragedFissionYieldHelper(nuclide_bundle)
helper.generate_tallies(materials, [0])
tallied_nucs = helper.update_tally_nuclides(
[n.name for n in nuclide_bundle])
assert tallied_nucs == ["Pu239", "U235"]
# check generated tallies
fission_tally = helper._fission_rate_tally
assert fission_tally is not None
fission_filters = fission_tally.filters
assert len(fission_filters) == 2
assert isinstance(fission_filters[0], lib.MaterialFilter)
assert len(fission_filters[0].bins) == len(materials)
assert isinstance(fission_filters[1], lib.EnergyFilter)
assert len(fission_filters[1].bins) == 2
assert fission_tally.scores == ["fission"]
assert fission_tally.nuclides == list(tallied_nucs)
weighted_tally = helper._weighted_tally
assert weighted_tally is not None
weighted_filters = weighted_tally.filters
assert len(weighted_filters) == 2
assert isinstance(weighted_filters[0], lib.MaterialFilter)
assert len(weighted_filters[0].bins) == len(materials)
assert isinstance(weighted_filters[1], lib.EnergyFunctionFilter)
assert len(weighted_filters[1].energy) == 2
assert len(weighted_filters[1].y) == 2
assert weighted_tally.scores == ["fission"]
assert weighted_tally.nuclides == list(tallied_nucs)
helper_flux = 1e16
fission_results = proxy_tally_data(fission_tally, helper_flux)
weighted_results = proxy_tally_data(
weighted_tally, helper_flux * avg_energy)
helper._fission_rate_tally = Mock()
helper._weighted_tally = Mock()
helper._fission_rate_tally.mean = fission_results
helper._weighted_tally.mean = weighted_results
helper.unpack()
expected_results = np.ones((1, len(tallied_nucs))) * avg_energy
assert helper.results == pytest.approx(expected_results)
actual_yields = helper.weighted_yields(0)
# constant U238 => no interpolation
assert actual_yields["U238"] == nuclide_bundle.u238.yield_data[5e5]
# construct expected yields
exp_u235_yields = interp_average_yields(nuclide_bundle.u235, avg_energy)
assert actual_yields["U235"] == exp_u235_yields
exp_pu239_yields = interp_average_yields(nuclide_bundle.pu239, avg_energy)
assert actual_yields["Pu239"] == exp_pu239_yields
def interp_average_yields(nuc, avg_energy):
"""Construct a set of yields by interpolation between neighbors"""
energies = nuc.yield_energies
yields = nuc.yield_data
if avg_energy < energies[0]:
return yields[energies[0]]
if avg_energy > energies[-1]:
return yields[energies[-1]]
thermal_ix = bisect.bisect_left(energies, avg_energy)
thermal_E, fast_E = energies[thermal_ix - 1:thermal_ix + 1]
assert thermal_E < avg_energy < fast_E
split = (avg_energy - thermal_E)/(fast_E - thermal_E)
return yields[thermal_E]*(1 - split) + yields[fast_E]*split
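# Illustrative sketch (not part of the original test module): how
# interp_average_yields behaves for a hypothetical nuclide whose yields are
# plain floats at the usual three energies. SimpleNuc is an assumption made
# only for this example; real nuclides come from the nuclide_bundle fixture,
# and bisect/pytest are already imported at the top of this file.
if __name__ == "__main__":
    class SimpleNuc(object):
        yield_energies = [0.0253, 5e5, 14e6]
        yield_data = {0.0253: 1.0, 5e5: 2.0, 14e6: 3.0}
    # Halfway between 5e5 and 14e6 eV -> halfway between the 2.0 and 3.0 yields
    midpoint = 5e5 + (14e6 - 5e5) / 2
    assert interp_average_yields(SimpleNuc(), midpoint) == pytest.approx(2.5)
    # Below the lowest tabulated energy -> the lowest tabulated yields are used
    assert interp_average_yields(SimpleNuc(), 1e-5) == 1.0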
|
|
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from util.tvl import *
class ReferenceCountManager(object):
def __init__(self):
self.absoluteLUT = {}
self.incrementLUT = {}
self.decrementLUT = {}
self.k = 2
self.infinity = self.k+1
self.null = self.getCanonical({}, ())
def increment(self, rc, slot):
key = (rc, slot)
lut = self.incrementLUT
if not key in lut:
newrc = self.makeIncrement(rc, slot)
lut[key] = newrc
else:
newrc = lut[key]
return newrc
def decrement(self, rc, slot):
key = (rc, slot)
lut = self.decrementLUT
if not key in lut:
newrc = self.makeDecrement(rc, slot)
lut[key] = newrc
else:
newrc = lut[key]
return newrc
def makeIncrement(self, rc, slot):
if rc:
assert isinstance(rc, ReferenceCount), type(rc)
counts = rc.counts
radius = rc.radius
else:
counts = {}
radius = frozenset()
if slot.isHeap():
newrc = dict(counts)
newRadius = radius
newrc[slot] = min(newrc.get(slot, 0)+1, self.infinity)
elif slot.isLocal():
newrc = counts
assert slot not in radius, radius
newRadius = radius.union((slot,))
else:
assert False, slot
canonical = self.getCanonical(newrc, newRadius)
assert canonical is not None
assert canonical.slotHit(slot).mustBeTrue(), slot
return (canonical,)
def makeDecrement(self, rc, slot):
assert isinstance(rc, ReferenceCount), type(rc)
assert rc.slotHit(slot).mustBeTrue(), slot
counts = rc.counts
radius = rc.radius
if slot.isHeap():
newrc = dict(counts)
newRadius = radius
exists = False
saturated = False
count = newrc[slot]
if count == self.infinity:
newrc[slot] = self.k
saturated = True
elif count > 1:
newrc[slot] = count-1
else:
del newrc[slot]
canonical = self.getCanonical(newrc, newRadius)
if saturated:
return (canonical, rc)
else:
# Even if canonical is empty.
return (canonical,)
elif slot.isLocal():
assert slot in radius, radius
canonical = self.getCanonical(counts, radius-frozenset((slot,)))
return (canonical,)
else:
assert False, slot
def getCanonical(self, rc, radius):
# Validate the reference counts
for slot, count in rc.iteritems():
assert slot.isHeap(), slot
assert count > 0 and count <= self.infinity, count
for slot in radius:
assert slot.isLocal(), slot
radius = frozenset(radius)
key = (frozenset(rc.iteritems()), radius)
if key not in self.absoluteLUT:
obj = ReferenceCount(rc, radius)
self.absoluteLUT[key] = obj
else:
obj = self.absoluteLUT[key]
return obj
def split(self, rc, accessedCallback):
accessedrc = {}
unaccessedrc = {}
for slot, count in rc.counts.iteritems():
if accessedCallback(slot):
accessedrc[slot] = count
else:
unaccessedrc[slot] = count
accessedradius = []
unaccessedradius = []
for slot in rc.radius:
if accessedCallback(slot):
accessedradius.append(slot)
else:
unaccessedradius.append(slot)
accessed = self.getCanonical(accessedrc, accessedradius)
unaccessed = self.getCanonical(unaccessedrc, unaccessedradius)
return unaccessed, accessed
def merge(self, a, b):
# Assumes the reference counts are disjoint.
newrc = {}
if a: newrc.update(a.counts)
if b: newrc.update(b.counts)
return self.getCanonical(newrc, a.radius.union(b.radius))
class ReferenceCount(object):
__slots__ = 'counts', 'radius'
def __init__(self, counts, radius):
#assert not counts, "DEBUG"
assert isinstance(counts, dict), type(counts)
self.counts = counts
self.radius = radius
def __repr__(self):
rc = ["%s=%s" % p for p in self.counts.iteritems()]
rc.extend([str(r) for r in self.radius])
return "rc(%s)" % ", ".join(rc)
def containsParameter(self):
for slot in self.radius:
if slot.isParameter():
return True
return False
def slotHit(self, slot):
if slot.isHeap():
return tvl(slot in self.counts)
elif slot.isLocal():
return tvl(slot in self.radius)
else:
assert False, slot
def isExpression(self):
return False
def __len__(self):
return len(self.counts)+len(self.radius)
def forget(self, sys, kill):
newcounts = {}
for slot, count in self.counts.iteritems():
if slot not in kill:
newcounts[slot] = count
newradius = frozenset([slot for slot in self.radius if slot not in kill])
return sys.canonical.rcm.getCanonical(newcounts, newradius)
def remap(self, sys, slotMapping):
newcounts = {}
for slot, count in self.counts.iteritems():
newslot = slotMapping.get(slot, slot)
if newslot:
newcounts[newslot] = count
newradius = []
for slot in self.radius:
newslot = slotMapping.get(slot, slot)
if newslot:
newradius.append(newslot)
newradius = frozenset(newradius)
return sys.canonical.rcm.getCanonical(newcounts, newradius)
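# Illustrative sketch (not part of the original module): exercising
# ReferenceCountManager with a minimal stand-in slot. FakeHeapSlot is an
# assumption for this example only; real slots come from the surrounding
# analysis framework. Like the rest of this module, the sketch targets
# Python 2 (getCanonical relies on dict.iteritems) and assumes util.tvl's
# three-valued logic wrappers behave as expected.
if __name__ == "__main__":
    class FakeHeapSlot(object):
        def isHeap(self):
            return True
        def isLocal(self):
            return False
    rcm = ReferenceCountManager()
    heap = FakeHeapSlot()
    # Incrementing from "no reference count" yields a canonical count of one.
    (rc,) = rcm.increment(None, heap)
    assert rc.counts[heap] == 1
    # Repeated increments saturate at rcm.infinity (k + 1).
    for _ in range(5):
        (rc,) = rcm.increment(rc, heap)
    assert rc.counts[heap] == rcm.infinity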
|
|
from pdb import set_trace
import json
import os
import random
import re
import sys
from textblob import TextBlob
from botfriend.bot import TextGeneratorBot
from olipy import corpora
from wordfilter import blacklisted
class IAMAExtractor(object):
re_parts = [
"[^.]+[!.] [^.]+[!.]",
"[^.]+[!.\n]",
"[^.!]+$",
]
emoji = re.compile(u'['
u'\U0001F300-\U0001F64F'
u'\U0001F680-\U0001F6FF'
u'\u2600-\u26FF\u2700-\u27BF]+',
re.UNICODE)
stop_at = ["http", "#", "/", " - ", " @"]
ends_with_alphanumeric = re.compile("\w$")
single_quote_not_next_to_letter = [
re.compile("[^a-zA-Z]'", re.I),
re.compile("'[^a-zA-Z]", re.I)
]
@classmethod
def extract_iama(cls, text, query):
"""Extract the part of a sentence that looks like the start of an AMA."""
for quality, p in enumerate(cls.re_parts):
quality = len(cls.re_parts) - quality
r = re.compile(r"\b(%s %s)" % (query, p), re.I + re.M)
text = cls.emoji.sub("", text)
m = r.search(text)
if not m:
continue
match = m.groups()[0]
for stop_at in cls.stop_at:
index = match.find(stop_at)
if index != -1:
match = match[:index]
match = match.strip()
if cls.ends_with_alphanumeric.search(match):
match = match + "."
if u'\u201d' in match or u'\u201c' in match or '"' in match:
continue
for i in cls.single_quote_not_next_to_letter:
if i.search(match):
return None
# A potential choice must be at least four words long.
blob = TextBlob(match)
if len(blob.tags) < 4:
return None
#print "%s <- %s" % (match, text)
# print match.encode("utf8")
return quality, match
# Sometimes the problem is you searched for "I am x" and
# Twitter returned "I'm x".
if "I am" in query and "I'm" in text:
return cls.extract_iama(
text.replace("I am", "I'm"),
query.replace("I am", "I'm"))
@classmethod
def has_bad_end(cls, s):
"""We don't want an AMA to end in what appears to be the middle of a sentence."""
s = s.lower()
for i in (' a', 'the', 'an'):
if s.endswith(i+"."):
return True
return False
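# Illustrative sketch (not part of the original bot): what extract_iama returns
# for a plausible, made-up tweet. This helper is never called by the bot; it
# assumes the textblob corpora are installed, since extract_iama requires at
# least four tagged words.
def _example_extract_iama():
    result = IAMAExtractor.extract_iama(
        "I am a lighthouse keeper on a tiny island. It gets windy.", "I am a")
    if result is None:
        return None
    quality, match = result
    # Earlier (stricter) patterns in re_parts produce a higher quality score.
    return "%d: %s AMA." % (quality, match)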
class StateManager(object):
"""Manage the internal state of IAMABot."""
def __init__(self, log, twitter, state, max_potentials=1000):
""":param twitter: A Twitter client. Used to search for usable phrases.
:param state: The state object kept by the IAmABot's BotModel.
        This is a list of dictionaries, one per potential phrase. Each
        dictionary has keys 'content' (the original tweet), 'query' (the
        search that found it), 'iama' (the suggested AMA lead-in derived
        from it), and 'score' (a weight used when choosing).
        :param max_potentials: Keep track of at most this number of potential phrases.
"""
self.log = log
self.twitter = twitter
self.potentials = state or []
if self.potentials:
self.already_seen = set(x['content'] for x in self.potentials)
else:
self.already_seen = set()
self.max_potentials = max_potentials
def update(self):
"""Search Twitter for phrases that can be reused. Add them
to the bot state.
"""
add_to_corpus = []
# In addition to searching for phrases like "I am a", we're
# going to pick a past tense verb like "accomplished" and
# search for (e.g.) "I accomplished".
past_tense = corpora.words.common_verbs["past_tense"]
verb_of_the_day = random.choice(past_tense)
random_verb = "I %s" % verb_of_the_day
self.log.info("Today's random verb: '%s'", random_verb)
for query in ["I am a", "I am an", "I am the", random_verb]:
for data in self.query_twitter(query):
self.potentials.append(data)
self.log.info("Considering %r" % data)
# Cut off old potentials so the state doesn't grow without bounds.
self.potentials = self.potentials[-self.max_potentials:]
def query_twitter(self, query):
"""Search Twitter for a phrase and return an object
for each tweet that could be reformatted as an AMA.
"""
if not self.twitter:
return
quoted = '"%s"' % query
results = self.twitter.search(q=quoted)
for tweet in results:
text = tweet.text
if text in self.already_seen:
# Ignore this; we already have it as a potential.
continue
self.already_seen.add(text)
if blacklisted(text):
continue
if 'AMA' in text or 'ask me anything' in text.lower():
# Don't use an actual AMA or AMA joke.
continue
iama = IAMAExtractor.extract_iama(text, query)
if not iama:
# We couldn't actually turn this into an AMA lead-in.
continue
score, iama = iama
yield dict(
content=text, query=query,
iama=iama, score=score
)
def choose(self, recently_used_posts, recently_seen_words):
"""Make a weighted choice from potentials that are not
in recently_used_posts and don't include a word in
recently_seen_words.
"""
possibilities = []
for item in self.potentials:
content = item['content']
iama = item['iama'].lower()
if any([iama in x for x in recently_used_posts]):
continue
words = set(word for word, tag in TextBlob(content.lower()).tags)
if recently_seen_words.intersection(words):
self.log.info("Ignoring due to recently seen word: '%s'", content)
continue
for i in range(item['score']):
# Matches more likely to get a good result get weighted
# more heavily.
possibilities.append(item)
if possibilities:
return random.choice(possibilities)
else:
if recently_seen_words:
return self.choose(recently_used_posts, set())
else:
if recently_seen_words or recently_used_posts:
return self.choose(set(), set())
else:
self.log.error("Can't do anything -- no data to work from.")
class IAmABot(TextGeneratorBot):
def __init__(self, *args, **kwargs):
super(IAmABot, self).__init__(*args, **kwargs)
twitter = None
for publisher in self.publishers:
if publisher.service == 'twitter':
twitter = publisher
break
else:
self.log.error("No Twitter publisher configured, cannot update state.")
if self.model.state:
state = json.loads(self.model.state)
else:
state = []
        self.state_manager = StateManager(
            self.log, twitter.api if twitter else None, state)
@property
def recently_used_words(self):
"""Make a list of nouns, verbs, and adjectives used in recent posts."""
whitelist = set(['ama', 'am', 'this', 'i', "i'm"])
recent_words = set()
recent_posts = self.model.recent_posts(7)
for post in recent_posts:
blob = TextBlob(post.content)
for word, tag in blob.tags:
word = word.lower()
if (tag[0] in 'NJV' and tag != 'VBP' and word not in whitelist):
recent_words.add(word)
return recent_words
def update_state(self):
self.state_manager.update()
return json.dumps(self.state_manager.potentials)
def generate_text(self):
# We don't want to exactly repeat a post created in the past year.
recent_posts = [x.content.lower() for x in self.model.recent_posts(365)]
# We don't want to reuse a significant word in a post we created
# in the past week.
recent_words = self.recently_used_words
ama = None
while not ama:
choice = self.state_manager.choose(recent_posts, recent_words)
if not choice:
return None
ama = choice['iama'] + " AMA" + random.choice('.. !')
ama = ama.strip()
if len(ama) > 140 or "\n" in ama or IAMAExtractor.has_bad_end(
ama
):
ama = None
self.log.info("The chosen one: %s", ama)
return ama
Bot = IAmABot
|
|
#------------------------------------------------------------------------------
# Copyright (C) 2009 Richard W. Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#------------------------------------------------------------------------------
""" Resource plug-in action set.
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
from enthought.envisage.ui.action.api import Action, Group, Menu, ToolBar
from enthought.envisage.ui.workbench.api import WorkbenchActionSet
#------------------------------------------------------------------------------
# "ResourceActionSet" class:
#------------------------------------------------------------------------------
class ResourceActionSet(WorkbenchActionSet):
""" Resource plug-in action set.
"""
#--------------------------------------------------------------------------
# "ActionSet" interface:
#--------------------------------------------------------------------------
# The action set"s globally unique identifier:
id = "puddle.resource.action_set"
menus = [
Menu(
name="&File", path="MenuBar",
groups=[
"OpenGroup", "CloseGroup", "SaveGroup",
"ImportGroup", "ResourceGroup", "ExitGroup"
]
),
Menu(
name="&New", path="MenuBar/File", group="OpenGroup",
groups=["ContainerGroup", "ComponentGroup", "OtherGroup"]
),
Menu(
name="&Edit", path="MenuBar", after="File",
groups=["UndoGroup", "ClipboardGroup", "PreferencesGroup"]
),
Menu(
name="&Navigate", path="MenuBar", after="Edit"
)
]
tool_bars = [
ToolBar(
id="puddle.resource.resource_tool_bar",
name="ResourceToolBar",
groups=["FileGroup", "ImportGroup", "NavigationGroup"]
)
]
actions = [
Action(
path="MenuBar/File/New", group="OtherGroup",
class_name="puddle.resource.action.new_resource_action:"
"NewResourceAction"
),
Action(
path="MenuBar/File/New", group="ContainerGroup",
class_name="puddle.resource.action.new_folder_action:"
"NewFolderAction"
),
Action(
path="MenuBar/File", group="CloseGroup",
class_name="puddle.resource.action.close_action:"
"CloseAction"
),
Action(
path="MenuBar/File", group="CloseGroup",
class_name="puddle.resource.action.close_all_action:"
"CloseAllAction"
),
Action(
path="MenuBar/File", group="SaveGroup",
class_name="puddle.resource.action.save_action:"
"SaveAction"
),
Action(
path="MenuBar/File", group="SaveGroup",
class_name="puddle.resource.action.save_as_action:"
"SaveAsAction"
),
Action(
path="MenuBar/File", group="SaveGroup",
class_name="puddle.resource.action.save_all_action:"
"SaveAllAction"
),
Action(
path="MenuBar/File", group="ImportGroup",
class_name="puddle.resource.action.import_action:"
"ImportAction"
),
Action(
path="MenuBar/File", group="ImportGroup",
class_name="puddle.resource.action.export_action:"
"ExportAction"
),
Action(
path="MenuBar/File", group="ResourceGroup",
class_name="puddle.resource.action.refresh_action:"
"RefreshAction"
),
Action(
path="MenuBar/File", group="ResourceGroup",
class_name="puddle.resource.action.properties_action:"
"PropertiesAction"
),
Action(
path="MenuBar/Edit", group="ClipboardGroup",
class_name="puddle.resource.action.copy_action:"
"CopyAction"
),
Action(
path="MenuBar/Edit", group="ClipboardGroup",
class_name="puddle.resource.action.delete_action:"
"DeleteAction"
),
Action(
path="MenuBar/Edit", group="ClipboardGroup",
class_name="puddle.resource.action.move_action:"
"MoveAction"
),
Action(
path="MenuBar/Edit", group="ClipboardGroup",
class_name="puddle.resource.action.rename_action:"
"RenameAction"
),
Action(
path="MenuBar/Navigate",
class_name="puddle.resource.action.up_action:"
"UpAction"
),
Action(
path="MenuBar/Navigate",
class_name="puddle.resource.action.home_action:"
"HomeAction"
),
Action(
path="MenuBar/Navigate",
class_name="puddle.resource.action.location_action:"
"LocationAction"
),
# Toolbar actions
Action(
path="ToolBar/ResourceToolBar", group="FileGroup",
class_name="puddle.resource.action.new_resource_action:"
"NewResourceAction"
),
Action(
path="ToolBar/ResourceToolBar", group="FileGroup",
class_name="puddle.resource.action.save_action:"
"SaveAction"
),
Action(
path="ToolBar/ResourceToolBar", group="FileGroup",
class_name="puddle.resource.action.save_as_action:"
"SaveAsAction"
),
Action(
path="ToolBar/ResourceToolBar", group="ImportGroup",
class_name="puddle.resource.action.import_action:"
"ImportAction"
),
Action(
path="ToolBar/ResourceToolBar", group="ImportGroup",
class_name="puddle.resource.action.export_action:"
"ExportAction"
),
Action(
path="ToolBar/ResourceToolBar", group="NavigationGroup",
class_name="puddle.resource.action.up_action:"
"UpAction"
),
Action(
path="ToolBar/ResourceToolBar", group="NavigationGroup",
class_name="puddle.resource.action.home_action:"
"HomeAction"
)
]
#------------------------------------------------------------------------------
# "ContextMenuActionSet" class:
#------------------------------------------------------------------------------
class ContextMenuActionSet(WorkbenchActionSet):
""" Action set for the resource view context menu.
"""
#--------------------------------------------------------------------------
# "ActionSet" interface:
#--------------------------------------------------------------------------
# The action set"s globally unique identifier:
id = "puddle.resource.context_menu_action_set"
# The menus in this set
menus = [
Menu(
name="&New", path="Resource", group="NewGroup",
groups=["ContainerGroup", "ComponentGroup", "OtherGroup"]
),
Menu(
name="Open With", path="Resource", group="OpenGroup",
class_name="puddle.resource.action.open_with_menu_manager:"
"OpenWithMenuManager"
)
]
# The groups in this set
groups = [
Group(path="Resource", id="NewGroup"),
Group(path="Resource", id="OpenGroup"),
Group(path="Resource", id="EditGroup"),
Group(path="Resource", id="ImportGroup"),
Group(path="Resource", id="RefreshGroup"),
Group(path="Resource", id="SubMenuGroup"),
Group(path="Resource", id="PropertiesGroup")
]
# The actions in this set
actions = [
Action(
name="Folder", path="Resource/New", group="ContainerGroup",
id="puddle.resource.new_project_action",
class_name="puddle.resource.action.new_folder_action:"
"NewFolderAction"
),
Action(
path="Resource/New", group="OtherGroup",
class_name="puddle.resource.action.new_resource_action:"
"NewResourceAction"
),
Action(
name="Open", path="Resource",
group="OpenGroup", before="Open With",
id="puddle.resource.open_action",
class_name="puddle.resource.action.open_action:"
"OpenAction"
),
Action(
name="&Copy...", path="Resource", group="EditGroup",
class_name="puddle.resource.action.copy_action:"
"CopyAction"
),
Action(
name="&Delete", path="Resource", group="EditGroup",
class_name="puddle.resource.action.delete_action:"
"DeleteAction"
),
Action(
name="Mo&ve...", path="Resource", group="EditGroup",
class_name="puddle.resource.action.move_action:"
"MoveAction"
),
Action(
name="Rena&me...", path="Resource", group="EditGroup",
class_name="puddle.resource.action.rename_action:"
"RenameAction"
),
Action(
name="Import...", path="Resource", group="ImportGroup",
class_name="puddle.resource.action.import_action:"
"ImportAction"
),
Action(
name="Export...", path="Resource", group="ImportGroup",
class_name="puddle.resource.action.export_action:"
"ExportAction"
),
Action(
name="Refresh", path="Resource", group="RefreshGroup",
class_name="puddle.resource.action.refresh_action:"
"RefreshAction"
),
Action(
name="Properties", path="Resource", group="PropertiesGroup",
class_name="puddle.resource.action.properties_action:"
"PropertiesAction"
)
]
# A mapping from human-readable names to globally unique IDs
# aliases = {"Resource": "puddle.resource.context_menu"}
# EOF -------------------------------------------------------------------------
|
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
try:
import fcntl
# On Windows this module is not available, just make those
# methods no-ops
except ImportError:
class fcntl:
@staticmethod
def fcntl(a, b, c=None):
pass
import re
import os
import select
import sys
import subprocess
from color import Coloring
from command import Command, MirrorSafeCommand
_CAN_COLOR = [
'branch',
'diff',
'grep',
'log',
]
class ForallColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'forall')
self.project = self.printer('project', attr='bold')
class Forall(Command, MirrorSafeCommand):
common = False
helpSummary = "Run a shell command in each project"
helpUsage = """
%prog [<project>...] -c <command> [<arg>...]
%prog -r str1 [str2] ... -c <command> [<arg>...]
"""
helpDescription = """
Executes the same shell command in each project.
The -r option allows running the command only on projects matching
regex or wildcard expression.
Output Formatting
-----------------
The -p option causes '%prog' to bind pipes to the command's stdin,
stdout and stderr streams, and pipe all output into a continuous
stream that is displayed in a single pager session. Project headings
are inserted before the output of each command is displayed. If the
command produces no output in a project, no heading is displayed.
The formatting convention used by -p is very suitable for some
types of searching, e.g. `repo forall -p -c git log -SFoo` will
print all commits that add or remove references to Foo.
The -v option causes '%prog' to display stderr messages if a
command produces output only on stderr. Normally the -p option
causes command output to be suppressed until the command produces
at least one byte of output on stdout.
Environment
-----------
pwd is the project's working directory. If the current client is
a mirror client, then pwd is the Git repository.
REPO_PROJECT is set to the unique name of the project.
REPO_PATH is the path relative to the root of the client.
REPO_REMOTE is the name of the remote system from the manifest.
REPO_LREV is the name of the revision from the manifest, translated
to a local tracking branch. If you need to pass the manifest
revision to a locally executed git command, use REPO_LREV.
REPO_RREV is the name of the revision from the manifest, exactly
as written in the manifest.
REPO__* are any extra environment variables, specified by the
"annotation" element under any project element. This can be useful
for differentiating trees based on user-specific criteria, or simply
annotating tree details.
shell positional arguments ($1, $2, .., $#) are set to any arguments
following <command>.
Unless -p is used, stdin, stdout, stderr are inherited from the
terminal and are not redirected.
If -e is used, when a command exits unsuccessfully, '%prog' will abort
without iterating through the remaining projects.
"""
def _Options(self, p):
def cmd(option, opt_str, value, parser):
setattr(parser.values, option.dest, list(parser.rargs))
while parser.rargs:
del parser.rargs[0]
p.add_option('-r', '--regex',
dest='regex', action='store_true',
help="Execute the command only on projects matching regex or wildcard expression")
p.add_option('-c', '--command',
help='Command (and arguments) to execute',
dest='command',
action='callback',
callback=cmd)
p.add_option('-e', '--abort-on-errors',
dest='abort_on_errors', action='store_true',
help='Abort if a command exits unsuccessfully')
g = p.add_option_group('Output')
g.add_option('-p',
dest='project_header', action='store_true',
help='Show project headers before output')
g.add_option('-v', '--verbose',
dest='verbose', action='store_true',
help='Show command error messages')
def WantPager(self, opt):
return opt.project_header
def Execute(self, opt, args):
if not opt.command:
self.Usage()
cmd = [opt.command[0]]
shell = True
if re.compile(r'^[a-z0-9A-Z_/\.-]+$').match(cmd[0]):
shell = False
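    # Commands that look like a bare program name or path run directly; anything
    # else goes through the shell. With shell=True on POSIX, the duplicated first
    # element appended below becomes $0, so the user's extra arguments show up as
    # the $1..$N positional parameters described in the help text above.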
if shell:
cmd.append(cmd[0])
cmd.extend(opt.command[1:])
if opt.project_header \
and not shell \
and cmd[0] == 'git':
# If this is a direct git command that can enable colorized
# output and the user prefers coloring, add --color into the
# command line because we are going to wrap the command into
# a pipe and git won't know coloring should activate.
#
for cn in cmd[1:]:
if not cn.startswith('-'):
break
else:
cn = None
# pylint: disable=W0631
if cn and cn in _CAN_COLOR:
class ColorCmd(Coloring):
def __init__(self, config, cmd):
Coloring.__init__(self, config, cmd)
if ColorCmd(self.manifest.manifestProject.config, cn).is_on:
cmd.insert(cmd.index(cn) + 1, '--color')
# pylint: enable=W0631
mirror = self.manifest.IsMirror
out = ForallColoring(self.manifest.manifestProject.config)
out.redirect(sys.stdout)
rc = 0
first = True
if not opt.regex:
projects = self.GetProjects(args)
else:
projects = self.FindProjects(args)
for project in projects:
env = os.environ.copy()
def setenv(name, val):
if val is None:
val = ''
env[name] = val.encode()
setenv('REPO_PROJECT', project.name)
setenv('REPO_PATH', project.relpath)
setenv('REPO_REMOTE', project.remote.name)
setenv('REPO_LREV', project.GetRevisionId())
setenv('REPO_RREV', project.revisionExpr)
for a in project.annotations:
setenv("REPO__%s" % (a.name), a.value)
if mirror:
setenv('GIT_DIR', project.gitdir)
cwd = project.gitdir
else:
cwd = project.worktree
if not os.path.exists(cwd):
if (opt.project_header and opt.verbose) \
or not opt.project_header:
print('skipping %s/' % project.relpath, file=sys.stderr)
continue
if opt.project_header:
stdin = subprocess.PIPE
stdout = subprocess.PIPE
stderr = subprocess.PIPE
else:
stdin = None
stdout = None
stderr = None
p = subprocess.Popen(cmd,
cwd = cwd,
shell = shell,
env = env,
stdin = stdin,
stdout = stdout,
stderr = stderr)
if opt.project_header:
class sfd(object):
def __init__(self, fd, dest):
self.fd = fd
self.dest = dest
def fileno(self):
return self.fd.fileno()
empty = True
errbuf = ''
p.stdin.close()
s_in = [sfd(p.stdout, sys.stdout),
sfd(p.stderr, sys.stderr)]
for s in s_in:
flags = fcntl.fcntl(s.fd, fcntl.F_GETFL)
fcntl.fcntl(s.fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
while s_in:
in_ready, _out_ready, _err_ready = select.select(s_in, [], [])
for s in in_ready:
buf = s.fd.read(4096)
if not buf:
s.fd.close()
s_in.remove(s)
continue
if not opt.verbose:
if s.fd != p.stdout:
errbuf += buf
continue
if empty:
if first:
first = False
else:
out.nl()
if mirror:
project_header_path = project.name
else:
project_header_path = project.relpath
out.project('project %s/', project_header_path)
out.nl()
out.flush()
if errbuf:
sys.stderr.write(errbuf)
sys.stderr.flush()
errbuf = ''
empty = False
s.dest.write(buf)
s.dest.flush()
r = p.wait()
if r != 0:
if r != rc:
rc = r
if opt.abort_on_errors:
print("error: %s: Aborting due to previous error" % project.relpath,
file=sys.stderr)
sys.exit(r)
if rc != 0:
sys.exit(rc)
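# Illustrative usage notes (not part of repo itself), restating the help text
# above with concrete, hypothetical invocations:
#
#   repo forall -c 'echo $REPO_PROJECT: $REPO_PATH'
#       prints each project's unique name and client-relative path.
#   repo forall -p -c git log -SFoo
#       pages per-project headings followed by commits touching "Foo".
#   repo forall -r 'platform/.*' -c git status
#       runs the command only in projects whose names match the expression.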
|
|
#!/usr/bin/python
# Copyright (c) Facebook, Inc. and its affiliates.
"""Module to interact with the Adobe User Management API."""
from __future__ import print_function
import json
import os
import platform
import random
import sys
import time
try:
import jwt
import requests
except ImportError:
print("Missing 'jwt' and/or 'requests' modules.")
exit(1)
if sys.version_info[0] == 2:
    from ConfigParser import RawConfigParser
    from urllib import urlencode
    from urllib import quote
    string_types = basestring  # NOQA
elif sys.version_info[0] >= 3:
    from configparser import RawConfigParser
    from urllib.parse import urlencode, quote
    string_types = str
# Constants for fallback
USERCONFIG_DEFAULT_LOC = '/Library/Adobe/usermanagement.config'
PRIVATE_KEY_DEFAULT_LOC = '/Library/Adobe/private.key'
CACHE_DEFAULT_LOC = '/Library/Adobe/adobe_tools.json'
# User lookup functions
def get_console_user():
"""Find out who is logged in right now."""
current_os = platform.system()
if 'Darwin' in current_os:
# macOS: Use SystemConfiguration framework to get the current
# console user
from SystemConfiguration import SCDynamicStoreCopyConsoleUser
cfuser = SCDynamicStoreCopyConsoleUser(None, None, None)
return cfuser[0]
if 'Windows' in current_os:
from win32api import GetUserName
return GetUserName()
if 'Linux' in current_os:
from getpass import getuser
return getuser()
# Exception classes used by this module.
class AdobeAPINoUserException(Exception):
"""Given user does not exist."""
def __init__(self, username):
"""Store the user that doesn't exist."""
self.username = username
def __str__(self):
"""String for the username."""
return "No user found for '%s' " % str(self.username)
class AdobeAPINoProductException(Exception):
"""Given product does not exist."""
def __init__(self, product):
"""Store the product that doesn't exist."""
self.product = product
def __str__(self):
"""String for the product."""
return "No product configuration for '%s'" % str(self.product)
class AdobeAPIBadStatusException(Exception):
"""Received a non-200 code from the API."""
def __init__(self, status_code, headers, text):
"""Store the product that doesn't exist."""
self.status_code = status_code
self.headers = headers
self.text = text
def __str__(self):
"""Text for the error."""
return 'Status code %s: %s' % (self.status_code, str(self.text))
def __int__(self):
"""Return status code of the error."""
return int(self.status_code)
class AdobeAPIIncompleteUserActionException(Exception):
"""User manipulation action returned an incomplete."""
def __init__(self, errors):
"""Store the error generated from the incomplete."""
self.errors = errors
def __str__(self):
"""Text for the error."""
return str(self.errors)
class AdobeAPIMissingRequirementsException(Exception):
"""Missing a required file for API usage."""
def __init__(self, filename):
"""Store the filename that is missing."""
self.filename = filename
def __str__(self):
"""Text for the error."""
return 'Required file is missing: %s' % str(self.filename)
class AdobeAPIObject(object):
"""Model to represent an Adobe API interface."""
def __init__(
self,
username="%[email protected]" % get_console_user(),
private_key_filename=PRIVATE_KEY_DEFAULT_LOC,
userconfig=USERCONFIG_DEFAULT_LOC,
cache_path=CACHE_DEFAULT_LOC,
cache=True,
key='email',
allow_nonexistent_user=False,
splay=random.randrange(-144, 144),
):
"""
Instantiate class variables for our API object model.
'username' defaults to the current logged in user on all platforms.
'private_key_filename', 'userconfig', and 'cache_path' will default to
the constants defined above if not provided.
'cache' defaults to True to consume available cache data, and to store
the data in local cache. False will not cache and ignores any local
cache file.
The cache path is defined in the constant above.
'key' must be either 'email' or 'username', and determines what field
to match the incoming data off of. By default, this is the 'email'
field.
'allow_nonexistent_user' will not trigger an exception if you try to
perform an action on a user that does not exist. This is useful for
determining if a user exists, or querying lists of product configs,
where you don't actually need to interact with a user to do so.
'splay' is a number of hours added to the cache length. By default,
this is a random value between -144 and 144 hours, so that machines
don't all invalidate their cache and query the API endpoint at the
same time.
This can be confusing because regardless of key choice, 'username' is
used to indicate the unique user.
"""
self.configs = {}
self.productlist = []
self.userlist = []
self.cache_path = cache_path
self.user = {}
self.username = username
self.cache = cache
self.key = key
self.allow_fake = allow_nonexistent_user
self.splay = splay
if self.cache:
self.__read_cache()
# Generate the access configs in case we need them later
self.__generate_config(
userconfig=userconfig,
private_key_filename=private_key_filename
)
if not self.user:
# Cache didn't have values we need, so let's query the API
self.gather_user()
if not self.productlist:
self.gather_product_list(force=True)
if self.cache:
self.__write_cache()
# CONFIG
def __get_private_key(self, priv_key_filename):
"""Retrieve private key from file."""
priv_key_file = open(priv_key_filename)
priv_key = priv_key_file.read()
priv_key_file.close()
return priv_key
def __get_user_config(self, filename=None):
"""Retrieve config data from file."""
config = RawConfigParser()
config.read(filename)
config_dict = {
# server parameters
'host': config.get("server", "host"),
'endpoint': config.get("server", "endpoint"),
'ims_host': config.get("server", "ims_host"),
'ims_endpoint_jwt': config.get("server", "ims_endpoint_jwt"),
# enterprise parameters used to construct JWT
'domain': config.get("enterprise", "domain"),
'org_id': config.get("enterprise", "org_id"),
'api_key': config.get("enterprise", "api_key"),
'client_secret': config.get("enterprise", "client_secret"),
'tech_acct': config.get("enterprise", "tech_acct"),
'priv_key_filename': config.get("enterprise", "priv_key_filename"),
}
self.configs = config_dict
def __prepare_jwt_token(self):
"""Construct the JSON Web Token for auth."""
# set expiry time for JSON Web Token
expiry_time = int(time.time()) + 60 * 60 * 24
# create payload
payload = {
"exp": expiry_time,
"iss": self.configs['org_id'],
"sub": self.configs['tech_acct'],
"aud": (
"https://" +
self.configs['ims_host'] +
"/c/" +
self.configs['api_key']
),
(
"https://" +
self.configs['ims_host'] +
"/s/" +
"ent_user_sdk"
): True
}
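        # The computed dictionary key above is the Adobe metascope URL (built
        # from ims_host and the ent_user_sdk scope); Adobe's JWT service-account
        # auth expects each granted metascope URL as a claim key mapped to True.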
# create JSON Web Token
jwt_token = jwt.encode(payload, self.priv_key, algorithm='RS256')
# decode bytes into string
jwt_token = jwt_token.decode("utf-8")
return jwt_token
def __prepare_access_token(self, config_data, jwt_token):
"""Generate the access token."""
# Method parameters
url = "https://" + config_data['ims_host'] + \
config_data['ims_endpoint_jwt']
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Cache-Control": "no-cache"
}
body_credentials = {
"client_id": config_data['api_key'],
"client_secret": config_data['client_secret'],
"jwt_token": jwt_token
}
body = urlencode(body_credentials)
# send http request
res = requests.post(url, headers=headers, data=body)
# evaluate response
if res.status_code == 200:
# extract token
access_token = json.loads(res.text)["access_token"]
return access_token
else:
raise AdobeAPIBadStatusException(
res.status_code, res.headers, res.text
)
def __generate_config(self, userconfig, private_key_filename):
"""Return tuple of necessary config data."""
# Get userconfig data
user_config_path = userconfig
if not os.path.isfile(str(user_config_path)):
raise AdobeAPIMissingRequirementsException(str(user_config_path))
# Get private key
priv_key_path = private_key_filename
if not os.path.isfile(str(priv_key_path)):
raise AdobeAPIMissingRequirementsException(str(priv_key_path))
self.priv_key = self.__get_private_key(priv_key_path)
# Get config data
self.__get_user_config(user_config_path)
# Get the JWT
try:
self.jwt_token = self.__prepare_jwt_token()
except NotImplementedError:
print(
"Cryptography module was unable to succeed on your machine.",
file=sys.stderr)
raise
# Get the access token
self.access_token = self.__prepare_access_token(
self.configs,
self.jwt_token
)
def __headers(self, config_data, access_token):
"""Return the headers needed."""
headers = {
"Content-type": "application/json",
"Accept": "application/json",
"x-api-key": config_data['api_key'],
"Authorization": "Bearer " + access_token
}
return headers
# REQUEST INTERACTION FUNCTIONS
def __submit_request(self, url):
"""
Submit a request to the API endpoint.
Returns a JSON dictionary of the result.
If a non-200 status is returned, raise an AdobeAPIBadStatusException.
"""
res = requests.get(
url,
headers=self.__headers(self.configs, self.access_token)
)
if res.status_code != 200:
raise AdobeAPIBadStatusException(
res.status_code,
res.headers,
res.text
)
return json.loads(res.text)
def _submit_user_action_request(self, body_dict):
"""
Submit a JSON request to the User Action API.
Returns True if the action succeeded.
If the action was not completed, raise
AdobeAPIIncompleteUserActionException.
"""
success = False
body = json.dumps([body_dict])
url = "https://" + self.configs['host'] + \
self.configs['endpoint'] + "/action/" + \
self.configs['org_id']
res = requests.post(
url,
headers=self.__headers(self.configs, self.access_token),
data=body
)
if res.status_code != 200:
raise AdobeAPIBadStatusException(
res.status_code,
res.headers,
res.text
)
results = json.loads(res.text)
if results.get('notCompleted') == 1:
raise AdobeAPIIncompleteUserActionException(
results.get('errors')
)
if results.get('completed') == 1:
success = True
self.update_user()
return success
# CACHE FUNCTIONS
def __read_cache(self):
"""Read the values from the cache file."""
cache_data = {}
try:
# Invalidate the cache automatically after 2 weeks, plus splay
file_age = os.path.getmtime(self.cache_path)
# Splay is a number of hours added to the cache invalidation time
# It can be negative, so that clients don't all hit at once.
splay_seconds = 60 * 60 * int(self.splay)
two_weeks = (60 * 60 * 24 * 14)
if time.time() - file_age < (two_weeks + splay_seconds):
with open(self.cache_path, 'rb') as f:
cache_data = json.load(f)
except (OSError, IOError, ValueError):
# Cache doesn't exist, or is invalid
self.user = {}
return
productlist = cache_data.get('productlist', [])
if productlist:
self.productlist = productlist
userlist = cache_data.get('userlist', [])
if userlist:
self.userlist = userlist
user_data = cache_data.get('user_data', {})
if user_data and user_data.get(self.key) == self.username:
self.user = user_data
else:
# Look through the userlist to see if we find the username.
# If not, the result is an empty dict anyway.
self.user = self.data()
def __write_cache(self):
"""Write the values to the cache file."""
cache_data = {}
cache_data['productlist'] = self.productlist or []
cache_data['userlist'] = self.userlist or []
cache_data['user_data'] = self.user or {}
try:
            with open(self.cache_path, 'w') as f:
json.dump(cache_data, f, indent=True, sort_keys=True)
except IOError:
# If we fail to write cache, it just means we check again next time
pass
# GATHERING DATA FROM THE API
# These functions all must query the API (directly or indirectly) for info
# not available from the cache, and are therefore expensive.
def gather_product_list(self, force=False):
"""
Get the list of product configurations by asking the API.
Returns 'productlist', which is a list of dictionaries containing all
the Configuration groups in use.
If 'force' is true, the API call will be made regardless of cache.
If a non-200 status code is returned by the API, an exception is
raised.
Example:
```
        >>> api.productlist[0]
{u'memberCount': 182, u'groupName': u'Administrators'}
>>> api.productlist[1]
{u'memberCount': 912,
u'groupName':
u'Default Document Cloud for enterprise - Pro Configuration'}
```
"""
if force or not self.productlist:
page = 0
result = {}
productlist = []
while result.get('lastPage', False) is not True:
url = "https://" + self.configs['host'] + \
self.configs['endpoint'] + "/groups/" + \
self.configs['org_id'] + "/" + str(page)
try:
result = self.__submit_request(url)
productlist += result.get('groups', [])
page += 1
except AdobeAPIBadStatusException:
raise
self.productlist = productlist
# Update the cache
if self.cache:
self.__write_cache()
return self.productlist
def gather_user_list(self, force=False):
"""
Get a list of all users by querying the API.
Returns 'userlist', which is a list of dictionaries containing all the
users in our org.
If 'force' is true, the API call will be made regardless of cache.
If a non-200 status code is returned by the API, an exception is
raised.
Example:
```
>>> api.userlist[0]
{u'status':
u'active', u'username': u'[email protected]', u'domain': u'fb.com',
u'firstname': u'Fake Firstname', u'lastname': u'Fake Lastname',
u'groups': [
u'Default Document Cloud for enterprise - Pro Configuration',
u'Default All Apps plan - 100 GB Configuration',
u'Default Illustrator CC - 0 GB Configuration',
u'Default InDesign CC - 0 GB Configuration',
u'Default Photoshop CC - 0 GB Configuration'],
u'country': u'US', u'type': u'federatedID', u'email': u'[email protected]'}
"""
if force or not self.userlist:
page = 0
result = {}
userlist = []
while result.get('lastPage', False) is not True:
url = "https://" + self.configs['host'] + \
self.configs['endpoint'] + "/users/" + \
self.configs['org_id'] + "/" + str(page)
try:
result = self.__submit_request(url)
userlist += result.get('users', [])
page += 1
except AdobeAPIBadStatusException:
raise
self.userlist = userlist
# Update the cache
if self.cache:
self.__write_cache()
return self.userlist
def users_of_product(self, product_config_name):
"""
Get a list of users of a specific configuration by querying the API.
'userlist' is a list of dictionaries containing the user data of each
user who is a member of that product configuration group.
If a non-200 status code is returned by the API, an exception is
raised.
Example:
```
>>> api.users_of_product(
'Default Document Cloud for enterprise - Pro Configuration')[0]
{u'status': u'active', u'username': u'[email protected]',
u'domain': u'fb.com', u'firstname': u'Fake', u'lastname': u'Fake',
u'country': u'US', u'type': u'federatedID', u'email': u'[email protected]'}
```
This data is not cached, so it is an expensive call each time.
"""
page = 0
result = {}
userlist = []
while result.get('lastPage', False) is not True:
url = "https://" + self.configs['host'] + \
self.configs['endpoint'] + "/users/" + \
self.configs['org_id'] + "/" + str(page) + "/" + \
quote(product_config_name)
try:
result = self.__submit_request(url)
userlist += result.get('users', [])
page += 1
except AdobeAPIBadStatusException as e:
error = json.loads(e.text)
if 'group.not_found' in error['result']:
# Invalid product name
raise AdobeAPINoProductException(product_config_name)
else:
raise
return userlist
def data(self):
"""Get the data for the user from the userlist."""
for user in self.userlist:
if user[self.key] == self.username:
return user
# If we get here, there was no matching username
return {}
def gather_user(self):
"""
Gather data about the user by querying the API.
Returns a dictionary containing the user data.
If a non-200 status code is returned by the API, an exception is
raised.
This data is cached, but this function does not read from the cache;
it will always fetch from the API.
If the user does not exist and 'allow_nonexistent_user' was not set to
True, this raises an AdobeAPINoUserException.
"""
url = "https://" + self.configs['host'] + \
self.configs['endpoint'] + "/organizations/" + \
self.configs['org_id'] + "/users/" + str(self.username)
try:
result = self.__submit_request(url)
self.user = result.get('user', {})
except AdobeAPIBadStatusException:
if self.allow_fake:
self.user = {}
return
raise AdobeAPINoUserException(self.username)
# USER SPECIFIC FUNCTIONS
# These convenience functions are all based on the user that the object was
# instantiated with.
def list_products(self):
"""Return the list of products for the current user."""
return self.user.get('groups', [])
def is_federated(self):
"""Return True if user is federated."""
return self.user.get('type') == 'federatedID'
def has_product(self, product_name):
"""Return True if user has the product config."""
return product_name in self.list_products()
def update_user(self):
"""Force update the user information."""
# Rebuild the userlist for updated information
self.gather_user()
if self.cache:
self.__write_cache()
# PRODUCT SPECIFIC FUNCTIONS
# These are not at all related to the user, and do not require a real user.
def product_exists(self, productname):
"""Return True if a product config exists."""
if not self.productlist:
self.gather_product_list()
for product in self.productlist:
if productname == product.get('groupName', ''):
return True
return False
# ACTION FUNCTIONS
# These functions are actions you can take on the user, which require
# posting data to the API.
def add_federated_user(self, email, country, firstname, lastname):
"""Add Federated user to organization."""
add_dict = {
'user': self.username,
'do': [
{
'createFederatedID': {
'email': email,
'country': country,
'firstname': firstname,
'lastname': lastname,
}
}
]
}
result = self._submit_user_action_request(add_dict)
return result
def update_user_information(self, email, country, firstname, lastname):
"""Update the existing user's information."""
add_dict = {
'user': self.username,
'do': [
{
'update': {
}
}
]
}
if email:
add_dict['do'][0]['update']['email'] = email
if country:
add_dict['do'][0]['update']['country'] = country
if firstname:
add_dict['do'][0]['update']['firstname'] = firstname
if lastname:
add_dict['do'][0]['update']['lastname'] = lastname
result = self._submit_user_action_request(add_dict)
return result
def remove_user_from_org(self):
"""Remove user from organization."""
if not self.user:
raise AdobeAPINoUserException(self.username)
remove_dict = {
'user': self.username,
'do': [
{
'removeFromOrg': {}
}
]
}
result = self._submit_user_action_request(remove_dict)
return result
def add_products_to_user(self, products):
"""Add product configs to username."""
# Is username in the organization?
if not self.user:
raise AdobeAPINoUserException(self.username)
# Is the product real?
        if isinstance(products, string_types):
products = [products]
for product in products:
if not self.product_exists(product):
raise AdobeAPINoProductException(product)
add_dict = {
'user': self.username,
'do': [
{
'add': {
'product': products
}
}
]
}
return self._submit_user_action_request(add_dict)
def remove_product_from_user(self, products):
"""Remove products from username."""
# Is username in the organization?
if not self.user:
raise AdobeAPINoUserException(self.username)
        if isinstance(products, string_types):
products = [products]
# Is the product real?
for product in products:
if not self.product_exists(product):
raise AdobeAPINoProductException(product)
add_dict = {
'user': self.username,
'do': [
{
'remove': {
'product': products
}
}
]
}
return self._submit_user_action_request(add_dict)
# END CLASS
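# Illustrative sketch (not part of the original module): read-only use of
# AdobeAPIObject as described in the constructor docstring. The email address
# and product configuration name are hypothetical, and a valid
# usermanagement.config plus private key must exist at the default locations.
if __name__ == '__main__':
    api = AdobeAPIObject(username='[email protected]',
                         allow_nonexistent_user=True)
    print('Known product configurations:', len(api.gather_product_list()))
    print('User found:', bool(api.user))
    print('Has Photoshop config:',
          api.has_product('Default Photoshop CC - 0 GB Configuration'))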
|
|
# -*- coding: utf-8 -*-
"""This file contains the interface for ESE database plugins."""
import construct
import logging
import pyesedb
from plaso.lib import errors
from plaso.parsers import plugins
class EseDbPlugin(plugins.BasePlugin):
"""The ESE database plugin interface."""
NAME = u'esedb'
BINARY_DATA_COLUMN_TYPES = frozenset([
pyesedb.column_types.BINARY_DATA,
pyesedb.column_types.LARGE_BINARY_DATA])
FLOATING_POINT_COLUMN_TYPES = frozenset([
pyesedb.column_types.FLOAT_32BIT,
pyesedb.column_types.DOUBLE_64BIT])
INTEGER_COLUMN_TYPES = frozenset([
pyesedb.column_types.CURRENCY,
pyesedb.column_types.DATE_TIME,
pyesedb.column_types.INTEGER_8BIT_UNSIGNED,
pyesedb.column_types.INTEGER_16BIT_SIGNED,
pyesedb.column_types.INTEGER_16BIT_UNSIGNED,
pyesedb.column_types.INTEGER_32BIT_SIGNED,
pyesedb.column_types.INTEGER_32BIT_UNSIGNED,
pyesedb.column_types.INTEGER_64BIT_SIGNED])
STRING_COLUMN_TYPES = frozenset([
pyesedb.column_types.TEXT,
pyesedb.column_types.LARGE_TEXT])
_UINT64_BIG_ENDIAN = construct.UBInt64(u'value')
_UINT64_LITTLE_ENDIAN = construct.ULInt64(u'value')
# Dictionary containing a callback method per table name.
# E.g. 'SystemIndex_0A': 'ParseSystemIndex_0A'
REQUIRED_TABLES = {}
OPTIONAL_TABLES = {}
def __init__(self):
"""Initializes the ESE database plugin."""
super(EseDbPlugin, self).__init__()
self._required_tables = frozenset(self.REQUIRED_TABLES.keys())
self._tables = {}
self._tables.update(self.REQUIRED_TABLES)
self._tables.update(self.OPTIONAL_TABLES)
def _ConvertValueBinaryDataToStringAscii(self, value):
"""Converts a binary data value into a string.
Args:
value: The binary data value containing an ASCII string or None.
Returns:
A string or None if value is None.
"""
if value:
return value.decode(u'ascii')
def _ConvertValueBinaryDataToStringBase16(self, value):
"""Converts a binary data value into a base-16 (hexadecimal) string.
Args:
value: The binary data value or None.
Returns:
A string or None if value is None.
"""
if value:
return value.encode(u'hex')
def _ConvertValueBinaryDataToUBInt64(self, value):
"""Converts a binary data value into an integer.
Args:
value: The binary data value containing an unsigned 64-bit big-endian
integer.
Returns:
An integer or None if value is None.
"""
if value:
return self._UINT64_BIG_ENDIAN.parse(value)
def _ConvertValueBinaryDataToULInt64(self, value):
"""Converts a binary data value into an integer.
Args:
value: The binary data value containing an unsigned 64-bit little-endian
integer.
Returns:
An integer or None if value is None.
"""
if value:
return self._UINT64_LITTLE_ENDIAN.parse(value)
def _GetRecordValue(self, record, value_entry):
"""Retrieves a specific value from the record.
Args:
record: The ESE record object (instance of pyesedb.record).
value_entry: The value entry.
Returns:
An object containing the value.
Raises:
ValueError: if the value is not supported.
"""
column_type = record.get_column_type(value_entry)
long_value = None
if record.is_long_value(value_entry):
long_value = record.get_value_data_as_long_value(value_entry)
if record.is_multi_value(value_entry):
# TODO: implement
raise ValueError(u'Multi value support not implemented yet.')
if column_type == pyesedb.column_types.NULL:
return
elif column_type == pyesedb.column_types.BOOLEAN:
# TODO: implement
raise ValueError(u'Boolean value support not implemented yet.')
elif column_type in self.INTEGER_COLUMN_TYPES:
if long_value:
raise ValueError(u'Long integer value not supported.')
return record.get_value_data_as_integer(value_entry)
elif column_type in self.FLOATING_POINT_COLUMN_TYPES:
if long_value:
raise ValueError(u'Long floating point value not supported.')
return record.get_value_data_as_floating_point(value_entry)
elif column_type in self.STRING_COLUMN_TYPES:
if long_value:
return long_value.get_data_as_string()
return record.get_value_data_as_string(value_entry)
elif column_type == pyesedb.column_types.GUID:
# TODO: implement
raise ValueError(u'GUID value support not implemented yet.')
if long_value:
return long_value.get_data()
return record.get_value_data(value_entry)
def _GetRecordValues(self, table_name, record, value_mappings=None):
"""Retrieves the values from the record.
Args:
table_name: The name of the table.
record: The ESE record object (instance of pyesedb.record).
value_mappings: Optional dict of value mappings, which map the column
name to a callback method. The default is None.
Returns:
      A dict containing the values.
"""
record_values = {}
for value_entry in range(0, record.number_of_values):
column_name = record.get_column_name(value_entry)
if column_name in record_values:
logging.warning(
u'[{0:s}] duplicate column: {1:s} in table: {2:s}'.format(
self.NAME, column_name, table_name))
continue
value_callback = None
if value_mappings and column_name in value_mappings:
value_callback_method = value_mappings.get(column_name)
if value_callback_method:
value_callback = getattr(self, value_callback_method, None)
if value_callback is None:
logging.warning((
u'[{0:s}] missing value callback method: {1:s} for column: '
u'{2:s} in table: {3:s}').format(
self.NAME, value_callback_method, column_name, table_name))
try:
value = self._GetRecordValue(record, value_entry)
except ValueError as exception:
        logging.warning(exception)
        # The value was never assigned; skip this column.
        continue
if value_callback:
value = value_callback(value)
record_values[column_name] = value
return record_values
def _GetTableNames(self, database):
"""Retrieves the table names in a database.
Args:
database: The ESE database object (instance of pyesedb.file).
Returns:
A list of the table names.
"""
table_names = []
for esedb_table in database.tables:
table_names.append(esedb_table.name)
return table_names
def GetEntries(self, parser_mediator, database=None, cache=None, **kwargs):
"""Extracts event objects from the database.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
database: Optional ESE database object (instance of pyesedb.file).
The default is None.
cache: Optional cache object (instance of EseDbCache). The default is
None.
Raises:
ValueError: If the database attribute is not valid.
"""
if database is None:
raise ValueError(u'Invalid database.')
for table_name, callback_method in self._tables.iteritems():
if not callback_method:
# Table names without a callback method are allowed to improve
# the detection of a database based on its table names.
continue
callback = getattr(self, callback_method, None)
if callback is None:
logging.warning(
u'[{0:s}] missing callback method: {1:s} for table: {2:s}'.format(
self.NAME, callback_method, table_name))
continue
esedb_table = database.get_table_by_name(table_name)
if not esedb_table:
logging.warning(u'[{0:s}] missing table: {1:s}'.format(
self.NAME, table_name))
continue
# The database is passed in case the database contains table names
# that are assigned dynamically and cannot be defined by
# the table name-callback mechanism.
callback(
parser_mediator, database=database, table=esedb_table, cache=cache,
**kwargs)
def Process(self, parser_mediator, database=None, cache=None, **kwargs):
"""Determines if this is the appropriate plugin for the database.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
database: Optional ESE database object (instance of pyesedb.file).
The default is None.
cache: Optional cache object (instance of EseDbCache). The default is
None.
Raises:
errors.WrongPlugin: If the database does not contain all the tables
defined in the required_tables set.
ValueError: If the database attribute is not valid.
"""
if database is None:
raise ValueError(u'Invalid database.')
table_names = frozenset(self._GetTableNames(database))
if self._required_tables.difference(table_names):
raise errors.WrongPlugin(
u'[{0:s}] required tables not found.'.format(self.NAME))
# This will raise if unhandled keyword arguments are passed.
super(EseDbPlugin, self).Process(parser_mediator)
self.GetEntries(
parser_mediator, database=database, cache=cache, **kwargs)
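# Illustrative sketch (not part of plaso): a concrete plugin builds on the
# mechanism above by mapping table names to callback-method names (a None
# value carries no callback and only helps database detection) and by turning
# records into dicts with _GetRecordValues(). The class layout and attribute
# wiring below are hypothetical; the real base class sets up _tables and
# _required_tables outside this excerpt.
#
#   class MyHistoryEseDbPlugin(EseDbPlugin):
#     NAME = u'my_history'
#
#     def ParseContainersTable(
#         self, parser_mediator, database=None, table=None, cache=None,
#         **kwargs):
#       # Iterate the pyesedb table records (iteration API assumed) and turn
#       # each record into a dict of values keyed by column name.
#       for esedb_record in table.records:
#         record_values = self._GetRecordValues(table.name, esedb_record)
#         ...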
|
|
#!/usr/bin/python
# $Id:$
import ctypes
import math
import sys
import threading
import time
import pyglet
_debug = pyglet.options['debug_media']
import mt_media
from . import lib_dsound as lib
from pyglet.window.win32 import user32, kernel32
class DirectSoundException(mt_media.MediaException):
pass
def _db(gain):
"""Convert linear gain in range [0.0, 1.0] to 100ths of dB."""
if gain <= 0:
return -10000
return max(-10000, min(int(1000 * math.log(min(gain, 1))), 0))
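# Worked example (illustrative, not part of the original module): with the
# natural-log formula above, _db(1.0) == 0 (full volume), _db(0.5) == -693,
# and any gain <= 0 clamps to DirectSound's -10000 floor (i.e. -100 dB).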
class DirectSoundWorker(mt_media.MediaThread):
_min_write_size = 9600
# Time to wait if there are players, but they're all full.
_nap_time = 0.05
# Time to wait if there are no players.
_sleep_time = None
def __init__(self):
super().__init__()
self.players = set()
def run(self):
while True:
# This is a big lock, but ensures a player is not deleted while
# we're processing it -- this saves on extra checks in the
# player's methods that would otherwise have to check that it's
# still alive.
if _debug:
print('DirectSoundWorker run attempt acquire')
self.condition.acquire()
if _debug:
print('DirectSoundWorker run acquire')
if self.stopped:
self.condition.release()
break
sleep_time = -1
if self.players:
player = None
write_size = 0
for p in self.players:
s = p.get_write_size()
if s > write_size:
player = p
write_size = s
if write_size > self._min_write_size:
player.refill(write_size)
else:
sleep_time = self._nap_time
else:
sleep_time = self._sleep_time
self.condition.release()
if _debug:
print('DirectSoundWorker run release')
if sleep_time != -1:
self.sleep(sleep_time)
if _debug:
print('DirectSoundWorker exiting')
def add(self, player):
if _debug:
print('DirectSoundWorker add', player)
self.condition.acquire()
self.players.add(player)
self.condition.notify()
self.condition.release()
if _debug:
print('return DirectSoundWorker add', player)
def remove(self, player):
if _debug:
print('DirectSoundWorker remove', player)
self.condition.acquire()
try:
self.players.remove(player)
except KeyError:
pass
self.condition.notify()
self.condition.release()
if _debug:
print('return DirectSoundWorker remove', player)
class DirectSoundAudioPlayer(mt_media.AbstractAudioPlayer):
# How many bytes the ring buffer should be
_buffer_size = 44800 * 1
# Need to cache these because pyglet API allows update separately, but
# DSound requires both to be set at once.
_cone_inner_angle = 360
_cone_outer_angle = 360
def __init__(self, source_group, player):
super().__init__(source_group, player)
# Locking strategy:
# All DirectSound calls should be locked. All instance vars relating
# to buffering/filling/time/events should be locked (used by both
# application and worker thread). Other instance vars (consts and
# 3d vars) do not need to be locked.
self._lock = threading.RLock()
# Desired play state (may be actually paused due to underrun -- not
# implemented yet).
self._playing = False
# Up to one audio data may be buffered if too much data was received
# from the source that could not be written immediately into the
# buffer. See refill().
self._next_audio_data = None
# Theoretical write and play cursors for an infinite buffer. play
# cursor is always <= write cursor (when equal, underrun is
# happening).
self._write_cursor = 0
self._play_cursor = 0
# Cursor position of end of data. Silence is written after
# eos for one buffer size.
self._eos_cursor = None
# Indexes into DSound circular buffer. Complications ensue wrt each
# other to avoid writing over the play cursor. See get_write_size and
# write().
self._play_cursor_ring = 0
self._write_cursor_ring = 0
# List of (play_cursor, MediaEvent), in sort order
self._events = list()
# List of (cursor, timestamp), in sort order (cursor gives expiry
# place of the timestamp)
self._timestamps = list()
audio_format = source_group.audio_format
wfx = lib.WAVEFORMATEX()
wfx.wFormatTag = lib.WAVE_FORMAT_PCM
wfx.nChannels = audio_format.channels
wfx.nSamplesPerSec = audio_format.sample_rate
wfx.wBitsPerSample = audio_format.sample_size
wfx.nBlockAlign = wfx.wBitsPerSample * wfx.nChannels // 8
wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign
dsbdesc = lib.DSBUFFERDESC()
dsbdesc.dwSize = ctypes.sizeof(dsbdesc)
dsbdesc.dwFlags = (lib.DSBCAPS_GLOBALFOCUS |
lib.DSBCAPS_GETCURRENTPOSITION2 |
lib.DSBCAPS_CTRLFREQUENCY |
lib.DSBCAPS_CTRLVOLUME)
if audio_format.channels == 1:
dsbdesc.dwFlags |= lib.DSBCAPS_CTRL3D
dsbdesc.dwBufferBytes = self._buffer_size
dsbdesc.lpwfxFormat = ctypes.pointer(wfx)
# DSound buffer
self._buffer = lib.IDirectSoundBuffer()
driver._dsound.CreateSoundBuffer(dsbdesc,
ctypes.byref(self._buffer),
None)
if audio_format.channels == 1:
self._buffer3d = lib.IDirectSound3DBuffer()
self._buffer.QueryInterface(lib.IID_IDirectSound3DBuffer,
ctypes.byref(self._buffer3d))
else:
self._buffer3d = None
self._buffer.SetCurrentPosition(0)
self.refill(self._buffer_size)
def __del__(self):
try:
self.delete()
except:
pass
def delete(self):
if driver and driver.worker:
driver.worker.remove(self)
self.lock()
self._buffer.Stop()
self._buffer.Release()
self._buffer = None
if self._buffer3d:
self._buffer3d.Release()
self._buffer3d = None
self.unlock()
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
def play(self):
if _debug:
print('DirectSound play')
driver.worker.add(self)
self.lock()
if not self._playing:
self._playing = True
self._buffer.Play(0, 0, lib.DSBPLAY_LOOPING)
self.unlock()
if _debug:
print('return DirectSound play')
def stop(self):
if _debug:
print('DirectSound stop')
driver.worker.remove(self)
self.lock()
if self._playing:
self._playing = False
self._buffer.Stop()
self.unlock()
if _debug:
print('return DirectSound stop')
def clear(self):
if _debug:
print('DirectSound clear')
self.lock()
self._buffer.SetCurrentPosition(0)
self._play_cursor_ring = self._write_cursor_ring = 0
self._play_cursor = self._write_cursor
self._eos_cursor = None
self._next_audio_data = None
del self._events[:]
del self._timestamps[:]
self.unlock()
def refill(self, write_size):
self.lock()
while write_size > 0:
if _debug:
print('refill, write_size =', write_size)
# Get next audio packet (or remains of last one)
if self._next_audio_data:
audio_data = self._next_audio_data
self._next_audio_data = None
else:
audio_data = self.source_group.get_audio_data(write_size)
# Write it, or silence if there are no more packets
if audio_data:
# Add events
for event in audio_data.events:
event_cursor = self._write_cursor + event.timestamp * \
self.source_group.audio_format.bytes_per_second
self._events.append((event_cursor, event))
# Add timestamp (at end of this data packet)
ts_cursor = self._write_cursor + audio_data.length
self._timestamps.append(
(ts_cursor, audio_data.timestamp + audio_data.duration))
# Write data
if _debug:
print('write', audio_data.length)
length = min(write_size, audio_data.length)
self.write(audio_data, length)
if audio_data.length:
self._next_audio_data = audio_data
write_size -= length
else:
# Write silence
if self._eos_cursor is None:
self._eos_cursor = self._write_cursor
self._events.append(
(self._eos_cursor,
mt_media.MediaEvent(0, 'on_eos')))
self._events.append(
(self._eos_cursor,
mt_media.MediaEvent(0, 'on_source_group_eos')))
self._events.sort()
if self._write_cursor > self._eos_cursor + self._buffer_size:
self.stop()
else:
self.write(None, write_size)
write_size = 0
self.unlock()
def update_play_cursor(self):
self.lock()
play_cursor_ring = lib.DWORD()
self._buffer.GetCurrentPosition(play_cursor_ring, None)
if play_cursor_ring.value < self._play_cursor_ring:
# Wrapped around
self._play_cursor += self._buffer_size - self._play_cursor_ring
self._play_cursor_ring = 0
self._play_cursor += play_cursor_ring.value - self._play_cursor_ring
self._play_cursor_ring = play_cursor_ring.value
# Dispatch pending events
pending_events = list()
while self._events and self._events[0][0] <= self._play_cursor:
_, event = self._events.pop(0)
pending_events.append(event)
if _debug:
print('Dispatching pending events:', pending_events)
print('Remaining events:', self._events)
# Remove expired timestamps
while self._timestamps and self._timestamps[0][0] < self._play_cursor:
del self._timestamps[0]
self.unlock()
for event in pending_events:
event._sync_dispatch_to_player(self.player)
def get_write_size(self):
self.update_play_cursor()
self.lock()
play_cursor = self._play_cursor
write_cursor = self._write_cursor
self.unlock()
return self._buffer_size - (write_cursor - play_cursor)
def write(self, audio_data, length):
# Pass audio_data=None to write silence
if length == 0:
return 0
self.lock()
p1 = ctypes.c_void_p()
l1 = lib.DWORD()
p2 = ctypes.c_void_p()
l2 = lib.DWORD()
self._buffer.Lock(self._write_cursor_ring, length,
ctypes.byref(p1), l1, ctypes.byref(p2), l2, 0)
assert length == l1.value + l2.value
if audio_data:
ctypes.memmove(p1, audio_data.data, l1.value)
audio_data.consume(l1.value, self.source_group.audio_format)
if l2.value:
ctypes.memmove(p2, audio_data.data, l2.value)
audio_data.consume(l2.value, self.source_group.audio_format)
else:
ctypes.memset(p1, 0, l1.value)
if l2.value:
ctypes.memset(p2, 0, l2.value)
self._buffer.Unlock(p1, l1, p2, l2)
self._write_cursor += length
self._write_cursor_ring += length
self._write_cursor_ring %= self._buffer_size
self.unlock()
def get_time(self):
self.lock()
if self._timestamps:
cursor, ts = self._timestamps[0]
result = ts + (self._play_cursor - cursor) / \
float(self.source_group.audio_format.bytes_per_second)
else:
result = None
self.unlock()
return result
def set_volume(self, volume):
volume = _db(volume)
self.lock()
self._buffer.SetVolume(volume)
self.unlock()
def set_position(self, position):
if self._buffer3d:
x, y, z = position
self.lock()
self._buffer3d.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)
self.unlock()
def set_min_distance(self, min_distance):
if self._buffer3d:
self.lock()
self._buffer3d.SetMinDistance(min_distance, lib.DS3D_IMMEDIATE)
self.unlock()
def set_max_distance(self, max_distance):
if self._buffer3d:
self.lock()
self._buffer3d.SetMaxDistance(max_distance, lib.DS3D_IMMEDIATE)
self.unlock()
def set_pitch(self, pitch):
        frequency = int(pitch * self.source_group.audio_format.sample_rate)
self.lock()
self._buffer.SetFrequency(frequency)
self.unlock()
def set_cone_orientation(self, cone_orientation):
if self._buffer3d:
x, y, z = cone_orientation
self.lock()
self._buffer3d.SetConeOrientation(x, y, -z, lib.DS3D_IMMEDIATE)
self.unlock()
def set_cone_inner_angle(self, cone_inner_angle):
if self._buffer3d:
self._cone_inner_angle = int(cone_inner_angle)
self._set_cone_angles()
def set_cone_outer_angle(self, cone_outer_angle):
if self._buffer3d:
self._cone_outer_angle = int(cone_outer_angle)
self._set_cone_angles()
def _set_cone_angles(self):
inner = min(self._cone_inner_angle, self._cone_outer_angle)
outer = max(self._cone_inner_angle, self._cone_outer_angle)
self.lock()
self._buffer3d.SetConeAngles(inner, outer, lib.DS3D_IMMEDIATE)
self.unlock()
def set_cone_outer_gain(self, cone_outer_gain):
if self._buffer3d:
volume = _db(cone_outer_gain)
self.lock()
self._buffer3d.SetConeOutsideVolume(volume, lib.DS3D_IMMEDIATE)
self.unlock()
class DirectSoundDriver(mt_media.AbstractAudioDriver):
def __init__(self):
self._dsound = lib.IDirectSound()
lib.DirectSoundCreate(None, ctypes.byref(self._dsound), None)
        # A trick used by mplayer: use the desktop as the window handle, since
        # it would be complex to use pyglet window handles (and what would we
        # do when the application is audio only?).
hwnd = user32.GetDesktopWindow()
self._dsound.SetCooperativeLevel(hwnd, lib.DSSCL_NORMAL)
# Create primary buffer with 3D and volume capabilities
self._buffer = lib.IDirectSoundBuffer()
dsbd = lib.DSBUFFERDESC()
dsbd.dwSize = ctypes.sizeof(dsbd)
dsbd.dwFlags = (lib.DSBCAPS_CTRL3D |
lib.DSBCAPS_CTRLVOLUME |
lib.DSBCAPS_PRIMARYBUFFER)
self._dsound.CreateSoundBuffer(dsbd, ctypes.byref(self._buffer), None)
# Create listener
self._listener = lib.IDirectSound3DListener()
self._buffer.QueryInterface(lib.IID_IDirectSound3DListener,
ctypes.byref(self._listener))
# Create worker thread
self.worker = DirectSoundWorker()
self.worker.start()
def __del__(self):
try:
if self._buffer:
self.delete()
except:
pass
def create_audio_player(self, source_group, player):
return DirectSoundAudioPlayer(source_group, player)
def delete(self):
self.worker.stop()
self._buffer.Release()
self._buffer = None
self._listener.Release()
self._listener = None
# Listener API
def _set_volume(self, volume):
self._volume = volume
self._buffer.SetVolume(_db(volume))
def _set_position(self, position):
self._position = position
x, y, z = position
self._listener.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)
def _set_forward_orientation(self, orientation):
self._forward_orientation = orientation
self._set_orientation()
def _set_up_orientation(self, orientation):
self._up_orientation = orientation
self._set_orientation()
def _set_orientation(self):
x, y, z = self._forward_orientation
ux, uy, uz = self._up_orientation
self._listener.SetOrientation(
x, y, -z, ux, uy, -uz, lib.DS3D_IMMEDIATE)
def create_audio_driver():
global driver
driver = DirectSoundDriver()
return driver
# Global driver needed for access to worker thread and _dsound
driver = None
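# Usage sketch (assumptions about the surrounding mt_media framework, which is
# not shown here): the application-facing player calls create_audio_driver()
# once, then obtains per-source players via
# driver.create_audio_player(source_group, player). Each
# DirectSoundAudioPlayer registers itself with driver.worker when play() is
# called, and the worker thread keeps its ring buffer topped up by calling
# get_write_size()/refill() as described in DirectSoundWorker.run() above.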
|
|
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message structures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization.
Classes use __slots__ to ensure extraneous attributes aren't accidentally added
by tests, compromising their intended effect.
"""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
import zencoin_scrypt
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, assert_equal
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 80014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_LOCATOR_SZ = 101
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
BIP125_SEQUENCE_NUMBER = 0xfffffffd # Sequence number that is BIP 125 opt-in and BIP 68-opt-out
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_NETWORK_LIMITED = (1 << 10)
MSG_TX = 1
MSG_BLOCK = 2
MSG_WITNESS_FLAG = 1 << 30
MSG_TYPE_MASK = 0xffffffff >> 2
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
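# Boundary checks for the CompactSize helpers above (a sketch added for
# illustration, mirroring the module-level assert on BLOCK_HEADER_SIZE further
# below): values below 253 take a single byte, then the 0xfd/0xfe/0xff
# prefixes select 2-, 4- and 8-byte little-endian encodings.
assert ser_compact_size(252) == b"\xfc"
assert ser_compact_size(253) == b"\xfd\xfd\x00"
assert ser_compact_size(0x10000) == b"\xfe\x00\x00\x01\x00"
assert deser_compact_size(BytesIO(ser_compact_size(2 ** 32))) == 2 ** 32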
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
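# Illustrative check of the compact 'nBits' decoding above: the classic
# difficulty-1 target 0x1d00ffff expands to 0xffff shifted left by
# 8 * (0x1d - 3) bits, i.e. 0x00000000ffff0000...0000 as a 256-bit value.
assert uint256_from_compact(0x1d00ffff) == 0xffff << 208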
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
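# Example usage (illustrative): the helpers above round-trip objects through
# their RPC hex form, e.g.
#
#   tx = FromHex(CTransaction(), raw_tx_hex)   # raw_tx_hex: hex string from an RPC call
#   assert ToHex(tx) == raw_tx_hex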
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress:
__slots__ = ("ip", "nServices", "pchReserved", "port", "time")
def __init__(self):
self.time = 0
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f, with_time=True):
if with_time:
self.time = struct.unpack("<i", f.read(4))[0]
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self, with_time=True):
r = b""
if with_time:
r += struct.pack("<i", self.time)
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv:
__slots__ = ("hash", "type")
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator:
__slots__ = ("nVersion", "vHave")
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint:
__slots__ = ("hash", "n")
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn:
__slots__ = ("nSequence", "prevout", "scriptSig")
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut:
__slots__ = ("nValue", "scriptPubKey")
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness:
__slots__ = ("stack",)
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([bytes_to_hex_str(x) for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness:
__slots__ = ("scriptWitness",)
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness:
__slots__ = ("vtxinwit",)
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction:
__slots__ = ("hash", "nLockTime", "nVersion", "sha256", "vin", "vout",
"wit")
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
else:
self.wit = CTxWitness()
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_with_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.hash
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
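# Note on the segwit encoding above (added for clarity): when witness data is
# present, serialize_with_witness() writes an empty vin vector (a single 0x00
# marker byte) followed by the 0x01 flag byte before the real vin/vout
# vectors, and deserialize() recognises exactly that marker/flag pair to
# decide whether a CTxWitness follows vout.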
class CBlockHeader:
__slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce",
"nTime", "nVersion", "sha256", "scrypt256")
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.scrypt256 = header.scrypt256
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
self.scrypt256 = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
self.scrypt256 = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
self.scrypt256 = uint256_from_str(zencoin_scrypt.getPoWHash(r))
def rehash(self):
self.sha256 = None
self.scrypt256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
BLOCK_HEADER_SIZE = len(CBlockHeader().serialize())
assert_equal(BLOCK_HEADER_SIZE, 80)
class CBlock(CBlockHeader):
__slots__ = ("vtx",)
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.scrypt256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.scrypt256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
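# Worked example for CBlock.get_merkle_root() above (illustrative): with an
# odd number of leaves the last hash is paired with itself, because
# i2 = min(i + 1, len(hashes) - 1). For three leaf hashes [a, b, c] (each the
# ser_uint256 of a txid):
#
#   level 1: [hash256(a + b), hash256(c + c)]
#   root:    hash256(hash256(a + b) + hash256(c + c))
#
# which matches Bitcoin's duplicate-last-entry merkle construction.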
class PrefilledTransaction:
__slots__ = ("index", "tx")
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs:
__slots__ = ("header", "nonce", "prefilled_txn", "prefilled_txn_length",
"shortids", "shortids_length")
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
        return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s)" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
__slots__ = ()
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152 compact block shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
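# Illustrative note (not in the upstream file): the mask above keeps the low
# 48 bits of the siphash result, i.e. exactly the 6-byte shortid that
# P2PHeaderAndShortIDs reads and writes on the wire. The keys are derived from
# the block header and nonce by HeaderAndShortIDs.get_siphash_keys() below:
#
#   k0, k1 = header_and_shortids.get_siphash_keys()   # header_and_shortids: a HeaderAndShortIDs
#   shortid = calculate_shortid(k0, k1, tx.sha256)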
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs:
__slots__ = ("header", "nonce", "prefilled_txn", "shortids", "use_witness")
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids is not None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
        return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s)" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
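# Worked example of the differential index encoding in to_p2p() above
# (illustrative): absolute prefilled-transaction indices [0, 2, 5] go on the
# wire as [0, 1, 2] (each entry is the gap to the previous absolute index,
# minus one), and the constructor applies the inverse transform when built
# from a received P2PHeaderAndShortIDs object.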
class BlockTransactionsRequest:
__slots__ = ("blockhash", "indexes")
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
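# Worked example for from_absolute()/to_absolute() above (illustrative):
#
#   req = BlockTransactionsRequest(blockhash=some_hash)   # some_hash: any uint256
#   req.from_absolute([1, 3, 4])
#   req.indexes       -> [1, 1, 0]   # each entry: gap to previous absolute index, minus one
#   req.to_absolute() -> [1, 3, 4]   # recovers the absolute positions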
class BlockTransactions:
__slots__ = ("blockhash", "transactions")
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree:
__slots__ = ("nTransactions", "vBits", "vHash")
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
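# Illustrative note on the vBits packing above (not original code): bits are
# packed little-endian within each byte, so vBits == [True, False, True]
# contributes the flag bytes ser_string(b'\x05') (0b00000101) to the
# serialization, and deserialize() expands every bit of the received byte
# vector back into a list of booleans.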
class CMerkleBlock:
__slots__ = ("header", "txn")
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version:
__slots__ = ("addrFrom", "addrTo", "nNonce", "nRelay", "nServices",
"nStartingHeight", "nTime", "nVersion", "strSubVer")
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK | NODE_WITNESS
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f, False)
self.addrFrom = CAddress()
self.addrFrom.deserialize(f, False)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize(False)
r += self.addrFrom.serialize(False)
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack:
__slots__ = ()
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr:
__slots__ = ("addrs",)
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv:
__slots__ = ("inv",)
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata:
__slots__ = ("inv",)
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks:
__slots__ = ("locator", "hashstop")
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx:
__slots__ = ("tx",)
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
__slots__ = ()
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block:
__slots__ = ("block",)
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize(with_witness=False)
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic:
__slots__ = ("command", "data")
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
__slots__ = ()
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr:
__slots__ = ()
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping:
__slots__ = ("nonce",)
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong:
__slots__ = ("nonce",)
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool:
__slots__ = ()
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_notfound:
__slots__ = ("vec", )
command = b"notfound"
def __init__(self, vec=None):
self.vec = vec or []
def deserialize(self, f):
self.vec = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.vec)
def __repr__(self):
return "msg_notfound(vec=%s)" % (repr(self.vec))
class msg_sendheaders:
__slots__ = ()
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders:
__slots__ = ("hashstop", "locator",)
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers:
__slots__ = ("headers",)
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject:
__slots__ = ("code", "data", "message", "reason")
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter:
__slots__ = ("feerate",)
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct:
__slots__ = ("announce", "version")
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock:
__slots__ = ("header_and_shortids",)
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn:
__slots__ = ("block_txn_request",)
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn:
__slots__ = ("block_transactions",)
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
__slots__ = ()
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
|
|
# Copyright 2011, OpenStack Foundation
# Copyright 2012, Red Hat, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import glance_store
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import excutils
from oslo_utils import timeutils
import six
import webob
from glance.common import exception
from glance.common import utils
from glance.domain import proxy as domain_proxy
from glance import i18n
_ = i18n._
_LE = i18n._LE
notifier_opts = [
cfg.StrOpt('default_publisher_id', default="image.localhost",
help='Default publisher_id for outgoing notifications.'),
cfg.ListOpt('disabled_notifications', default=[],
help='List of disabled notifications. A notification can be '
'given either as a notification type to disable a single '
'event, or as a notification group prefix to disable all '
'events within a group. Example: if this config option '
'is set to ["image.create", "metadef_namespace"], then '
'"image.create" notification will not be sent after '
'image is created and none of the notifications for '
'metadefinition namespaces will be sent.'),
]
CONF = cfg.CONF
CONF.register_opts(notifier_opts)
LOG = logging.getLogger(__name__)
_ALIASES = {
'glance.openstack.common.rpc.impl_kombu': 'rabbit',
'glance.openstack.common.rpc.impl_qpid': 'qpid',
'glance.openstack.common.rpc.impl_zmq': 'zmq',
}
def get_transport():
return oslo_messaging.get_transport(CONF, aliases=_ALIASES)
class Notifier(object):
"""Uses a notification strategy to send out messages about events."""
def __init__(self):
publisher_id = CONF.default_publisher_id
self._transport = get_transport()
self._notifier = oslo_messaging.Notifier(self._transport,
publisher_id=publisher_id)
def warn(self, event_type, payload):
self._notifier.warn({}, event_type, payload)
def info(self, event_type, payload):
self._notifier.info({}, event_type, payload)
def error(self, event_type, payload):
self._notifier.error({}, event_type, payload)
def _get_notification_group(notification):
return notification.split('.', 1)[0]
def _is_notification_enabled(notification):
disabled_notifications = CONF.disabled_notifications
notification_group = _get_notification_group(notification)
notifications = (notification, notification_group)
for disabled_notification in disabled_notifications:
if disabled_notification in notifications:
return False
return True
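# Worked example of the filtering above (illustrative), matching the
# 'disabled_notifications' help text: with the option set to
# ["image.create", "metadef_namespace"]:
#
#   _is_notification_enabled('image.create')             -> False  (exact match)
#   _is_notification_enabled('image.upload')             -> True   (neither the name nor the 'image' group is listed)
#   _is_notification_enabled('metadef_namespace.create') -> False  (group prefix match)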
def _send_notification(notify, notification_type, payload):
if _is_notification_enabled(notification_type):
notify(notification_type, payload)
def format_image_notification(image):
"""
Given a glance.domain.Image object, return a dictionary of relevant
notification information. We purposely do not include 'location'
as it may contain credentials.
"""
return {
'id': image.image_id,
'name': image.name,
'status': image.status,
'created_at': timeutils.isotime(image.created_at),
'updated_at': timeutils.isotime(image.updated_at),
'min_disk': image.min_disk,
'min_ram': image.min_ram,
'protected': image.protected,
'checksum': image.checksum,
'owner': image.owner,
'disk_format': image.disk_format,
'container_format': image.container_format,
'size': image.size,
'is_public': image.visibility == 'public',
'properties': dict(image.extra_properties),
'tags': list(image.tags),
'deleted': False,
'deleted_at': None,
}
def format_task_notification(task):
# NOTE(nikhil): input is not passed to the notifier payload as it may
# contain sensitive info.
return {
'id': task.task_id,
'type': task.type,
'status': task.status,
'result': None,
'owner': task.owner,
'message': None,
'expires_at': timeutils.isotime(task.expires_at),
'created_at': timeutils.isotime(task.created_at),
'updated_at': timeutils.isotime(task.updated_at),
'deleted': False,
'deleted_at': None,
}
def format_metadef_namespace_notification(metadef_namespace):
return {
'namespace': metadef_namespace.namespace,
'namespace_old': metadef_namespace.namespace,
'display_name': metadef_namespace.display_name,
'protected': metadef_namespace.protected,
'visibility': metadef_namespace.visibility,
'owner': metadef_namespace.owner,
'description': metadef_namespace.description,
'created_at': timeutils.isotime(metadef_namespace.created_at),
'updated_at': timeutils.isotime(metadef_namespace.updated_at),
'deleted': False,
'deleted_at': None,
}
def format_metadef_object_notification(metadef_object):
object_properties = metadef_object.properties or {}
properties = []
for name, prop in six.iteritems(object_properties):
object_property = _format_metadef_object_property(name, prop)
properties.append(object_property)
return {
'namespace': metadef_object.namespace,
'name': metadef_object.name,
'name_old': metadef_object.name,
'properties': properties,
'required': metadef_object.required,
'description': metadef_object.description,
'created_at': timeutils.isotime(metadef_object.created_at),
'updated_at': timeutils.isotime(metadef_object.updated_at),
'deleted': False,
'deleted_at': None,
}
def _format_metadef_object_property(name, metadef_property):
return {
'name': name,
'type': metadef_property.type or None,
'title': metadef_property.title or None,
'description': metadef_property.description or None,
'default': metadef_property.default or None,
'minimum': metadef_property.minimum or None,
'maximum': metadef_property.maximum or None,
'enum': metadef_property.enum or None,
'pattern': metadef_property.pattern or None,
'minLength': metadef_property.minLength or None,
'maxLength': metadef_property.maxLength or None,
'confidential': metadef_property.confidential or None,
'items': metadef_property.items or None,
'uniqueItems': metadef_property.uniqueItems or None,
'minItems': metadef_property.minItems or None,
'maxItems': metadef_property.maxItems or None,
'additionalItems': metadef_property.additionalItems or None,
}
def format_metadef_property_notification(metadef_property):
schema = metadef_property.schema
return {
'namespace': metadef_property.namespace,
'name': metadef_property.name,
'name_old': metadef_property.name,
'type': schema.get('type'),
'title': schema.get('title'),
'description': schema.get('description'),
'default': schema.get('default'),
'minimum': schema.get('minimum'),
'maximum': schema.get('maximum'),
'enum': schema.get('enum'),
'pattern': schema.get('pattern'),
'minLength': schema.get('minLength'),
'maxLength': schema.get('maxLength'),
'confidential': schema.get('confidential'),
'items': schema.get('items'),
'uniqueItems': schema.get('uniqueItems'),
'minItems': schema.get('minItems'),
'maxItems': schema.get('maxItems'),
'additionalItems': schema.get('additionalItems'),
'deleted': False,
'deleted_at': None,
}
def format_metadef_resource_type_notification(metadef_resource_type):
return {
'namespace': metadef_resource_type.namespace,
'name': metadef_resource_type.name,
'name_old': metadef_resource_type.name,
'prefix': metadef_resource_type.prefix,
'properties_target': metadef_resource_type.properties_target,
'created_at': timeutils.isotime(metadef_resource_type.created_at),
'updated_at': timeutils.isotime(metadef_resource_type.updated_at),
'deleted': False,
'deleted_at': None,
}
def format_metadef_tag_notification(metadef_tag):
return {
'namespace': metadef_tag.namespace,
'name': metadef_tag.name,
'name_old': metadef_tag.name,
'created_at': timeutils.isotime(metadef_tag.created_at),
'updated_at': timeutils.isotime(metadef_tag.updated_at),
'deleted': False,
'deleted_at': None,
}
class NotificationBase(object):
def get_payload(self, obj):
return {}
def send_notification(self, notification_id, obj, extra_payload=None):
payload = self.get_payload(obj)
if extra_payload is not None:
payload.update(extra_payload)
_send_notification(self.notifier.info, notification_id, payload)
@six.add_metaclass(abc.ABCMeta)
class NotificationProxy(NotificationBase):
def __init__(self, repo, context, notifier):
self.repo = repo
self.context = context
self.notifier = notifier
super_class = self.get_super_class()
super_class.__init__(self, repo)
@abc.abstractmethod
def get_super_class(self):
pass
@six.add_metaclass(abc.ABCMeta)
class NotificationRepoProxy(NotificationBase):
def __init__(self, repo, context, notifier):
self.repo = repo
self.context = context
self.notifier = notifier
proxy_kwargs = {'context': self.context, 'notifier': self.notifier}
proxy_class = self.get_proxy_class()
super_class = self.get_super_class()
super_class.__init__(self, repo, proxy_class, proxy_kwargs)
@abc.abstractmethod
def get_super_class(self):
pass
@abc.abstractmethod
def get_proxy_class(self):
pass
@six.add_metaclass(abc.ABCMeta)
class NotificationFactoryProxy(object):
def __init__(self, factory, context, notifier):
kwargs = {'context': context, 'notifier': notifier}
proxy_class = self.get_proxy_class()
super_class = self.get_super_class()
super_class.__init__(self, factory, proxy_class, kwargs)
@abc.abstractmethod
def get_super_class(self):
pass
@abc.abstractmethod
def get_proxy_class(self):
pass
class ImageProxy(NotificationProxy, domain_proxy.Image):
def get_super_class(self):
return domain_proxy.Image
def get_payload(self, obj):
return format_image_notification(obj)
def _format_image_send(self, bytes_sent):
return {
'bytes_sent': bytes_sent,
'image_id': self.repo.image_id,
'owner_id': self.repo.owner,
'receiver_tenant_id': self.context.tenant,
'receiver_user_id': self.context.user,
}
def _get_chunk_data_iterator(self, data, chunk_size=None):
sent = 0
for chunk in data:
yield chunk
sent += len(chunk)
if sent != (chunk_size or self.repo.size):
notify = self.notifier.error
else:
notify = self.notifier.info
try:
_send_notification(notify, 'image.send',
self._format_image_send(sent))
except Exception as err:
msg = (_LE("An error occurred during image.send"
" notification: %(err)s") % {'err': err})
LOG.error(msg)
def get_data(self, offset=0, chunk_size=None):
        # The repo call has to happen before the generator is created so
        # that any proxies stacked underneath this one are evaluated
        # eagerly; only the chunk iteration itself is deferred.
data = self.repo.get_data(offset=offset, chunk_size=chunk_size)
return self._get_chunk_data_iterator(data, chunk_size=chunk_size)
def set_data(self, data, size=None):
self.send_notification('image.prepare', self.repo)
notify_error = self.notifier.error
try:
self.repo.set_data(data, size)
except glance_store.StorageFull as e:
msg = (_("Image storage media is full: %s") %
utils.exception_to_str(e))
_send_notification(notify_error, 'image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
except glance_store.StorageWriteDenied as e:
msg = (_("Insufficient permissions on image storage media: %s")
% utils.exception_to_str(e))
_send_notification(notify_error, 'image.upload', msg)
raise webob.exc.HTTPServiceUnavailable(explanation=msg)
except ValueError as e:
msg = (_("Cannot save data for image %(image_id)s: %(error)s") %
{'image_id': self.repo.image_id,
'error': utils.exception_to_str(e)})
_send_notification(notify_error, 'image.upload', msg)
raise webob.exc.HTTPBadRequest(
explanation=utils.exception_to_str(e))
except exception.Duplicate as e:
msg = (_("Unable to upload duplicate image data for image"
"%(image_id)s: %(error)s") %
{'image_id': self.repo.image_id,
'error': utils.exception_to_str(e)})
_send_notification(notify_error, 'image.upload', msg)
raise webob.exc.HTTPConflict(explanation=msg)
except exception.Forbidden as e:
msg = (_("Not allowed to upload image data for image %(image_id)s:"
" %(error)s") % {'image_id': self.repo.image_id,
'error': utils.exception_to_str(e)})
_send_notification(notify_error, 'image.upload', msg)
raise webob.exc.HTTPForbidden(explanation=msg)
except exception.NotFound as e:
msg = (_("Image %(image_id)s could not be found after upload."
" The image may have been deleted during the upload:"
" %(error)s") % {'image_id': self.repo.image_id,
'error': utils.exception_to_str(e)})
_send_notification(notify_error, 'image.upload', msg)
raise webob.exc.HTTPNotFound(explanation=utils.exception_to_str(e))
except webob.exc.HTTPError as e:
with excutils.save_and_reraise_exception():
msg = (_("Failed to upload image data for image %(image_id)s"
" due to HTTP error: %(error)s") %
{'image_id': self.repo.image_id,
'error': utils.exception_to_str(e)})
_send_notification(notify_error, 'image.upload', msg)
except Exception as e:
with excutils.save_and_reraise_exception():
msg = (_("Failed to upload image data for image %(image_id)s "
"due to internal error: %(error)s") %
{'image_id': self.repo.image_id,
'error': utils.exception_to_str(e)})
_send_notification(notify_error, 'image.upload', msg)
else:
self.send_notification('image.upload', self.repo)
self.send_notification('image.activate', self.repo)
class ImageFactoryProxy(NotificationFactoryProxy, domain_proxy.ImageFactory):
def get_super_class(self):
return domain_proxy.ImageFactory
def get_proxy_class(self):
return ImageProxy
class ImageRepoProxy(NotificationRepoProxy, domain_proxy.Repo):
def get_super_class(self):
return domain_proxy.Repo
def get_proxy_class(self):
return ImageProxy
def get_payload(self, obj):
return format_image_notification(obj)
def save(self, image, from_state=None):
super(ImageRepoProxy, self).save(image, from_state=from_state)
self.send_notification('image.update', image)
def add(self, image):
super(ImageRepoProxy, self).add(image)
self.send_notification('image.create', image)
def remove(self, image):
super(ImageRepoProxy, self).remove(image)
self.send_notification('image.delete', image, extra_payload={
'deleted': True, 'deleted_at': timeutils.isotime()
})
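# Illustrative sketch (not part of the original module): wrapping an image
# repository in ImageRepoProxy adds a notification to each CRUD call while
# delegating the actual work to the wrapped repo. All arguments are assumed
# to be supplied by the caller.
def _example_notifying_image_repo(image_repo, context, notifier, image):
    notifying_repo = ImageRepoProxy(image_repo, context, notifier)
    notifying_repo.add(image)      # emits 'image.create'
    notifying_repo.save(image)     # emits 'image.update'
    notifying_repo.remove(image)   # emits 'image.delete' with deleted_at set
    return notifying_repo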
class TaskProxy(NotificationProxy, domain_proxy.Task):
def get_super_class(self):
return domain_proxy.Task
def get_payload(self, obj):
return format_task_notification(obj)
def begin_processing(self):
super(TaskProxy, self).begin_processing()
self.send_notification('task.processing', self.repo)
def succeed(self, result):
super(TaskProxy, self).succeed(result)
self.send_notification('task.success', self.repo)
def fail(self, message):
super(TaskProxy, self).fail(message)
self.send_notification('task.failure', self.repo)
def run(self, executor):
super(TaskProxy, self).run(executor)
self.send_notification('task.run', self.repo)
class TaskFactoryProxy(NotificationFactoryProxy, domain_proxy.TaskFactory):
def get_super_class(self):
return domain_proxy.TaskFactory
def get_proxy_class(self):
return TaskProxy
class TaskRepoProxy(NotificationRepoProxy, domain_proxy.TaskRepo):
def get_super_class(self):
return domain_proxy.TaskRepo
def get_proxy_class(self):
return TaskProxy
def get_payload(self, obj):
return format_task_notification(obj)
def add(self, task):
result = super(TaskRepoProxy, self).add(task)
self.send_notification('task.create', task)
return result
def remove(self, task):
result = super(TaskRepoProxy, self).remove(task)
self.send_notification('task.delete', task, extra_payload={
'deleted': True, 'deleted_at': timeutils.isotime()
})
return result
class TaskStubProxy(NotificationProxy, domain_proxy.TaskStub):
def get_super_class(self):
return domain_proxy.TaskStub
class TaskStubRepoProxy(NotificationRepoProxy, domain_proxy.TaskStubRepo):
def get_super_class(self):
return domain_proxy.TaskStubRepo
def get_proxy_class(self):
return TaskStubProxy
class MetadefNamespaceProxy(NotificationProxy, domain_proxy.MetadefNamespace):
def get_super_class(self):
return domain_proxy.MetadefNamespace
class MetadefNamespaceFactoryProxy(NotificationFactoryProxy,
domain_proxy.MetadefNamespaceFactory):
def get_super_class(self):
return domain_proxy.MetadefNamespaceFactory
def get_proxy_class(self):
return MetadefNamespaceProxy
class MetadefNamespaceRepoProxy(NotificationRepoProxy,
domain_proxy.MetadefNamespaceRepo):
def get_super_class(self):
return domain_proxy.MetadefNamespaceRepo
def get_proxy_class(self):
return MetadefNamespaceProxy
def get_payload(self, obj):
return format_metadef_namespace_notification(obj)
def save(self, metadef_namespace):
name = getattr(metadef_namespace, '_old_namespace',
metadef_namespace.namespace)
result = super(MetadefNamespaceRepoProxy, self).save(metadef_namespace)
self.send_notification(
'metadef_namespace.update', metadef_namespace,
extra_payload={
'namespace_old': name,
})
return result
def add(self, metadef_namespace):
result = super(MetadefNamespaceRepoProxy, self).add(metadef_namespace)
self.send_notification('metadef_namespace.create', metadef_namespace)
return result
def remove(self, metadef_namespace):
result = super(MetadefNamespaceRepoProxy, self).remove(
metadef_namespace)
self.send_notification(
'metadef_namespace.delete', metadef_namespace,
extra_payload={'deleted': True, 'deleted_at': timeutils.isotime()}
)
return result
def remove_objects(self, metadef_namespace):
result = super(MetadefNamespaceRepoProxy, self).remove_objects(
metadef_namespace)
self.send_notification('metadef_namespace.delete_objects',
metadef_namespace)
return result
def remove_properties(self, metadef_namespace):
result = super(MetadefNamespaceRepoProxy, self).remove_properties(
metadef_namespace)
self.send_notification('metadef_namespace.delete_properties',
metadef_namespace)
return result
def remove_tags(self, metadef_namespace):
result = super(MetadefNamespaceRepoProxy, self).remove_tags(
metadef_namespace)
self.send_notification('metadef_namespace.delete_tags',
metadef_namespace)
return result
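# Illustrative sketch (not part of the original module): ``save`` above reads
# an optional ``_old_namespace`` attribute, so a rename can surface the
# previous name through the ``namespace_old`` payload field. The helper and
# its arguments are hypothetical and assumed to be supplied by the caller.
def _example_rename_metadef_namespace(namespace_repo, metadef_namespace, new_name):
    metadef_namespace._old_namespace = metadef_namespace.namespace
    metadef_namespace.namespace = new_name
    # The resulting 'metadef_namespace.update' payload carries both names.
    return namespace_repo.save(metadef_namespace)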
class MetadefObjectProxy(NotificationProxy, domain_proxy.MetadefObject):
def get_super_class(self):
return domain_proxy.MetadefObject
class MetadefObjectFactoryProxy(NotificationFactoryProxy,
domain_proxy.MetadefObjectFactory):
def get_super_class(self):
return domain_proxy.MetadefObjectFactory
def get_proxy_class(self):
return MetadefObjectProxy
class MetadefObjectRepoProxy(NotificationRepoProxy,
domain_proxy.MetadefObjectRepo):
def get_super_class(self):
return domain_proxy.MetadefObjectRepo
def get_proxy_class(self):
return MetadefObjectProxy
def get_payload(self, obj):
return format_metadef_object_notification(obj)
def save(self, metadef_object):
name = getattr(metadef_object, '_old_name', metadef_object.name)
result = super(MetadefObjectRepoProxy, self).save(metadef_object)
self.send_notification(
'metadef_object.update', metadef_object,
extra_payload={
'namespace': metadef_object.namespace.namespace,
'name_old': name,
})
return result
def add(self, metadef_object):
result = super(MetadefObjectRepoProxy, self).add(metadef_object)
self.send_notification('metadef_object.create', metadef_object)
return result
def remove(self, metadef_object):
result = super(MetadefObjectRepoProxy, self).remove(metadef_object)
self.send_notification(
'metadef_object.delete', metadef_object,
extra_payload={
'deleted': True,
'deleted_at': timeutils.isotime(),
'namespace': metadef_object.namespace.namespace
}
)
return result
class MetadefPropertyProxy(NotificationProxy, domain_proxy.MetadefProperty):
def get_super_class(self):
return domain_proxy.MetadefProperty
class MetadefPropertyFactoryProxy(NotificationFactoryProxy,
domain_proxy.MetadefPropertyFactory):
def get_super_class(self):
return domain_proxy.MetadefPropertyFactory
def get_proxy_class(self):
return MetadefPropertyProxy
class MetadefPropertyRepoProxy(NotificationRepoProxy,
domain_proxy.MetadefPropertyRepo):
def get_super_class(self):
return domain_proxy.MetadefPropertyRepo
def get_proxy_class(self):
return MetadefPropertyProxy
def get_payload(self, obj):
return format_metadef_property_notification(obj)
def save(self, metadef_property):
name = getattr(metadef_property, '_old_name', metadef_property.name)
result = super(MetadefPropertyRepoProxy, self).save(metadef_property)
self.send_notification(
'metadef_property.update', metadef_property,
extra_payload={
'namespace': metadef_property.namespace.namespace,
'name_old': name,
})
return result
def add(self, metadef_property):
result = super(MetadefPropertyRepoProxy, self).add(metadef_property)
self.send_notification('metadef_property.create', metadef_property)
return result
def remove(self, metadef_property):
result = super(MetadefPropertyRepoProxy, self).remove(metadef_property)
self.send_notification(
'metadef_property.delete', metadef_property,
extra_payload={
'deleted': True,
'deleted_at': timeutils.isotime(),
'namespace': metadef_property.namespace.namespace
}
)
return result
class MetadefResourceTypeProxy(NotificationProxy,
domain_proxy.MetadefResourceType):
def get_super_class(self):
return domain_proxy.MetadefResourceType
class MetadefResourceTypeFactoryProxy(NotificationFactoryProxy,
domain_proxy.MetadefResourceTypeFactory):
def get_super_class(self):
return domain_proxy.MetadefResourceTypeFactory
def get_proxy_class(self):
return MetadefResourceTypeProxy
class MetadefResourceTypeRepoProxy(NotificationRepoProxy,
domain_proxy.MetadefResourceTypeRepo):
def get_super_class(self):
return domain_proxy.MetadefResourceTypeRepo
def get_proxy_class(self):
return MetadefResourceTypeProxy
def get_payload(self, obj):
return format_metadef_resource_type_notification(obj)
def add(self, md_resource_type):
result = super(MetadefResourceTypeRepoProxy, self).add(
md_resource_type)
self.send_notification('metadef_resource_type.create',
md_resource_type)
return result
def remove(self, md_resource_type):
result = super(MetadefResourceTypeRepoProxy, self).remove(
md_resource_type)
self.send_notification(
'metadef_resource_type.delete', md_resource_type,
extra_payload={
'deleted': True,
'deleted_at': timeutils.isotime(),
'namespace': md_resource_type.namespace.namespace
}
)
return result
class MetadefTagProxy(NotificationProxy, domain_proxy.MetadefTag):
def get_super_class(self):
return domain_proxy.MetadefTag
class MetadefTagFactoryProxy(NotificationFactoryProxy,
domain_proxy.MetadefTagFactory):
def get_super_class(self):
return domain_proxy.MetadefTagFactory
def get_proxy_class(self):
return MetadefTagProxy
class MetadefTagRepoProxy(NotificationRepoProxy, domain_proxy.MetadefTagRepo):
def get_super_class(self):
return domain_proxy.MetadefTagRepo
def get_proxy_class(self):
return MetadefTagProxy
def get_payload(self, obj):
return format_metadef_tag_notification(obj)
def save(self, metadef_tag):
name = getattr(metadef_tag, '_old_name', metadef_tag.name)
result = super(MetadefTagRepoProxy, self).save(metadef_tag)
self.send_notification(
'metadef_tag.update', metadef_tag,
extra_payload={
'namespace': metadef_tag.namespace.namespace,
'name_old': name,
})
return result
def add(self, metadef_tag):
result = super(MetadefTagRepoProxy, self).add(metadef_tag)
self.send_notification('metadef_tag.create', metadef_tag)
return result
def add_tags(self, metadef_tags):
result = super(MetadefTagRepoProxy, self).add_tags(metadef_tags)
for metadef_tag in metadef_tags:
self.send_notification('metadef_tag.create', metadef_tag)
return result
def remove(self, metadef_tag):
result = super(MetadefTagRepoProxy, self).remove(metadef_tag)
self.send_notification(
'metadef_tag.delete', metadef_tag,
extra_payload={
'deleted': True,
'deleted_at': timeutils.isotime(),
'namespace': metadef_tag.namespace.namespace
}
)
return result
|
|
from flask import Response, jsonify, request, abort, redirect, url_for
from flask_cors import cross_origin
from flask_user import current_user
from functools import wraps
from bootstrap import app, db
from resolver import resolve_dbo
from config import GEOTAGS_API_PREFIX
from shapely.geometry import asShape
import geojson
def _call_or_get(function_or_property):
return function_or_property() if callable(function_or_property) else function_or_property
def auth_required(func):
""" This decorator ensures that the current user is logged in before calling the actual view.
Calls the unauthorized_view_function() when the user is not logged in."""
@wraps(func)
def decorated_view(*args, **kwargs):
# User must be authenticated
if not _call_or_get(current_user.is_authenticated):
return abort(403)
# Call the actual view
return func(*args, **kwargs)
return decorated_view
def flatten(props, keys, tags):
flat_props = dict()
other_tags = dict()
    for k, v in props.get('tags', {}).items():
if k in tags:
flat_props[k] = v
else:
other_tags[k] = v
    for k, v in props.items():
if k in keys:
flat_props[k] = v
flat_props['tags'] = other_tags
return flat_props
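# Illustrative sketch (not part of the original module): ``flatten`` promotes
# the whitelisted top-level keys and tag names to the top of the dict and
# keeps every other tag under 'tags'. The sample data below is made up.
def _example_flatten():
    props = {
        'name': 'Fountain',
        'version': 3,
        'user_email': 'someone@example.org',
        'tags': {'insee': '75056', 'material': 'stone'},
    }
    flat = flatten(props, keys=['name', 'version', 'user_email'], tags=['insee'])
    # flat == {'name': 'Fountain', 'version': 3,
    #          'user_email': 'someone@example.org', 'insee': '75056',
    #          'tags': {'material': 'stone'}}
    return flat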
@app.route(GEOTAGS_API_PREFIX + '/<key>/export/')
@app.route(GEOTAGS_API_PREFIX + '/<key>/export/features.geojson')
def export_all_features(key):
dbo = resolve_dbo(key)
if dbo is None:
return abort(404)
keys = [ 'name', 'version', 'user_email' ]
tags = [ 'insee', 'commune', 'pk', 'adresse', 'comment', 'valid', 'modified', 'geometry_modified', 'created' ]
query = dbo.Feature.query.all()
features = []
for f in query:
feature = {
'type': 'Feature',
'id': f.id,
'properties': flatten(f.properties, keys, tags),
'geometry': f.shape.__geo_interface__
}
features.append(feature)
response = jsonify({
'type': 'FeatureCollection',
'features': features
})
return response
@app.route(GEOTAGS_API_PREFIX + '/<key>/features.geojson', methods = [ 'GET' ])
@app.route(GEOTAGS_API_PREFIX + '/<key>/features', methods=[ 'GET', 'PUT' ])
def features(key):
if request.method == 'GET':
return features_get(key)
elif request.method == 'PUT':
return feature_create(key)
def features_get(key):
dbo = resolve_dbo(key)
if dbo is None:
return abort(404)
query = dbo.Feature.query.all()
features = []
for f in query:
feature = {
'type': 'Feature',
'id': f.id,
'properties': f.properties,
'geometry': f.shape.__geo_interface__
}
features.append(feature)
response = jsonify({
'type': 'FeatureCollection',
'features': features
})
return response
@auth_required
def feature_create(key):
dbo = resolve_dbo(key)
if dbo is None:
return abort(404)
json = request.get_json()
submitted = geojson.feature.Feature.to_instance(json)
new_feature = dbo.Feature()
new_feature.name = submitted.properties.get('name')
new_feature.shape = asShape(submitted.geometry)
new_feature.last_contributor = current_user
tags = submitted.properties.get('tags')
if tags:
for tag, tag_value in tags.items():
new_feature.tag(tag, tag_value)
db.session.add(new_feature)
make_changeset(new_feature, dbo)
db.session.commit()
return (feature_as_geojson(new_feature), 201)
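# Illustrative sketch (not part of the original module): the PUT handler above
# expects a GeoJSON Feature whose properties carry 'name' and an optional
# 'tags' mapping. ``base_url`` and ``key`` are caller-supplied; the sample
# coordinates, name and tag are made up.
def _example_create_feature(base_url, key):
    import requests  # local import, only needed for this sketch
    payload = {
        'type': 'Feature',
        'geometry': {'type': 'Point', 'coordinates': [2.35, 48.85]},
        'properties': {'name': 'Fountain', 'tags': {'insee': '75056'}},
    }
    url = base_url + GEOTAGS_API_PREFIX + '/' + key + '/features'
    return requests.put(url, json=payload)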
# @app.route(GEOTAGS_API_PREFIX + "/feature/<int:id>", methods=[ 'GET' ])
# def feature(id):
# f = Feature.query.get(id)
# if f is None:
# return abort(404)
# if request.method == 'GET':
# return feature_get(f)
# elif request.method == 'POST':
# return feature_update(f)
# def feature_get(feature):
# return jsonify(feature.properties)
@app.route(GEOTAGS_API_PREFIX + "/<key>/feature/<int:id>.geojson", methods=[ 'GET', 'POST' ])
# @cross_origin()
def feature(key, id):
dbo = resolve_dbo(key)
if dbo is None:
return abort(404)
f = dbo.Feature.query.get(id)
if f is None:
return abort(404)
if request.method == 'GET':
return feature_as_geojson(f)
elif request.method == 'POST':
return feature_update(f, dbo)
def feature_as_geojson(feature):
if feature is not None:
data = {
'id': feature.id,
'type': 'Feature',
'geometry': feature.shape.__geo_interface__,
'properties': feature.properties
}
return jsonify(data)
else:
return abort(404)
# UPDATE name, label, geom
# DELETE feature()
@auth_required
def feature_update(feature, dbo):
json = request.get_json()
submitted = geojson.feature.Feature.to_instance(json)
feature.name = submitted.properties.get('name', feature.name)
feature.shape = asShape(submitted.geometry)
tags = submitted.properties.get('tags')
if tags:
for tag, tag_value in tags.items():
feature.tag(tag, tag_value)
feature.last_contributor = current_user
make_changeset(feature, dbo)
db.session.commit()
return feature_as_geojson(feature)
def make_changeset(feature, dbo):
feature.stamp()
cs = dbo.ChangeSet()
cs.feature = feature
cs.contributor = current_user
cs.timestamp = feature.timestamp
cs.values = feature.properties
db.session.add(cs)
# TODO Not yet implemented methods
# @app.route(GEOTAGS_API_PREFIX + "/feature/<int:id>/tag", methods=[ 'POST' ])
# # @cross_origin()
# @auth_required
# def feature_tag(id):
# f = Feature.query.get(id)
# if f is None:
# return abort(404)
# # if tag already exit, update tag
# # otherwise, create tag
# data = request.get_json()
# @app.route(GEOTAGS_API_PREFIX + "/feature/<int:id>/mark", methods=[ 'POST' ])
# # @cross_origin()
# @auth_required
# def feature_mark(id):
# f = Feature.query.get(id)
# if f is None:
# return abort(404)
# # TODO implement mark method
# return abort(201)
# @app.route(GEOTAGS_API_PREFIX + "/feature/<int:id>/geometry", methods=[ 'GET', 'POST' ])
# # @cross_origin()
# @auth_required
# def feature_update_geometry(id):
# f = Feature.query.get(id)
# if f is None:
# return abort(404)
# if request.method == 'GET':
# data = geojson.dumps(f.shape)
# return data, 200, { 'Content-Type': 'application/json' }
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import shutil
import tempfile
import time
import jinja2
import jinja2.meta
import jsonschema
from rally.common.i18n import _, _LI
from rally.common import log as logging
from rally.common import objects
from rally import consts
from rally.deployment import engine as deploy_engine
from rally import exceptions
from rally import osclients
from rally.task import engine
from rally.verification.tempest import tempest
LOG = logging.getLogger(__name__)
class Deployment(object):
@classmethod
def create(cls, config, name):
"""Create a deployment.
:param config: a dict with deployment configuration
        :param name: a str representing the name of the deployment
:returns: Deployment object
"""
try:
deployment = objects.Deployment(name=name, config=config)
except exceptions.DeploymentNameExists as e:
if logging.is_debug():
LOG.exception(e)
raise
deployer = deploy_engine.Engine.get_engine(
deployment["config"]["type"], deployment)
try:
deployer.validate()
except jsonschema.ValidationError:
LOG.error(_("Deployment %s: Schema validation error.") %
deployment["uuid"])
deployment.update_status(consts.DeployStatus.DEPLOY_FAILED)
raise
with deployer:
endpoints = deployer.make_deploy()
deployment.update_endpoints(endpoints)
return deployment
@classmethod
def destroy(cls, deployment):
"""Destroy the deployment.
:param deployment: UUID or name of the deployment
"""
# TODO(akscram): We have to be sure that there are no running
# tasks for this deployment.
        # TODO(akscram): Check that the deployment has got a status that
# is equal to "*->finished" or "deploy->inconsistent".
deployment = objects.Deployment.get(deployment)
deployer = deploy_engine.Engine.get_engine(
deployment["config"]["type"], deployment)
tempest.Tempest(deployment["uuid"]).uninstall()
with deployer:
deployer.make_cleanup()
deployment.delete()
@classmethod
def recreate(cls, deployment):
"""Performs a cleanup and then makes a deployment again.
:param deployment: UUID or name of the deployment
"""
deployment = objects.Deployment.get(deployment)
deployer = deploy_engine.Engine.get_engine(
deployment["config"]["type"], deployment)
with deployer:
deployer.make_cleanup()
endpoints = deployer.make_deploy()
deployment.update_endpoints(endpoints)
@classmethod
def get(cls, deployment):
"""Get the deployment.
:param deployment: UUID or name of the deployment
:returns: Deployment instance
"""
return objects.Deployment.get(deployment)
@classmethod
def service_list(cls, deployment):
"""Get the services list.
:param deployment: Deployment object
:returns: Service list
"""
# TODO(kun): put this work into objects.Deployment
clients = osclients.Clients(objects.Endpoint(**deployment["admin"]))
return clients.services()
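# Illustrative sketch (not part of the original module): ``Deployment.create``
# expects a config dict whose "type" selects a deployment engine; the engine
# validates the config against its JSON schema before deploying. The values
# below are made up and not guaranteed to satisfy any engine's actual schema.
def _example_create_deployment():
    config = {
        "type": "ExistingCloud",
        "auth_url": "http://example.com:5000/v2.0/",
        "admin": {
            "username": "admin",
            "password": "secret",
            "tenant_name": "demo",
        },
    }
    return Deployment.create(config, name="example-deployment")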
class Task(object):
@classmethod
def render_template(cls, task_template, template_dir="./", **kwargs):
"""Render jinja2 task template to Rally input task.
:param task_template: String that contains template
        :param template_dir: The path of the directory containing template files
:param kwargs: Dict with template arguments
:returns: rendered template str
"""
def is_really_missing(mis, task_template):
# NOTE(boris-42): Removing variables that have default values from
# missing. Construction that won't be properly
            #                 checked is {% set x = x or 1 %}
if re.search(mis.join(["{%\s*set\s+", "\s*=\s*", "[^\w]+"]),
task_template):
return False
# NOTE(jlk): Also check for a default filter which can show up as
# a missing variable
if re.search(mis + "\s*\|\s*default\(", task_template):
return False
return True
# NOTE(boris-42): We have to import builtins to get the full list of
# builtin functions (e.g. range()). Unfortunately,
# __builtins__ doesn't return them (when it is not
# main module)
from six.moves import builtins
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir, encoding="utf8"))
ast = env.parse(task_template)
required_kwargs = jinja2.meta.find_undeclared_variables(ast)
missing = set(required_kwargs) - set(kwargs) - set(dir(builtins))
real_missing = [mis for mis in missing
if is_really_missing(mis, task_template)]
if real_missing:
multi_msg = _("Please specify next template task arguments: %s")
single_msg = _("Please specify template task argument: %s")
raise TypeError((len(real_missing) > 1 and multi_msg or single_msg)
% ", ".join(real_missing))
return env.from_string(task_template).render(**kwargs)
@classmethod
def create(cls, deployment, tag):
"""Create a task without starting it.
        Task is a list of benchmarks that will be called one by one; the
        results of their execution will be stored in the DB.
:param deployment: UUID or name of the deployment
:param tag: tag for this task
:returns: Task object
"""
deployment_uuid = objects.Deployment.get(deployment)["uuid"]
return objects.Task(deployment_uuid=deployment_uuid, tag=tag)
@classmethod
def validate(cls, deployment, config, task_instance=None):
"""Validate a task config against specified deployment.
:param deployment: UUID or name of the deployment
        :param config: a dict with a task configuration
        :param task_instance: existing Task object to reuse for validation
                              (optional)
"""
deployment = objects.Deployment.get(deployment)
task = task_instance or objects.Task(
deployment_uuid=deployment["uuid"], temporary=True)
benchmark_engine = engine.BenchmarkEngine(
config, task, admin=deployment["admin"], users=deployment["users"])
benchmark_engine.validate()
@classmethod
def start(cls, deployment, config, task=None, abort_on_sla_failure=False):
"""Start a task.
        Task is a list of benchmarks that will be called one by one; the
        results of their execution will be stored in the DB.
:param deployment: UUID or name of the deployment
:param config: a dict with a task configuration
:param task: Task object. If None, it will be created
:param abort_on_sla_failure: if True, the execution of a benchmark
scenario will stop when any SLA check
for it fails
"""
deployment = objects.Deployment.get(deployment)
task = task or objects.Task(deployment_uuid=deployment["uuid"])
if task.is_temporary:
raise ValueError(_(
"Unable to run a temporary task. Please check your code."))
LOG.info("Benchmark Task %s on Deployment %s" % (task["uuid"],
deployment["uuid"]))
benchmark_engine = engine.BenchmarkEngine(
config, task, admin=deployment["admin"], users=deployment["users"],
abort_on_sla_failure=abort_on_sla_failure)
try:
benchmark_engine.run()
except Exception:
deployment.update_status(consts.DeployStatus.DEPLOY_INCONSISTENT)
raise
@classmethod
def abort(cls, task_uuid, soft=False, async=True):
"""Abort running task.
:param task_uuid: The UUID of the task
:type task_uuid: str
:param soft: if set to True, task should be aborted after execution of
current scenario, otherwise as soon as possible before
all the scenario iterations finish [Default: False]
:type soft: bool
        :param async: don't wait until the task reaches the 'running'
                      state [Default: True]
:type async: bool
"""
if not async:
current_status = objects.Task.get_status(task_uuid)
if current_status in objects.Task.NOT_IMPLEMENTED_STAGES_FOR_ABORT:
LOG.info(_LI("Task status is '%s'. Should wait until it became"
" 'running'") % current_status)
while (current_status in
objects.Task.NOT_IMPLEMENTED_STAGES_FOR_ABORT):
time.sleep(1)
current_status = objects.Task.get_status(task_uuid)
objects.Task.get(task_uuid).abort(soft=soft)
if not async:
LOG.info(_LI("Waiting until the task stops."))
finished_stages = [consts.TaskStatus.ABORTED,
consts.TaskStatus.FINISHED,
consts.TaskStatus.FAILED]
while objects.Task.get_status(task_uuid) not in finished_stages:
time.sleep(1)
@classmethod
def delete(cls, task_uuid, force=False):
"""Delete the task.
:param task_uuid: The UUID of the task.
        :param force: If set to True, delete the task regardless of its
                      status.
        :raises: :class:`rally.exceptions.TaskInvalidStatus` when the
                 status of the task is not FINISHED and the force argument
                 is not True
"""
status = None if force else consts.TaskStatus.FINISHED
objects.Task.delete_by_uuid(task_uuid, status=status)
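# Illustrative sketch (not part of the original module): render a Jinja2 task
# template with keyword arguments. A variable covered by a
# '{% set x = x or ... %}' construction or a '|default(...)' filter is not
# reported as missing; truly undeclared variables raise TypeError. The
# template below is made up.
def _example_render_task_template():
    template = ('{% set flavor = flavor or "m1.tiny" %}'
                '{"times": {{ times }}, "flavor": "{{ flavor }}"}')
    # 'times' must be supplied; 'flavor' falls back to its default.
    return Task.render_template(template, times=3)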
class Verification(object):
@classmethod
def verify(cls, deployment, set_name, regex, tempest_config,
system_wide_install=False):
"""Start verifying.
:param deployment: UUID or name of a deployment.
:param set_name: Valid name of tempest test set.
:param regex: Regular expression of test
:param tempest_config: User specified Tempest config file
        :param system_wide_install: If False, run the tests from Tempest's
                                    virtualenv; otherwise run them in the
                                    local (system-wide) environment
:returns: Verification object
"""
deployment_uuid = objects.Deployment.get(deployment)["uuid"]
verification = objects.Verification(deployment_uuid=deployment_uuid)
verifier = cls._create_verifier(deployment_uuid, verification,
tempest_config, system_wide_install)
LOG.info("Starting verification of deployment: %s" % deployment_uuid)
verification.set_running()
verifier.verify(set_name=set_name, regex=regex)
return verification
@staticmethod
def _create_verifier(deployment_uuid, verification=None,
tempest_config=None, system_wide_install=False):
"""Create a Tempest object.
:param deployment_uuid: UUID or name of a deployment
:param verification: Verification object
:param tempest_config: User specified Tempest config file
        :param system_wide_install: If False, run the tests from Tempest's
                                    virtualenv; otherwise run them in the
                                    local (system-wide) environment
:return: Tempest object
"""
verifier = tempest.Tempest(deployment_uuid, verification=verification,
tempest_config=tempest_config,
system_wide_install=system_wide_install)
if not verifier.is_installed():
LOG.info(_("Tempest is not installed "
"for the specified deployment."))
LOG.info(_("Installing Tempest "
"for deployment: %s") % deployment_uuid)
verifier.install()
return verifier
@classmethod
def import_results(cls, deployment, set_name="", log_file=None):
"""Import Tempest tests results into the Rally database.
:param deployment: UUID or name of a deployment
:param log_file: User specified Tempest log file in subunit format
:returns: Deployment and verification objects
"""
# TODO(aplanas): Create an external deployment if this is
# missing, as required in the blueprint [1].
# [1] https://blueprints.launchpad.net/rally/+spec/verification-import
deployment_uuid = objects.Deployment.get(deployment)["uuid"]
verification = objects.Verification(deployment_uuid=deployment_uuid)
verifier = tempest.Tempest(deployment_uuid, verification=verification)
LOG.info("Importing verification of deployment: %s" % deployment_uuid)
verification.set_running()
verifier.import_results(set_name=set_name, log_file=log_file)
return deployment, verification
@classmethod
def install_tempest(cls, deployment, source=None):
"""Install Tempest.
:param deployment: UUID or name of the deployment
:param source: Source to fetch Tempest from
"""
deployment_uuid = objects.Deployment.get(deployment)["uuid"]
verifier = tempest.Tempest(deployment_uuid, source=source)
verifier.install()
@classmethod
def uninstall_tempest(cls, deployment):
"""Remove deployment's local Tempest installation.
:param deployment: UUID or name of the deployment
"""
deployment_uuid = objects.Deployment.get(deployment)["uuid"]
verifier = tempest.Tempest(deployment_uuid)
verifier.uninstall()
@classmethod
def reinstall_tempest(cls, deployment, tempest_config=None, source=None):
"""Uninstall Tempest and then reinstall from new source.
:param deployment: UUID or name of the deployment
:param tempest_config: Tempest config file. Use previous file as
default
:param source: Source to fetch Tempest from. Use old source as default
"""
deployment_uuid = objects.Deployment.get(deployment)["uuid"]
verifier = tempest.Tempest(deployment_uuid)
if not tempest_config:
config_path = verifier.config_file
filename = os.path.basename(config_path)
temp_location = tempfile.gettempdir()
tmp_conf_path = os.path.join(temp_location, filename)
shutil.copy2(config_path, tmp_conf_path)
source = source or verifier.tempest_source
verifier.uninstall()
verifier = tempest.Tempest(deployment_uuid, source=source,
tempest_config=tempest_config)
verifier.install()
if not tempest_config:
shutil.move(tmp_conf_path, verifier.config_file)
@classmethod
def configure_tempest(cls, deployment, tempest_config=None,
override=False):
"""Generate configuration file of Tempest.
:param deployment: UUID or name of a deployment
:param tempest_config: User specified Tempest config file location
        :param override: Whether or not to override the existing Tempest
                         config file
"""
deployment_uuid = objects.Deployment.get(deployment)["uuid"]
verifier = cls._create_verifier(deployment_uuid,
tempest_config=tempest_config)
verifier.generate_config_file(override)
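# Illustrative sketch (not part of the original module): a typical
# verification flow against an existing deployment. ``deployment`` is a UUID
# or name supplied by the caller; the set name below is a placeholder.
def _example_verification_workflow(deployment):
    Verification.install_tempest(deployment)
    Verification.configure_tempest(deployment)
    return Verification.verify(deployment, set_name="smoke", regex=None,
                               tempest_config=None)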
|
|
"""Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.exceptions import DataConversionWarning
from sklearn.metrics.cluster import homogeneity_score
# non-centered, sparse centers used to check the k-means implementations
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_elkan_results():
rnd = np.random.RandomState(0)
X_normal = rnd.normal(size=(50, 10))
X_blobs, _ = make_blobs(random_state=0)
km_full = KMeans(algorithm='full', n_clusters=5, random_state=0, n_init=1)
km_elkan = KMeans(algorithm='elkan', n_clusters=5,
random_state=0, n_init=1)
for X in [X_normal, X_blobs]:
km_full.fit(X)
km_elkan.fit(X)
assert_array_almost_equal(km_elkan.cluster_centers_,
km_full.cluster_centers_)
assert_array_equal(km_elkan.labels_, km_full.labels_)
def test_labels_assignment_and_inertia():
    # pure numpy implementation, used as an easily auditable reference
    # ("gold") implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
    # check that the number of cluster centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that a warning is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_explicit_init_shape():
# test for sensible errors when giving explicit init
# with wrong number of features or clusters
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 3))
for Class in [KMeans, MiniBatchKMeans]:
# mismatch of number of features
km = Class(n_init=1, init=X[:, :2], n_clusters=len(X))
msg = "does not match the number of features of the data"
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1,
init=lambda X_, k, random_state: X_[:, :2],
n_clusters=len(X))
assert_raises_regex(ValueError, msg, km.fit, X)
# mismatch of number of clusters
msg = "does not match the number of clusters"
km = Class(n_init=1, init=X[:2, :], n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1,
init=lambda X_, k, random_state: X_[:2, :],
n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
def test_k_means_fortran_aligned_data():
    # Check that KMeans works well even if X is Fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42, n_clusters=2)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
    # Give a perfect initialization, but a large reassignment_ratio;
    # as a result all the centers should be reassigned and the model
    # should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
    # Give a perfect initialization with a small reassignment_ratio;
    # no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
msg = "does not match the number of clusters"
assert_raises_regex(ValueError, msg, MiniBatchKMeans(init=test_init,
random_state=42).fit,
X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check k_means with a bad initialization does not yield a singleton
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers, which in turn makes the clustering
    # dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1,
algorithm='elkan')
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1,
algorithm='elkan')
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input
    # at predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input
    # at predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_int_input():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
for dtype in [np.int32, np.int64]:
X_int = np.array(X_list, dtype=dtype)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
for km in fitted_models:
assert_equal(km.cluster_centers_.dtype, np.float64)
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_predict_equal_labels():
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='full')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='elkan')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
def test_full_vs_elkan():
km1 = KMeans(algorithm='full', random_state=13)
km2 = KMeans(algorithm='elkan', random_state=13)
km1.fit(X)
km2.fit(X)
    assert_equal(homogeneity_score(km1.predict(X), km2.predict(X)), 1.0)
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
def test_max_iter_error():
km = KMeans(max_iter=-1)
assert_raise_message(ValueError, 'Number of iterations should be',
km.fit, X)
def test_float_precision():
km = KMeans(n_init=1, random_state=30)
mb_km = MiniBatchKMeans(n_init=1, random_state=30)
inertia = {}
X_new = {}
centers = {}
for estimator in [km, mb_km]:
for is_sparse in [False, True]:
for dtype in [np.float64, np.float32]:
if is_sparse:
X_test = sp.csr_matrix(X_csr, dtype=dtype)
else:
X_test = dtype(X)
estimator.fit(X_test)
# dtype of cluster centers has to be the dtype of the input data
assert_equal(estimator.cluster_centers_.dtype, dtype)
inertia[dtype] = estimator.inertia_
X_new[dtype] = estimator.transform(X_test)
centers[dtype] = estimator.cluster_centers_
# make sure predictions correspond to the correct label
assert_equal(estimator.predict(X_test[0]), estimator.labels_[0])
if hasattr(estimator, 'partial_fit'):
estimator.partial_fit(X_test[0:3])
# dtype of cluster centers has to stay the same after partial_fit
assert_equal(estimator.cluster_centers_.dtype, dtype)
    # compare arrays with low precision since 32 bit and 64 bit computations
    # can differ up to the 4th decimal place
assert_array_almost_equal(inertia[np.float32], inertia[np.float64],
decimal=4)
assert_array_almost_equal(X_new[np.float32], X_new[np.float64],
decimal=4)
assert_array_almost_equal(centers[np.float32], centers[np.float64],
decimal=4)
def test_KMeans_init_centers():
    # Check that KMeans does not silently mutate the user-provided init centers
    # array, even when the input data and the init centers have the same dtype
X_small = np.array([[1.1, 1.1], [-7.5, -7.5], [-1.1, -1.1], [7.5, 7.5]])
init_centers = np.array([[0.0, 0.0], [5.0, 5.0], [-5.0, -5.0]])
for dtype in [np.int32, np.int64, np.float32, np.float64]:
X_test = dtype(X_small)
init_centers_test = dtype(init_centers)
assert_array_equal(init_centers, init_centers_test)
km = KMeans(init=init_centers_test, n_clusters=3)
km.fit(X_test)
assert_equal(False, np.may_share_memory(km.cluster_centers_, init_centers))
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple, OrderedDict
import itertools
import json
from six import string_types
from six.moves.urllib import parse
import requests
from pydruid.db import exceptions
class Type(object):
STRING = 1
NUMBER = 2
BOOLEAN = 3
def connect(
host="localhost",
port=8082,
path="/druid/v2/sql/",
scheme="http",
user=None,
password=None,
context=None,
header=False,
ssl_verify_cert=True,
ssl_client_cert=None,
proxies=None,
): # noqa: E125
"""
Constructor for creating a connection to the database.
>>> conn = connect('localhost', 8082)
>>> curs = conn.cursor()
"""
context = context or {}
return Connection(
host,
port,
path,
scheme,
user,
password,
context,
header,
ssl_verify_cert,
ssl_client_cert,
proxies,
)
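# Editor's usage sketch (not part of the original module; host, port and the
# SQL text are assumptions): `connect` returns a `Connection` whose context
# manager yields a cursor, so the DB-API objects defined below compose like this.
def _example_connect_and_query():  # pragma: no cover
    with connect(host="localhost", port=8082) as curs:
        curs.execute("SELECT 1 AS one")
        for row in curs.fetchall():
            print(row.one)  # rows are namedtuples keyed by column name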
def check_closed(f):
"""Decorator that checks if connection/cursor is closed."""
def g(self, *args, **kwargs):
if self.closed:
raise exceptions.Error(
"{klass} already closed".format(klass=self.__class__.__name__)
)
return f(self, *args, **kwargs)
return g
def check_result(f):
"""Decorator that checks if the cursor has results from `execute`."""
def g(self, *args, **kwargs):
if self._results is None:
raise exceptions.Error("Called before `execute`")
return f(self, *args, **kwargs)
return g
def get_description_from_row(row):
"""
Return description from a single row.
We only return the name, type (inferred from the data) and if the values
can be NULL. String columns in Druid are NULLable. Numeric columns are NOT
NULL.
"""
return [
(
name, # name
get_type(value), # type_code
None, # [display_size]
None, # [internal_size]
None, # [precision]
None, # [scale]
get_type(value) == Type.STRING, # [null_ok]
)
for name, value in row.items()
]
def get_type(value):
"""
Infer type from value.
    Note that bool is a subclass of int, so the order of the checks matters.
"""
if isinstance(value, string_types) or value is None:
return Type.STRING
elif isinstance(value, bool):
return Type.BOOLEAN
elif isinstance(value, (int, float)):
return Type.NUMBER
raise exceptions.Error("Value of unknown type: {value}".format(value=value))
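# Editor's illustration (not part of the original module): the mapping
# implemented by get_type() above, shown on a few literal values.
def _example_get_type():  # pragma: no cover
    assert get_type("druid") == Type.STRING
    assert get_type(None) == Type.STRING    # NULLs surface as strings
    assert get_type(True) == Type.BOOLEAN   # checked before numbers (bool subclasses int)
    assert get_type(3.14) == Type.NUMBER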
class Connection(object):
"""Connection to a Druid database."""
def __init__(
self,
host="localhost",
port=8082,
path="/druid/v2/sql/",
scheme="http",
user=None,
password=None,
context=None,
header=False,
ssl_verify_cert=True,
ssl_client_cert=None,
proxies=None,
):
netloc = "{host}:{port}".format(host=host, port=port)
self.url = parse.urlunparse((scheme, netloc, path, None, None, None))
self.context = context or {}
self.closed = False
self.cursors = []
self.header = header
self.user = user
self.password = password
self.ssl_verify_cert = ssl_verify_cert
self.ssl_client_cert = ssl_client_cert
self.proxies = proxies
@check_closed
def close(self):
"""Close the connection now."""
self.closed = True
for cursor in self.cursors:
try:
cursor.close()
except exceptions.Error:
pass # already closed
@check_closed
def commit(self):
"""
Commit any pending transaction to the database.
Not supported.
"""
pass
@check_closed
def cursor(self):
"""Return a new Cursor Object using the connection."""
cursor = Cursor(
self.url,
self.user,
self.password,
self.context,
self.header,
self.ssl_verify_cert,
self.ssl_client_cert,
self.proxies,
)
self.cursors.append(cursor)
return cursor
@check_closed
def execute(self, operation, parameters=None):
cursor = self.cursor()
return cursor.execute(operation, parameters)
def __enter__(self):
return self.cursor()
def __exit__(self, *exc):
self.close()
class Cursor(object):
"""Connection cursor."""
def __init__(
self,
url,
user=None,
password=None,
context=None,
header=False,
ssl_verify_cert=True,
proxies=None,
ssl_client_cert=None,
):
self.url = url
self.context = context or {}
self.header = header
self.user = user
self.password = password
self.ssl_verify_cert = ssl_verify_cert
self.ssl_client_cert = ssl_client_cert
self.proxies = proxies
# This read/write attribute specifies the number of rows to fetch at a
# time with .fetchmany(). It defaults to 1 meaning to fetch a single
# row at a time.
self.arraysize = 1
self.closed = False
# this is updated only after a query
self.description = None
        # this is set to an iterator after a successful query
self._results = None
@property
@check_result
@check_closed
def rowcount(self):
# consume the iterator
results = list(self._results)
n = len(results)
self._results = iter(results)
return n
@check_closed
def close(self):
"""Close the cursor."""
self.closed = True
@check_closed
def execute(self, operation, parameters=None):
query = apply_parameters(operation, parameters)
results = self._stream_query(query)
        # `_stream_query` returns a generator that produces the rows; consume
        # the first row so that `description` gets set, then put it back into
        # the stream unless it is the header row.
try:
first_row = next(results)
self._results = (
results if self.header else itertools.chain([first_row], results)
)
except StopIteration:
self._results = iter([])
return self
@check_closed
def executemany(self, operation, seq_of_parameters=None):
raise exceptions.NotSupportedError(
"`executemany` is not supported, use `execute` instead"
)
@check_result
@check_closed
def fetchone(self):
"""
Fetch the next row of a query result set, returning a single sequence,
or `None` when no more data is available.
"""
try:
return self.next()
except StopIteration:
return None
@check_result
@check_closed
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of
sequences (e.g. a list of tuples). An empty sequence is returned when
no more rows are available.
"""
size = size or self.arraysize
return list(itertools.islice(self._results, size))
@check_result
@check_closed
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a
sequence of sequences (e.g. a list of tuples). Note that the cursor's
arraysize attribute can affect the performance of this operation.
"""
return list(self._results)
@check_closed
def setinputsizes(self, sizes):
# not supported
pass
@check_closed
def setoutputsizes(self, sizes):
# not supported
pass
@check_closed
def __iter__(self):
return self
@check_closed
def __next__(self):
return next(self._results)
next = __next__
def _stream_query(self, query):
"""
Stream rows from a query.
This method will yield rows as the data is returned in chunks from the
server.
"""
self.description = None
headers = {"Content-Type": "application/json"}
payload = {"query": query, "context": self.context, "header": self.header}
auth = (
requests.auth.HTTPBasicAuth(self.user, self.password) if self.user else None
)
r = requests.post(
self.url,
stream=True,
headers=headers,
json=payload,
auth=auth,
verify=self.ssl_verify_cert,
cert=self.ssl_client_cert,
proxies=self.proxies,
)
if r.encoding is None:
r.encoding = "utf-8"
# raise any error messages
if r.status_code != 200:
try:
payload = r.json()
except Exception:
payload = {
"error": "Unknown error",
"errorClass": "Unknown",
"errorMessage": r.text,
}
msg = "{error} ({errorClass}): {errorMessage}".format(**payload)
raise exceptions.ProgrammingError(msg)
        # Druid will stream the data in chunks of 8k bytes, splitting the JSON
        # between them; setting `chunk_size` to `None` makes requests yield the
        # data as it arrives from the server
chunks = r.iter_content(chunk_size=None, decode_unicode=True)
Row = None
for row in rows_from_chunks(chunks):
# update description
if self.description is None:
self.description = (
list(row.items()) if self.header else get_description_from_row(row)
)
# return row in namedtuple
if Row is None:
Row = namedtuple("Row", row.keys(), rename=True)
yield Row(*row.values())
def rows_from_chunks(chunks):
"""
A generator that yields rows from JSON chunks.
Druid will return the data in chunks, but they are not aligned with the
JSON objects. This function will parse all complete rows inside each chunk,
yielding them as soon as possible.
"""
body = ""
for chunk in chunks:
if chunk:
body = "".join((body, chunk))
# find last complete row
boundary = 0
brackets = 0
in_string = False
for i, char in enumerate(body):
if char == '"':
if not in_string:
in_string = True
elif body[i - 1] != "\\":
in_string = False
if in_string:
continue
if char == "{":
brackets += 1
elif char == "}":
brackets -= 1
if brackets == 0 and i > boundary:
boundary = i + 1
rows = body[:boundary].lstrip("[,")
body = body[boundary:]
for row in json.loads(
"[{rows}]".format(rows=rows), object_pairs_hook=OrderedDict
):
yield row
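# Editor's sketch (chunk contents are made up): rows_from_chunks() yields each
# complete JSON object as soon as it can be parsed, even when the server splits
# an object across two chunks, as in the second element below.
def _example_rows_from_chunks():  # pragma: no cover
    chunks = iter(['[{"a": 1, "b": "x"}, {"a": 2, ', '"b": "y"}]'])
    return list(rows_from_chunks(chunks))  # two OrderedDicts with keys "a" and "b"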
def apply_parameters(operation, parameters):
if not parameters:
return operation
escaped_parameters = {key: escape(value) for key, value in parameters.items()}
return operation % escaped_parameters
def escape(value):
"""
Escape the parameter value.
    Note that bool is a subclass of int, so the order of the checks matters.
"""
if value == "*":
return value
elif isinstance(value, string_types):
return "'{}'".format(value.replace("'", "''"))
elif isinstance(value, bool):
return "TRUE" if value else "FALSE"
elif isinstance(value, (int, float)):
return value
elif isinstance(value, (list, tuple)):
        return ", ".join(str(escape(element)) for element in value)  # str() so numeric values join cleanly
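# Editor's sketch (query text and values are illustrative): apply_parameters()
# expects pyformat-style placeholders and escapes each value with escape().
def _example_apply_parameters():  # pragma: no cover
    sql = "SELECT * FROM datasource WHERE dim = %(dim)s AND n > %(n)s"
    return apply_parameters(sql, {"dim": "O'Brien", "n": 10})
    # -> "SELECT * FROM datasource WHERE dim = 'O''Brien' AND n > 10"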
|
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 3 18:15:07 2017
@author: camacho
"""
import Kernel;reload(Kernel);kl=Kernel
import Kernel_likelihood;reload(Kernel_likelihood);lk=Kernel_likelihood
import Kernel_optimization;reload(Kernel_optimization);opt=Kernel_optimization
import RV_function;reload(RV_function);RVfunc=RV_function
import numpy as np;np.random.seed(1234)
import matplotlib.pylab as pl
import astropy.table as Table
import sys
f=open("Test_ES_4spots.txt","w")
sys.stdout = f
pl.close('all')
##### spots data pre-processing #####
rdb_data=Table.Table.read('4spots.rdb',format='ascii')
RV_spot=rdb_data['RV_tot'][1:101]
RV_spot=np.array(RV_spot)
RV_spot=RV_spot.astype('Float64')
RV_SPOT=np.concatenate((RV_spot,RV_spot,RV_spot,RV_spot),axis=0)
spots_yy=[]
for i in np.arange(4,401,4):
a=(RV_SPOT[i-4]+RV_SPOT[i-3]+RV_SPOT[i-2]+RV_SPOT[i-1])*1000/4.
spots_yy.append(a)
spots_data=[]
for j in np.arange(1,100,3.3):
spots_data.append(spots_yy[int(round(j))])
##### data and plot #####
# Period (P) ~ 20 to 50 days
# Observations (space) ~ every 4 days
# Error (yerr) ~ 0.20 to 0.50 m
# K=17.353 => planet with 1/4 mass of Jupiter
test1=RVfunc.RV_circular(P=25,K=17.353,T=0,gamma=0,time=100,space=30)
t=np.linspace(0,100,30) #np.linspace(0,time,space)
y0=np.array(test1[1])
yerr=np.array([np.random.uniform(0.2,0.5) for x in y0])
y=np.array([x1+x2 for x1,x2 in zip(y0,spots_data)])
total=np.array([x1+x2 for x1,x2 in zip(y,yerr)])
Xfinal=t
Yfinal=total
##### Lets try GP to fit #####
#kl.ExpSineSquared(theta,l,P) + kl.WhiteNoise(theta)
def sub_tests(trials=20,variation=-0.1):
theta=17.0;l=1.0;P=25.0
for i in range(1,trials+1):
print 'EXAMPLE %i' %i
kernel1=kl.ExpSquared(theta, l)
print 'initial kernel ->', kernel1
likelihood1=lk.likelihood(kernel1,Xfinal,Xfinal,Yfinal,yerr)
print 'initial likelihood ->', likelihood1
Xcalc=np.linspace(0,100,1000)
# [mu,std]=lk.compute_kernel(kernel1,Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
optimization1=opt.committed_optimization(kernel1,Xfinal,Xfinal,Yfinal,yerr,max_opt=10)
print 'final kernel ->',optimization1[1]
print 'final likelihood ->', optimization1[0]
# [mu,std]=lk.compute_kernel(optimization1[1],Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
theta=theta+variation;l=l;P=P+variation
def subNoise_tests(trials=20,variation=-0.1):
theta=17.0;l=1.0;P=24.0;noise=0.5
for i in range(1,trials+1):
print 'EXAMPLE %i' %i
kernel1=kl.ExpSquared(theta, l)+kl.WhiteNoise(noise)
print 'initial kernel ->', kernel1
likelihood1=lk.likelihood(kernel1,Xfinal,Xfinal,Yfinal,yerr)
print 'initial likelihood ->', likelihood1
Xcalc=np.linspace(0,100,1000)
# [mu,std]=lk.compute_kernel(kernel1,Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
optimization1=opt.committed_optimization(kernel1,Xfinal,Xfinal,Yfinal,yerr,max_opt=10)
print 'final kernel ->',optimization1[1]
print 'final likelihood ->', optimization1[0]
# [mu,std]=lk.compute_kernel(optimization1[1],Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
theta=theta+variation;l=l;P=P+variation;noise=noise+(variation/2.)
def add_tests(trials=20,variation=0.1):
theta=17.0;l=1.0;P=25.0
for i in range(1,trials+1):
print 'EXAMPLE %i' %i
kernel1=kl.ExpSquared(theta, l)
print 'initial kernel ->', kernel1
likelihood1=lk.likelihood(kernel1,Xfinal,Xfinal,Yfinal,yerr)
print 'initial likelihood ->', likelihood1
Xcalc=np.linspace(0,100,1000)
# [mu,std]=lk.compute_kernel(kernel1,Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
optimization1=opt.committed_optimization(kernel1,Xfinal,Xfinal,Yfinal,yerr,max_opt=10)
print 'final kernel ->',optimization1[1]
print 'final likelihood ->', optimization1[0]
# [mu,std]=lk.compute_kernel(optimization1[1],Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
theta=theta+variation;l=l;P=P+variation
def addNoise_tests(trials=20,variation=0.1):
theta=17.0;l=1.0;P=24.0;noise=0.1
for i in range(1,trials+1):
print 'EXAMPLE %i' %i
kernel1=kl.ExpSquared(theta, l)+kl.WhiteNoise(noise)
print 'initial kernel ->', kernel1
likelihood1=lk.likelihood(kernel1,Xfinal,Xfinal,Yfinal,yerr)
print 'initial likelihood ->', likelihood1
Xcalc=np.linspace(0,100,1000)
# [mu,std]=lk.compute_kernel(kernel1,Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
optimization1=opt.committed_optimization(kernel1,Xfinal,Xfinal,Yfinal,yerr,max_opt=10)
print 'final kernel ->',optimization1[1]
print 'final likelihood ->', optimization1[0]
# [mu,std]=lk.compute_kernel(optimization1[1],Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
theta=theta;l=l;noise=noise+(variation/2.)
sub_tests()
print ''
subNoise_tests()
print ''
add_tests()
print ''
addNoise_tests()
print ''
# close the output file now that everything has finished
f.close()
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains MentorMix models.
See the README.md file for compilation and running instructions.
"""
import os
import time
import cifar_data_provider
import numpy as np
import resnet_model
import tensorflow as tf
import tensorflow.contrib.slim as slim
import utils
flags = tf.app.flags
flags.DEFINE_integer('batch_size', 128, 'The number of images in each batch.')
flags.DEFINE_string('master', None, 'BNS name of the TensorFlow master to use.')
flags.DEFINE_string('data_dir', '', 'Data dir')
flags.DEFINE_string('train_log_dir', '', 'Directory to save the trained model.')
flags.DEFINE_string('dataset_name', 'cifar100', 'cifar10 or cifar100')
flags.DEFINE_string('studentnet', 'resnet32', 'network backbone.')
flags.DEFINE_float('learning_rate', 0.1, 'The learning rate')
flags.DEFINE_float('learning_rate_decay_factor', 0.9,
'learning rate decay factor')
flags.DEFINE_float('num_epochs_per_decay', 3,
'Number of epochs after which learning rate decays.')
flags.DEFINE_integer(
'save_summaries_secs', 120,
'The frequency with which summaries are saved, in seconds.')
flags.DEFINE_integer(
'save_interval_secs', 1200,
'The frequency with which the model is saved, in seconds.')
flags.DEFINE_integer('max_number_of_steps', 100000,
'The maximum number of gradient steps.')
flags.DEFINE_integer(
'ps_tasks', 0,
'The number of parameter servers. If the value is 0, then the parameters '
'are handled locally by the worker.')
flags.DEFINE_integer(
'task', 0,
'The Task ID. This value is used when training with multiple workers to '
'identify each worker.')
flags.DEFINE_string('device_id', '0', 'GPU device ID to run the job.')
# Learned MentorNet location
flags.DEFINE_string('trained_mentornet_dir', '',
'Directory where to find the trained MentorNet model.')
flags.DEFINE_list('example_dropout_rates', '0.0, 100',
                  'Comma-separated list indicating the example drop-out rate. '
                  'This has little impact on the performance.')
# Hyper-parameters for MentorMix to tune
flags.DEFINE_integer('burn_in_epoch', 0, 'Number of first epochs to perform '
                     'burn-in. In the burn-in period, every sample has a '
                     'fixed 1.0 weight.')
flags.DEFINE_float('loss_p_percentile', 0.7, 'p-percentile used to compute '
                   'the loss moving average.')
flags.DEFINE_float('mixup_alpha', 8.0, 'Alpha parameter for the beta '
                   'distribution to sample during mixup.')
flags.DEFINE_bool('second_reweight', True, 'Whether to weight the mixed-up '
                  'examples again with MentorNet.')
FLAGS = flags.FLAGS
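# Editor's sketch of plain mixup for context (this is NOT the repo's
# utils.mentor_mix_up, which additionally uses the MentorNet weights v and
# works on TF tensors inside the graph below): lambda is drawn from
# Beta(mixup_alpha, mixup_alpha) and convexly combines example pairs and
# their one-hot labels. Inputs here are assumed to be numpy arrays.
def _plain_mixup_sketch(images, one_hot_labels, alpha):
  lam = np.random.beta(alpha, alpha)
  idx = np.random.permutation(images.shape[0])
  images_mix = lam * images + (1.0 - lam) * images[idx]
  labels_mix = lam * one_hot_labels + (1.0 - lam) * one_hot_labels[idx]
  return images_mix, labels_mix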
# Turn this on if there are no log outputs
tf.logging.set_verbosity(tf.logging.INFO)
def resnet_train_step(sess, train_op, global_step, train_step_kwargs):
"""Function that takes a gradient step and specifies whether to stop.
Args:
sess: The current session.
train_op: An `Operation` that evaluates the gradients and returns the
total loss.
global_step: A `Tensor` representing the global training step.
train_step_kwargs: A dictionary of keyword arguments.
Returns:
The total loss and a boolean indicating whether or not to stop training.
Raises:
ValueError: if 'should_trace' is in `train_step_kwargs` but `logdir` is not.
"""
start_time = time.time()
total_loss = tf.get_collection('total_loss')[0]
_, np_global_step, total_loss_val = sess.run(
[train_op, global_step, total_loss])
time_elapsed = time.time() - start_time
if 'should_log' in train_step_kwargs:
if sess.run(train_step_kwargs['should_log']):
tf.logging.info('global step %d: loss = %.4f (%.3f sec/step)',
np_global_step, total_loss_val, time_elapsed)
if 'should_stop' in train_step_kwargs:
should_stop = sess.run(train_step_kwargs['should_stop'])
else:
should_stop = False
return total_loss, should_stop
def train_resnet_mentormix(max_step_run):
"""Trains the mentornet with the student resnet model.
Args:
max_step_run: The maximum number of gradient steps.
"""
if not os.path.exists(FLAGS.train_log_dir):
os.makedirs(FLAGS.train_log_dir)
g = tf.Graph()
with g.as_default():
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
tf_global_step = tf.train.get_or_create_global_step()
(images, one_hot_labels, num_samples_per_epoch,
num_of_classes) = cifar_data_provider.provide_resnet_data(
FLAGS.dataset_name,
'train',
FLAGS.batch_size,
dataset_dir=FLAGS.data_dir)
hps = resnet_model.HParams(
batch_size=FLAGS.batch_size,
num_classes=num_of_classes,
min_lrn_rate=0.0001,
lrn_rate=FLAGS.learning_rate,
num_residual_units=5,
use_bottleneck=False,
weight_decay_rate=0.0002,
relu_leakiness=0.1,
optimizer='mom')
images.set_shape([FLAGS.batch_size, 32, 32, 3])
# Define the model:
resnet = resnet_model.ResNet(hps, images, one_hot_labels, mode='train')
with tf.variable_scope('ResNet32'):
logits = resnet.build_model()
# Specify the loss function:
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=one_hot_labels, logits=logits)
dropout_rates = utils.parse_dropout_rate_list(FLAGS.example_dropout_rates)
example_dropout_rates = tf.convert_to_tensor(
dropout_rates, np.float32, name='example_dropout_rates')
loss_p_percentile = tf.convert_to_tensor(
np.array([FLAGS.loss_p_percentile] * 100),
np.float32,
name='loss_p_percentile')
loss = tf.reshape(loss, [-1, 1])
epoch_step = tf.to_int32(
tf.floor(tf.divide(tf_global_step, max_step_run) * 100))
zero_labels = tf.zeros([tf.shape(loss)[0], 1], tf.float32)
mentornet_net_hparams = utils.get_mentornet_network_hyperparameter(
FLAGS.trained_mentornet_dir)
# In the simplest case, this function can be replaced with a thresholding
# function. See loss_thresholding_function in utils.py.
v = utils.mentornet(
epoch_step,
loss,
zero_labels,
loss_p_percentile,
example_dropout_rates,
burn_in_epoch=FLAGS.burn_in_epoch,
mentornet_net_hparams=mentornet_net_hparams,
avg_name='individual')
v = tf.stop_gradient(v)
loss = tf.stop_gradient(tf.identity(loss))
logits = tf.stop_gradient(tf.identity(logits))
# Perform MentorMix
images_mix, labels_mix = utils.mentor_mix_up(
images, one_hot_labels, v, FLAGS.mixup_alpha)
resnet = resnet_model.ResNet(hps, images_mix, labels_mix, mode='train')
with tf.variable_scope('ResNet32', reuse=True):
logits_mix = resnet.build_model()
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=labels_mix, logits=logits_mix)
decay_loss = resnet.decay()
# second weighting
if FLAGS.second_reweight:
loss = tf.reshape(loss, [-1, 1])
v = utils.mentornet(
epoch_step,
loss,
zero_labels,
loss_p_percentile,
example_dropout_rates,
burn_in_epoch=FLAGS.burn_in_epoch,
mentornet_net_hparams=mentornet_net_hparams,
avg_name='mixed')
v = tf.stop_gradient(v)
weighted_loss_vector = tf.multiply(loss, v)
loss = tf.reduce_mean(weighted_loss_vector)
# reproduced with the following decay loss which should be 0.
decay_loss = tf.losses.get_regularization_loss()
decay_loss = decay_loss * (tf.reduce_sum(v) / FLAGS.batch_size)
# Log data utilization
data_util = utils.summarize_data_utilization(v, tf_global_step,
FLAGS.batch_size)
loss = tf.reduce_mean(loss)
slim.summaries.add_scalar_summary(
tf.reduce_mean(loss), 'mentormix/mix_loss')
weighted_total_loss = loss + decay_loss
slim.summaries.add_scalar_summary(weighted_total_loss, 'total_loss')
tf.add_to_collection('total_loss', weighted_total_loss)
# Set up the moving averages:
moving_average_variables = tf.trainable_variables()
moving_average_variables = tf.contrib.framework.filter_variables(
moving_average_variables, exclude_patterns=['mentornet'])
variable_averages = tf.train.ExponentialMovingAverage(
0.9999, tf_global_step)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS,
variable_averages.apply(moving_average_variables))
decay_steps = FLAGS.num_epochs_per_decay * num_samples_per_epoch / FLAGS.batch_size
lr = tf.train.exponential_decay(
FLAGS.learning_rate,
tf_global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True)
lr = tf.squeeze(lr)
slim.summaries.add_scalar_summary(lr, 'learning_rate')
# Specify the optimization scheme:
with tf.control_dependencies([weighted_total_loss, data_util]):
# Set up training.
trainable_variables = tf.trainable_variables()
trainable_variables = tf.contrib.framework.filter_variables(
trainable_variables, exclude_patterns=['mentornet'])
grads = tf.gradients(weighted_total_loss, trainable_variables)
optimizer = tf.train.MomentumOptimizer(lr, momentum=0.9)
apply_op = optimizer.apply_gradients(
zip(grads, trainable_variables),
global_step=tf_global_step,
name='train_step')
train_ops = [apply_op] + resnet.extra_train_ops + tf.get_collection(
tf.GraphKeys.UPDATE_OPS)
train_op = tf.group(*train_ops)
# Parameter restore setup
if FLAGS.trained_mentornet_dir is not None:
ckpt_model = FLAGS.trained_mentornet_dir
if os.path.isdir(FLAGS.trained_mentornet_dir):
ckpt_model = tf.train.latest_checkpoint(ckpt_model)
# Fix the mentornet parameters
variables_to_restore = slim.get_variables_to_restore(
include=['mentornet', 'mentornet_inputs'])
iassign_op1, ifeed_dict1 = tf.contrib.framework.assign_from_checkpoint(
ckpt_model, variables_to_restore)
# Create an initial assignment function.
def init_assign_fn(sess):
tf.logging.info('Restore using customer initializer %s', '.' * 10)
sess.run(iassign_op1, ifeed_dict1)
else:
init_assign_fn = None
tf.logging.info('-' * 20 + 'MentorMix' + '-' * 20)
tf.logging.info('loss_p_percentile=%3f', FLAGS.loss_p_percentile)
tf.logging.info('mixup_alpha=%d', FLAGS.mixup_alpha)
tf.logging.info('-' * 20)
saver = tf.train.Saver(max_to_keep=10, keep_checkpoint_every_n_hours=24)
# Run training.
slim.learning.train(
train_op=train_op,
train_step_fn=resnet_train_step,
logdir=FLAGS.train_log_dir,
master=FLAGS.master,
is_chief=FLAGS.task == 0,
saver=saver,
number_of_steps=max_step_run,
init_fn=init_assign_fn,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs)
def main(_):
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.device_id
if FLAGS.studentnet == 'resnet32':
train_resnet_mentormix(FLAGS.max_number_of_steps)
else:
tf.logging.error('unknown backbone student network %s', FLAGS.studentnet)
if __name__ == '__main__':
tf.app.run()
|
|
import datetime
import json
import logging
import tba_config
from google.appengine.ext import deferred
from google.appengine.ext import ndb
from google.appengine.api import urlfetch
from controllers.apiv3.model_properties import filter_match_properties
from database.dict_converters.match_converter import MatchConverter
from database.dict_converters.event_converter import EventConverter
from database.dict_converters.event_details_converter import EventDetailsConverter
from helpers.event_helper import EventHelper
from helpers.webcast_online_helper import WebcastOnlineHelper
from models.sitevar import Sitevar
class FirebasePusher(object):
@classmethod
def _get_secret(cls):
firebase_secrets = Sitevar.get_by_id("firebase.secrets")
if firebase_secrets is None:
logging.error("Missing sitevar: firebase.secrets. Can't write to Firebase.")
return None
return firebase_secrets.contents['FIREBASE_SECRET']
@classmethod
def _delete_data(cls, key):
"""
Remove data from the specified Firebase database reference.
"""
if not tba_config.CONFIG['firebase-push']:
return
secret = cls._get_secret()
if secret is None:
return
url = tba_config.CONFIG['firebase-url'].format(key, secret)
result = urlfetch.fetch(url, method='DELETE', deadline=10)
if result.status_code not in {200, 204}:
raise Exception("Error with DELETE data from Firebase: {}. ERROR {}: {}".format(url, result.status_code, result.content))
@classmethod
def _patch_data(cls, key, data_json):
"""
        Update the children at a defined path, like messages/users/user1/<data>,
        without overwriting siblings that are not included in the payload.
"""
if not tba_config.CONFIG['firebase-push']:
return
secret = cls._get_secret()
if secret is None:
return
url = tba_config.CONFIG['firebase-url'].format(key, secret)
result = urlfetch.fetch(url, payload=data_json, method='PATCH', deadline=10)
if result.status_code not in {200, 204}:
raise Exception("Error with PATCH data to Firebase: {}; {}. ERROR {}: {}".format(url, data_json, result.status_code, result.content))
@classmethod
def _put_data(cls, key, data_json):
"""
Write or replace data to a defined path, like messages/users/user1/<data>
"""
if not tba_config.CONFIG['firebase-push']:
return
secret = cls._get_secret()
if secret is None:
return
url = tba_config.CONFIG['firebase-url'].format(key, secret)
result = urlfetch.fetch(url, payload=data_json, method='PUT', deadline=10)
if result.status_code not in {200, 204}:
raise Exception("Error with PUT data to Firebase: {}; {}. ERROR {}: {}".format(url, data_json, result.status_code, result.content))
@classmethod
def _push_data(cls, key, data_json):
"""
Add to a list of data in our Firebase database.
Every time we send a POST request, the Firebase client generates a unique key, like messages/users/<unique-id>/<data>
"""
if not tba_config.CONFIG['firebase-push']:
return
secret = cls._get_secret()
if secret is None:
return
url = tba_config.CONFIG['firebase-url'].format(key, secret)
result = urlfetch.fetch(url, payload=data_json, method='POST', deadline=10)
if result.status_code not in {200, 204}:
raise Exception("Error with POST data to Firebase: {}; {}. ERROR {}: {}".format(url, data_json, result.status_code, result.content))
@classmethod
def delete_match(cls, match):
"""
Deletes a match from an event and event_team
"""
deferred.defer(
cls._delete_data,
'events/{}/matches/{}'.format(match.event.id(), match.key.id()),
_queue="firebase")
# for team_key_name in match.team_key_names:
# deferred.defer(
# cls._delete_data,
# 'event_teams/{}/{}/matches/{}'.format(match.event.id(), team_key_name, match.key.id()),
# _queue="firebase")
@classmethod
def replace_event_matches(cls, event_key, matches):
"""
Deletes matches from an event and puts these instead
"""
match_data = {}
for match in matches:
match_data[match.key.id()] = filter_match_properties([MatchConverter.convert(match, 3)], 'simple')[0]
deferred.defer(
cls._put_data,
'events/{}/matches'.format(event_key),
json.dumps(match_data),
_queue="firebase")
@classmethod
def update_match(cls, match):
"""
Updates a match in an event and event/team
"""
if match.year < 2017:
return
match_data_json = json.dumps(filter_match_properties([MatchConverter.convert(match, 3)], 'simple')[0])
deferred.defer(
cls._patch_data,
'events/{}/matches/{}'.format(match.event.id(), match.key.id()),
match_data_json,
_queue="firebase")
# for team_key_name in match.team_key_names:
# deferred.defer(
# cls._put_data,
# 'event_teams/{}/{}/matches/{}'.format(match.event.id(), team_key_name, match.key.id()),
# match_data_json,
# _queue="firebase")
@classmethod
def update_event_details(cls, event_details):
"""
Updates an event_detail in an event
"""
if int(event_details.key.id()[:4]) < 2017:
return
event_details_json = json.dumps(EventDetailsConverter.convert(event_details, 3))
deferred.defer(
cls._patch_data,
'events/{}/details'.format(event_details.key.id()),
event_details_json,
_queue="firebase")
@classmethod
def update_event_team_status(cls, event_key, team_key, status):
"""
Updates an event team status
"""
return
# if int(event_key[:4]) < 2017:
# return
# from helpers.event_team_status_helper import EventTeamStatusHelper # Prevent circular import
# if status:
# status.update({
# 'alliance_status_str': EventTeamStatusHelper.generate_team_at_event_alliance_status_string(team_key, status),
# 'playoff_status_str': EventTeamStatusHelper.generate_team_at_event_playoff_status_string(team_key, status),
# 'overall_status_str': EventTeamStatusHelper.generate_team_at_event_status_string(team_key, status),
# })
# status_json = json.dumps(status)
# deferred.defer(
# cls._put_data,
# 'event_teams/{}/{}/status'.format(event_key, team_key),
# status_json,
# _queue="firebase")
@classmethod
def update_live_events(cls):
"""
Updates live_events and special webcasts
"""
events_by_key = {}
for event_key, event in cls._update_live_events_helper().items():
converted_event = EventConverter.convert(event, 3)
# Only what's needed to render webcast
partial_event = {key: converted_event[key] for key in ['key', 'name', 'short_name', 'webcasts']}
# Hack in district code
if event.district_key and partial_event.get('short_name'):
partial_event['short_name'] = '[{}] {}'.format(event.district_key.id()[4:].upper(), partial_event['short_name'])
events_by_key[event_key] = partial_event
deferred.defer(
cls._put_data,
'live_events',
json.dumps(events_by_key),
_queue="firebase")
deferred.defer(
cls._put_data,
'special_webcasts',
json.dumps(cls.get_special_webcasts()),
_queue="firebase")
@classmethod
@ndb.toplevel
def _update_live_events_helper(cls):
week_events = EventHelper.getWeekEvents()
events_by_key = {}
live_events = []
for event in week_events:
if event.now:
event._webcast = event.current_webcasts # Only show current webcasts
for webcast in event.webcast:
WebcastOnlineHelper.add_online_status_async(webcast)
events_by_key[event.key.id()] = event
if event.within_a_day:
live_events.append(event)
# Add in the Fake TBA BlueZone event (watch for circular imports)
from helpers.bluezone_helper import BlueZoneHelper
bluezone_event = BlueZoneHelper.update_bluezone(live_events)
if bluezone_event:
for webcast in bluezone_event.webcast:
WebcastOnlineHelper.add_online_status_async(webcast)
events_by_key[bluezone_event.key_name] = bluezone_event
return events_by_key
@classmethod
@ndb.toplevel
def get_special_webcasts(cls): # TODO: Break this out of FirebasePusher 2017-03-01 -fangeugene
special_webcasts_temp = Sitevar.get_by_id('gameday.special_webcasts')
if special_webcasts_temp:
special_webcasts_temp = special_webcasts_temp.contents.get('webcasts', [])
else:
special_webcasts_temp = []
special_webcasts = []
for webcast in special_webcasts_temp:
WebcastOnlineHelper.add_online_status_async(webcast)
special_webcasts.append(webcast)
return special_webcasts
@classmethod
def update_event(cls, event):
WebcastOnlineHelper.add_online_status(event.webcast)
converted_event = EventConverter.convert(event, 3)
deferred.defer(
cls._patch_data,
'live_events/{}'.format(event.key_name),
json.dumps({key: converted_event[key] for key in ['key', 'name', 'short_name', 'webcasts']}),
_queue="firebase")
|
|
# -*- coding: utf-8 -*-
import abc
from django.contrib.contenttypes.models import ContentType
from denorm.db import triggers
from django.db import connection
from django.db.models import sql, ManyToManyField
from django.db.models.fields.related import ManyToManyField
from django.db.models.manager import Manager
from denorm.models import DirtyInstance
from django.db.models.sql import Query
from django.db.models.sql.compiler import SQLCompiler
from django.db.models.sql.query import Query
from django.db.models.sql.where import WhereNode
# Remember all denormalizations.
# This is used to rebuild all denormalized values in the whole DB.
alldenorms = []
def many_to_many_pre_save(sender, instance, **kwargs):
"""
Updates denormalised many-to-many fields for the model
"""
if instance.pk:
# Need a primary key to do m2m stuff
for m2m in sender._meta.local_many_to_many:
# This gets us all m2m fields, so limit it to just those that are denormed
if hasattr(m2m, 'denorm'):
# Does some extra jiggery-pokery for "through" m2m models.
# May not work under lots of conditions.
if hasattr(m2m.rel, 'through_model'):
                    # Clear existing through records (a bit heavy-handed?)
kwargs = {m2m.related.var_name: instance}
# Can't use m2m_column_name in a filter
# kwargs = { m2m.m2m_column_name(): instance.pk, }
m2m.rel.through_model.objects.filter(**kwargs).delete()
values = m2m.denorm.func(instance)
for value in values:
kwargs.update({m2m.m2m_reverse_name(): value.pk})
m2m.rel.through_model.objects.create(**kwargs)
else:
values = m2m.denorm.func(instance)
setattr(instance, m2m.attname, values)
def many_to_many_post_save(sender, instance, created, **kwargs):
if created:
def check_resave():
for m2m in sender._meta.local_many_to_many:
if hasattr(m2m, 'denorm'):
return True
return False
if check_resave():
instance.save()
class Denorm(object):
def __init__(self, skip=None):
self.func = None
self.skip = skip
def setup(self, **kwargs):
"""
Adds 'self' to the global denorm list
and connects all needed signals.
"""
global alldenorms
if self not in alldenorms:
alldenorms.append(self)
def update(self, qs):
"""
Updates the denormalizations in all instances in the queryset 'qs'.
"""
for instance in qs.distinct().iterator():
# only write new values to the DB if they actually changed
new_value = self.func(instance)
# Get attribute name (required for denormalising ForeignKeys)
attname = instance._meta.get_field(self.fieldname).attname
if isinstance(getattr(instance, attname), Manager):
# for a many to many field the decorated
# function should return a list of either model instances
# or primary keys
old_pks = set([x.pk for x in getattr(instance, attname).all()])
new_pks = set([])
for x in new_value:
# we need to compare sets of objects based on pk values,
# as django lacks an identity map.
if hasattr(x,'pk'):
new_pks.add(x.pk)
else:
new_pks.add(x)
if old_pks != new_pks:
print old_pks
for o in qs.filter(pk=instance.pk):
o.attname = new_value
instance.save()
elif not getattr(instance, attname) == new_value:
setattr(instance, attname, new_value)
                # an update before the save is needed to handle CountFields:
                # CountField does not update its value during pre_save
qs.filter(pk=instance.pk).update(**{self.fieldname: new_value})
instance.save()
flush()
def get_triggers(self, using):
return []
class BaseCallbackDenorm(Denorm):
"""
Handles the denormalization of one field, using a python function
as a callback.
"""
def setup(self, **kwargs):
"""
Calls setup() on all DenormDependency resolvers
"""
super(BaseCallbackDenorm, self).setup(**kwargs)
for dependency in self.depend:
dependency.setup(self.model)
def get_triggers(self, using):
"""
Creates a list of all triggers needed to keep track of changes
to fields this denorm depends on.
"""
trigger_list = list()
# Get the triggers of all DenormDependency instances attached
# to our callback.
for dependency in self.depend:
trigger_list += dependency.get_triggers(using=using)
return trigger_list + super(BaseCallbackDenorm, self).get_triggers(using=using)
class CallbackDenorm(BaseCallbackDenorm):
"""
As above, but with extra triggers on self as described below
"""
def get_triggers(self, using):
content_type = str(ContentType.objects.get_for_model(self.model).pk)
# Create a trigger that marks any updated or newly created
# instance of the model containing the denormalized field
# as dirty.
# This is only really needed if the instance was changed without
# using the ORM or if it was part of a bulk update.
# In those cases the self_save_handler won't get called by the
# pre_save signal, so we need to ensure flush() does this later.
action = triggers.TriggerActionInsert(
model=DirtyInstance,
columns=("content_type_id", "object_id"),
values=(content_type, "NEW.%s" % self.model._meta.pk.get_attname_column()[1])
)
trigger_list = [
triggers.Trigger(self.model, "after", "update", [action], content_type, using, self.skip),
triggers.Trigger(self.model, "after", "insert", [action], content_type, using, self.skip),
]
return trigger_list + super(CallbackDenorm, self).get_triggers(using=using)
class BaseCacheKeyDenorm(Denorm):
def __init__(self, depend_on_related, *args, **kwargs):
self.depend = depend_on_related
super(BaseCacheKeyDenorm, self).__init__(*args, **kwargs)
import random
self.func = lambda o: random.randint(-9223372036854775808, 9223372036854775807)
def setup(self, **kwargs):
"""
Calls setup() on all DenormDependency resolvers
"""
super(BaseCacheKeyDenorm, self).setup(**kwargs)
for dependency in self.depend:
dependency.setup(self.model)
def get_triggers(self, using):
"""
Creates a list of all triggers needed to keep track of changes
to fields this denorm depends on.
"""
trigger_list = list()
# Get the triggers of all DenormDependency instances attached
# to our callback.
for dependency in self.depend:
trigger_list += dependency.get_triggers(using=using)
return trigger_list + super(BaseCacheKeyDenorm, self).get_triggers(using=using)
class CacheKeyDenorm(BaseCacheKeyDenorm):
"""
As above, but with extra triggers on self as described below
"""
def get_triggers(self, using):
content_type = str(ContentType.objects.get_for_model(self.model).pk)
# This is only really needed if the instance was changed without
# using the ORM or if it was part of a bulk update.
# In those cases the self_save_handler won't get called by the
# pre_save signal
action = triggers.TriggerActionUpdate(
model=self.model,
columns=(self.fieldname,),
values=(triggers.RandomBigInt(),),
where="%s=NEW.%s" % ((self.model._meta.pk.get_attname_column()[1],) * 2),
)
trigger_list = [
triggers.Trigger(self.model, "after", "update", [action], content_type, using, self.skip),
triggers.Trigger(self.model, "after", "insert", [action], content_type, using, self.skip),
]
return trigger_list + super(CacheKeyDenorm, self).get_triggers(using=using)
class TriggerWhereNode(WhereNode):
def sql_for_columns(self, data, qn, connection):
"""
Returns the SQL fragment used for the left-hand side of a column
constraint (for example, the "T1.foo" portion in the clause
"WHERE ... T1.foo = 6").
"""
table_alias, name, db_type = data
if table_alias:
if table_alias in ('NEW', 'OLD'):
lhs = '%s.%s' % (table_alias, qn(name))
else:
lhs = '%s.%s' % (qn(table_alias), qn(name))
else:
lhs = qn(name)
return connection.ops.field_cast_sql(db_type) % lhs
class TriggerFilterQuery(sql.Query):
def __init__(self, model, trigger_alias, where=TriggerWhereNode):
super(TriggerFilterQuery, self).__init__(model, where)
self.trigger_alias = trigger_alias
def get_initial_alias(self):
return self.trigger_alias
class AggregateDenorm(Denorm):
__metaclass__ = abc.ABCMeta
def __init__(self, skip=None):
self.manager = None
self.skip = skip
def setup(self, sender, **kwargs):
# as we connected to the ``class_prepared`` signal for any sender
# and we only need to setup once, check if the sender is our model.
if sender is self.model:
super(AggregateDenorm, self).setup(sender=sender, **kwargs)
        # related managers will only be available after both models are initialized,
        # so check if it's available already, and get our manager
if not self.manager and hasattr(self.model, self.manager_name):
self.manager = getattr(self.model, self.manager_name)
def get_related_where(self, fk_name, using, type):
related_where = ["%s=%s.%s" % (self.model._meta.pk.get_attname_column()[1], type, fk_name)]
related_query = Query(self.manager.related.model)
for name, value in self.filter.iteritems():
related_query.add_filter((name, value))
related_query.add_extra(None, None,
["%s=%s.%s" % (self.model._meta.pk.get_attname_column()[1], type, self.manager.related.field.m2m_column_name())],
None, None, None)
related_query.add_count_column()
related_query.clear_ordering(force_empty=True)
related_query.default_cols = False
related_filter_where, related_where_params = related_query.get_compiler(using=using,
connection=connection).as_sql()
if related_filter_where is not None:
related_where.append('(' + related_filter_where + ') > 0')
return related_where, related_where_params
def m2m_triggers(self, content_type, fk_name, related_field, using):
"""
Returns triggers for m2m relation
"""
related_inc_where, _ = self.get_related_where(fk_name, using, 'NEW')
related_dec_where, related_where_params = self.get_related_where(fk_name, using, 'OLD')
related_increment = triggers.TriggerActionUpdate(
model=self.model,
columns=(self.fieldname,),
values=("%s+1" % self.fieldname,),
where=(' AND '.join(related_inc_where), related_where_params),
)
related_decrement = triggers.TriggerActionUpdate(
model=self.model,
columns=(self.fieldname,),
values=("%s-1" % self.fieldname,),
where=(' AND '.join(related_dec_where), related_where_params),
)
trigger_list = [
triggers.Trigger(related_field, "after", "update", [related_increment, related_decrement], content_type,
using,
self.skip),
triggers.Trigger(related_field, "after", "insert", [related_increment], content_type, using, self.skip),
triggers.Trigger(related_field, "after", "delete", [related_decrement], content_type, using, self.skip),
]
return trigger_list
def get_triggers(self, using):
related_field = self.manager.related.field
if isinstance(related_field, ManyToManyField):
fk_name = related_field.m2m_reverse_name()
inc_where = ["%(id)s=(SELECT %(reverse_related)s FROM %(m2m_table)s WHERE %(related)s=NEW.%(id)s)" % {
'id': self.model._meta.pk.get_attname_column()[0],
'related': related_field.m2m_column_name(),
'm2m_table': related_field.m2m_db_table(),
'reverse_related': fk_name,
}]
dec_where = [action.replace('NEW.', 'OLD.') for action in inc_where]
else:
fk_name = related_field.attname
inc_where = ["%s=NEW.%s" % (self.model._meta.pk.get_attname_column()[1], fk_name)]
dec_where = ["%s=OLD.%s" % (self.model._meta.pk.get_attname_column()[1], fk_name)]
content_type = str(ContentType.objects.get_for_model(self.model).pk)
inc_query = TriggerFilterQuery(self.manager.related.model, trigger_alias='NEW')
for name, value in self.filter.iteritems():
inc_query.add_filter((name, value))
inc_filter_where, _ = inc_query.where.as_sql(SQLCompiler(inc_query, connection, using).quote_name_unless_alias,
connection)
dec_query = TriggerFilterQuery(self.manager.related.model, trigger_alias='OLD')
for name, value in self.filter.iteritems():
dec_query.add_filter((name, value))
dec_filter_where, where_params = dec_query.where.as_sql(
SQLCompiler(inc_query, connection, using).quote_name_unless_alias, connection)
if inc_filter_where is not None:
inc_where.append(inc_filter_where)
if dec_filter_where is not None:
dec_where.append(dec_filter_where)
# create the triggers for the incremental updates
increment = triggers.TriggerActionUpdate(
model=self.model,
columns=(self.fieldname,),
values=(self.get_increment_value(),),
where=(' AND '.join(inc_where), where_params),
)
decrement = triggers.TriggerActionUpdate(
model=self.model,
columns=(self.fieldname,),
values=(self.get_decrement_value(),),
where=(' AND '.join(dec_where), where_params),
)
other_model = self.manager.related.model
trigger_list = [
triggers.Trigger(other_model, "after", "update", [increment, decrement], content_type, using, self.skip),
triggers.Trigger(other_model, "after", "insert", [increment], content_type, using, self.skip),
triggers.Trigger(other_model, "after", "delete", [decrement], content_type, using, self.skip),
]
if isinstance(related_field, ManyToManyField):
trigger_list.extend(self.m2m_triggers(content_type, fk_name, related_field, using))
return trigger_list
@abc.abstractmethod
def get_increment_value(self):
"""
Returns SQL for incrementing value
"""
@abc.abstractmethod
def get_decrement_value(self):
"""
Returns SQL for decrementing value
"""
class SumDenorm(AggregateDenorm):
"""
Handles denormalization of a sum field by doing incrementally updates.
"""
def __init__(self, skip=None, field = None):
super(SumDenorm, self).__init__(skip)
# in case we want to set the value without relying on the
# correctness of the incremental updates we create a function that
# calculates it from scratch.
self.sum_field = field
self.func = lambda obj: getattr(obj, self.manager_name).filter(**self.filter).aggregate('sum')
def get_increment_value(self):
return "%s+NEW.%s" % (self.fieldname, self.sum_field)
def get_decrement_value(self):
return "%s-OLD.%s" % (self.fieldname, self.sum_field)
class CountDenorm(AggregateDenorm):
"""
Handles the denormalization of a count field by doing incrementally
updates.
"""
def __init__(self, skip=None):
super(CountDenorm, self).__init__(skip)
# in case we want to set the value without relying on the
# correctness of the incremental updates we create a function that
# calculates it from scratch.
self.func = lambda obj: getattr(obj, self.manager_name).filter(**self.filter).count()
def get_increment_value(self):
return "%s+1" % self.fieldname
def get_decrement_value(self):
return "%s-1" % self.fieldname
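# Editor's note (hypothetical field name): for a CountDenorm stored in a field
# called "comment_count", get_increment_value() returns "comment_count+1" and
# get_decrement_value() returns "comment_count-1"; AggregateDenorm.get_triggers()
# splices these fragments into the UPDATE trigger actions defined above.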
def rebuildall(verbose=False, model_name=None):
"""
Updates all models containing denormalized fields.
Used by the 'denormalize' management command.
"""
global alldenorms
for i, denorm in enumerate(alldenorms):
if model_name is None or denorm.model.__name__ == model_name:
if verbose:
print 'rebuilding', '%s/%s' % (i + 1, len(alldenorms)), denorm.fieldname, 'in', denorm.model
denorm.update(denorm.model.objects.all())
def drop_triggers(using=None):
triggerset = triggers.TriggerSet(using=using)
triggerset.drop()
def install_triggers(using=None):
"""
Installs all required triggers in the database
"""
build_triggerset(using=using).install()
def build_triggerset(using=None):
global alldenorms
# Use a TriggerSet to ensure each event gets just one trigger
triggerset = triggers.TriggerSet(using=using)
for denorm in alldenorms:
triggerset.append(denorm.get_triggers(using=using))
return triggerset
def flush():
"""
Updates all model instances marked as dirty by the DirtyInstance
model.
After this method finishes the DirtyInstance table is empty and
all denormalized fields have consistent data.
"""
# Loop until break.
    # We may need multiple passes, because an update on one instance
    # may cause another instance to be marked dirty (dependency chains)
while True:
# Get all dirty markers
qs = DirtyInstance.objects.all()
# DirtyInstance table is empty -> all data is consistent -> we're done
if not qs:
break
        # Call save() on all dirty instances, causing self_save_handler()
        # to be called by the pre_save signal.
for dirty_instance in qs:
if dirty_instance.content_object:
dirty_instance.content_object.save()
dirty_instance.delete()
|
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import re
import random
import shutil
import socket
import string
import json
import ipaddress
from charms.leadership import leader_get, leader_set
from shutil import move
from shlex import split
from subprocess import check_call
from subprocess import check_output
from subprocess import CalledProcessError
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import is_state
from charms.reactive import when, when_any, when_not
from charms.reactive.helpers import data_changed, any_file_changed
from charms.kubernetes.common import get_version
from charms.kubernetes.common import retry
from charms.layer import tls_client
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import unitdata
from charmhelpers.core.host import service_stop
from charmhelpers.core.templating import render
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
def set_upgrade_needed(forced=False):
set_state('kubernetes-master.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
hookenv.log('set upgrade needed')
if previous_channel is None or not require_manual or forced:
hookenv.log('forcing upgrade')
set_state('kubernetes-master.upgrade-specified')
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
def service_cidr():
''' Return the charm's service-cidr config '''
db = unitdata.kv()
frozen_cidr = db.get('kubernetes-master.service-cidr')
return frozen_cidr or hookenv.config('service-cidr')
def freeze_service_cidr():
''' Freeze the service CIDR. Once the apiserver has started, we can no
longer safely change this value. '''
db = unitdata.kv()
db.set('kubernetes-master.service-cidr', service_cidr())
@hook('upgrade-charm')
def check_for_upgrade_needed():
'''An upgrade charm event was triggered by Juju, react to that here.'''
hookenv.status_set('maintenance', 'Checking resources')
migrate_from_pre_snaps()
add_rbac_roles()
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
changed = snap_resources_changed()
if changed == 'yes':
set_upgrade_needed()
elif changed == 'unknown':
# We are here on an upgrade from a non-rolling master.
# Since this upgrade might also include resource updates, e.g.
# juju upgrade-charm kubernetes-master --resource kube-any=my.snap
# we take no risk and forcibly upgrade the snaps.
# Forcibly means we do not prompt the user to call the upgrade action.
set_upgrade_needed(forced=True)
# Set the auto storage backend to etcd2.
auto_storage_backend = leader_get('auto_storage_backend')
is_leader = is_state('leadership.is_leader')
if not auto_storage_backend and is_leader:
leader_set(auto_storage_backend='etcd2')
def snap_resources_changed():
'''
Check if the snapped resources have changed. The first time this method is
called it will report "unknown".
Returns: "yes" in case a snap resource file has changed,
"no" in case the snap resources are the same as in the last call,
"unknown" if it is the first time this method is called
'''
db = unitdata.kv()
resources = ['kubectl', 'kube-apiserver', 'kube-controller-manager',
'kube-scheduler', 'cdk-addons']
paths = [hookenv.resource_get(resource) for resource in resources]
if db.get('snap.resources.fingerprint.initialised'):
result = 'yes' if any_file_changed(paths) else 'no'
return result
else:
db.set('snap.resources.fingerprint.initialised', True)
any_file_changed(paths)
return 'unknown'
def add_rbac_roles():
'''Update the known_tokens file with proper groups.'''
tokens_fname = '/root/cdk/known_tokens.csv'
tokens_backup_fname = '/root/cdk/known_tokens.csv.backup'
move(tokens_fname, tokens_backup_fname)
with open(tokens_fname, 'w') as ftokens:
with open(tokens_backup_fname, 'r') as stream:
for line in stream:
record = line.strip().split(',')
# token, username, user, groups
if record[2] == 'admin' and len(record) == 3:
towrite = '{0},{1},{2},"{3}"\n'.format(record[0],
record[1],
record[2],
'system:masters')
ftokens.write(towrite)
continue
if record[2] == 'kube_proxy':
towrite = '{0},{1},{2}\n'.format(record[0],
'system:kube-proxy',
'kube-proxy')
ftokens.write(towrite)
continue
if record[2] == 'kubelet' and record[1] == 'kubelet':
continue
ftokens.write('{}'.format(line))
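# Illustrative sketch (added for clarity, not part of the original charm):
# given hypothetical known_tokens.csv lines, the rewrite above would produce:
#
#   'abc123,admin,admin'            ->  'abc123,admin,admin,"system:masters"'
#   'def456,kube_proxy,kube_proxy'  ->  'def456,system:kube-proxy,kube-proxy'
#   'ghi789,kubelet,kubelet'        ->  (dropped)
#   any other line                  ->  copied through unchanged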
def rename_file_idempotent(source, destination):
if os.path.isfile(source):
os.rename(source, destination)
def migrate_from_pre_snaps():
# remove old states
remove_state('kubernetes.components.installed')
remove_state('kubernetes.dashboard.available')
remove_state('kube-dns.available')
remove_state('kubernetes-master.app_version.set')
# disable old services
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
host.service_stop(service)
# rename auth files
os.makedirs('/root/cdk', exist_ok=True)
rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
'/root/cdk/serviceaccount.key')
rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
'/root/cdk/basic_auth.csv')
rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
'/root/cdk/known_tokens.csv')
# cleanup old files
files = [
"/lib/systemd/system/kube-apiserver.service",
"/lib/systemd/system/kube-controller-manager.service",
"/lib/systemd/system/kube-scheduler.service",
"/etc/default/kube-defaults",
"/etc/default/kube-apiserver.defaults",
"/etc/default/kube-controller-manager.defaults",
"/etc/default/kube-scheduler.defaults",
"/srv/kubernetes",
"/home/ubuntu/kubectl",
"/usr/local/bin/kubectl",
"/usr/local/bin/kube-apiserver",
"/usr/local/bin/kube-controller-manager",
"/usr/local/bin/kube-scheduler",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('kubernetes-master.upgrade-needed')
@when_not('kubernetes-master.upgrade-specified')
def upgrade_needed_status():
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
@when('kubernetes-master.upgrade-specified')
def do_upgrade():
install_snaps()
remove_state('kubernetes-master.upgrade-needed')
remove_state('kubernetes-master.upgrade-specified')
def install_snaps():
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-apiserver snap')
snap.install('kube-apiserver', channel=channel)
hookenv.status_set('maintenance',
'Installing kube-controller-manager snap')
snap.install('kube-controller-manager', channel=channel)
hookenv.status_set('maintenance', 'Installing kube-scheduler snap')
snap.install('kube-scheduler', channel=channel)
hookenv.status_set('maintenance', 'Installing cdk-addons snap')
snap.install('cdk-addons', channel=channel)
snap_resources_changed()
set_state('kubernetes-master.snaps.installed')
remove_state('kubernetes-master.components.started')
@when('config.changed.client_password', 'leadership.is_leader')
def password_changed():
"""Handle password change via the charms config."""
password = hookenv.config('client_password')
if password == "" and is_state('client.password.initialised'):
# password_changed is called during an upgrade. Nothing to do.
return
elif password == "":
# Password not initialised
password = token_generator()
setup_basic_auth(password, "admin", "admin")
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
set_state('client.password.initialised')
@when('config.changed.storage-backend')
def storage_backend_changed():
remove_state('kubernetes-master.components.started')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set master configuration on the CNI relation. This lets the CNI
subordinate know that we're the master so it can respond accordingly. '''
cni.set_config(is_master=True, kubeconfig_path='')
@when('leadership.is_leader')
@when_not('authentication.setup')
def setup_leader_authentication():
'''Setup basic authentication and token access for the cluster.'''
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
hookenv.status_set('maintenance', 'Rendering authentication templates.')
keys = [service_key, basic_auth, known_tokens]
# Try first to fetch data from an old leadership broadcast.
if not get_keys_from_leader(keys) \
or is_state('reconfigure.authentication.setup'):
last_pass = get_password('basic_auth.csv', 'admin')
setup_basic_auth(last_pass, 'admin', 'admin', 'system:masters')
if not os.path.isfile(known_tokens):
touch(known_tokens)
# Generate the default service account token key
os.makedirs('/root/cdk', exist_ok=True)
if not os.path.isfile(service_key):
cmd = ['openssl', 'genrsa', '-out', service_key,
'2048']
check_call(cmd)
remove_state('reconfigure.authentication.setup')
# read service account key for syndication
leader_data = {}
for f in [known_tokens, basic_auth, service_key]:
with open(f, 'r') as fp:
leader_data[f] = fp.read()
# this is slightly opaque, but we are sending file contents under its file
# path as a key.
# eg:
# {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
leader_set(leader_data)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
@when_not('leadership.is_leader')
def setup_non_leader_authentication():
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
keys = [service_key, basic_auth, known_tokens]
# The source of truth for non-leaders is the leader.
# Therefore we overwrite_local with whatever the leader has.
if not get_keys_from_leader(keys, overwrite_local=True):
# the keys were not retrieved. Non-leaders have to retry.
return
if not any_file_changed(keys) and is_state('authentication.setup'):
# No change detected and we have already setup the authentication
return
hookenv.status_set('maintenance', 'Rendering authentication templates.')
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
def get_keys_from_leader(keys, overwrite_local=False):
"""
Gets the broadcasted keys from the leader and stores them in
the corresponding files.
Args:
keys: list of keys. Keys are actually files on the FS.
Returns: True if all keys were fetched, False if not.
"""
# This races with other codepaths, and seems to require being created first
# This block may be extracted later, but for now seems to work as intended
os.makedirs('/root/cdk', exist_ok=True)
for k in keys:
# If the path does not exist, assume we need it
if not os.path.exists(k) or overwrite_local:
# Fetch data from leadership broadcast
contents = leader_get(k)
# Default to logging the warning and wait for leader data to be set
if contents is None:
msg = "Waiting on leaders crypto keys."
hookenv.status_set('waiting', msg)
hookenv.log('Missing content for file {}'.format(k))
return False
# Write out the file and move on to the next item
with open(k, 'w+') as fp:
fp.write(contents)
fp.write('\n')
return True
@when('kubernetes-master.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
version = check_output(['kube-apiserver', '--version'])
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('cdk-addons.configured', 'kube-api-endpoint.available',
'kube-control.connected')
@when_not('kubernetes-master.upgrade-needed')
def idle_status(kube_api, kube_control):
''' Signal at the end of the run that we are running. '''
if not all_kube_system_pods_running():
hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
elif hookenv.config('service-cidr') != service_cidr():
msg = 'WARN: cannot change service-cidr, still using ' + service_cidr()
hookenv.status_set('active', msg)
else:
# All services should be up and running at this point. Double-check...
failing_services = master_services_down()
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes master running.')
else:
msg = 'Stopped services: {}'.format(','.join(failing_services))
hookenv.status_set('blocked', msg)
def master_services_down():
"""Ensure master services are up and running.
Return: list of failing services"""
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not host.service_running(daemon):
failing_services.append(service)
return failing_services
@when('etcd.available', 'tls_client.server.certificate.saved',
'authentication.setup')
@when('leadership.set.auto_storage_backend')
@when_not('kubernetes-master.components.started')
def start_master(etcd):
'''Run the Kubernetes master components.'''
hookenv.status_set('maintenance',
'Configuring the Kubernetes master services.')
freeze_service_cidr()
if not etcd.get_connection_string():
# etcd is not returning a connection string. This happens when
# the master unit disconnects from etcd and is ready to terminate.
# No point in trying to start master services and fail. Just return.
return
# TODO: Make sure below relation is handled on change
# https://github.com/kubernetes/kubernetes/issues/43461
handle_etcd_relation(etcd)
# Add CLI options to all components
configure_apiserver(etcd.get_connection_string(), getStorageBackend())
configure_controller_manager()
configure_scheduler()
set_state('kubernetes-master.components.started')
hookenv.open_port(6443)
@when('etcd.available')
def etcd_data_change(etcd):
''' Etcd scale events block master reconfiguration due to the
kubernetes-master.components.started state. We need a way to
handle these events consistently only when the number of etcd
units has actually changed '''
# key off of the connection string
connection_string = etcd.get_connection_string()
# If the connection string changes, remove the started state to trigger
# handling of the master components
if data_changed('etcd-connect', connection_string):
remove_state('kubernetes-master.components.started')
# We are the leader and the auto_storage_backend is not set meaning
# this is the first time we connect to etcd.
auto_storage_backend = leader_get('auto_storage_backend')
is_leader = is_state('leadership.is_leader')
if is_leader and not auto_storage_backend:
if etcd.get_version().startswith('3.'):
leader_set(auto_storage_backend='etcd3')
else:
leader_set(auto_storage_backend='etcd2')
@when('kube-control.connected')
@when('cdk-addons.configured')
def send_cluster_dns_detail(kube_control):
''' Send cluster DNS info '''
enableKubeDNS = hookenv.config('enable-kube-dns')
dnsDomain = hookenv.config('dns_domain')
dns_ip = None
if enableKubeDNS:
try:
dns_ip = get_dns_ip()
except CalledProcessError:
hookenv.log("kubedns not ready yet")
return
kube_control.set_dns(53, dnsDomain, dns_ip, enableKubeDNS)
@when('kube-control.connected')
@when('snap.installed.kubectl')
@when('leadership.is_leader')
def create_service_configs(kube_control):
"""Create the users for kubelet"""
should_restart = False
# generate the username/pass for the requesting unit
proxy_token = get_token('system:kube-proxy')
if not proxy_token:
setup_tokens(None, 'system:kube-proxy', 'kube-proxy')
proxy_token = get_token('system:kube-proxy')
should_restart = True
client_token = get_token('admin')
if not client_token:
setup_tokens(None, 'admin', 'admin', "system:masters")
client_token = get_token('admin')
should_restart = True
requests = kube_control.auth_user()
for request in requests:
username = request[1]['user']
group = request[1]['group']
kubelet_token = get_token(username)
if not kubelet_token and username and group:
# Usernames have to be in the form of system:node:<nodeName>
userid = "kubelet-{}".format(request[0].split('/')[1])
setup_tokens(None, username, userid, group)
kubelet_token = get_token(username)
kube_control.sign_auth_request(request[0], username,
kubelet_token, proxy_token,
client_token)
should_restart = True
if should_restart:
host.service_restart('snap.kube-apiserver.daemon')
remove_state('authentication.setup')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator master is waiting for a relation to workers.
If deploying via bundle this won't happen, but if operator is upgrading a
a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set('blocked', 'Waiting for workers.')
@when('kube-api-endpoint.available')
def push_service_data(kube_api):
''' Send configuration to the load balancer, and close access to the
public interface '''
kube_api.configure(port=6443)
def get_ingress_address(relation):
try:
network_info = hookenv.network_get(relation.relation_name)
except NotImplementedError:
network_info = []
if network_info and 'ingress-addresses' in network_info:
# just grab the first one for now; maybe we should be more robust here?
return network_info['ingress-addresses'][0]
else:
# if they don't have ingress-addresses they are running a juju that
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
@when('certificates.available', 'kube-api-endpoint.available')
def send_data(tls, kube_api_endpoint):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Get the SDN gateway based on the cidr address.
kubernetes_service_ip = get_kubernetes_service_ip()
# Get ingress address
ingress_ip = get_ingress_address(kube_api_endpoint)
domain = hookenv.config('dns_domain')
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
ingress_ip,
socket.gethostname(),
kubernetes_service_ip,
'kubernetes',
'kubernetes.{0}'.format(domain),
'kubernetes.default',
'kubernetes.default.svc',
'kubernetes.default.svc.{0}'.format(domain)
]
# maybe they have extra names they want as SANs
extra_sans = hookenv.config('extra_sans')
if extra_sans and not extra_sans == "":
sans.extend(extra_sans.split())
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('config.changed.extra_sans', 'certificates.available',
'kube-api-endpoint.available')
def update_certificate(tls, kube_api_endpoint):
# Using the config.changed.extra_sans flag to catch changes.
# IP changes will take ~5 minutes or so to propagate, but
# it will update.
send_data(tls, kube_api_endpoint)
@when('certificates.server.cert.available',
'kubernetes-master.components.started',
'tls_client.server.certificate.written')
def kick_api_server(tls):
# This needs to be idempotent, and we don't want to kick the api server
# without need.
if data_changed('cert', tls.get_server_cert()):
# certificate changed, so restart the api server
hookenv.log("Certificate information changed, restarting api server")
restart_apiserver()
tls_client.reset_certificate_write_flag('server')
@when('kubernetes-master.components.started')
def configure_cdk_addons():
''' Configure CDK addons '''
remove_state('cdk-addons.configured')
dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower()
dnsEnabled = str(hookenv.config('enable-kube-dns')).lower()
args = [
'arch=' + arch(),
'dns-ip=' + get_deprecated_dns_ip(),
'dns-domain=' + hookenv.config('dns_domain'),
'enable-dashboard=' + dbEnabled,
'enable-kube-dns=' + dnsEnabled
]
check_call(['snap', 'set', 'cdk-addons'] + args)
if not addons_ready():
hookenv.status_set('waiting', 'Waiting to retry addon deployment')
remove_state('cdk-addons.configured')
return
set_state('cdk-addons.configured')
@retry(times=3, delay_secs=20)
def addons_ready():
"""
Test if the addons got installed
Returns: True if the addons got applied
"""
try:
check_call(['cdk-addons.apply'])
return True
except CalledProcessError:
hookenv.log("Addons are not ready yet.")
return False
@when('loadbalancer.available', 'certificates.ca.available',
'certificates.client.cert.available', 'authentication.setup')
def loadbalancer_kubeconfig(loadbalancer, ca, client):
# Get the potential list of loadbalancers from the relation object.
hosts = loadbalancer.get_addresses_ports()
# Get the public address of loadbalancers so users can access the cluster.
address = hosts[0].get('public-address')
# Get the port of the loadbalancer so users can access the cluster.
port = hosts[0].get('port')
server = 'https://{0}:{1}'.format(address, port)
build_kubeconfig(server)
@when('certificates.ca.available', 'certificates.client.cert.available',
'authentication.setup')
@when_not('loadbalancer.available')
def create_self_config(ca, client):
'''Create a kubernetes configuration for the master unit.'''
server = 'https://{0}:{1}'.format(hookenv.unit_get('public-address'), 6443)
build_kubeconfig(server)
@when('ceph-storage.available')
def ceph_state_control(ceph_admin):
''' Determine if we should remove the state that controls the re-render
and execution of the ceph-relation-changed event. If there are changes
in the relation data, we should re-render any configs, keys, and/or
service prerequisites '''
ceph_relation_data = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'hostname': socket.gethostname(),
'key': ceph_admin.key()
}
# Re-execute the rendering if the data has changed.
if data_changed('ceph-config', ceph_relation_data):
remove_state('ceph-storage.configured')
@when('ceph-storage.available')
@when_not('ceph-storage.configured')
def ceph_storage(ceph_admin):
'''Ceph on kubernetes will require a few things - namely a ceph
configuration, and the ceph secret key file used for authentication.
This method will install the client package, and render the requisite files
in order to consume the ceph-storage relation.'''
ceph_context = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'use_syslog': "true",
'ceph_public_network': '',
'ceph_cluster_network': '',
'loglevel': 1,
'hostname': socket.gethostname(),
}
# Install the ceph common utilities.
apt_install(['ceph-common'], fatal=True)
etc_ceph_directory = '/etc/ceph'
if not os.path.isdir(etc_ceph_directory):
os.makedirs(etc_ceph_directory)
charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
# Render the ceph configuration from the ceph conf template
render('ceph.conf', charm_ceph_conf, ceph_context)
# The key can rotate independently of other ceph config, so validate it
admin_key = os.path.join(etc_ceph_directory,
'ceph.client.admin.keyring')
try:
with open(admin_key, 'w') as key_file:
key_file.write("[client.admin]\n\tkey = {}\n".format(
ceph_admin.key()))
except IOError as err:
hookenv.log("IOError writing admin.keyring: {}".format(err))
# Enlist the ceph-admin key as a kubernetes secret
if ceph_admin.key():
encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
else:
# We didn't have a key, and cannot proceed. Do not set state and
# allow this method to re-execute
return
context = {'secret': encoded_key.decode('ascii')}
render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
try:
# At first glance this is deceptive. The apply stanza will create if
# it doesn't exist, otherwise it will update the entry, ensuring our
# ceph-secret is always reflective of what we have in /etc/ceph
# assuming we have invoked this anytime that file would change.
cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
check_call(cmd)
os.remove('/tmp/ceph-secret.yaml')
except: # NOQA
# the enlistment in kubernetes failed, return and prepare for re-exec
return
# when complete, set a state relating to configuration of the storage
# backend that will allow other modules to hook into this and verify we
# have performed the necessary pre-req steps to interface with a ceph
# deployment.
set_state('ceph-storage.configured')
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('config.changed.authorization-mode',
'kubernetes-master.components.started')
def switch_auth_mode():
config = hookenv.config()
mode = config.get('authorization-mode')
if data_changed('auth-mode', mode):
remove_state('kubernetes-master.components.started')
@when('kubernetes-master.components.started')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def is_privileged():
"""Return boolean indicating whether or not to set allow-privileged=true.
"""
privileged = hookenv.config('allow-privileged').lower()
if privileged == 'auto':
return is_state('kubernetes-master.gpu.enabled')
else:
return privileged == 'true'
@when('config.changed.allow-privileged')
@when('kubernetes-master.components.started')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
remove_state('kubernetes-master.components.started')
remove_state('config.changed.allow-privileged')
@when('config.changed.api-extra-args')
@when('kubernetes-master.components.started')
@when('leadership.set.auto_storage_backend')
@when('etcd.available')
def on_config_api_extra_args_change(etcd):
configure_apiserver(etcd.get_connection_string(),
getStorageBackend())
@when('config.changed.controller-manager-extra-args')
@when('kubernetes-master.components.started')
def on_config_controller_manager_extra_args_change():
configure_controller_manager()
@when('config.changed.scheduler-extra-args')
@when('kubernetes-master.components.started')
def on_config_scheduler_extra_args_change():
configure_scheduler()
@when('kube-control.gpu.available')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.gpu.enabled')
def on_gpu_available(kube_control):
"""The remote side (kubernetes-worker) is gpu-enabled.
We need to run in privileged mode.
"""
config = hookenv.config()
if config['allow-privileged'].lower() == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
remove_state('kubernetes-master.components.started')
set_state('kubernetes-master.gpu.enabled')
@when('kubernetes-master.gpu.enabled')
@when_not('kubernetes-master.privileged')
def disable_gpu_mode():
"""We were in gpu mode, but the operator has set allow-privileged="false",
so we can't run in gpu mode anymore.
"""
remove_state('kubernetes-master.gpu.enabled')
@hook('stop')
def shutdown():
""" Stop the kubernetes master services
"""
service_stop('snap.kube-apiserver.daemon')
service_stop('snap.kube-controller-manager.daemon')
service_stop('snap.kube-scheduler.daemon')
def restart_apiserver():
prev_state, prev_msg = hookenv.status_get()
hookenv.status_set('maintenance', 'Restarting kube-apiserver')
host.service_restart('snap.kube-apiserver.daemon')
hookenv.status_set(prev_state, prev_msg)
def restart_controller_manager():
prev_state, prev_msg = hookenv.status_get()
hookenv.status_set('maintenance', 'Restarting kube-controller-manager')
host.service_restart('snap.kube-controller-manager.daemon')
hookenv.status_set(prev_state, prev_msg)
def restart_scheduler():
prev_state, prev_msg = hookenv.status_get()
hookenv.status_set('maintenance', 'Restarting kube-scheduler')
host.service_restart('snap.kube-scheduler.daemon')
hookenv.status_set(prev_state, prev_msg)
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def build_kubeconfig(server):
'''Gather the relevant data for Kubernetes configuration objects and create
a config object with that information.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
ca_exists = ca and os.path.isfile(ca)
client_pass = get_password('basic_auth.csv', 'admin')
# Do we have everything we need?
if ca_exists and client_pass:
# Create an absolute path for the kubeconfig file.
kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
# Create the kubeconfig on this system so users can access the cluster.
create_kubeconfig(kubeconfig_path, server, ca,
user='admin', password=client_pass)
# Make the config file readable by the ubuntu users so juju scp works.
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
check_call(cmd)
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
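# Illustrative sketch (hypothetical paths and values, added for clarity):
# a password-based call such as
#   create_kubeconfig('/home/ubuntu/config', 'https://10.0.0.1:6443',
#                     '/root/cdk/ca.crt', user='admin', password='s3cretpw')
# would roughly run, in order:
#   kubectl config --kubeconfig=/home/ubuntu/config set-cluster juju-cluster \
#       --server=https://10.0.0.1:6443 --certificate-authority=/root/cdk/ca.crt --embed-certs=true
#   kubectl config --kubeconfig=/home/ubuntu/config unset users
#   kubectl config --kubeconfig=/home/ubuntu/config set-credentials admin --username=admin --password=s3cretpw
#   kubectl config --kubeconfig=/home/ubuntu/config set-context juju-context --cluster=juju-cluster --user=admin
#   kubectl config --kubeconfig=/home/ubuntu/config use-context juju-context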
def get_dns_ip():
cmd = "kubectl get service --namespace kube-system kube-dns --output json"
output = check_output(cmd, shell=True).decode()
svc = json.loads(output)
return svc['spec']['clusterIP']
def get_deprecated_dns_ip():
'''We previously hardcoded the dns ip. This function returns the old
hardcoded value for use with older versions of cdk_addons.'''
interface = ipaddress.IPv4Interface(service_cidr())
ip = interface.network.network_address + 10
return ip.exploded
def get_kubernetes_service_ip():
'''Get the IP address for the kubernetes service based on the cidr.'''
interface = ipaddress.IPv4Interface(service_cidr())
# Add .1 at the end of the network
ip = interface.network.network_address + 1
return ip.exploded
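# Illustrative example (hypothetical CIDR, added for clarity): if
# service_cidr() returned '10.152.183.0/24', get_kubernetes_service_ip()
# would yield '10.152.183.1' and get_deprecated_dns_ip() '10.152.183.10'.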
def handle_etcd_relation(reldata):
''' Save the client credentials and set appropriate daemon flags when
etcd declares itself as available'''
# Define where the etcd tls files will be kept.
etcd_dir = '/root/cdk/etcd'
# Create paths to the etcd client ca, key, and cert file locations.
ca = os.path.join(etcd_dir, 'client-ca.pem')
key = os.path.join(etcd_dir, 'client-key.pem')
cert = os.path.join(etcd_dir, 'client-cert.pem')
# Save the client credentials (in relation data) to the paths provided.
reldata.save_client_credentials(key, cert, ca)
def parse_extra_args(config_key):
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
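# Illustrative example (hypothetical config value, added for clarity): if the
# charm config held 'api-extra-args' = 'v=4 profiling', then
# parse_extra_args('api-extra-args') would return
# {'v': '4', 'profiling': 'true'} -- bare tokens become the string 'true'.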
def configure_kubernetes_service(service, base_args, extra_args_key):
db = unitdata.kv()
prev_args_key = 'kubernetes-master.prev_args.' + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
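# Illustrative sketch (hypothetical values, added for clarity): with
# base_args {'v': '2'}, extra args 'feature-gates=Foo=true', and a previous
# run that had set 'profiling', the resulting call is roughly
#   snap set kube-scheduler profiling=null v=2 feature-gates=Foo=true
# (argument order follows dict insertion order and may vary); 'null' clears
# options that are no longer requested.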
def configure_apiserver(etcd_connection_string, leader_etcd_version):
api_opts = {}
# Get the tls paths from the layer data.
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
client_cert_path = layer_options.get('client_certificate_path')
client_key_path = layer_options.get('client_key_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
if is_privileged():
api_opts['allow-privileged'] = 'true'
set_state('kubernetes-master.privileged')
else:
api_opts['allow-privileged'] = 'false'
remove_state('kubernetes-master.privileged')
# Handle static options for now
api_opts['service-cluster-ip-range'] = service_cidr()
api_opts['min-request-timeout'] = '300'
api_opts['v'] = '4'
api_opts['tls-cert-file'] = server_cert_path
api_opts['tls-private-key-file'] = server_key_path
api_opts['kubelet-certificate-authority'] = ca_cert_path
api_opts['kubelet-client-certificate'] = client_cert_path
api_opts['kubelet-client-key'] = client_key_path
api_opts['logtostderr'] = 'true'
api_opts['insecure-bind-address'] = '127.0.0.1'
api_opts['insecure-port'] = '8080'
api_opts['storage-backend'] = leader_etcd_version
api_opts['basic-auth-file'] = '/root/cdk/basic_auth.csv'
api_opts['token-auth-file'] = '/root/cdk/known_tokens.csv'
api_opts['service-account-key-file'] = '/root/cdk/serviceaccount.key'
api_opts['kubelet-preferred-address-types'] = \
'[InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP]'
etcd_dir = '/root/cdk/etcd'
etcd_ca = os.path.join(etcd_dir, 'client-ca.pem')
etcd_key = os.path.join(etcd_dir, 'client-key.pem')
etcd_cert = os.path.join(etcd_dir, 'client-cert.pem')
api_opts['etcd-cafile'] = etcd_ca
api_opts['etcd-keyfile'] = etcd_key
api_opts['etcd-certfile'] = etcd_cert
api_opts['etcd-servers'] = etcd_connection_string
admission_control = [
'Initializers',
'NamespaceLifecycle',
'LimitRanger',
'ServiceAccount',
'ResourceQuota',
'DefaultTolerationSeconds'
]
auth_mode = hookenv.config('authorization-mode')
if 'Node' in auth_mode:
admission_control.append('NodeRestriction')
api_opts['authorization-mode'] = auth_mode
if get_version('kube-apiserver') < (1, 6):
hookenv.log('Removing DefaultTolerationSeconds from admission-control')
admission_control.remove('DefaultTolerationSeconds')
if get_version('kube-apiserver') < (1, 7):
hookenv.log('Removing Initializers from admission-control')
admission_control.remove('Initializers')
api_opts['admission-control'] = ','.join(admission_control)
configure_kubernetes_service('kube-apiserver', api_opts, 'api-extra-args')
restart_apiserver()
def configure_controller_manager():
controller_opts = {}
# Get the tls paths from the layer data.
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
# Default to 3 minute resync. TODO: Make this configurable?
controller_opts['min-resync-period'] = '3m'
controller_opts['v'] = '2'
controller_opts['root-ca-file'] = ca_cert_path
controller_opts['logtostderr'] = 'true'
controller_opts['master'] = 'http://127.0.0.1:8080'
controller_opts['service-account-private-key-file'] = \
'/root/cdk/serviceaccount.key'
configure_kubernetes_service('kube-controller-manager', controller_opts,
'controller-manager-extra-args')
restart_controller_manager()
def configure_scheduler():
scheduler_opts = {}
scheduler_opts['v'] = '2'
scheduler_opts['logtostderr'] = 'true'
scheduler_opts['master'] = 'http://127.0.0.1:8080'
configure_kubernetes_service('kube-scheduler', scheduler_opts,
'scheduler-extra-args')
restart_scheduler()
def setup_basic_auth(password=None, username='admin', uid='admin',
groups=None):
'''Create the htaccess-style basic auth file with the given credentials.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
htaccess = os.path.join(root_cdk, 'basic_auth.csv')
if not password:
password = token_generator()
with open(htaccess, 'w') as stream:
if groups:
stream.write('{0},{1},{2},"{3}"'.format(password,
username, uid, groups))
else:
stream.write('{0},{1},{2}'.format(password, username, uid))
def setup_tokens(token, username, user, groups=None):
'''Create a token file for kubernetes authentication.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
known_tokens = os.path.join(root_cdk, 'known_tokens.csv')
if not token:
token = token_generator()
with open(known_tokens, 'a') as stream:
if groups:
stream.write('{0},{1},{2},"{3}"\n'.format(token,
username,
user,
groups))
else:
stream.write('{0},{1},{2}\n'.format(token, username, user))
def get_password(csv_fname, user):
'''Get the password of user within the csv file provided.'''
root_cdk = '/root/cdk'
tokens_fname = os.path.join(root_cdk, csv_fname)
if not os.path.isfile(tokens_fname):
return None
with open(tokens_fname, 'r') as stream:
for line in stream:
record = line.split(',')
if record[1] == user:
return record[0]
return None
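# Illustrative example (hypothetical token, added for clarity): if
# /root/cdk/basic_auth.csv contained the line
#   s3cretpw,admin,admin,"system:masters"
# then get_password('basic_auth.csv', 'admin') would return 's3cretpw'.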
def get_token(username):
"""Grab a token from the static file if present. """
return get_password('known_tokens.csv', username)
def set_token(password, save_salt):
''' Store a token so it can be recalled later by token_generator.
param: password - the password to be stored
param: save_salt - the key to store the value of the token.'''
db = unitdata.kv()
db.set(save_salt, password)
return db.get(save_salt)
def token_generator(length=32):
''' Generate a random token for use in passwords and account tokens.
param: length - the length of the token to generate'''
alpha = string.ascii_letters + string.digits
token = ''.join(random.SystemRandom().choice(alpha) for _ in range(length))
return token
@retry(times=3, delay_secs=10)
def all_kube_system_pods_running():
''' Check pod status in the kube-system namespace. Returns True if all
pods are running, False otherwise. '''
cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']
try:
output = check_output(cmd).decode('utf-8')
except CalledProcessError:
hookenv.log('failed to get kube-system pod status')
return False
result = json.loads(output)
for pod in result['items']:
status = pod['status']['phase']
# Evicted pods should re-spawn
if status != 'Running' and \
pod['status'].get('reason', '') != 'Evicted':
return False
return True
def apiserverVersion():
cmd = 'kube-apiserver --version'.split()
version_string = check_output(cmd).decode('utf-8')
return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
def touch(fname):
try:
os.utime(fname, None)
except OSError:
open(fname, 'a').close()
def getStorageBackend():
storage_backend = hookenv.config('storage-backend')
if storage_backend == 'auto':
storage_backend = leader_get('auto_storage_backend')
return storage_backend
|
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Mimic TTS, a local TTS backend.
This backend uses the mimic executable to render text into speech.
"""
import os
import os.path
from os.path import exists, join, expanduser
import stat
import subprocess
from threading import Thread
from time import sleep
from mycroft import MYCROFT_ROOT_PATH
from mycroft.api import DeviceApi
from mycroft.configuration import Configuration
from mycroft.util.download import download
from mycroft.util.log import LOG
from .tts import TTS, TTSValidator
def get_mimic_binary():
"""Find the mimic binary, either from config or from PATH.
Returns:
(str) path of mimic executable
"""
config = Configuration.get().get("tts", {}).get("mimic")
bin_ = config.get("path",
os.path.join(MYCROFT_ROOT_PATH, 'mimic', 'bin', 'mimic'))
if not os.path.isfile(bin_):
# Search for mimic on the path
import distutils.spawn
bin_ = distutils.spawn.find_executable("mimic")
return bin_
def get_subscriber_voices():
"""Get dict of mimic voices exclusive to subscribers.
Returns:
(dict) map of voices to custom Mimic executables.
"""
data_dir = expanduser(Configuration.get()['data_dir'])
return {'trinity': join(data_dir, 'voices/mimic_tn')}
def download_subscriber_voices(selected_voice):
"""Function to download all premium voices.
The function starts with the currently selected if applicable
"""
subscriber_voices = get_subscriber_voices()
def make_executable(dest):
"""Call back function to make the downloaded file executable."""
LOG.info('Making the downloaded voice binary executable')
# make executable
file_stat = os.stat(dest)
os.chmod(dest, file_stat.st_mode | stat.S_IEXEC)
# First download the selected voice if needed
voice_file = subscriber_voices.get(selected_voice)
if voice_file is not None and not exists(voice_file):
LOG.info('Voice doesn\'t exist, downloading')
url = DeviceApi().get_subscriber_voice_url(selected_voice)
# Check we got an url
if url:
dl_status = download(url, voice_file, make_executable)
# Wait for completion
while not dl_status.done:
sleep(1)
else:
LOG.debug('{} is not available for this architecture'
.format(selected_voice))
# Download the rest of the subscriber voices as needed
for voice in subscriber_voices:
voice_file = subscriber_voices[voice]
if not exists(voice_file):
url = DeviceApi().get_subscriber_voice_url(voice)
# Check we got an url
if url:
dl_status = download(url, voice_file, make_executable)
# Wait for completion
while not dl_status.done:
sleep(1)
else:
LOG.debug('{} is not available for this architecture'
.format(voice))
def parse_phonemes(phonemes):
"""Parse mimic phoneme string into a list of phone, duration pairs.
Args:
phonemes (bytes): phoneme output from mimic
Returns:
(list) list of phoneme duration pairs
"""
phon_str = phonemes.decode()
pairs = phon_str.split(' ')
return [pair.split(':') for pair in pairs if ':' in pair]
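# Illustrative example (hypothetical mimic output, added for clarity):
#
#     >>> parse_phonemes(b'pau:0.153 hh:0.062 ax:0.109')
#     [['pau', '0.153'], ['hh', '0.062'], ['ax', '0.109']]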
class Mimic(TTS):
"""TTS interface for local mimic v1."""
def __init__(self, lang, config):
super(Mimic, self).__init__(
lang, config, MimicValidator(self), 'wav',
ssml_tags=["speak", "ssml", "phoneme", "voice", "audio", "prosody"]
)
self.default_binary = get_mimic_binary()
self.clear_cache()
# Download subscriber voices if needed
self.subscriber_voices = get_subscriber_voices()
self.is_subscriber = DeviceApi().is_subscriber
if self.is_subscriber:
trd = Thread(target=download_subscriber_voices, args=[self.voice])
trd.daemon = True
trd.start()
def modify_tag(self, tag):
"""Modify the SSML to suite Mimic."""
ssml_conversions = {
'x-slow': '0.4',
'slow': '0.7',
'medium': '1.0',
'high': '1.3',
'x-high': '1.6',
'speed': 'rate'
}
for key, value in ssml_conversions.items():
tag = tag.replace(key, value)
return tag
@property
def args(self):
"""Build mimic arguments."""
subscriber_voices = self.subscriber_voices
if (self.voice in subscriber_voices and
exists(subscriber_voices[self.voice]) and self.is_subscriber):
# Use subscriber voice
mimic_bin = subscriber_voices[self.voice]
voice = self.voice
elif self.voice in subscriber_voices:
# Premium voice, but the binary doesn't exist yet; use 'ap' while downloading
mimic_bin = self.default_binary
voice = 'ap'
else:
# Normal case use normal binary and selected voice
mimic_bin = self.default_binary
voice = self.voice
args = [mimic_bin, '-voice', voice, '-psdur', '-ssml']
stretch = self.config.get('duration_stretch', None)
if stretch:
args += ['--setf', 'duration_stretch={}'.format(stretch)]
return args
def get_tts(self, sentence, wav_file):
"""Generate WAV and phonemes.
Args:
sentence (str): sentence to generate audio for
wav_file (str): output file
Returns:
tuple ((str) file location, (str) generated phonemes)
"""
phonemes = subprocess.check_output(self.args + ['-o', wav_file,
'-t', sentence])
return wav_file, parse_phonemes(phonemes)
def viseme(self, phoneme_pairs):
"""Convert phoneme string to visemes.
Args:
phoneme_pairs (list): Phoneme output from mimic
Returns:
(list) list of tuples of viseme and duration
"""
visemes = []
for phon, dur in phoneme_pairs:
visemes.append((VISIMES.get(phon, '4'), float(dur)))
return visemes
class MimicValidator(TTSValidator):
"""Validator class checking that Mimic can be used."""
def validate_lang(self):
"""Verify that the language is supported."""
# TODO: Verify version of mimic can handle the requested language
def validate_connection(self):
"""Check that Mimic executable is found and works."""
mimic_bin = get_mimic_binary()
try:
subprocess.call([mimic_bin, '--version'])
except Exception as err:
if mimic_bin:
LOG.error('Failed to find mimic at: {}'.format(mimic_bin))
else:
LOG.error('Mimic executable not found')
raise Exception(
'Mimic was not found. Run install-mimic.sh to install it.') \
from err
def get_tts_class(self):
"""Return the TTS class associated with the validator."""
return Mimic
# Mapping based on Jeffers phoneme to viseme map, seen in table 1 from:
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.221.6377&rep=rep1&type=pdf
#
# Mycroft unit visemes based on images found at:
# http://www.web3.lu/wp-content/uploads/2014/09/visemes.jpg
#
# Mapping was created partially based on the "12 mouth shapes" visuals seen at:
# https://wolfpaulus.com/journal/software/lipsynchronization/
VISIMES = {
# /A group
'v': '5',
'f': '5',
# /B group
'uh': '2',
'w': '2',
'uw': '2',
'er': '2',
'r': '2',
'ow': '2',
# /C group
'b': '4',
'p': '4',
'm': '4',
# /D group
'aw': '1',
# /E group
'th': '3',
'dh': '3',
# /F group
'zh': '3',
'ch': '3',
'sh': '3',
'jh': '3',
# /G group
'oy': '6',
'ao': '6',
# /H group
'z': '3',
's': '3',
# /I group
'ae': '0',
'eh': '0',
'ey': '0',
'ah': '0',
'ih': '0',
'y': '0',
'iy': '0',
'aa': '0',
'ay': '0',
'ax': '0',
'hh': '0',
# /J group
'n': '3',
't': '3',
'd': '3',
'l': '3',
# /K group
'g': '3',
'ng': '3',
'k': '3',
# blank mouth
'pau': '4',
}
|
|
import os
import sys
from ._compat import _default_text_stderr
from ._compat import _default_text_stdout
from ._compat import _find_binary_writer
from ._compat import auto_wrap_for_ansi
from ._compat import binary_streams
from ._compat import filename_to_ui
from ._compat import get_filesystem_encoding
from ._compat import get_strerror
from ._compat import is_bytes
from ._compat import open_stream
from ._compat import should_strip_ansi
from ._compat import strip_ansi
from ._compat import text_streams
from ._compat import WIN
from .globals import resolve_color_default
echo_native_types = (str, bytes, bytearray)
def _posixify(name):
return "-".join(name.split()).lower()
def safecall(func):
"""Wraps a function so that it swallows exceptions."""
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
pass
return wrapper
def make_str(value):
"""Converts a value into a valid string."""
if isinstance(value, bytes):
try:
return value.decode(get_filesystem_encoding())
except UnicodeError:
return value.decode("utf-8", "replace")
return str(value)
def make_default_short_help(help, max_length=45):
"""Return a condensed version of help string."""
words = help.split()
total_length = 0
result = []
done = False
for word in words:
if word[-1:] == ".":
done = True
new_length = 1 + len(word) if result else len(word)
if total_length + new_length > max_length:
result.append("...")
done = True
else:
if result:
result.append(" ")
result.append(word)
if done:
break
total_length += new_length
return "".join(result)
class LazyFile:
"""A lazy file works like a regular file but it does not fully open
the file but it does perform some basic checks early to see if the
filename parameter does make sense. This is useful for safely opening
files for writing.
"""
def __init__(
self, filename, mode="r", encoding=None, errors="strict", atomic=False
):
self.name = filename
self.mode = mode
self.encoding = encoding
self.errors = errors
self.atomic = atomic
if filename == "-":
self._f, self.should_close = open_stream(filename, mode, encoding, errors)
else:
if "r" in mode:
# Open and close the file in case we're opening it for
# reading so that we can catch at least some errors in
# some cases early.
open(filename, mode).close()
self._f = None
self.should_close = True
def __getattr__(self, name):
return getattr(self.open(), name)
def __repr__(self):
if self._f is not None:
return repr(self._f)
return f"<unopened file '{self.name}' {self.mode}>"
def open(self):
"""Opens the file if it's not yet open. This call might fail with
a :exc:`FileError`. Not handling this error will produce an error
that Click shows.
"""
if self._f is not None:
return self._f
try:
rv, self.should_close = open_stream(
self.name, self.mode, self.encoding, self.errors, atomic=self.atomic
)
except OSError as e: # noqa: E402
from .exceptions import FileError
raise FileError(self.name, hint=get_strerror(e))
self._f = rv
return rv
def close(self):
"""Closes the underlying file, no matter what."""
if self._f is not None:
self._f.close()
def close_intelligently(self):
"""This function only closes the file if it was opened by the lazy
file wrapper. For instance this will never close stdin.
"""
if self.should_close:
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close_intelligently()
def __iter__(self):
self.open()
return iter(self._f)
class KeepOpenFile:
def __init__(self, file):
self._file = file
def __getattr__(self, name):
return getattr(self._file, name)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
pass
def __repr__(self):
return repr(self._file)
def __iter__(self):
return iter(self._file)
def echo(message=None, file=None, nl=True, err=False, color=None):
"""Prints a message plus a newline to the given file or stdout. On
first sight, this looks like the print function, but it has improved
support for handling Unicode and binary data that does not fail no
matter how badly configured the system is.
Primarily it means that you can print binary data as well as Unicode
data on both 2.x and 3.x to the given file in the most appropriate way
possible. This is a very carefree function in that it will try its
best to not fail. As of Click 6.0 this includes support for unicode
output on the Windows console.
In addition to that, if `colorama`_ is installed, the echo function will
also support clever handling of ANSI codes. Essentially it will then
do the following:
- add transparent handling of ANSI color codes on Windows.
- hide ANSI codes automatically if the destination file is not a
terminal.
.. _colorama: https://pypi.org/project/colorama/
.. versionchanged:: 6.0
As of Click 6.0 the echo function will properly support unicode
output on the Windows console. Note that click does not modify
the interpreter in any way, which means that `sys.stdout` or the
print statement or function will still not provide unicode support.
.. versionchanged:: 2.0
Starting with version 2.0 of Click, the echo function will work
with colorama if it's installed.
.. versionadded:: 3.0
The `err` parameter was added.
.. versionchanged:: 4.0
Added the `color` flag.
:param message: the message to print
:param file: the file to write to (defaults to ``stdout``)
:param err: if set to true the file defaults to ``stderr`` instead of
``stdout``. This is faster and easier than calling
:func:`get_text_stderr` yourself.
:param nl: if set to `True` (the default) a newline is printed afterwards.
:param color: controls if the terminal supports ANSI colors or not. The
default is autodetection.
"""
if file is None:
if err:
file = _default_text_stderr()
else:
file = _default_text_stdout()
# Convert non bytes/text into the native string type.
if message is not None and not isinstance(message, echo_native_types):
message = str(message)
if nl:
message = message or ""
if isinstance(message, str):
message += "\n"
else:
message += b"\n"
# If there is a message and the value looks like bytes, we manually
# need to find the binary stream and write the message in there.
# This is done separately so that most stream types will work as you
# would expect. Eg: you can write to StringIO for other cases.
if message and is_bytes(message):
binary_file = _find_binary_writer(file)
if binary_file is not None:
file.flush()
binary_file.write(message)
binary_file.flush()
return
# ANSI-style support. If there is no message or we are dealing with
# bytes, nothing happens. If we are connected to a file we want
# to strip colors. If we are on windows we either wrap the stream
# to strip the color or we use the colorama support to translate the
# ansi codes to API calls.
if message and not is_bytes(message):
color = resolve_color_default(color)
if should_strip_ansi(file, color):
message = strip_ansi(message)
elif WIN:
if auto_wrap_for_ansi is not None:
file = auto_wrap_for_ansi(file)
elif not color:
message = strip_ansi(message)
if message:
file.write(message)
file.flush()
def get_binary_stream(name):
"""Returns a system stream for byte processing.
:param name: the name of the stream to open. Valid names are ``'stdin'``,
``'stdout'`` and ``'stderr'``
"""
opener = binary_streams.get(name)
if opener is None:
raise TypeError(f"Unknown standard stream '{name}'")
return opener()
def get_text_stream(name, encoding=None, errors="strict"):
"""Returns a system stream for text processing. This usually returns
a wrapped stream around a binary stream returned from
:func:`get_binary_stream` but it also can take shortcuts for already
correctly configured streams.
:param name: the name of the stream to open. Valid names are ``'stdin'``,
``'stdout'`` and ``'stderr'``
:param encoding: overrides the detected default encoding.
:param errors: overrides the default error mode.
"""
opener = text_streams.get(name)
if opener is None:
raise TypeError(f"Unknown standard stream '{name}'")
return opener(encoding, errors)
def open_file(
filename, mode="r", encoding=None, errors="strict", lazy=False, atomic=False
):
"""This is similar to how the :class:`File` works but for manual
usage. Files are opened non-lazily by default. This can open regular
files as well as stdin/stdout if ``'-'`` is passed.
If stdin/stdout is returned the stream is wrapped so that the context
manager will not close the stream accidentally. This makes it possible
to always use the function like this without having to worry to
accidentally close a standard stream::
with open_file(filename) as f:
...
.. versionadded:: 3.0
:param filename: the name of the file to open (or ``'-'`` for stdin/stdout).
:param mode: the mode in which to open the file.
:param encoding: the encoding to use.
:param errors: the error handling for this file.
:param lazy: can be flipped to true to open the file lazily.
:param atomic: in atomic mode writes go into a temporary file and it's
moved on close.
"""
if lazy:
return LazyFile(filename, mode, encoding, errors, atomic=atomic)
f, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic)
if not should_close:
f = KeepOpenFile(f)
return f
def get_os_args():
"""Returns the argument part of ``sys.argv``, removing the first
value which is the name of the script.
.. deprecated:: 8.0
Will be removed in 8.1. Access ``sys.argv[1:]`` directly
instead.
"""
import warnings
warnings.warn(
"'get_os_args' is deprecated and will be removed in 8.1. Access"
" 'sys.argv[1:]' directly instead.",
DeprecationWarning,
stacklevel=2,
)
return sys.argv[1:]
def format_filename(filename, shorten=False):
"""Formats a filename for user display. The main purpose of this
function is to ensure that the filename can be displayed at all. This
will decode the filename to unicode if necessary in a way that it will
not fail. Optionally, it can shorten the filename to not include the
full path to the filename.
:param filename: formats a filename for UI display. This will also convert
the filename into unicode without failing.
:param shorten: this optionally shortens the filename to strip off the
path that leads up to it.
"""
if shorten:
filename = os.path.basename(filename)
return filename_to_ui(filename)
def get_app_dir(app_name, roaming=True, force_posix=False):
r"""Returns the config folder for the application. The default behavior
is to return whatever is most appropriate for the operating system.
To give you an idea, for an app called ``"Foo Bar"``, something like
the following folders could be returned:
Mac OS X:
``~/Library/Application Support/Foo Bar``
Mac OS X (POSIX):
``~/.foo-bar``
Unix:
``~/.config/foo-bar``
Unix (POSIX):
``~/.foo-bar``
Win XP (roaming):
``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar``
Win XP (not roaming):
``C:\Documents and Settings\<user>\Application Data\Foo Bar``
Win 7 (roaming):
``C:\Users\<user>\AppData\Roaming\Foo Bar``
Win 7 (not roaming):
``C:\Users\<user>\AppData\Local\Foo Bar``
.. versionadded:: 2.0
:param app_name: the application name. This should be properly capitalized
and can contain whitespace.
:param roaming: controls if the folder should be roaming or not on Windows.
                    Has no effect otherwise.
:param force_posix: if this is set to `True` then on any POSIX system the
folder will be stored in the home folder with a leading
dot instead of the XDG config home or darwin's
application support folder.
"""
if WIN:
key = "APPDATA" if roaming else "LOCALAPPDATA"
folder = os.environ.get(key)
if folder is None:
folder = os.path.expanduser("~")
return os.path.join(folder, app_name)
if force_posix:
return os.path.join(os.path.expanduser(f"~/.{_posixify(app_name)}"))
if sys.platform == "darwin":
return os.path.join(
os.path.expanduser("~/Library/Application Support"), app_name
)
return os.path.join(
os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config")),
_posixify(app_name),
)
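# Illustrative sketch of ``get_app_dir`` for the hypothetical "Foo Bar"
# application from the docstring above; shown for clarity only.
def _app_dir_example():
    # On Linux this typically resolves under ~/.config/foo-bar, on macOS
    # under ~/Library/Application Support/Foo Bar, and on Windows under
    # the %APPDATA% folder.
    return get_app_dir("Foo Bar", roaming=True)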
class PacifyFlushWrapper:
"""This wrapper is used to catch and suppress BrokenPipeErrors resulting
from ``.flush()`` being called on broken pipe during the shutdown/final-GC
of the Python interpreter. Notably ``.flush()`` is always called on
``sys.stdout`` and ``sys.stderr``. So as to have minimal impact on any
other cleanup code, and the case where the underlying file is not a broken
pipe, all calls and attributes are proxied.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
def flush(self):
try:
self.wrapped.flush()
except OSError as e:
import errno
if e.errno != errno.EPIPE:
raise
def __getattr__(self, attr):
return getattr(self.wrapped, attr)
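# Sketch of the intended use of ``PacifyFlushWrapper`` (assumption: the caller
# only wants to silence EPIPE raised by a final flush, as described above).
def _pacify_stdout_example():
    import sys
    sys.stdout = PacifyFlushWrapper(sys.stdout)
    # Flushing a broken pipe is now swallowed; any other OSError still raises.
    sys.stdout.flush()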
|
|
#!/usr/bin/python
from time import gmtime, strftime
import subprocess
import json
time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
def getAllSizes():
locations = json.loads(subprocess.check_output(['az', 'account', 'list-locations']))
sizeMap = {}
for location in locations:
sizes = json.loads(subprocess.check_output(['az', 'vm', 'list-sizes', '-l', location['name']]))
for size in sizes:
if not size['name'] in sizeMap and not size['name'].split('_')[0] == 'Basic':
sizeMap[size['name']] = size
return sizeMap
min_cores = 2
dcos_masters_ephemeral_disk_min = 102400
def getDcosMasterMap(sizeMap):
masterMap = {}
for key in sizeMap.keys():
size = sizeMap[key]
if size['numberOfCores'] >= min_cores and \
size['resourceDiskSizeInMb'] >= dcos_masters_ephemeral_disk_min:
masterMap[size['name']] = size
return masterMap
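# Tiny worked example of the master filter above, using hypothetical size
# entries shaped like `az vm list-sizes` output; for illustration only.
def exampleDcosMasterFilter():
    sample = {
        'Hypothetical_D2': {'name': 'Hypothetical_D2', 'numberOfCores': 2,
                            'resourceDiskSizeInMb': 102400},
        'Hypothetical_A0': {'name': 'Hypothetical_A0', 'numberOfCores': 1,
                            'resourceDiskSizeInMb': 20480},
    }
    # Only the first entry meets min_cores and the ephemeral disk minimum.
    return getDcosMasterMap(sample)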
def getMasterAgentMap(sizeMap):
agentMap = {}
for key in sizeMap.keys():
size = sizeMap[key]
if size['numberOfCores'] >= min_cores:
agentMap[size['name']] = size
return agentMap
def getLocations():
locations = json.loads(subprocess.check_output(['az', 'account', 'list-locations']))
locationList = [l['name'] for l in locations]
#hard code Azure China Cloud location
locationList.append('chinanorth')
locationList.append('chinaeast')
# Adding two Canary locations
locationList.append('centraluseuap')
locationList.append('eastus2euap')
locationList = sorted(locationList)
return locationList
def getStorageAccountType(sizeName):
capability = sizeName.split('_')[1]
if 'S' in capability or 's' in capability:
return "Premium_LRS"
else:
return "Standard_LRS"
def getFileContents(dcosMasterMap, masterAgentMap, kubernetesAgentMap, sizeMap, locations):
text = r"""package acsengine
// AUTOGENERATED FILE """
text += r"""
// AzureLocations provides all azure regions in prod.
// Related powershell to refresh this list:
// Get-AzureRmLocation | Select-Object -Property Location
var AzureLocations = []string{
"""
for location in locations:
text += ' "' + location + '",' + '\n'
text += r""" "chinaeast",
"chinanorth",
"germanycentral",
"germanynortheast",
"usgovvirginia",
"usgoviowa",
"usgovarizona",
"usgovtexas",
}
// GetDCOSMasterAllowedSizes returns the master allowed sizes
func GetDCOSMasterAllowedSizes() string {
return ` "allowedValues": [
"""
dcosMasterMapKeys = sorted(dcosMasterMap.keys())
for key in dcosMasterMapKeys[:-1]:
text += ' "' + key + '",\n'
text += ' "' + dcosMasterMapKeys[-1] + '"\n'
text += r""" ],
`
}
// GetMasterAgentAllowedSizes returns the agent allowed sizes
func GetMasterAgentAllowedSizes() string {
return ` "allowedValues": [
"""
masterAgentMapKeys = sorted(masterAgentMap.keys())
for key in masterAgentMapKeys[:-1]:
text += ' "' + key + '",\n'
text += ' "' + masterAgentMapKeys[-1] + '"\n'
text += r""" ],
`
}
// GetKubernetesAgentAllowedSizes returns the allowed sizes for Kubernetes agent
func GetKubernetesAgentAllowedSizes() string {
return ` "allowedValues": [
"""
kubernetesAgentMapKeys = sorted(kubernetesAgentMap.keys())
for key in kubernetesAgentMapKeys[:-1]:
text += ' "' + key + '",\n'
text += ' "' + kubernetesAgentMapKeys[-1] + '"\n'
text += r""" ],
`
}
// GetSizeMap returns the size / storage map
func GetSizeMap() string {
return ` "vmSizesMap": {
"""
mergedMap = {}
for key in kubernetesAgentMapKeys:
size = kubernetesAgentMap[key]
if not key in mergedMap:
mergedMap[size['name']] = size
mergedMapKeys = sorted(mergedMap.keys())
for key in mergedMapKeys[:-1]:
size = mergedMap[key]
text += ' "' + size['name'] + '": {\n'
storageAccountType = getStorageAccountType(size['name'])
text += ' "storageAccountType": "' + storageAccountType + '"\n },\n'
key = mergedMapKeys[-1]
size = mergedMap[key]
text += ' "' + size['name'] + '": {\n'
storageAccountType = getStorageAccountType(size['name'])
text += ' "storageAccountType": "' + storageAccountType + '"\n }\n'
text += r""" }
`
}
// GetClassicAllowedSizes returns the classic allowed sizes
func GetClassicAllowedSizes() string {
return ` "allowedValues": [
"""
sizeMapKeys = sorted(sizeMap.keys())
for key in sizeMapKeys[:-1]:
text += ' "' + sizeMap[key]['name'] + '",\n'
key = sizeMapKeys[-1]
text += ' "' + sizeMap[key]['name'] + '"\n'
text += r""" ],
`
}
// GetClassicSizeMap returns the size / storage map
func GetClassicSizeMap() string {
return ` "vmSizesMap": {
"""
sizeMapKeys = sorted(sizeMap.keys())
for key in sizeMapKeys[:-1]:
text += ' "' + sizeMap[key]['name'] + '": {\n'
        storageAccountType = getStorageAccountType(sizeMap[key]['name'])
text += ' "storageAccountType": "' + storageAccountType + '"\n },\n'
key = sizeMapKeys[-1]
text += ' "' + sizeMap[key]['name'] + '": {\n'
    storageAccountType = getStorageAccountType(sizeMap[key]['name'])
text += ' "storageAccountType": "' + storageAccountType + '"\n }\n'
text += r""" }
`
}"""
return text
def main():
outfile = 'pkg/acsengine/azureconst.go'
allSizes = getAllSizes()
dcosMasterMap = getDcosMasterMap(allSizes)
masterAgentMap = getMasterAgentMap(allSizes)
kubernetesAgentMap = allSizes
locations = getLocations()
text = getFileContents(dcosMasterMap, masterAgentMap, kubernetesAgentMap, allSizes, locations)
with open(outfile, 'w') as f:
f.write(text)
subprocess.check_call(['gofmt', '-w', outfile])
if __name__ == '__main__':
main()
|
|
#!/usr/bin/python
#
"""
Class implementation for devices for Holiday by MooresCloud
Right now we understand the following classes of devices:
GPIO by Linux
Light by MooresCloud
Holiday by MooresCloud
EngineRoom Chippendale by MooresCloud
Hue by Philips
WeMo by Belkin
Homepage and documentation: http://dev.moorescloud.com/
Copyright (c) 2013, Mark Pesce.
License: MIT (see LICENSE for details)
"""
__author__ = 'Mark Pesce'
__version__ = '0.01-dev'
__license__ = 'MIT'
import urllib2, json, socket, os.path, os, subprocess, math, time
class GPIO:
def __init__(self, number, direction):
"""The GPIO port number is passed as number,
The direction is a boolean, where True = out, False = in"""
self.number = number
self.base = "/sys/class/gpio/gpio%s/" % number
self.create()
self.direction = direction # True = out, False = in
self.set_direction(self.direction)
#print self.base
return
def get_info(self):
resp= {}
resp['device_type'] = 'gpio'
resp['num'] = self.number
resp['direction'] = self.get_direction()
resp['value'] = self.value()
return resp
def create(self):
"""Instance the GPIO port in the OS.
If it already exists, don't do anything."""
if os.path.exists("/sys/class/gpio/gpio%s" % self.number):
return
else:
f = open("/sys/class/gpio/export", "w")
f.write("%s" % self.number)
f.close()
def get_direction(self):
dirname = self.base + "direction"
f = open(dirname, "r")
d = f.read()
d = d[:-1] # Remove line feed thingy maybe
#print "Direction is %s" % d
if (d == "in"):
return False
else:
return True
def set_direction(self, inout):
dirname = self.base + "direction"
f = open(dirname, "w")
if inout == True:
f.write("out")
else:
f.write("in")
f.close()
self.direction = inout
def on(self):
"""Raises error if direction is not out"""
if self.direction == True:
dirname = self.base + "value"
f = open(dirname, "w")
f.write("1")
f.close()
#cmd = """echo "1" > %s""" % dirname
#print cmd
#os.system(cmd)
else:
            raise RuntimeError("Invalid direction")
return
def off(self):
"""Raises error if direction is not out"""
if self.direction == True:
dirname = self.base + "value"
f = open(dirname, "w")
f.write("0")
f.close()
else:
            raise RuntimeError("Invalid direction")
return
def value(self):
dirname = self.base + "value"
fd = open(dirname, "r")
val = fd.read()
fd.close()
#print "value read is %s" % val
if val[:1] == "1":
return True
else:
return False
class Holiday:
def __init__(self, address):
self.address = address
self.numleds = 50
self.pipename = "/run/pipelights.fifo"
self.leds = [] # Array of LED values. This may actually exist elsewhere eventually.
try:
self.pipe = open(self.pipename,"w+")
except:
print "Couldn't open the pipe, there's gonna be trouble!"
ln = 0
while (ln < self.numleds):
self.leds.append([0x00, 0x00, 0x00]) # Create and clear an array of RGB LED values
ln = ln + 1
return
def get_devices(self):
l = { "device_type": "Holiday", "number": 50, "version": 0.1 }
return [ l ]
def get_led_value(self, lednum):
if lednum < self.numleds:
return self.leds[lednum]
else:
raise IndexError("Illegal LED number")
def set_led_value(self, lednum, value):
if lednum < self.numleds:
self.leds[lednum][0] = value[0]
self.leds[lednum][1] = value[1]
self.leds[lednum][2] = value[2]
self.render()
#print self.leds
return self.leds[lednum]
else:
raise IndexError("Illegal LED number")
def get_light_values(self):
return { "lights": self.leds }
def set_light_values(self, value):
ln = 0
while (ln < self.numleds):
            self.leds[ln][0] = value[0]    # copy the R, G, B channels
self.leds[ln][1] = value[1]
self.leds[ln][2] = value[2]
ln = ln + 1
self.render()
return { "lights": self.leds }
def do_setvalues(self, values):
ln = 0
while (ln < self.numleds):
            self.leds[ln][0] = values[ln][0]    # copy the R, G, B channels per LED
self.leds[ln][1] = values[ln][1]
self.leds[ln][2] = values[ln][2]
ln = ln + 1
self.render()
return { "lights": self.leds }
def gradient(self, begin, end, steps):
"""Do it the new-fashioned way"""
steps = float(steps)
base = [0.0,0.0,0.0]
base[0] = begin[0]
base[1] = begin[1]
base[2] = begin[2]
incr = [0.0,0.0,0.0]
incr[0] = float((end[0]-begin[0]) / steps)
incr[1] = float((end[1]-begin[1]) / steps)
incr[2] = float((end[2]-begin[2]) / steps)
print "r-incr %f g-incr %f b-incr %f" % (incr[0],incr[1],incr[2])
s = 0.0
gr = [0,0,0]
while (s < steps):
gr[0] = int(base[0] + (incr[0] * s))
gr[1] = int(base[1] + (incr[1] * s))
gr[2] = int(base[2] + (incr[2] * s))
self.set_light_values(gr)
s = s + 1
time.sleep(.02)
return { "value": True }
def nrl(self, data):
"""Set the NRL team colours based on the passed value"""
team_num = int(data['team'])
print "team_num %d" % team_num
if (team_num < 1) or (team_num > 16):
return { 'value': False }
try:
resp = subprocess.call(['/home/mpesce/sport/nrl', str(team_num)])
except:
return { 'value': False }
return { 'value': True }
def afl(self, data):
"""Set the NRL team colours based on the passed value"""
team_num = int(data['team'])
if (team_num < 1) or (team_num > 18):
return { 'value': False }
try:
resp = subprocess.call(['/home/mpesce/sport/afl', str(team_num)])
except:
return { 'value': False }
return { 'value': True }
def render(self):
"""Render the LED array to the Light"""
"""This version is safe because it renders to a string in memory"""
echo = ""
ln = 0
while (ln < self.numleds):
tripval = (self.leds[ln][0] * 65536) + (self.leds[ln][1] * 256) + self.leds[ln][2]
#echo = echo + "%6X" % tripval + "\\" + "\\" + "x0a" # magic pixie formatting eh?
echo = echo + "%6X\n" % tripval
ln = ln+1
#print echo
#os.system("""%s""" % echo)
self.pipe.write(echo)
self.pipe.flush()
#os.system("""%s | /srv/http/cgi-bin/setlights""" % echo)
return
    def on(self):
        return self.set_light_values([255,255,255])
    def off(self):
        return self.set_light_values([0,0,0])
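# Hedged sketch of driving a Holiday over its FIFO (assumption: the
# /run/pipelights.fifo pipe exists, as the constructor above expects).
def _holiday_gradient_example():
    h = Holiday("localhost")
    # Fade all 50 LEDs from black to warm white over 25 steps.
    return h.gradient([0, 0, 0], [255, 200, 120], 25)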
class EngineRoom:
def __init__(self, address):
self.address = address
self.numleds = 96
self.pipename = "/run/pipelights.fifo"
self.leds = [] # Array of LED values. This may actually exist elsewhere eventually.
try:
self.pipe = open(self.pipename,"w+")
except:
print "Couldn't open the pipe, there's gonna be trouble!"
ln = 0
while (ln < self.numleds):
self.leds.append([0x00, 0x00, 0x00]) # Create and clear an array of RGB LED values
ln = ln + 1
return
def get_devices(self):
l = { "device_type": "LEDs", "number": 96, "version": 4.1 }
return [ l ]
def get_led_value(self, lednum):
if lednum < self.numleds:
return self.leds[lednum]
else:
raise IndexError("Illegal LED number")
def set_led_value(self, lednum, value):
if lednum < self.numleds:
self.leds[lednum][0] = value[0]
self.leds[lednum][1] = value[1]
self.leds[lednum][2] = value[2]
self.render()
#print self.leds
return self.leds[lednum]
else:
raise IndexError("Illegal LED number")
def get_light_values(self):
return { "lights": self.leds }
def set_light_values(self, value):
ln = 0
while (ln < self.numleds):
            self.leds[ln][0] = value[0]    # copy the R, G, B channels
self.leds[ln][1] = value[1]
self.leds[ln][2] = value[2]
ln = ln + 1
self.render()
return { "lights": self.leds }
def do_setvalues(self, values):
ln = 0
while (ln < self.numleds):
            self.leds[ln][0] = values[ln][0]    # copy the R, G, B channels per LED
self.leds[ln][1] = values[ln][1]
self.leds[ln][2] = values[ln][2]
ln = ln + 1
self.render()
return { "lights": self.leds }
def gradient(self, begin, end, steps):
"""Do it the new-fashioned way"""
steps = float(steps)
base = [0.0,0.0,0.0]
base[0] = begin[0]
base[1] = begin[1]
base[2] = begin[2]
incr = [0.0,0.0,0.0]
incr[0] = float((end[0]-begin[0]) / steps)
incr[1] = float((end[1]-begin[1]) / steps)
incr[2] = float((end[2]-begin[2]) / steps)
print "r-incr %f g-incr %f b-incr %f" % (incr[0],incr[1],incr[2])
s = 0.0
gr = [0,0,0]
while (s < steps):
gr[0] = int(base[0] + (incr[0] * s))
gr[1] = int(base[1] + (incr[1] * s))
gr[2] = int(base[2] + (incr[2] * s))
self.set_light_values(gr)
s = s + 1
time.sleep(.02)
return { "value": True }
def render(self):
"""Render the LED array to the Light"""
"""This version is safe because it renders to a string in memory"""
echo = ""
ln = 0
while (ln < self.numleds):
tripval = (self.leds[ln][0] * 65536) + (self.leds[ln][1] * 256) + self.leds[ln][2]
#echo = echo + "%6X" % tripval + "\\" + "\\" + "x0a" # magic pixie formatting eh?
echo = echo + "%6X\n" % tripval
ln = ln+1
#print echo
#os.system("""%s""" % echo)
self.pipe.write(echo)
self.pipe.flush()
#os.system("""%s | /srv/http/cgi-bin/setlights""" % echo)
return
    def on(self):
        return self.set_light_values([127,127,127])
    def off(self):
        return self.set_light_values([0,0,0])
class Device:
def __init__(self, dev):
self.dev = dev
return
def on(self):
self.dev.on()
return
def off(self):
self.dev.off()
return
def value(self):
try:
val = self.dev.value()
except:
            raise AttributeError("Method does not exist")
return val
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
import unittest
from metrics.rendering_stats import UI_COMP_NAME, BEGIN_COMP_NAME, END_COMP_NAME
from metrics.rendering_stats import GetScrollInputLatencyEvents
from metrics.rendering_stats import ComputeMouseWheelScrollLatency
from metrics.rendering_stats import ComputeTouchScrollLatency
from metrics.rendering_stats import HasRenderingStats
from metrics.rendering_stats import RenderingStats
import telemetry.core.timeline.bounds as timeline_bounds
from telemetry.core.timeline import model
import telemetry.core.timeline.async_slice as tracing_async_slice
class MockTimer(object):
"""A mock timer class which can generate random durations.
An instance of this class is used as a global timer to generate random
durations for stats and consistent timestamps for all mock trace events.
The unit of time is milliseconds.
"""
def __init__(self):
self.milliseconds = 0
def Get(self):
return self.milliseconds
def Advance(self, low=0, high=1):
delta = random.uniform(low, high)
self.milliseconds += delta
return delta
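# Small illustration of the MockTimer contract described above; a reader-facing
# sketch, not used by the tests below.
def ExampleMockTimerUsage():
  timer = MockTimer()
  delta = timer.Advance(2, 4)   # random duration drawn from [2, 4) milliseconds
  assert timer.Get() == delta   # the global timestamp advanced by exactly delta
  return delta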
class ReferenceRenderingStats(object):
""" Stores expected data for comparison with actual RenderingStats """
def __init__(self):
self.frame_timestamps = []
self.frame_times = []
self.paint_times = []
self.painted_pixel_counts = []
self.record_times = []
self.recorded_pixel_counts = []
self.rasterize_times = []
self.rasterized_pixel_counts = []
def AppendNewRange(self):
self.frame_timestamps.append([])
self.frame_times.append([])
self.paint_times.append([])
self.painted_pixel_counts.append([])
self.record_times.append([])
self.recorded_pixel_counts.append([])
self.rasterize_times.append([])
self.rasterized_pixel_counts.append([])
class ReferenceInputLatencyStats(object):
""" Stores expected data for comparison with actual input latency stats """
def __init__(self):
self.mouse_wheel_scroll_latency = []
self.touch_scroll_latency = []
self.js_touch_scroll_latency = []
self.mouse_wheel_scroll_events = []
self.touch_scroll_events = []
self.js_touch_scroll_events = []
def AddMainThreadRenderingStats(mock_timer, thread, first_frame,
ref_stats = None):
""" Adds a random main thread rendering stats event.
thread: The timeline model thread to which the event will be added.
first_frame: Is this the first frame within the bounds of an action?
ref_stats: A ReferenceRenderingStats object to record expected values.
"""
  # Create random data and a timestamp for main thread rendering stats.
data = { 'frame_count': 0,
'paint_time': 0.0,
'painted_pixel_count': 0,
'record_time': mock_timer.Advance(2, 4) / 1000.0,
'recorded_pixel_count': 3000*3000 }
timestamp = mock_timer.Get()
# Add a slice with the event data to the given thread.
thread.PushCompleteSlice(
'benchmark', 'BenchmarkInstrumentation::MainThreadRenderingStats',
timestamp, duration=0.0, thread_timestamp=None, thread_duration=None,
args={'data': data})
if not ref_stats:
return
# Add timestamp only if a frame was output
if data['frame_count'] == 1:
if not first_frame:
      # Add frame_time if this is not the first frame within the bounds of an
      # action.
prev_timestamp = ref_stats.frame_timestamps[-1][-1]
ref_stats.frame_times[-1].append(round(timestamp - prev_timestamp, 2))
ref_stats.frame_timestamps[-1].append(timestamp)
ref_stats.paint_times[-1].append(data['paint_time'] * 1000.0)
ref_stats.painted_pixel_counts[-1].append(data['painted_pixel_count'])
ref_stats.record_times[-1].append(data['record_time'] * 1000.0)
ref_stats.recorded_pixel_counts[-1].append(data['recorded_pixel_count'])
def AddImplThreadRenderingStats(mock_timer, thread, first_frame,
ref_stats = None):
""" Adds a random impl thread rendering stats event.
thread: The timeline model thread to which the event will be added.
first_frame: Is this the first frame within the bounds of an action?
ref_stats: A ReferenceRenderingStats object to record expected values.
"""
  # Create random data and a timestamp for impl thread rendering stats.
data = { 'frame_count': 1,
'rasterize_time': mock_timer.Advance(5, 10) / 1000.0,
'rasterized_pixel_count': 1280*720 }
timestamp = mock_timer.Get()
# Add a slice with the event data to the given thread.
thread.PushCompleteSlice(
'benchmark', 'BenchmarkInstrumentation::ImplThreadRenderingStats',
timestamp, duration=0.0, thread_timestamp=None, thread_duration=None,
args={'data': data})
if not ref_stats:
return
# Add timestamp only if a frame was output
if data['frame_count'] == 1:
if not first_frame:
# Add frame_time if this is not the first frame in within the bounds of an
# action.
prev_timestamp = ref_stats.frame_timestamps[-1][-1]
ref_stats.frame_times[-1].append(round(timestamp - prev_timestamp, 2))
ref_stats.frame_timestamps[-1].append(timestamp)
ref_stats.rasterize_times[-1].append(data['rasterize_time'] * 1000.0)
ref_stats.rasterized_pixel_counts[-1].append(data['rasterized_pixel_count'])
def AddInputLatencyStats(mock_timer, input_type, start_thread, end_thread,
ref_latency_stats = None):
""" Adds a random input latency stats event.
input_type: The input type for which the latency slice is generated.
start_thread: The start thread on which the async slice is added.
end_thread: The end thread on which the async slice is ended.
ref_latency_stats: A ReferenceInputLatencyStats object for expected values.
"""
mock_timer.Advance()
ui_comp_time = mock_timer.Get() * 1000.0
mock_timer.Advance()
begin_comp_time = mock_timer.Get() * 1000.0
mock_timer.Advance(10, 20)
end_comp_time = mock_timer.Get() * 1000.0
data = { UI_COMP_NAME: {'time': ui_comp_time},
BEGIN_COMP_NAME: {'time': begin_comp_time},
END_COMP_NAME: {'time': end_comp_time} }
timestamp = mock_timer.Get()
async_slice = tracing_async_slice.AsyncSlice(
'benchmark', 'InputLatency', timestamp)
async_sub_slice = tracing_async_slice.AsyncSlice(
'benchmark', 'InputLatency', timestamp)
async_sub_slice.args = {'data': data, 'step': input_type}
async_sub_slice.parent_slice = async_slice
async_sub_slice.start_thread = start_thread
async_sub_slice.end_thread = end_thread
async_slice.sub_slices.append(async_sub_slice)
async_slice.start_thread = start_thread
async_slice.end_thread = end_thread
start_thread.AddAsyncSlice(async_slice)
if not ref_latency_stats:
return
if input_type == 'MouseWheel':
ref_latency_stats.mouse_wheel_scroll_events.append(async_sub_slice)
ref_latency_stats.mouse_wheel_scroll_latency.append(
(data[END_COMP_NAME]['time'] - data[BEGIN_COMP_NAME]['time']) / 1000.0)
if input_type == 'GestureScrollUpdate':
ref_latency_stats.touch_scroll_events.append(async_sub_slice)
ref_latency_stats.touch_scroll_latency.append(
(data[END_COMP_NAME]['time'] - data[UI_COMP_NAME]['time']) / 1000.0)
if input_type == 'TouchMove':
ref_latency_stats.js_touch_scroll_events.append(async_sub_slice)
ref_latency_stats.js_touch_scroll_latency.append(
(data[END_COMP_NAME]['time'] - data[UI_COMP_NAME]['time']) / 1000.0)
class RenderingStatsUnitTest(unittest.TestCase):
def testHasRenderingStats(self):
timeline = model.TimelineModel()
timer = MockTimer()
# A process without rendering stats
process_without_stats = timeline.GetOrCreateProcess(pid = 1)
thread_without_stats = process_without_stats.GetOrCreateThread(tid = 11)
process_without_stats.FinalizeImport()
self.assertFalse(HasRenderingStats(thread_without_stats))
# A process with rendering stats, but no frames in them
process_without_frames = timeline.GetOrCreateProcess(pid = 2)
thread_without_frames = process_without_frames.GetOrCreateThread(tid = 21)
AddMainThreadRenderingStats(timer, thread_without_frames, True, None)
process_without_frames.FinalizeImport()
self.assertFalse(HasRenderingStats(thread_without_frames))
# A process with rendering stats and frames in them
process_with_frames = timeline.GetOrCreateProcess(pid = 3)
thread_with_frames = process_with_frames.GetOrCreateThread(tid = 31)
AddImplThreadRenderingStats(timer, thread_with_frames, True, None)
process_with_frames.FinalizeImport()
self.assertTrue(HasRenderingStats(thread_with_frames))
def testFromTimeline(self):
timeline = model.TimelineModel()
# Create a browser process and a renderer process, and a main thread and
# impl thread for each.
browser = timeline.GetOrCreateProcess(pid = 1)
browser_main = browser.GetOrCreateThread(tid = 11)
browser_compositor = browser.GetOrCreateThread(tid = 12)
renderer = timeline.GetOrCreateProcess(pid = 2)
renderer_main = renderer.GetOrCreateThread(tid = 21)
renderer_compositor = renderer.GetOrCreateThread(tid = 22)
timer = MockTimer()
ref_stats = ReferenceRenderingStats()
# Create 10 main and impl rendering stats events for Action A.
timer.Advance()
renderer_main.BeginSlice('webkit.console', 'ActionA', timer.Get(), '')
ref_stats.AppendNewRange()
for i in xrange(0, 10):
first = (i == 0)
AddMainThreadRenderingStats(timer, renderer_main, first, None)
AddImplThreadRenderingStats(timer, renderer_compositor, first, None)
AddMainThreadRenderingStats(timer, browser_main, first, ref_stats)
AddImplThreadRenderingStats(timer, browser_compositor, first, ref_stats)
renderer_main.EndSlice(timer.Get())
# Create 5 main and impl rendering stats events not within any action.
for i in xrange(0, 5):
first = (i == 0)
AddMainThreadRenderingStats(timer, renderer_main, first, None)
AddImplThreadRenderingStats(timer, renderer_compositor, first, None)
AddMainThreadRenderingStats(timer, browser_main, first, None)
AddImplThreadRenderingStats(timer, browser_compositor, first, None)
# Create 10 main and impl rendering stats events for Action B.
timer.Advance()
renderer_main.BeginSlice('webkit.console', 'ActionB', timer.Get(), '')
ref_stats.AppendNewRange()
for i in xrange(0, 10):
first = (i == 0)
AddMainThreadRenderingStats(timer, renderer_main, first, None)
AddImplThreadRenderingStats(timer, renderer_compositor, first, None)
AddMainThreadRenderingStats(timer, browser_main, first, ref_stats)
AddImplThreadRenderingStats(timer, browser_compositor, first, ref_stats)
renderer_main.EndSlice(timer.Get())
# Create 10 main and impl rendering stats events for Action A.
timer.Advance()
renderer_main.BeginSlice('webkit.console', 'ActionA', timer.Get(), '')
ref_stats.AppendNewRange()
for i in xrange(0, 10):
first = (i == 0)
AddMainThreadRenderingStats(timer, renderer_main, first, None)
AddImplThreadRenderingStats(timer, renderer_compositor, first, None)
AddMainThreadRenderingStats(timer, browser_main, first, ref_stats)
AddImplThreadRenderingStats(timer, browser_compositor, first, ref_stats)
renderer_main.EndSlice(timer.Get())
browser.FinalizeImport()
renderer.FinalizeImport()
timeline_markers = timeline.FindTimelineMarkers(
['ActionA', 'ActionB', 'ActionA'])
timeline_ranges = [ timeline_bounds.Bounds.CreateFromEvent(marker)
for marker in timeline_markers ]
stats = RenderingStats(renderer, browser, timeline_ranges)
# Check if we are using the browser compositor's stats
self.assertEquals(stats.top_level_process, browser)
# Compare rendering stats to reference.
self.assertEquals(stats.frame_timestamps, ref_stats.frame_timestamps)
self.assertEquals(stats.frame_times, ref_stats.frame_times)
self.assertEquals(stats.rasterize_times, ref_stats.rasterize_times)
self.assertEquals(stats.rasterized_pixel_counts,
ref_stats.rasterized_pixel_counts)
self.assertEquals(stats.paint_times, ref_stats.paint_times)
self.assertEquals(stats.painted_pixel_counts,
ref_stats.painted_pixel_counts)
self.assertEquals(stats.record_times, ref_stats.record_times)
self.assertEquals(stats.recorded_pixel_counts,
ref_stats.recorded_pixel_counts)
def testScrollLatencyFromTimeline(self):
timeline = model.TimelineModel()
# Create a browser process and a renderer process.
browser = timeline.GetOrCreateProcess(pid = 1)
browser_main = browser.GetOrCreateThread(tid = 11)
renderer = timeline.GetOrCreateProcess(pid = 2)
renderer_main = renderer.GetOrCreateThread(tid = 21)
timer = MockTimer()
ref_latency_stats = ReferenceInputLatencyStats()
# Create 10 input latency stats events for Action A.
timer.Advance()
renderer_main.BeginSlice('webkit.console', 'ActionA', timer.Get(), '')
for _ in xrange(0, 10):
AddInputLatencyStats(timer, 'MouseWheel', browser_main,
renderer_main, ref_latency_stats)
AddInputLatencyStats(timer, 'GestureScrollUpdate', browser_main,
renderer_main, ref_latency_stats)
AddInputLatencyStats(timer, 'TouchMove', browser_main,
renderer_main, ref_latency_stats)
renderer_main.EndSlice(timer.Get())
# Create 5 input latency stats events not within any action.
for _ in xrange(0, 5):
AddInputLatencyStats(timer, 'MouseWheel', browser_main,
renderer_main, None)
AddInputLatencyStats(timer, 'GestureScrollUpdate', browser_main,
renderer_main, None)
AddInputLatencyStats(timer, 'TouchMove', browser_main,
renderer_main, None)
# Create 10 input latency stats events for Action B.
timer.Advance()
renderer_main.BeginSlice('webkit.console', 'ActionB', timer.Get(), '')
for _ in xrange(0, 10):
AddInputLatencyStats(timer, 'MouseWheel', browser_main,
renderer_main, ref_latency_stats)
AddInputLatencyStats(timer, 'GestureScrollUpdate', browser_main,
renderer_main, ref_latency_stats)
AddInputLatencyStats(timer, 'TouchMove', browser_main,
renderer_main, ref_latency_stats)
renderer_main.EndSlice(timer.Get())
# Create 10 input latency stats events for Action A.
timer.Advance()
renderer_main.BeginSlice('webkit.console', 'ActionA', timer.Get(), '')
for _ in xrange(0, 10):
AddInputLatencyStats(timer, 'MouseWheel', browser_main,
renderer_main, ref_latency_stats)
AddInputLatencyStats(timer, 'GestureScrollUpdate', browser_main,
renderer_main, ref_latency_stats)
AddInputLatencyStats(timer, 'TouchMove', browser_main,
renderer_main, ref_latency_stats)
renderer_main.EndSlice(timer.Get())
browser.FinalizeImport()
renderer.FinalizeImport()
mouse_wheel_scroll_events = []
touch_scroll_events = []
js_touch_scroll_events = []
timeline_markers = timeline.FindTimelineMarkers(
['ActionA', 'ActionB', 'ActionA'])
for timeline_range in [ timeline_bounds.Bounds.CreateFromEvent(marker)
for marker in timeline_markers ]:
if timeline_range.is_empty:
continue
tmp_mouse_events = GetScrollInputLatencyEvents(
'MouseWheel', browser, timeline_range)
tmp_touch_scroll_events = GetScrollInputLatencyEvents(
'GestureScrollUpdate', browser, timeline_range)
tmp_js_touch_scroll_events = GetScrollInputLatencyEvents(
'TouchMove', browser, timeline_range)
mouse_wheel_scroll_events.extend(tmp_mouse_events)
touch_scroll_events.extend(tmp_touch_scroll_events)
js_touch_scroll_events.extend(tmp_js_touch_scroll_events)
self.assertEquals(mouse_wheel_scroll_events,
ref_latency_stats.mouse_wheel_scroll_events)
self.assertEquals(touch_scroll_events,
ref_latency_stats.touch_scroll_events)
self.assertEquals(js_touch_scroll_events,
ref_latency_stats.js_touch_scroll_events)
self.assertEquals(ComputeMouseWheelScrollLatency(mouse_wheel_scroll_events),
ref_latency_stats.mouse_wheel_scroll_latency)
self.assertEquals(ComputeTouchScrollLatency(touch_scroll_events),
ref_latency_stats.touch_scroll_latency)
self.assertEquals(ComputeTouchScrollLatency(js_touch_scroll_events),
ref_latency_stats.js_touch_scroll_latency)
|
|
#!/usr/bin/env python
#
# Copyright (C) 2014 Narf Industries <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from generator.actions import Actions, Variable
from random import choice, randint
from os.path import dirname, abspath, join
import string
import sys
sys.path.append(join(dirname(dirname(dirname(abspath(__file__)))), "support"))
from support import Support
def random_alpha(a, b):
return ''.join(choice(string.letters) for _ in range(randint(a, b)))
def random_string(a, b):
chars = string.letters + string.digits
return ''.join(choice(chars) for _ in range(randint(a, b)))
def random_digits(a, b):
chars = string.digits
return ''.join(choice(chars) for _ in range(randint(a, b)))
class Solfedge(Actions):
DEBUG = False
DEBUG_ERR = DEBUG or False
DEBUG_FUNC = DEBUG or False
DEBUG_INIT = DEBUG or False
MAX_SYLLABLES_BYTES = 4096
MAX_NOTES_BYTES = 2048
BUFFER_LEN = 4096
NUM_BYTES_MIN = 5
NUM_BYTES_MAX = 1500
def start(self):
self.state['m'] = Support()
self.state['invalid'] = False
self.state['no_sol'] = False
def _send_command(self, command_name):
if self.DEBUG_FUNC:
print('-- _send_command-- {0}'.format(command_name))
if self.chance(0.01):
cmd = self.state['m'].pack_command('BAD')
self.write(cmd)
self._recv_error('ERR_INVALID_CMD')
if self.DEBUG_ERR:
print('exiting due to BAD command.')
return -1
else:
cmd = self.state['m'].pack_command(command_name)
self.write(cmd)
return 0
def _send_bytes_count(self, bytes_count, bytes_count_type):
if self.DEBUG_FUNC:
print('-- _send_bytes_count-- {0}'.format(bytes_count))
        # A bytes_count of 0 results in ERR_NO_NOTES or ERR_NO_SYLLABLES.
if self.chance(0.01):
bytes_count = 0
val = self.state['m'].pack_single_uint32(bytes_count)
self.write(val)
if 'notes' == bytes_count_type:
self._recv_error('ERR_NO_NOTES')
elif 'syllables' == bytes_count_type:
self._recv_error('ERR_NO_SYLLABLES')
if self.DEBUG_ERR:
print('exiting due to 0 bytes_count.')
return -1
elif self.chance(0.01):
            # For bytes_count_type 'notes', a bytes_count greater than MAX_NOTES_BYTES results in ERR_TOO_MANY_NOTES.
if 'notes' == bytes_count_type:
bytes_count = self.MAX_NOTES_BYTES + randint(2, 10)
val = self.state['m'].pack_single_uint32(bytes_count)
self.write(val)
self._recv_error('ERR_TOO_MANY_NOTES')
if self.DEBUG_ERR:
print('exiting due to too many notes.')
            # For bytes_count_type 'syllables', a bytes_count greater than MAX_SYLLABLES_BYTES results in ERR_TOO_MANY_SYLLABLES.
elif 'syllables' == bytes_count_type:
bytes_count = self.MAX_SYLLABLES_BYTES + randint(2, 10)
val = self.state['m'].pack_single_uint32(bytes_count)
self.write(val)
self._recv_error('ERR_TOO_MANY_SYLLABLES')
if self.DEBUG_ERR:
print('exiting due to too many syllables.')
return -1
else:
val = self.state['m'].pack_single_uint32(bytes_count)
self.write(val)
return 0
def _send_byte_count_and_notes(self, note_id_list):
if self.DEBUG_FUNC:
print('-- _send_byte_count_and_notes-- \n\t{0}'.format(note_id_list))
val = self.state['m'].pack_as_notes(note_id_list)
if 0 > self._send_bytes_count(len(val), 'notes'):
return -1
self.write(val)
if self.DEBUG_FUNC:
print('-- _send_byte_count_and_notes-- sent {0} bytes\n\t{1}'.format(len(val), val))
return 0
def _send_byte_count_and_syllables(self, syllable_id_list):
if self.DEBUG_FUNC:
print('-- _send_byte_count_and_syllables-- ids:\n\t->{0}<-'.format(syllable_id_list))
val = self.state['m'].pack_as_syllables(syllable_id_list)
if 0 > self._send_bytes_count(len(val), 'syllables'):
return -1
self.write(val)
if self.DEBUG_FUNC:
print('-- _send_byte_count_and_syllables-- sent {0} bytes\n\t{1}'.format(len(val), val))
return 0
def _recv_results(self, result_type, result_id_list):
if self.DEBUG_FUNC:
print('-- _recv_results-- \n\t{0} {1}\n\t ->{2}<-'.format(len(result_id_list), result_type, result_id_list))
expected = ''
if 'notes' == result_type:
expected += self.state['m'].pack_as_notes(result_id_list)
elif 'syllables' == result_type:
expected += self.state['m'].pack_as_syllables(result_id_list)
self.read(length=len(expected), expect=expected)
if self.DEBUG_FUNC:
print('-- _recv_results-- received {0} bytes\n\t->{1}<-'.format(len(expected), expected))
def _recv_error(self, error_name):
expected = self.state['m'].pack_error(error_name)
self.read(length=len(expected), expect=expected)
if True == self.DEBUG_ERR:
print('exiting due to {0}'.format(error_name))
def _generate_random_harmony(self, num_bytes, harmony_type, include_invalid=False, no_sol=False):
return self.state['m'].generate_random_harmony_as_list(num_bytes, harmony_type, include_invalid=include_invalid, no_sol=no_sol)
def to_notes(self):
if self.DEBUG_FUNC:
print('- to_notes -')
num_bytes = randint(self.NUM_BYTES_MIN, self.NUM_BYTES_MAX)
if self.chance(0.01):
harmony = self._generate_random_harmony(num_bytes, 'syllables', include_invalid=True)
self.state['invalid'] = True
else:
harmony = self._generate_random_harmony(num_bytes, 'syllables')
if 0 > self._send_command('CMD_TO_NOTES'):
return -1
if 0 > self._send_byte_count_and_syllables(harmony):
return -1
if True == self.state['invalid']:
self._recv_error('ERR_INVALID_SYLLABLE')
return -1
else:
self._recv_results('notes', harmony)
def to_syllables(self):
if self.DEBUG_FUNC:
print('- to_syllables -')
num_bytes = randint(self.NUM_BYTES_MIN, self.NUM_BYTES_MAX)
if self.chance(0.01):
harmony = self._generate_random_harmony(num_bytes, 'notes', include_invalid=True)
self.state['invalid'] = True
else:
harmony = self._generate_random_harmony(num_bytes, 'notes')
if 0 > self._send_command('CMD_TO_SYLLABLES'):
return -1
if 0 > self._send_byte_count_and_notes(harmony):
return -1
if True == self.state['invalid']:
self._recv_error('ERR_INVALID_NOTE')
return -1
else:
self._recv_results('syllables', harmony)
|
|
"""
Tray icon for udiskie.
"""
from gi.repository import Gio
from gi.repository import Gtk
from .async_ import run_bg, Future
from .common import setdefault, DaemonBase, cachedmethod
from .locale import _
from .mount import Action, prune_empty_node
from .prompt import Dialog
from .icons import IconDist
import os
__all__ = ['TrayMenu', 'TrayIcon']
class MenuFolder:
def __init__(self, label, items):
self.label = label
self.items = items
def __bool__(self):
return bool(self.items)
__nonzero__ = __bool__
class MenuSection(MenuFolder):
pass
class SubMenu(MenuFolder):
pass
class Icons:
"""Encapsulates the responsibility to load icons."""
_icon_names = {
'media': [
'drive-removable-media-usb-panel',
'drive-removable-media-usb-pendrive',
'drive-removable-media-usb',
'drive-removable-media',
'media-optical',
'media-flash',
],
'browse': ['document-open', 'folder-open'],
'terminal': ['terminal', 'utilities-terminal'],
'mount': ['udiskie-mount'],
'unmount': ['udiskie-unmount'],
'unlock': ['udiskie-unlock'],
'lock': ['udiskie-lock'],
'eject': ['udiskie-eject', 'media-eject'],
'detach': ['udiskie-detach'],
'quit': ['application-exit'],
'forget_password': ['edit-delete'],
'delete': ['udiskie-eject'],
'losetup': ['udiskie-mount'],
# checkbox workaround:
'checked': ['checkbox-checked', 'udiskie-checkbox-checked'],
'unchecked': ['checkbox', 'udiskie-checkbox-unchecked'],
'submenu': ['udiskie-submenu', 'pan-end-symbolic'],
}
def __init__(self, icon_names={}):
"""Merge ``icon_names`` into default icon names."""
self._icon_dist = IconDist()
_icon_names = icon_names.copy()
setdefault(_icon_names, self.__class__._icon_names)
self._icon_names = _icon_names
for k, v in _icon_names.items():
if isinstance(v, str):
self._icon_names[k] = v = [v]
self._icon_names[k] = self._icon_dist.patch_list(v)
@cachedmethod
def get_icon_name(self, icon_id: str) -> str:
"""Lookup the system icon name from udisie-internal id."""
icon_theme = Gtk.IconTheme.get_default()
for name in self._icon_names[icon_id]:
if icon_theme.has_icon(name):
return name
elif os.path.exists(name):
return name
return 'not-available'
def get_icon(self, icon_id: str, size: "Gtk.IconSize") -> "Gtk.Image":
"""Load Gtk.Image from udiskie-internal id."""
return Gtk.Image.new_from_gicon(self.get_gicon(icon_id), size)
def get_gicon(self, icon_id: str) -> "Gio.Icon":
"""Lookup Gio.Icon from udiskie-internal id."""
name = self.get_icon_name(icon_id)
if os.path.exists(name):
# TODO (?): we could also add the icon to the theme using
# Gtk.IconTheme.append_search_path or .add_resource_path:
file = Gio.File.new_for_path(name)
return Gio.FileIcon.new(file)
else:
return Gio.ThemedIcon.new(name)
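# Hedged sketch of the icon lookup flow described above (assumption: a GTK
# icon theme is available in the running session); for illustration only.
def _icons_example():
    icons = Icons()
    # Walks the candidate names for 'media' until the theme (or a file on
    # disk) provides one, falling back to 'not-available'.
    return icons.get_icon_name('media')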
class TrayMenu:
"""
Builder for udiskie menus.
Objects of this class generate action menus when being called.
"""
def __init__(self, daemon, icons, actions, flat=True,
quickmenu_actions=None,
checkbox_workaround=False,
update_workaround=False):
"""
Initialize a new menu maker.
        :param object daemon: the udiskie daemon providing mount operations
:param Icons icons: icon provider
:param DeviceActions actions: device actions discovery
:returns: a new menu maker
:rtype: cls
Required keys for the ``_labels``, ``_menu_icons`` and
``actions`` dictionaries are:
- browse Open mount location
- terminal Open mount location in terminal
- mount Mount a device
- unmount Unmount a device
- unlock Unlock a LUKS device
- lock Lock a LUKS device
- eject Eject a drive
- detach Detach (power down) a drive
- quit Exit the application
NOTE: If using a main loop other than ``Gtk.main`` the 'quit' action
must be customized.
"""
self._icons = icons
self._daemon = daemon
self._mounter = daemon.mounter
self._actions = actions
self._quit_action = daemon.mainloop.quit
self.flat = flat
# actions shown in the quick-menu ("flat", left-click):
self._quickmenu_actions = quickmenu_actions or [
'mount',
'browse',
'terminal',
'unlock',
'detach',
'delete',
# suppressed:
# 'unmount',
# 'lock',
# 'eject',
# 'forget_password',
]
self._checkbox_workaround = checkbox_workaround
self._update_workaround = update_workaround
def __call__(self, menu, extended=True):
"""Populate the Gtk.Menu with udiskie mount operations."""
# create actions items
flat = self.flat and not extended
if self._update_workaround:
# When showing menus via AppIndicator3 on sway, the menu geometry
# seems to be calculated before the 'about-to-show' event, and
# therefore cannot take into account newly inserted menu items.
# For this reason, we have to keep the top-level menu fixed-size
# and insert dynamic entries into a submenu.
devmenu = Gtk.Menu()
menu.append(self._menuitem(
label=_('Managed devices'),
icon=None,
onclick=devmenu,
))
else:
devmenu = menu
self._create_menu_items(
devmenu, self._prepare_menu(self.detect(), flat))
if extended:
self._insert_options(menu)
return menu
def _insert_options(self, menu):
"""Add configuration options to menu."""
menu.append(Gtk.SeparatorMenuItem())
menu.append(self._menuitem(
_('Mount disc image'),
self._icons.get_icon('losetup', Gtk.IconSize.MENU),
run_bg(lambda _: self._losetup())
))
menu.append(Gtk.SeparatorMenuItem())
menu.append(self._menuitem(
_("Enable automounting"),
icon=None,
onclick=lambda _: self._daemon.automounter.toggle_on(),
checked=self._daemon.automounter.is_on(),
))
menu.append(self._menuitem(
_("Enable notifications"),
icon=None,
onclick=lambda _: self._daemon.notify.toggle(),
checked=self._daemon.notify.active,
))
# append menu item for closing the application
if self._quit_action:
menu.append(Gtk.SeparatorMenuItem())
menu.append(self._menuitem(
_('Quit'),
self._icons.get_icon('quit', Gtk.IconSize.MENU),
lambda _: self._quit_action()
))
async def _losetup(self):
gtk_dialog = Gtk.FileChooserDialog(
_('Open disc image'), None,
Gtk.FileChooserAction.OPEN,
(_('Open'), Gtk.ResponseType.OK,
_('Cancel'), Gtk.ResponseType.CANCEL))
with Dialog(gtk_dialog) as dialog:
response = await dialog
if response == Gtk.ResponseType.OK:
await self._mounter.losetup(dialog.window.get_filename())
def detect(self):
"""Detect all currently known devices. Returns the root device."""
root = self._actions.detect()
prune_empty_node(root, set())
return root
def _create_menu(self, items):
"""
Create a menu from the given node.
:param list items: list of menu items
:returns: a new Gtk.Menu object holding all items of the node
"""
menu = Gtk.Menu()
self._create_menu_items(menu, items)
return menu
def _create_menu_items(self, menu, items):
def make_action_callback(node):
return run_bg(lambda _: node.action())
for node in items:
if isinstance(node, Action):
menu.append(self._menuitem(
node.label,
self._icons.get_icon(node.method, Gtk.IconSize.MENU),
make_action_callback(node)))
elif isinstance(node, SubMenu):
menu.append(self._menuitem(
node.label,
icon=None,
onclick=self._create_menu(node.items)))
elif isinstance(node, MenuSection):
self._create_menu_section(menu, node)
else:
raise ValueError(_("Invalid node!"))
if len(menu) == 0:
mi = self._menuitem(_("No external devices"), None, None)
mi.set_sensitive(False)
menu.append(mi)
def _create_menu_section(self, menu, section):
if len(menu) > 0:
menu.append(Gtk.SeparatorMenuItem())
if section.label:
mi = self._menuitem(section.label, None, None)
mi.set_sensitive(False)
menu.append(mi)
self._create_menu_items(menu, section.items)
def _menuitem(self, label, icon, onclick, checked=None):
"""
Create a generic menu item.
:param str label: text
:param Gtk.Image icon: icon (may be ``None``)
:param onclick: onclick handler, either a callable or Gtk.Menu
:returns: the menu item object
:rtype: Gtk.MenuItem
"""
if self._checkbox_workaround:
if checked is not None:
icon_name = 'checked' if checked else 'unchecked'
icon = self._icons.get_icon(icon_name, Gtk.IconSize.MENU)
checked = None
elif isinstance(onclick, Gtk.Menu):
icon = self._icons.get_icon('submenu', Gtk.IconSize.MENU)
if checked is not None:
item = Gtk.CheckMenuItem()
item.set_active(checked)
elif icon is None:
item = Gtk.MenuItem()
else:
item = Gtk.ImageMenuItem()
item.set_image(icon)
# I don't really care for the "show icons only for nouns, not
# for verbs" policy:
item.set_always_show_image(True)
if label is not None:
item.set_label(label)
if isinstance(onclick, Gtk.Menu):
item.set_submenu(onclick)
elif onclick is not None:
item.connect('activate', onclick)
return item
def _prepare_menu(self, node, flat=None):
"""
Prepare the menu hierarchy from the given device tree.
:param Device node: root node of device hierarchy
:returns: menu hierarchy as list
"""
if flat is None:
flat = self.flat
ItemGroup = MenuSection if flat else SubMenu
return [
ItemGroup(branch.label, self._collapse_device(branch, flat))
for branch in node.branches
if branch.methods or branch.branches
]
def _collapse_device(self, node, flat):
"""Collapse device hierarchy into a flat folder."""
items = [item
for branch in node.branches
for item in self._collapse_device(branch, flat)
if item]
show_all = not flat or self._quickmenu_actions == 'all'
methods = node.methods if show_all else [
method
for method in node.methods
if method.method in self._quickmenu_actions
]
if flat:
items.extend(methods)
else:
items.append(MenuSection(None, methods))
return items
class TrayIcon:
"""Default TrayIcon class."""
def __init__(self, menumaker, icons, statusicon=None):
"""
Create an object managing a tray icon.
The actual Gtk.StatusIcon is only created as soon as you call show()
for the first time. The reason to delay its creation is that the GTK
        icon will be initially visible, which results in a perceptible
flickering.
:param TrayMenu menumaker: menu factory
:param Gtk.StatusIcon statusicon: status icon
"""
self._icons = icons
self._icon = statusicon
self._menu = menumaker
self._conn_left = None
self._conn_right = None
self.task = Future()
menumaker._quit_action = self.destroy
def destroy(self):
self.show(False)
self.task.set_result(True)
def _create_statusicon(self):
"""Return a new Gtk.StatusIcon."""
statusicon = Gtk.StatusIcon()
statusicon.set_from_gicon(self._icons.get_gicon('media'))
statusicon.set_tooltip_text(_("udiskie"))
return statusicon
@property
def visible(self):
"""Return visibility state of icon."""
return bool(self._conn_left)
def show(self, show=True):
"""Show or hide the tray icon."""
if show and not self.visible:
self._show()
if not show and self.visible:
self._hide()
def _show(self):
"""Show the tray icon."""
if not self._icon:
self._icon = self._create_statusicon()
widget = self._icon
widget.set_visible(True)
self._conn_left = widget.connect("activate", self._activate)
self._conn_right = widget.connect("popup-menu", self._popup_menu)
def _hide(self):
"""Hide the tray icon."""
self._icon.set_visible(False)
self._icon.disconnect(self._conn_left)
self._icon.disconnect(self._conn_right)
self._conn_left = None
self._conn_right = None
def create_context_menu(self, extended):
"""Create the context menu."""
menu = Gtk.Menu()
self._menu(menu, extended)
return menu
def _activate(self, icon):
"""Handle a left click event (show the menu)."""
self._popup_menu(icon, button=0, time=Gtk.get_current_event_time(),
extended=False)
def _popup_menu(self, icon, button, time, extended=True):
"""Handle a right click event (show the menu)."""
m = self.create_context_menu(extended)
m.show_all()
m.popup(parent_menu_shell=None,
parent_menu_item=None,
func=icon.position_menu,
data=icon,
button=button,
activate_time=time)
# need to store reference or menu will be destroyed before showing:
self._m = m
class UdiskieStatusIcon(DaemonBase):
"""
Manage a status icon.
When `smart` is on, the icon will automatically hide if there is no action
available and the menu will have no 'Quit' item.
"""
def __init__(self, icon, menumaker, smart=False):
self._icon = icon
self._menumaker = menumaker
self._mounter = menumaker._mounter
self._quit_action = menumaker._quit_action
self.smart = smart
self.active = False
self.events = {
'device_changed': self.update,
'device_added': self.update,
'device_removed': self.update,
}
def activate(self):
super().activate()
self.update()
def deactivate(self):
super().deactivate()
self._icon.show(False)
@property
def smart(self):
return getattr(self, '_smart', None)
@smart.setter
def smart(self, smart):
if smart == self.smart:
return
if smart:
self._menumaker._quit_action = None
else:
self._menumaker._quit_action = self._quit_action
self._smart = smart
self.update()
def has_menu(self):
"""Check if a menu action is available."""
return any(self._menumaker._prepare_menu(self._menumaker.detect()))
def update(self, *args):
"""Show/hide icon depending on whether there are devices."""
if self.smart:
self._icon.show(self.has_menu())
else:
self._icon.show(True)
|
|
# -*- coding: utf-8 -*-
# =============================================================================
# Federal University of Rio Grande do Sul (UFRGS)
# Connectionist Artificial Intelligence Laboratory (LIAC)
# Renato de Pontes Pereira - [email protected]
# =============================================================================
# Copyright (c) 2011 Renato de Pontes Pereira, renato.ppontes at gmail dot com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
'''
The liac-arff module implements functions to read and write ARFF files in
Python. It was created in the Connectionist Artificial Intelligence Laboratory
(LIAC), which takes place at the Federal University of Rio Grande do Sul
(UFRGS), in Brazil.
ARFF (Attribute-Relation File Format) is a file format specially created to
describe datasets which are commonly used for machine learning experiments and
software. This file format was created to be used in Weka, the best-known
representative software for automated machine learning experiments.
An ARFF file can be divided into two sections: header and data. The Header
describes the metadata of the dataset, including a general description of the
dataset, its name and its attributes. The source below is an example of a
header section in a XOR dataset::
%
% XOR Dataset
%
% Created by Renato Pereira
% [email protected]
% http://inf.ufrgs.br/~rppereira
%
%
@RELATION XOR
@ATTRIBUTE input1 REAL
@ATTRIBUTE input2 REAL
@ATTRIBUTE y REAL
The Data section of an ARFF file describes the observations of the dataset;
in the case of the XOR dataset::
@DATA
0.0,0.0,0.0
0.0,1.0,1.0
1.0,0.0,1.0
1.0,1.0,0.0
%
%
%
Notice that several lines start with a ``%`` symbol, denoting a comment; thus,
lines beginning with ``%`` will be ignored, except for the description part at
the beginning of the file. The declarations ``@RELATION``,
``@ATTRIBUTE``, and ``@DATA`` are all case insensitive and obligatory.
For more information and details about the ARFF file description, consult
http://www.cs.waikato.ac.nz/~ml/weka/arff.html
ARFF Files in Python
~~~~~~~~~~~~~~~~~~~~
This module uses built-in Python objects to represent a deserialized ARFF
file. A dictionary is used as the container of the data and metadata of ARFF,
and has the following keys:
- **description**: (OPTIONAL) a string with the description of the dataset.
- **relation**: (OBLIGATORY) a string with the name of the dataset.
- **attributes**: (OBLIGATORY) a list of attributes with the following
template::
(attribute_name, attribute_type)
  the attribute_name is a string, and attribute_type must be a string
  or a list of strings.
- **data**: (OBLIGATORY) a list of data instances. Each data instance must be
a list with values, depending on the attributes.
The above keys must follow the case described above, i.e., the keys are
case sensitive. The attribute type ``attribute_type`` must be one of these
strings (they are not case sensitive): ``NUMERIC``, ``INTEGER``, ``REAL`` or
``STRING``. For nominal attributes, the ``attribute_type`` must be a list of
strings.
In this format, the XOR dataset presented above can be represented as a python
object as::
xor_dataset = {
'description': 'XOR Dataset',
'relation': 'XOR',
'attributes': [
('input1', 'REAL'),
('input2', 'REAL'),
('y', 'REAL'),
],
'data': [
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0]
]
}
Features
~~~~~~~~
This module provides several features, including:
- Read and write ARFF files using Python built-in structures, such as
  dictionaries and lists;
- Supports `scipy.sparse.coo <http://docs.scipy
.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.html#scipy.sparse.coo_matrix>`_
and lists of dictionaries as used by SVMLight
- Supports the following attribute types: NUMERIC, REAL, INTEGER, STRING, and
NOMINAL;
- Has an interface similar to other built-in modules such as ``json``, or
``zipfile``;
- Supports reading and writing the descriptions of files;
- Supports missing values and names with spaces;
- Supports unicode values and names;
- Fully compatible with Python 2.7+, Python 3.3+, pypy and pypy3;
- Under `MIT License <http://opensource.org/licenses/MIT>`_
'''
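# Hedged usage sketch matching the description above (assumption: the
# module-level ``loads`` helper of liac-arff's json-like interface is defined
# further down in this file); defined for illustration only.
def _example_parse_xor():
    xor_arff = (
        "@RELATION XOR\n"
        "@ATTRIBUTE input1 REAL\n"
        "@ATTRIBUTE input2 REAL\n"
        "@ATTRIBUTE y REAL\n"
        "@DATA\n"
        "0.0,0.0,0.0\n"
        "0.0,1.0,1.0\n"
    )
    dataset = loads(xor_arff)
    return dataset['relation'], dataset['attributes'], list(dataset['data'])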
__author__ = 'Renato de Pontes Pereira, Matthias Feurer, Joel Nothman'
__author_email__ = ('[email protected], '
'[email protected], '
'[email protected]')
__version__ = '2.4.0'
from typing import TYPE_CHECKING
from typing import Optional, List, Dict, Any, Iterator, Union, Tuple
import re
import sys
import csv
# CONSTANTS ===================================================================
_SIMPLE_TYPES = ['NUMERIC', 'REAL', 'INTEGER', 'STRING']
_TK_DESCRIPTION = '%'
_TK_COMMENT = '%'
_TK_RELATION = '@RELATION'
_TK_ATTRIBUTE = '@ATTRIBUTE'
_TK_DATA = '@DATA'
_RE_RELATION = re.compile(r'^([^\{\}%,\s]*|\".*\"|\'.*\')$', re.UNICODE)
_RE_ATTRIBUTE = re.compile(r'^(\".*\"|\'.*\'|[^\{\}%,\s]*)\s+(.+)$', re.UNICODE)
_RE_TYPE_NOMINAL = re.compile(r'^\{\s*((\".*\"|\'.*\'|\S*)\s*,\s*)*(\".*\"|\'.*\'|\S*)\s*\}$', re.UNICODE)
_RE_QUOTE_CHARS = re.compile(r'["\'\\\s%,\000-\031]', re.UNICODE)
_RE_ESCAPE_CHARS = re.compile(r'(?=["\'\\%])|[\n\r\t\000-\031]')
_RE_SPARSE_LINE = re.compile(r'^\s*\{.*\}\s*$', re.UNICODE)
_RE_NONTRIVIAL_DATA = re.compile('["\'{}\\s]', re.UNICODE)
ArffDenseDataType = Iterator[List]
ArffSparseDataType = Tuple[List, ...]
if TYPE_CHECKING:
# typing_extensions is available when mypy is installed
from typing_extensions import TypedDict
class ArffContainerType(TypedDict):
description: str
relation: str
attributes: List
data: Union[ArffDenseDataType, ArffSparseDataType]
else:
ArffContainerType = Dict[str, Any]
def _build_re_values():
quoted_re = r'''
" # open quote followed by zero or more of:
(?:
(?<!\\) # no additional backslash
(?:\\\\)* # maybe escaped backslashes
\\" # escaped quote
|
\\[^"] # escaping a non-quote
|
[^"\\] # non-quote char
)*
" # close quote
'''
# a value is surrounded by " or by ' or contains no quotables
value_re = r'''(?:
%s| # a value may be surrounded by "
%s| # or by '
[^,\s"'{}]+ # or may contain no characters requiring quoting
)''' % (quoted_re,
quoted_re.replace('"', "'"))
# This captures (value, error) groups. Because empty values are allowed,
# we cannot just look for empty values to handle syntax errors.
# We presume the line has had ',' prepended...
dense = re.compile(r'''(?x)
, # may follow ','
\s*
((?=,)|$|%(value_re)s) # empty or value
|
(\S.*) # error
''' % {'value_re': value_re})
# This captures (key, value) groups and will have an empty key/value
# in case of syntax errors.
# It does not ensure that the line starts with '{' or ends with '}'.
sparse = re.compile(r'''(?x)
(?:^\s*\{|,) # may follow ',', or '{' at line start
\s*
(\d+) # attribute key
\s+
(%(value_re)s) # value
|
(?!}\s*$) # not an error if it's }$
(?!^\s*{\s*}\s*$) # not an error if it's ^{}$
\S.* # error
''' % {'value_re': value_re})
return dense, sparse
_RE_DENSE_VALUES, _RE_SPARSE_KEY_VALUES = _build_re_values()
_ESCAPE_SUB_MAP = {
'\\\\': '\\',
'\\"': '"',
"\\'": "'",
'\\t': '\t',
'\\n': '\n',
'\\r': '\r',
'\\b': '\b',
'\\f': '\f',
'\\%': '%',
}
_UNESCAPE_SUB_MAP = {chr(i): '\\%03o' % i for i in range(32)}
_UNESCAPE_SUB_MAP.update({v: k for k, v in _ESCAPE_SUB_MAP.items()})
_UNESCAPE_SUB_MAP[''] = '\\'
_ESCAPE_SUB_MAP.update({'\\%d' % i: chr(i) for i in range(10)})
def _escape_sub_callback(match):
s = match.group()
if len(s) == 2:
try:
return _ESCAPE_SUB_MAP[s]
except KeyError as e:
raise ValueError('Unsupported escape sequence: %s' % s) from e
if s[1] == 'u':
return unichr(int(s[2:], 16))
else:
return chr(int(s[1:], 8))
def _unquote(v):
if v[:1] in ('"', "'"):
return re.sub(r'\\([0-9]{1,3}|u[0-9a-f]{4}|.)', _escape_sub_callback,
v[1:-1])
elif v in ('?', ''):
return None
else:
return v
def _parse_values(s):
'''(INTERNAL) Split a line into a list of values'''
if not _RE_NONTRIVIAL_DATA.search(s):
# Fast path for trivial cases (unfortunately we have to handle missing
# values because of the empty string case :(.)
return [None if s in ('?', '') else s
for s in next(csv.reader([s]))]
# _RE_DENSE_VALUES tokenizes despite quoting, whitespace, etc.
values, errors = zip(*_RE_DENSE_VALUES.findall(',' + s))
if not any(errors):
return [_unquote(v) for v in values]
if _RE_SPARSE_LINE.match(s):
try:
return {int(k): _unquote(v)
for k, v in _RE_SPARSE_KEY_VALUES.findall(s)}
except ValueError as exc:
# an ARFF syntax error in sparse data
for match in _RE_SPARSE_KEY_VALUES.finditer(s):
if not match.group(1):
raise BadLayout('Error parsing %r' % match.group()) from exc
raise BadLayout('Unknown parsing error') from exc
else:
# an ARFF syntax error
for match in _RE_DENSE_VALUES.finditer(s):
if match.group(2):
raise BadLayout('Error parsing %r' % match.group())
raise BadLayout('Unknown parsing error')
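# Illustrative behaviour of _parse_values (a sketch, not executed here):
#   a dense line such as  "1.0, 'a b', ?"     parses to ['1.0', 'a b', None]
#   a sparse line such as "{0 1.0, 2 'x y'}"  parses to {0: '1.0', 2: 'x y'}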
DENSE = 0 # Constant value representing a dense matrix
COO = 1 # Constant value representing a sparse matrix in coordinate format
LOD = 2 # Constant value representing a sparse matrix in list of
# dictionaries format
DENSE_GEN = 3 # Generator of dense rows (lists of values)
LOD_GEN = 4 # Generator of dictionaries (sparse rows)
_SUPPORTED_DATA_STRUCTURES = [DENSE, COO, LOD, DENSE_GEN, LOD_GEN]
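# Shape of the decoded 'data' entry for each structure type (derived from the
# decoder classes below):
#   DENSE / DENSE_GEN: list / generator of row lists
#   COO:               (data, rows, cols) lists, suitable for building a
#                      scipy.sparse.coo_matrix
#   LOD / LOD_GEN:     list / generator of {column_index: value} dictionaries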
# =============================================================================
# COMPATIBILITY WITH PYTHON 3 =================================================
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
basestring = str
xrange = range
unichr = chr
# COMPATIBILITY WITH PYTHON 2 =================================================
# =============================================================================
PY2 = sys.version_info[0] == 2
if PY2:
from itertools import izip as zip
# EXCEPTIONS ==================================================================
class ArffException(Exception):
    message: Optional[str] = None
def __init__(self):
self.line = -1
def __str__(self):
return self.message%self.line
class BadRelationFormat(ArffException):
'''Error raised when the relation declaration is in an invalid format.'''
message = 'Bad @RELATION format, at line %d.'
class BadAttributeFormat(ArffException):
'''Error raised when some attribute declaration is in an invalid format.'''
message = 'Bad @ATTRIBUTE format, at line %d.'
class BadDataFormat(ArffException):
'''Error raised when some data instance is in an invalid format.'''
def __init__(self, value):
super(BadDataFormat, self).__init__()
self.message = (
'Bad @DATA instance format in line %d: ' +
('%s' % value)
)
class BadAttributeType(ArffException):
    '''Error raised when some invalid type is provided in the attribute
    declaration.'''
message = 'Bad @ATTRIBUTE type, at line %d.'
class BadAttributeName(ArffException):
    '''Error raised when an attribute name is provided twice in the attribute
    declarations.'''
def __init__(self, value, value2):
super(BadAttributeName, self).__init__()
self.message = (
('Bad @ATTRIBUTE name %s at line' % value) +
' %d, this name is already in use in line' +
(' %d.' % value2)
)
class BadNominalValue(ArffException):
    '''Error raised when a value is used in some data instance but is not
    declared in its respective attribute declaration.'''
def __init__(self, value):
super(BadNominalValue, self).__init__()
self.message = (
('Data value %s not found in nominal declaration, ' % value)
+ 'at line %d.'
)
class BadNominalFormatting(ArffException):
    '''Error raised when a nominal value containing spaces is not properly quoted.'''
def __init__(self, value):
super(BadNominalFormatting, self).__init__()
self.message = (
('Nominal data value "%s" not properly quoted in line ' % value) +
'%d.'
)
class BadNumericalValue(ArffException):
    '''Error raised when an invalid numerical value is used in some data
    instance.'''
message = 'Invalid numerical value, at line %d.'
class BadStringValue(ArffException):
    '''Error raised when a string contains spaces but is not quoted.'''
message = 'Invalid string value at line %d.'
class BadLayout(ArffException):
'''Error raised when the layout of the ARFF file has something wrong.'''
message = 'Invalid layout of the ARFF file, at line %d.'
def __init__(self, msg=''):
super(BadLayout, self).__init__()
if msg:
self.message = BadLayout.message + ' ' + msg.replace('%', '%%')
class BadObject(ArffException):
'''Error raised when the object representing the ARFF file has something
wrong.'''
def __init__(self, msg='Invalid object.'):
self.msg = msg
def __str__(self):
return '%s' % self.msg
# =============================================================================
# INTERNAL ====================================================================
def _unescape_sub_callback(match):
return _UNESCAPE_SUB_MAP[match.group()]
def encode_string(s):
if _RE_QUOTE_CHARS.search(s):
return u"'%s'" % _RE_ESCAPE_CHARS.sub(_unescape_sub_callback, s)
return s
class EncodedNominalConversor(object):
def __init__(self, values):
self.values = {v: i for i, v in enumerate(values)}
self.values[0] = 0
def __call__(self, value):
try:
return self.values[value]
except KeyError as e:
raise BadNominalValue(value) from e
class NominalConversor(object):
def __init__(self, values):
self.values = set(values)
self.zero_value = values[0]
def __call__(self, value):
if value not in self.values:
if value == 0:
# Sparse decode
# See issue #52: nominals should take their first value when
# unspecified in a sparse matrix. Naturally, this is consistent
# with EncodedNominalConversor.
return self.zero_value
raise BadNominalValue(value)
return unicode(value)
class DenseGeneratorData(object):
'''Internal helper class to allow for different matrix types without
making the code a huge collection of if statements.'''
def decode_rows(self, stream, conversors):
for row in stream:
values = _parse_values(row)
if isinstance(values, dict):
if values and max(values) >= len(conversors):
raise BadDataFormat(row)
# XXX: int 0 is used for implicit values, not '0'
values = [values[i] if i in values else 0 for i in
xrange(len(conversors))]
else:
if len(values) != len(conversors):
raise BadDataFormat(row)
yield self._decode_values(values, conversors)
@staticmethod
def _decode_values(values, conversors):
try:
values = [None if value is None else conversor(value)
for conversor, value
in zip(conversors, values)]
except ValueError as exc:
if 'float: ' in str(exc):
raise BadNumericalValue from exc
return values
def encode_data(self, data, attributes):
'''(INTERNAL) Encodes a line of data.
        Data instances follow the CSV format, i.e., attribute values are
        delimited by commas.
:param data: a list of values.
:param attributes: a list of attributes. Used to check if data is valid.
:return: a string with the encoded data line.
'''
current_row = 0
for inst in data:
if len(inst) != len(attributes):
raise BadObject(
'Instance %d has %d attributes, expected %d' %
(current_row, len(inst), len(attributes))
)
new_data = []
for value in inst:
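                # None, the empty string and NaN (value != value) are encoded
                # as missing values ('?').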
if value is None or value == u'' or value != value:
s = '?'
else:
s = encode_string(unicode(value))
new_data.append(s)
current_row += 1
yield u','.join(new_data)
class _DataListMixin(object):
"""Mixin to return a list from decode_rows instead of a generator"""
def decode_rows(self, stream, conversors):
return list(super(_DataListMixin, self).decode_rows(stream, conversors))
class Data(_DataListMixin, DenseGeneratorData):
pass
class COOData(object):
def decode_rows(self, stream, conversors):
data, rows, cols = [], [], []
for i, row in enumerate(stream):
values = _parse_values(row)
if not isinstance(values, dict):
raise BadLayout()
if not values:
continue
row_cols, values = zip(*sorted(values.items()))
try:
values = [value if value is None else conversors[key](value)
for key, value in zip(row_cols, values)]
except ValueError as exc:
if 'float: ' in str(exc):
raise BadNumericalValue from exc
raise
except IndexError as e:
# conversor out of range
raise BadDataFormat(row) from e
data.extend(values)
rows.extend([i] * len(values))
cols.extend(row_cols)
return data, rows, cols
def encode_data(self, data, attributes):
num_attributes = len(attributes)
new_data = []
current_row = 0
row = data.row
col = data.col
data = data.data
# Check if the rows are sorted
if not all(row[i] <= row[i + 1] for i in xrange(len(row) - 1)):
raise ValueError("liac-arff can only output COO matrices with "
"sorted rows.")
for v, col, row in zip(data, col, row):
if row > current_row:
# Add empty rows if necessary
while current_row < row:
yield " ".join([u"{", u','.join(new_data), u"}"])
new_data = []
current_row += 1
if col >= num_attributes:
raise BadObject(
'Instance %d has at least %d attributes, expected %d' %
(current_row, col + 1, num_attributes)
)
if v is None or v == u'' or v != v:
s = '?'
else:
s = encode_string(unicode(v))
new_data.append("%d %s" % (col, s))
yield " ".join([u"{", u','.join(new_data), u"}"])
class LODGeneratorData(object):
def decode_rows(self, stream, conversors):
for row in stream:
values = _parse_values(row)
if not isinstance(values, dict):
raise BadLayout()
try:
yield {key: None if value is None else conversors[key](value)
for key, value in values.items()}
except ValueError as exc:
if 'float: ' in str(exc):
raise BadNumericalValue from exc
raise
except IndexError as e:
# conversor out of range
raise BadDataFormat(row) from e
def encode_data(self, data, attributes):
current_row = 0
num_attributes = len(attributes)
for row in data:
new_data = []
if len(row) > 0 and max(row) >= num_attributes:
raise BadObject(
'Instance %d has %d attributes, expected %d' %
(current_row, max(row) + 1, num_attributes)
)
for col in sorted(row):
v = row[col]
if v is None or v == u'' or v != v:
s = '?'
else:
s = encode_string(unicode(v))
new_data.append("%d %s" % (col, s))
current_row += 1
yield " ".join([u"{", u','.join(new_data), u"}"])
class LODData(_DataListMixin, LODGeneratorData):
pass
def _get_data_object_for_decoding(matrix_type):
if matrix_type == DENSE:
return Data()
elif matrix_type == COO:
return COOData()
elif matrix_type == LOD:
return LODData()
elif matrix_type == DENSE_GEN:
return DenseGeneratorData()
elif matrix_type == LOD_GEN:
return LODGeneratorData()
else:
raise ValueError("Matrix type %s not supported." % str(matrix_type))
def _get_data_object_for_encoding(matrix):
# Probably a scipy.sparse
if hasattr(matrix, 'format'):
if matrix.format == 'coo':
return COOData()
else:
raise ValueError('Cannot guess matrix format!')
elif isinstance(matrix[0], dict):
return LODData()
else:
return Data()
# =============================================================================
# ADVANCED INTERFACE ==========================================================
class ArffDecoder(object):
'''An ARFF decoder.'''
def __init__(self):
'''Constructor.'''
self._conversors = []
self._current_line = 0
def _decode_comment(self, s):
'''(INTERNAL) Decodes a comment line.
Comments are single line strings starting, obligatorily, with the ``%``
character, and can have any symbol, including whitespaces or special
characters.
        This method must receive a normalized string, i.e., a string with no
        surrounding padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a string with the decoded comment.
'''
res = re.sub(r'^\%( )?', '', s)
return res
def _decode_relation(self, s):
'''(INTERNAL) Decodes a relation line.
The relation declaration is a line with the format ``@RELATION
<relation-name>``, where ``relation-name`` is a string. The string must
        start with an alphabetic character and must be quoted if the name includes
        spaces; otherwise this method will raise a `BadRelationFormat` exception.
        This method must receive a normalized string, i.e., a string with no
        surrounding padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a string with the decoded relation name.
'''
_, v = s.split(' ', 1)
v = v.strip()
if not _RE_RELATION.match(v):
raise BadRelationFormat()
res = unicode(v.strip('"\''))
return res
def _decode_attribute(self, s):
'''(INTERNAL) Decodes an attribute line.
The attribute is the most complex declaration in an arff file. All
attributes must follow the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, quoted if the name contains any
whitespace, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:
{<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
The nominal names follow the rules for the attribute names, i.e., they
must be quoted if the name contains whitespaces.
        This method must receive a normalized string, i.e., a string with no
        surrounding padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a tuple (ATTRIBUTE_NAME, TYPE_OR_VALUES).
'''
_, v = s.split(' ', 1)
v = v.strip()
# Verify the general structure of declaration
m = _RE_ATTRIBUTE.match(v)
if not m:
raise BadAttributeFormat()
# Extracts the raw name and type
name, type_ = m.groups()
# Extracts the final name
name = unicode(name.strip('"\''))
# Extracts the final type
if _RE_TYPE_NOMINAL.match(type_):
try:
type_ = _parse_values(type_.strip('{} '))
except Exception as e:
raise BadAttributeType from e
if isinstance(type_, dict):
raise BadAttributeType()
else:
# If not nominal, verify the type name
type_ = unicode(type_).upper()
if type_ not in ['NUMERIC', 'REAL', 'INTEGER', 'STRING']:
raise BadAttributeType()
return (name, type_)
def _decode(self, s, encode_nominal=False, matrix_type=DENSE):
        '''(INTERNAL) Does the actual decoding work; used by ``decode``.'''
# Make sure this method is idempotent
self._current_line = 0
# If string, convert to a list of lines
if isinstance(s, basestring):
s = s.strip('\r\n ').replace('\r\n', '\n').split('\n')
# Create the return object
obj: ArffContainerType = {
u'description': u'',
u'relation': u'',
u'attributes': [],
u'data': []
}
attribute_names = {}
# Create the data helper object
data = _get_data_object_for_decoding(matrix_type)
# Read all lines
STATE = _TK_DESCRIPTION
s = iter(s)
for row in s:
self._current_line += 1
# Ignore empty lines
row = row.strip(' \r\n')
if not row: continue
u_row = row.upper()
# DESCRIPTION -----------------------------------------------------
if u_row.startswith(_TK_DESCRIPTION) and STATE == _TK_DESCRIPTION:
obj['description'] += self._decode_comment(row) + '\n'
# -----------------------------------------------------------------
# RELATION --------------------------------------------------------
elif u_row.startswith(_TK_RELATION):
if STATE != _TK_DESCRIPTION:
raise BadLayout()
STATE = _TK_RELATION
obj['relation'] = self._decode_relation(row)
# -----------------------------------------------------------------
# ATTRIBUTE -------------------------------------------------------
elif u_row.startswith(_TK_ATTRIBUTE):
if STATE != _TK_RELATION and STATE != _TK_ATTRIBUTE:
raise BadLayout()
STATE = _TK_ATTRIBUTE
attr = self._decode_attribute(row)
if attr[0] in attribute_names:
raise BadAttributeName(attr[0], attribute_names[attr[0]])
else:
attribute_names[attr[0]] = self._current_line
obj['attributes'].append(attr)
if isinstance(attr[1], (list, tuple)):
if encode_nominal:
conversor = EncodedNominalConversor(attr[1])
else:
conversor = NominalConversor(attr[1])
else:
CONVERSOR_MAP = {'STRING': unicode,
'INTEGER': lambda x: int(float(x)),
'NUMERIC': float,
'REAL': float}
conversor = CONVERSOR_MAP[attr[1]]
self._conversors.append(conversor)
# -----------------------------------------------------------------
# DATA ------------------------------------------------------------
elif u_row.startswith(_TK_DATA):
if STATE != _TK_ATTRIBUTE:
raise BadLayout()
break
# -----------------------------------------------------------------
# COMMENT ---------------------------------------------------------
elif u_row.startswith(_TK_COMMENT):
pass
# -----------------------------------------------------------------
else:
# Never found @DATA
raise BadLayout()
def stream():
for row in s:
self._current_line += 1
row = row.strip()
# Ignore empty lines and comment lines.
if row and not row.startswith(_TK_COMMENT):
yield row
# Alter the data object
obj['data'] = data.decode_rows(stream(), self._conversors)
if obj['description'].endswith('\n'):
obj['description'] = obj['description'][:-1]
return obj
def decode(self, s, encode_nominal=False, return_type=DENSE):
'''Returns the Python representation of a given ARFF file.
        When a file object is passed as an argument, this method reads lines
        iteratively, avoiding loading unnecessary information into memory.
:param s: a string or file object with the ARFF file.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
'''
try:
return self._decode(s, encode_nominal=encode_nominal,
matrix_type=return_type)
except ArffException as e:
e.line = self._current_line
raise e
class ArffEncoder(object):
'''An ARFF encoder.'''
def _encode_comment(self, s=''):
'''(INTERNAL) Encodes a comment line.
Comments are single line strings starting, obligatorily, with the ``%``
character, and can have any symbol, including whitespaces or special
characters.
If ``s`` is None, this method will simply return an empty comment.
:param s: (OPTIONAL) string.
:return: a string with the encoded comment line.
'''
if s:
return u'%s %s'%(_TK_COMMENT, s)
else:
return u'%s' % _TK_COMMENT
def _encode_relation(self, name):
        '''(INTERNAL) Encodes a relation line.
The relation declaration is a line with the format ``@RELATION
<relation-name>``, where ``relation-name`` is a string.
:param name: a string.
:return: a string with the encoded relation declaration.
'''
for char in ' %{},':
if char in name:
name = '"%s"'%name
break
return u'%s %s'%(_TK_RELATION, name)
def _encode_attribute(self, name, type_):
'''(INTERNAL) Encodes an attribute line.
The attribute follow the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:
{<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
        This method must receive the name of the attribute and its type; if
        the attribute type is nominal, ``type_`` must be a list of values.
:param name: a string.
        :param type_: a string or a list of strings.
:return: a string with the encoded attribute declaration.
'''
for char in ' %{},':
if char in name:
name = '"%s"'%name
break
if isinstance(type_, (tuple, list)):
type_tmp = [u'%s' % encode_string(type_k) for type_k in type_]
type_ = u'{%s}'%(u', '.join(type_tmp))
return u'%s %s %s'%(_TK_ATTRIBUTE, name, type_)
def encode(self, obj):
'''Encodes a given object to an ARFF file.
:param obj: the object containing the ARFF information.
        :return: the ARFF file as a unicode string.
'''
data = [row for row in self.iter_encode(obj)]
return u'\n'.join(data)
def iter_encode(self, obj):
'''The iterative version of `arff.ArffEncoder.encode`.
        This iteratively encodes a given object and returns, one by one, the
        lines of the ARFF file.
:param obj: the object containing the ARFF information.
:return: (yields) the ARFF file as unicode strings.
'''
# DESCRIPTION
if obj.get('description', None):
for row in obj['description'].split('\n'):
yield self._encode_comment(row)
# RELATION
if not obj.get('relation'):
raise BadObject('Relation name not found or with invalid value.')
yield self._encode_relation(obj['relation'])
yield u''
# ATTRIBUTES
if not obj.get('attributes'):
raise BadObject('Attributes not found.')
attribute_names = set()
for attr in obj['attributes']:
# Verify for bad object format
if not isinstance(attr, (tuple, list)) or \
len(attr) != 2 or \
not isinstance(attr[0], basestring):
raise BadObject('Invalid attribute declaration "%s"'%str(attr))
if isinstance(attr[1], basestring):
# Verify for invalid types
if attr[1] not in _SIMPLE_TYPES:
raise BadObject('Invalid attribute type "%s"'%str(attr))
# Verify for bad object format
elif not isinstance(attr[1], (tuple, list)):
raise BadObject('Invalid attribute type "%s"'%str(attr))
# Verify attribute name is not used twice
if attr[0] in attribute_names:
raise BadObject('Trying to use attribute name "%s" for the '
'second time.' % str(attr[0]))
else:
attribute_names.add(attr[0])
yield self._encode_attribute(attr[0], attr[1])
yield u''
attributes = obj['attributes']
# DATA
yield _TK_DATA
if 'data' in obj:
data = _get_data_object_for_encoding(obj.get('data'))
for line in data.encode_data(obj.get('data'), attributes):
yield line
yield u''
# =============================================================================
# BASIC INTERFACE =============================================================
def load(fp, encode_nominal=False, return_type=DENSE):
'''Load a file-like object containing the ARFF document and convert it into
a Python object.
:param fp: a file-like object.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
:return: a dictionary.
'''
decoder = ArffDecoder()
return decoder.decode(fp, encode_nominal=encode_nominal,
return_type=return_type)
def loads(s, encode_nominal=False, return_type=DENSE):
'''Convert a string instance containing the ARFF document into a Python
object.
:param s: a string object.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
:return: a dictionary.
'''
decoder = ArffDecoder()
return decoder.decode(s, encode_nominal=encode_nominal,
return_type=return_type)
def dump(obj, fp):
'''Serialize an object representing the ARFF document to a given file-like
object.
:param obj: a dictionary.
:param fp: a file-like object.
'''
encoder = ArffEncoder()
generator = encoder.iter_encode(obj)
last_row = next(generator)
for row in generator:
fp.write(last_row + u'\n')
last_row = row
fp.write(last_row)
return fp
def dumps(obj):
'''Serialize an object representing the ARFF document, returning a string.
:param obj: a dictionary.
:return: a string with the ARFF document.
'''
encoder = ArffEncoder()
return encoder.encode(obj)
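# Usage sketch for the basic interface above (assuming this module is imported
# as ``arff``; the file name is hypothetical, not executed here):
#
#     import arff
#
#     with open('dataset.arff') as fp:
#         dataset = arff.load(fp)    # -> dict with description, relation,
#                                    #    attributes and data
#     text = arff.dumps(dataset)     # object -> ARFF document as unicode string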
# =============================================================================
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import argparse as _argparse
import itertools as _itertools
import json as _json
import numpy as _numpy
import os as _os
import plano as _plano
import resource as _resource
import shlex as _shlex
import subprocess as _subprocess
import time as _time
from .common import *
from .common import __version__
from .common import _epilog_urls
from .common import _epilog_arrow_impls
from .common import _epilog_count_and_duration_formats
from .common import _urlparse
_description = """
Send or receive a set number of messages as fast as possible using a
single connection.
'quiver-arrow' is one of the Quiver tools for testing the performance
of message servers and APIs.
"""
_epilog = """
operations:
send Send messages
receive Receive messages
{_epilog_urls}
{_epilog_count_and_duration_formats}
{_epilog_arrow_impls}
server and passive modes:
By default quiver-arrow operates in client and active modes, meaning
that it creates an outbound connection to a server and actively
initiates creation of the protocol entities (sessions and links)
required for communication. The --server option tells quiver-arrow
to instead listen for and accept incoming connections. The
--passive option tells it to receive and confirm incoming requests
for new protocol entities but not to create them itself.
example usage:
$ qdrouterd & # Start a message server
$ quiver-arrow receive q0 & # Start receiving
$ quiver-arrow send q0 # Start sending
""".format(**globals())
class QuiverArrowCommand(Command):
def __init__(self, home_dir):
super(QuiverArrowCommand, self).__init__(home_dir)
self.parser.description = _description.lstrip()
self.parser.epilog = _epilog.lstrip()
self.parser.add_argument("operation", metavar="OPERATION",
choices=["send", "receive"],
help="Either 'send' or 'receive'")
self.parser.add_argument("url", metavar="URL",
help="The location of a message source or target")
self.parser.add_argument("--output", metavar="DIR",
help="Save output files to DIR")
self.parser.add_argument("--impl", metavar="IMPL", default=DEFAULT_ARROW_IMPL,
help="Use IMPL to send and receive " \
"(default {})".format(DEFAULT_ARROW_IMPL))
self.parser.add_argument("--summary", action="store_true",
help="Print the configuration and results to the console")
self.parser.add_argument("--info", action="store_true",
help="Print implementation details and exit")
self.parser.add_argument("--id", metavar="ID",
help="Use ID as the client or server identity")
self.parser.add_argument("--server", action="store_true",
help="Operate in server mode")
self.parser.add_argument("--passive", action="store_true",
help="Operate in passive mode")
self.parser.add_argument("--prelude", metavar="PRELUDE", default="",
help="Commands to precede the implementation invocation")
self.add_common_test_arguments()
self.add_common_tool_arguments()
self.add_common_tls_arguments()
def init(self):
self.intercept_info_request(DEFAULT_ARROW_IMPL)
super(QuiverArrowCommand, self).init()
self.operation = self.args.operation
self.impl = require_impl(self.args.impl)
self.id_ = self.args.id
self.connection_mode = "client"
self.channel_mode = "active"
self.prelude = _shlex.split(self.args.prelude)
if self.operation == "send":
self.role = "sender"
self.transfers_parse_func = _parse_send
elif self.operation == "receive":
self.role = "receiver"
self.transfers_parse_func = _parse_receive
else:
raise Exception()
if self.id_ is None:
self.id_ = "quiver-{}-{}".format(self.role, _plano.unique_id(4))
if self.args.server:
self.connection_mode = "server"
if self.args.passive:
self.channel_mode = "passive"
self.cert = self.args.cert
self.key = self.args.key
self.init_url_attributes()
self.init_common_test_attributes()
self.init_common_tool_attributes()
self.init_output_dir()
if _urlparse(self.url).port is None:
if self.impl.name in ("activemq-jms", "activemq-artemis-jms"):
self.port = "61616"
self.snapshots_file = _join(self.output_dir, "{}-snapshots.csv".format(self.role))
self.summary_file = _join(self.output_dir, "{}-summary.json".format(self.role))
self.transfers_file = _join(self.output_dir, "{}-transfers.csv".format(self.role))
self.start_time = None
self.timeout_checkpoint = None
self.first_send_time = None
self.last_send_time = None
self.first_receive_time = None
self.last_receive_time = None
self.message_count = None
self.message_rate = None
self.latency_average = None
self.latency_quartiles = None
self.latency_nines = None
def run(self):
args = self.prelude + [
self.impl.file,
"connection-mode={}".format(self.connection_mode),
"channel-mode={}".format(self.channel_mode),
"operation={}".format(self.operation),
"id={}".format(self.id_),
"scheme={}".format(self.scheme),
"host={}".format(self.host),
"port={}".format(self.port),
"path={}".format(self.path),
"duration={}".format(self.duration),
"count={}".format(self.count),
"rate={}".format(self.rate),
"body-size={}".format(self.body_size),
"credit-window={}".format(self.credit_window),
"transaction-size={}".format(self.transaction_size),
"durable={}".format(1 if self.durable else 0),
]
if self.username:
args.append("username={}".format(self.username))
if self.password:
args.append("password={}".format(self.password))
if self.args.cert and self.args.key:
args.append("key={}".format(self.key))
args.append("cert={}".format(self.cert))
with open(self.transfers_file, "wb") as fout:
env = _plano.ENV
if self.verbose:
env["QUIVER_VERBOSE"] = "1"
proc = _plano.start_process(args, stdout=fout, env=env)
try:
self.monitor_subprocess(proc)
except:
_plano.stop_process(proc)
raise
if proc.returncode != 0:
raise CommandError("{} exited with code {}", self.role, proc.returncode)
if _plano.file_size(self.transfers_file) == 0:
raise CommandError("No transfers")
self.compute_results()
self.save_summary()
if _plano.exists("{}.xz".format(self.transfers_file)):
_plano.remove("{}.xz".format(self.transfers_file))
_plano.call("xz --compress -0 --threads 0 {}", self.transfers_file)
        if self.args.summary:
self.print_summary()
def monitor_subprocess(self, proc):
snap = _StatusSnapshot(self, None)
snap.timestamp = now()
self.start_time = snap.timestamp
self.timeout_checkpoint = snap
sleep = 2.0
with open(self.transfers_file, "rb") as fin:
with open(self.snapshots_file, "ab") as fsnaps:
while proc.poll() is None:
_time.sleep(sleep)
period_start = _time.time()
snap.previous = None
snap = _StatusSnapshot(self, snap)
snap.capture(fin, proc)
fsnaps.write(snap.marshal())
fsnaps.flush()
self.check_timeout(snap)
period = _time.time() - period_start
sleep = max(1.0, 2.0 - period)
def check_timeout(self, snap):
checkpoint = self.timeout_checkpoint
since = (snap.timestamp - checkpoint.timestamp) / 1000
# print("check_timeout", snap.count, "==", checkpoint.count, "and", since, ">", self.timeout)
if snap.count == checkpoint.count and since > self.timeout:
raise CommandError("{} timed out", self.role)
if snap.count > checkpoint.count:
self.timeout_checkpoint = snap
def compute_results(self):
start = _time.time()
def read_transfers():
with open(self.transfers_file, "rb") as f:
for line in f:
try:
yield self.transfers_parse_func(line)
except Exception as e:
_plano.error("Failed to parse line '{}': {}", line, e)
continue
if self.operation == "send":
dtype = [("send_time", _numpy.uint64)]
transfers = _numpy.fromiter(read_transfers(), dtype=dtype)
else:
dtype = [("send_time", _numpy.uint64), ("receive_time", _numpy.uint64)]
transfers = _numpy.fromiter(read_transfers(), dtype=dtype)
# print("load time", _time.time() - start)
self.message_count = len(transfers)
if self.message_count == 0:
return
if self.operation == "send":
self.first_send_time = int(transfers[0]["send_time"])
self.last_send_time = int(transfers[-1]["send_time"])
duration = (self.last_send_time - self.first_send_time) / 1000
elif self.operation == "receive":
self.first_receive_time = int(transfers[0]["receive_time"])
self.last_receive_time = int(transfers[-1]["receive_time"])
duration = (self.last_receive_time - self.first_receive_time) / 1000
start = _time.time()
self.compute_latencies(transfers)
# print("compute latencies time", _time.time() - start)
else:
raise Exception()
if duration > 0:
self.message_rate = int(round(self.message_count / duration))
def compute_latencies(self, transfers):
latencies = transfers["receive_time"] - transfers["send_time"]
q = 0, 25, 50, 75, 100, 90, 99, 99.9, 99.99, 99.999
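        # The first five percentiles are the latency quartiles (0/25/50/75/100);
        # the remaining five are the high-percentile "nines" split off below.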
percentiles = _numpy.percentile(latencies, q)
percentiles = [int(x) for x in percentiles]
self.latency_average = _numpy.mean(latencies)
self.latency_quartiles = percentiles[:5]
self.latency_nines = percentiles[5:]
def save_summary(self):
props = {
"config": {
"impl": self.impl.name,
"url": self.url,
"output_dir": self.output_dir,
"timeout": self.timeout,
"connection_mode": self.connection_mode,
"channel_mode": self.channel_mode,
"operation": self.operation,
"id": self.id_,
"host": self.host,
"port": self.port,
"path": self.path,
"duration": self.duration,
"count": self.count,
"rate": self.rate,
"body_size": self.body_size,
"credit_window": self.credit_window,
"transaction_size": self.transaction_size,
"durable": self.durable,
},
"results": {
"first_send_time": self.first_send_time,
"last_send_time": self.last_send_time,
"first_receive_time": self.first_receive_time,
"last_receive_time": self.last_receive_time,
"message_count": self.message_count,
"message_rate": self.message_rate,
"latency_average": self.latency_average,
"latency_quartiles": self.latency_quartiles,
"latency_nines": self.latency_nines,
},
}
with open(self.summary_file, "w") as f:
_json.dump(props, f, indent=2)
def print_summary(self):
with open(self.summary_file) as f:
arrow = _json.load(f)
print_heading("Configuration")
print_field("URL", self.url)
print_field("Output files", self.output_dir)
if self.count != 0:
print_numeric_field("Count", self.count, _plano.plural("message", self.count))
if self.duration != 0:
print_numeric_field("Duration", self.duration, _plano.plural("second", self.duration))
print_numeric_field("Body size", self.body_size, _plano.plural("byte", self.body_size))
print_numeric_field("Credit window", self.credit_window, _plano.plural("message", self.credit_window))
if self.transaction_size != 0:
print_numeric_field("Transaction size", self.transaction_size, _plano.plural("message", self.transaction_size))
if self.durable:
print_field("Durable", "Yes")
print_heading("Results")
if self.operation == "send":
start_time = arrow["results"]["first_send_time"]
end_time = arrow["results"]["last_send_time"]
elif self.operation == "receive":
start_time = arrow["results"]["first_receive_time"]
end_time = arrow["results"]["last_receive_time"]
else:
raise Exception()
count = arrow["results"]["message_count"]
duration = (end_time - start_time) / 1000
print_numeric_field("Count", count, _plano.plural("message", self.count))
print_numeric_field("Duration", duration, "seconds", "{:,.1f}")
print_numeric_field("Message rate", arrow["results"]["message_rate"], "messages/s")
if self.operation == "receive":
print()
print("Latencies by percentile:")
print()
print_latency_fields("0%", arrow["results"]["latency_quartiles"][0],
"90.00%", arrow["results"]["latency_nines"][0])
print_latency_fields("25%", arrow["results"]["latency_quartiles"][1],
"99.00%", arrow["results"]["latency_nines"][1])
print_latency_fields("50%", arrow["results"]["latency_quartiles"][2],
"99.90%", arrow["results"]["latency_nines"][2])
print_latency_fields("100%", arrow["results"]["latency_quartiles"][4],
"99.99%", arrow["results"]["latency_nines"][3])
class _StatusSnapshot:
def __init__(self, command, previous):
self.command = command
self.previous = previous
self.timestamp = 0
self.period = 0
self.count = 0
self.period_count = 0
self.latency = 0
self.cpu_time = 0
self.period_cpu_time = 0
self.rss = 0
def capture(self, transfers_file, proc):
assert self.previous is not None
self.timestamp = now()
self.period = self.timestamp - self.previous.timestamp
self.capture_transfers(transfers_file)
self.capture_proc_info(proc)
def capture_proc_info(self, proc):
proc_file = _join("/", "proc", str(proc.pid), "stat")
try:
with open(proc_file, "r") as f:
line = f.read()
except IOError:
return
fields = line.split()
self.cpu_time = int(sum(map(int, fields[13:17])) / _ticks_per_ms)
self.period_cpu_time = self.cpu_time
if self.previous is not None:
self.period_cpu_time = self.cpu_time - self.previous.cpu_time
self.rss = int(fields[23]) * _page_size
def capture_transfers(self, transfers_file):
transfers = list()
sample = 1000
count = sample
for line in _itertools.islice(transfers_file, sample, None, sample):
count += sample
try:
record = self.command.transfers_parse_func(line)
except Exception as e:
_plano.error("Failed to parse line '{}': {}", line, e)
continue
transfers.append(record)
self.period_count = count
self.count = self.previous.count + self.period_count
if self.period_count > 0 and self.command.operation == "receive":
latencies = list()
for send_time, receive_time in transfers:
latency = receive_time - send_time
latencies.append(latency)
if latencies:
self.latency = int(_numpy.mean(latencies))
def marshal(self):
fields = (self.timestamp,
self.period,
self.count,
self.period_count,
self.latency,
self.cpu_time,
self.period_cpu_time,
self.rss)
fields = map(str, fields)
line = "{}\n".format(",".join(fields))
return line.encode("ascii")
def unmarshal(self, line):
line = line.decode("ascii")
fields = [int(x) for x in line.split(",")]
(self.timestamp,
self.period,
self.count,
self.period_count,
self.latency,
self.cpu_time,
self.period_cpu_time,
self.rss) = fields
def _parse_send(line):
_, send_time = line.split(b",", 1)
return int(send_time)
def _parse_receive(line):
_, send_time, receive_time = line.split(b",", 2)
return int(send_time), int(receive_time)
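# Transfers lines are comma-separated (inferred from the split() calls above):
# a sender line looks like "<message-id>,<send-time>" and a receiver line like
# "<message-id>,<send-time>,<receive-time>", with timestamps in milliseconds.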
_join = _plano.join
_ticks_per_ms = _os.sysconf(_os.sysconf_names["SC_CLK_TCK"]) / 1000
_page_size = _resource.getpagesize()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from uuid import uuid4
from datetime import datetime
from ujson import dumps
import sqlalchemy as sa
from sqlalchemy.orm import relationship
from holmes.models import Base
class Review(Base):
__tablename__ = "reviews"
id = sa.Column(sa.Integer, primary_key=True)
is_active = sa.Column('is_active', sa.Boolean, default=False, nullable=False)
is_complete = sa.Column('is_complete', sa.Boolean, default=False, nullable=False)
uuid = sa.Column('uuid', sa.String(36), default=uuid4, nullable=False)
created_date = sa.Column('created_date', sa.DateTime, default=datetime.utcnow, nullable=False)
completed_date = sa.Column('completed_date', sa.DateTime, nullable=True)
completed_day = sa.Column('completed_day', sa.Date, nullable=True)
failure_message = sa.Column('failure_message', sa.String(2000), nullable=True)
domain_id = sa.Column('domain_id', sa.Integer, sa.ForeignKey('domains.id'))
page_id = sa.Column('page_id', sa.Integer, sa.ForeignKey('pages.id'))
facts = relationship("Fact", cascade="all,delete")
violations = relationship("Violation", cascade="all,delete")
def to_dict(self, fact_definitions, violation_definitions, _):
return {
'page': self.page and self.page.to_dict() or None,
'domain': self.domain and self.domain.name or None,
'isComplete': self.is_complete,
'uuid': str(self.uuid),
'createdAt': self.created_date,
'completedAt': self.completed_date,
'facts': [fact.to_dict(fact_definitions, _) for fact in self.facts],
'violations': [violation.to_dict(violation_definitions, _)
for violation in self.violations]
}
def __str__(self):
return str(self.uuid)
def __repr__(self):
return str(self)
def add_fact(self, key, value):
if self.is_complete:
raise ValueError("Can't add anything to a completed review.")
from holmes.models.fact import Fact # to avoid circular dependency
fact = Fact(key=key, value=value)
self.facts.append(fact)
def add_violation(self, key, value, points, domain):
if self.is_complete:
raise ValueError("Can't add anything to a completed review.")
from holmes.models.violation import Violation # to avoid circular dependency
violation = Violation(
key=key,
value=value,
points=int(float(points)),
domain=domain
)
self.violations.append(violation)
@property
def failed(self):
return self.failure_message is not None
@classmethod
def get_last_reviews(cls, db, domain_filter=None, limit=12):
query = db.query(Review).filter(Review.is_active == True)
if domain_filter:
from holmes.models.domain import Domain
domain = Domain.get_domain_by_name(domain_filter, db)
if domain:
query = query.filter(Review.domain_id == domain.id)
return query.order_by(Review.completed_date.desc())[:limit]
@classmethod
def get_reviews_count_in_period(cls, db, from_date, domain_filter=None,
to_date=None):
if to_date is None:
to_date = datetime.utcnow()
reviews = db \
.query(Review.completed_date) \
.filter(Review.is_active == True) \
.filter(Review.completed_date.between(from_date, to_date))
if domain_filter:
from holmes.models.domain import Domain
domain = Domain.get_domain_by_name(domain_filter, db)
if domain:
reviews = reviews.filter(Review.domain_id == domain.id)
reviews = reviews.order_by(Review.completed_date.asc()).all()
count = len(reviews)
first_date = None
if count > 0:
first_date = reviews[0].completed_date
return count, first_date
def get_violation_points(self):
points = 0
for violation in self.violations:
points += violation.points
return points
@classmethod
def by_uuid(cls, uuid, db):
return db.query(Review).filter(Review.uuid == uuid).first()
@property
def violation_count(self):
return len(self.violations)
@classmethod
def _filter_by_violation_key_name(cls, db, query, key_id, domain_filter=None, page_filter=None):
from holmes.models.violation import Violation # to avoid circular dependency
from holmes.models.page import Page # to avoid circular dependency
query = query \
.filter(Page.last_review_id == Violation.review_id) \
.filter(Violation.review_is_active == 1) \
.filter(Violation.key_id == key_id)
        if domain_filter:
            from holmes.models.domain import Domain  # to avoid circular dependency
            domain = Domain.get_domain_by_name(domain_filter, db)
            if domain:
                query = query.filter(Page.domain_id == domain.id)
                # page_filter builds a URL prefix from the resolved domain,
                # so it only applies when a matching domain was found
                if page_filter:
                    query = query.filter(
                        Page.url.like(
                            u'{0}/{1}%'.format(domain.url, page_filter)
                        )
                    )
return query
@classmethod
def count_by_violation_key_name(cls, db, key_id, domain_filter=None, page_filter=None):
from holmes.models.page import Page # to avoid circular dependency
query = db.query(sa.func.count(sa.func.distinct(Page.id)))
query = cls._filter_by_violation_key_name(db, query, key_id, domain_filter, page_filter)
return query.scalar()
@classmethod
def get_by_violation_key_name(cls, db, key_id, current_page=1, page_size=10, domain_filter=None, page_filter=None):
from holmes.models.page import Page # to avoid circular dependency
from holmes.models.domain import Domain # to avoid circular dependency
lower_bound = (current_page - 1) * page_size
upper_bound = lower_bound + page_size
query = db \
.query(
Page.last_review_uuid.label('review_uuid'),
Page.url,
Page.uuid.label('page_uuid'),
Page.last_review_date.label('completed_date'),
Domain.name.label('domain_name')
) \
.filter(
Domain.id == Page.domain_id
)
query = cls._filter_by_violation_key_name(db, query, key_id, domain_filter, page_filter)
return query.order_by(Page.last_review_date.desc())[lower_bound:upper_bound]
@classmethod
def save_review(cls, page_uuid, review_data, db, search_provider, fact_definitions, violation_definitions, cache, publish, config):
from holmes.models import Page, Request
page = Page.by_uuid(page_uuid, db)
if page is None:
return
if review_data['requests']:
Request.save_requests(db, publish, page, review_data['requests'])
last_review = page.last_review
review = Review(
domain_id=page.domain.id,
page_id=page.id,
is_active=True,
is_complete=False,
completed_date=datetime.utcnow(),
uuid=uuid4(),
)
db.add(review)
for fact in review_data['facts']:
name = fact['key']
key = fact_definitions[name]['key']
review.add_fact(key, fact['value'])
for violation in review_data['violations']:
name = violation['key']
key = violation_definitions[name]['key']
review.add_violation(
key,
violation['value'],
violation['points'],
page.domain
)
page.expires = review_data['expires']
page.last_modified = review_data['lastModified']
page.last_review_uuid = review.uuid
page.last_review = review
page.last_review_date = review.completed_date
page.violations_count = len(review_data['violations'])
review.is_complete = True
if not last_review:
cache.increment_active_review_count(page.domain)
else:
for violation in last_review.violations:
violation.review_is_active = False
last_review.is_active = False
Review.delete_old_reviews(db, config, page)
search_provider.index_review(review)
publish(dumps({
'type': 'new-review',
'reviewId': str(review.uuid)
}))
@classmethod
def delete_old_reviews(cls, db, config, page):
reviews = db \
.query(Review) \
.filter(Review.is_active == 0) \
.filter(Review.page_id == page.id) \
.order_by(Review.completed_date.desc()) \
.all()
last_reviews = reviews[config.NUMBER_OF_REVIEWS_TO_KEEP:]
if len(last_reviews) > 0:
for review in last_reviews:
db.delete(review)
|
|
# -*- coding: utf-8 -*-
from gluon import current
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
class S3MainMenu(default.S3MainMenu):
"""
Custom Application Main Menu:
The main menu consists of several sub-menus, each of which can
be customised separately as a method of this class. The overall
composition of the menu is defined in the menu() method, which can
be customised as well:
Function Sub-Menu Access to (standard)
menu_modules() the modules menu the Eden modules
menu_admin() the Admin menu System/User Administration
menu_lang() the Language menu Selection of the GUI locale
menu_auth() the User menu Login, Logout, User Profile
menu_help() the Help menu Contact page, About page
The standard uses the MM layout class for main menu items - but you
can of course use a custom layout class which you define in layouts.py.
Additional sub-menus can simply be defined as additional functions in
this class, and then be included in the menu() method.
Each sub-menu function returns a list of menu items, only the menu()
function must return a layout class instance.
"""
# -------------------------------------------------------------------------
@classmethod
def menu_modules(cls):
""" Custom Modules Menu """
AUTHENTICATED = current.session.s3.system_roles.AUTHENTICATED
INDIVIDUALS = current.deployment_settings.get_hrm_staff_label()
return [
MM("Dashboard", c="default", f="index",
args=["dashboard"],
restrict=[AUTHENTICATED],
),
MM("Contacts", link=False, restrict=[AUTHENTICATED])(
MM("Networks", c="org", f="group"),
MM("Groups", c="hrm", f="group"),
MM("Organizations", c="org", f="organisation"),
MM(INDIVIDUALS, c="hrm", f="staff"),
),
MM("Facilities", c="org", f="facility", m="summary",
restrict=[AUTHENTICATED])(
),
#MM("Services", c="cms", f="page", vars={"name": "Services"}),
MM("News", c="cms", f="newsfeed", m="datalist",
#icon="icon-news",
restrict=[AUTHENTICATED],
),
MM("Map", c="gis", f="index",
#icon="icon-map",
restrict=[AUTHENTICATED],
),
#MM("Data", c="cms", f="page", vars={"name": "Data"}),
MM("Get Involved", link=False)(
MM("Events",
url="http://nycprepared.org/events",
_target="_blank",
),
MM("Learn more",
url="http://nycprepared.org",
_target="_blank",
),
MM("Donate",
url="https://sarapis.org/donate-to-nycprepared",
_target="_blank",
),
),
]
# -------------------------------------------------------------------------
@classmethod
def menu_help(cls, **attr):
""" Help Menu """
#ADMIN = current.auth.get_system_roles().ADMIN
menu_help = MM("Help", c="default", f="help", link=False, **attr)(
MM("User Guide", f="help"),
MM("Contact us", f="contact"),
#MM("About", f="about", restrict=[ADMIN]),
)
return menu_help
# -------------------------------------------------------------------------
@classmethod
def menu_auth(cls, **attr):
""" Auth Menu """
auth = current.auth
logged_in = auth.is_logged_in()
if not logged_in:
request = current.request
login_next = URL(args=request.args, vars=request.vars)
if request.controller == "default" and \
request.function == "user" and \
"_next" in request.get_vars:
login_next = request.get_vars["_next"]
self_registration = current.deployment_settings.get_security_registration_visible()
if self_registration == "index":
register = MM("Register", c="default", f="index", m="register",
vars=dict(_next=login_next),
check=self_registration)
else:
register = MM("Register", m="register",
vars=dict(_next=login_next),
check=self_registration)
menu_auth = MM("Login", c="default", f="user", m="login",
_id="auth_menu_login",
vars=dict(_next=login_next), **attr)(
MM("Login", m="login",
vars=dict(_next=login_next)),
register,
MM("Lost Password", m="retrieve_password")
)
else:
# Logged-in
menu_auth = MM(auth.user.email, c="default", f="user",
translate=False,
link=False,
_id="auth_menu_email",
**attr)(
MM("Logout", m="logout", _id="auth_menu_logout"),
#MM("User Profile", m="profile"),
MM("Personal Profile", c="default", f="person", m="update"),
#MM("Contact Details", c="pr", f="person",
# args="contact",
# vars={"person.pe_id" : auth.user.pe_id}),
#MM("Subscriptions", c="pr", f="person",
# args="pe_subscription",
# vars={"person.pe_id" : auth.user.pe_id}),
MM("Change Password", m="change_password"),
SEP(),
MM({"name": current.T("Rapid Data Entry"),
"id": "rapid_toggle",
"value": current.session.s3.rapid_data_entry is True,
},
f="rapid",
),
)
return menu_auth
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
"""
Custom Controller Menus
The options menu (left-hand options menu) is individual for each
controller, so each controller has its own options menu function
in this class.
Each of these option menu functions can be customised separately,
by simply overriding (re-defining) the default function. The
options menu function must return an instance of the item layout.
The standard menu uses the M item layout class, but you can of
course also use any other layout class which you define in
layouts.py (can also be mixed).
Make sure additional helper functions in this class don't match
any current or future controller prefix (e.g. by using an
underscore prefix).
"""
# -------------------------------------------------------------------------
@staticmethod
def hrm():
""" HRM / Human Resources Management """
s3 = current.session.s3
ADMIN = s3.system_roles.ADMIN
AUTHENTICATED = s3.system_roles.AUTHENTICATED
INDIVIDUALS = current.deployment_settings.get_hrm_staff_label()
return M()(
M("Networks", c="org", f="group")(
M("Search"),
M("Create", m="create"),
),
M("Groups", c="hrm", f="group")(
M("Search"),
M("Create", m="create"),
),
M("Organizations", c="org", f="organisation")(
M("Search"),
M("Create", m="create",
restrict=[AUTHENTICATED]),
),
M(INDIVIDUALS, c="hrm", f="staff", t="hrm_human_resource")(
M("Search"),
M("Create", m="create"),
),
M("Your Personal Profile", c="default", f="person",
m="update")(
),
M("Import", link=False,
restrict=[ADMIN])(
M("Import Contacts", c="hrm", f="person", m="import",
vars={"group":"staff"}),
M("Import Organizations", c="org", f="organisation",
m="import"),
M("Import Groups", c="hrm", f="group", m="import"),
),
M("Organization Types", c="org", f="organisation_type",
restrict=[ADMIN])(
M("View"),
M("Create", m="create"),
),
M("Job Title Catalog", c="hrm", f="job_title",
restrict=[ADMIN])(
M("View"),
M("Create", m="create"),
),
M("Skills Catalog", c="hrm", f="skill",
restrict=[ADMIN])(
M("View"),
M("Create", m="create"),
),
M("Organization Approval", c="org", f="organisation",
m="review", restrict=[ADMIN])(
),
)
# -------------------------------------------------------------------------
def org(self):
""" ORG / Organization Registry """
if not current.auth.is_logged_in():
# No Side Menu
return None
else:
request = current.request
function = request.function
if function in ("facility", "facility_type"):
ADMIN = current.session.s3.system_roles.ADMIN
if function == "facility" and request.args(0) == "summary":
LIST = M("List", _onclick="$('#ui-id-1').click()")
MAP = M("Map", _onclick="$('#ui-id-3').click()")
REPORT = M("Report", _onclick="$('#ui-id-2').click()")
else:
LIST = M("List", m="summary")
MAP = M("Map", m="summary", vars={"t":2})
REPORT = M("Report", m="summary", vars={"t":1})
return M()(
M("Create a Facility", c="org", f="facility", m="create")(
),
M("View Facilities", c="org", f="facility", m="summary")(
LIST,
MAP,
REPORT,
),
M("Import Facilities", c="org", f="facility", m="import",
restrict=[ADMIN])(
),
M("Facility Types", c="org", f="facility_type",
restrict=[ADMIN])(
M("View"),
M("Create", m="create"),
),
)
else:
# organisation, organisation_type or hrm
return self.hrm()
# -------------------------------------------------------------------------
def pr(self):
""" Person Registry """
if not current.auth.is_logged_in():
# No Side Menu
return None
else:
return self.hrm()
# END =========================================================================
|
|
from django import template
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from oauth2 import Client
from socialregistration.signals import login, connect
import mock
import urllib
import urlparse
class TemplateTagTest(object):
def get_tag(self):
"""
Return the appropriate {% load %} and {% button %} tag to try rendering
as a tuple:
('facebook', 'facebook_button')
"""
raise NotImplementedError
def test_tag_renders_correctly(self):
load, button = self.get_tag()
tpl = """{%% load %s %%}{%% %s %%}""" % (load, button)
self.assertTrue('form' in template.Template(tpl).render(template.Context({'request': None})))
tpl = """{%% load %s %%}{%% %s STATIC_URL 'custom/button/url.jpg' %%}""" % (load, button)
rendered = template.Template(tpl).render(template.Context({
'request': None,
'STATIC_URL': '/static/'}))
self.assertTrue('custom/button/url.jpg' in rendered)
self.assertTrue('/static/' in rendered)
def get_mock_func(func):
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
return wrapped
class OAuthTest(object):
"""
Mixin for OAuth tests. This does not go out to the services that we're
    testing but instead mocks the responses we *should* get back.
"""
# The profile model to be used
profile = None
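    # A concrete test case is expected to mix this class into a Django TestCase
    # and fill in the abstract pieces; a hypothetical sketch (class, profile and
    # URL names are examples only):
    #
    #     class TwitterOAuthTest(OAuthTest, TestCase):
    #         profile = TwitterProfile
    #
    #         def get_redirect_url(self):
    #             return reverse('socialregistration:twitter:redirect')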
def get_redirect_url(self):
raise NotImplementedError
def get_callback_url(self):
raise NotImplementedError
def get_callback_setup_url(self):
raise NotImplementedError
def get_redirect_mock_response(self, *args, **kwargs):
"""
If the redirect view does any requests, this is the method that returns
the mocked response. In case of OAuth1 this will be the request token
response.
"""
raise NotImplementedError
def get_callback_mock_response(self, *args, **kwargs):
"""
If the callback view does any request, this is the method that returns
the mocked response. In case of OAuth{1,2} this will be the access token.
"""
raise NotImplementedError
def get_setup_callback_mock_response(self, *args, **kwargs):
"""
If the setup callback view does any requests, this is the method that
returns the mocked response. In case of OAuth{1,2} this will be the
user information that we'll be authenticating with.
"""
raise NotImplementedError
def create_profile(self, user):
raise NotImplementedError
def create_user(self, is_active=True):
user = User.objects.create(username='alen')
user.set_password('test')
user.is_active = is_active
user.save()
return user
def login(self):
self.client.login(username='alen', password='test')
def get_counter(self):
return type('Counter', (object,), {'counter' : 0})()
@mock.patch('oauth2.Client.request')
def redirect(self, MockRequest):
MockRequest.side_effect = get_mock_func(self.get_redirect_mock_response)
response = self.client.post(self.get_redirect_url())
return response
@mock.patch('oauth2.Client.request')
def callback(self, MockRequest):
MockRequest.side_effect = get_mock_func(self.get_callback_mock_response)
response = self.client.get(self.get_callback_url(), {'oauth_verifier': 'abc'})
return response
@mock.patch('oauth2.Client.request')
def setup_callback(self, MockRequest):
MockRequest.side_effect = get_mock_func(self.get_setup_callback_mock_response)
response = self.client.get(self.get_setup_callback_url())
return response
def flow(self):
self.redirect()
self.callback()
return self.setup_callback()
def test_redirect_should_redirect_a_user(self):
response = self.redirect()
self.assertEqual(302, response.status_code, response.content)
def test_callback_should_redirect_a_user(self):
self.redirect()
response = self.callback()
self.assertEqual(302, response.status_code, response.content)
def test_setup_callback_should_redirect_a_new_user(self):
self.redirect()
self.callback()
response = self.setup_callback()
self.assertEqual(302, response.status_code, response.content)
self.assertEqual(urlparse.urlparse(response['Location']).path, reverse('socialregistration:setup'))
def test_setup_callback_should_redirect_a_logged_in_user(self):
self.create_user()
self.login()
self.redirect()
self.callback()
response = self.setup_callback()
self.assertEqual(302, response.status_code, response.content)
self.assertNotEqual(urlparse.urlparse(response['Location']).path, reverse('socialregistration:setup'))
def test_connected_user_should_be_logged_in(self):
user = self.create_user()
self.assertFalse(self.client.session.get('_auth_user_id', False))
self.create_profile(user)
self.flow()
self.assertEqual(1, self.client.session['_auth_user_id'])
def test_logged_in_user_should_be_connected(self):
user = self.create_user()
self.login()
self.assertEqual(0, self.profile.objects.filter(user=user).count())
self.flow()
self.assertEqual(1, self.profile.objects.filter(user=user).count())
def test_only_one_user_can_connect_with_a_provider(self):
user = self.create_user()
self.create_profile(user)
other = User.objects.create(username='other')
other.is_active = True
other.set_password('test')
other.save()
self.client.login(username='other', password='test')
response = self.flow()
self.assertEqual(200, response.status_code, response.content)
self.assertContains(response, 'This profile is already connected to another user account')
def test_logging_in_should_send_the_login_signal(self):
counter = self.get_counter()
user = self.create_user()
self.create_profile(user)
def handler(sender, **kwargs):
counter.counter += 1
self.assertEqual(self.profile, sender)
login.connect(handler, sender=self.profile, dispatch_uid='socialreg.test.login')
self.flow()
self.assertEqual(1, counter.counter)
def test_connecting_should_send_the_connect_signal(self):
counter = self.get_counter()
user = self.create_user()
self.login()
def handler(sender, **kwargs):
counter.counter += 1
self.assertEqual(self.profile, sender)
connect.connect(handler, sender=self.profile, dispatch_uid='socialreg.test.connect')
self.flow()
self.assertEqual(1, counter.counter)
def test_setup_callback_should_indicate_an_inactive_user(self):
user = self.create_user(is_active=False)
self.create_profile(user)
self.redirect()
self.callback()
response = self.setup_callback()
self.assertEqual(200, response.status_code, response.content)
self.assertContains(response, "inactive", 1)
def test_setup_callback_should_redirect_an_inactive_user(self):
settings.LOGIN_INACTIVE_REDIRECT_URL = '/inactive/'
user = self.create_user(is_active=False)
self.create_profile(user)
self.redirect()
self.callback()
response = self.setup_callback()
self.assertEqual(302, response.status_code, response.content)
self.assertTrue('/inactive/' in response['Location'])
settings.LOGIN_INACTIVE_REDIRECT_URL = False
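# Illustrative sketch (assumption, not taken from this module): a concrete
# OAuth1 test combines this mixin with a TestCase, points it at the provider's
# URLs and returns canned HTTP responses from the mock hooks, roughly:
#
#     class TwitterTest(OAuthTest, TestCase):
#         profile = TwitterProfile  # hypothetical profile model
#
#         def get_redirect_url(self):
#             return reverse('socialregistration:twitter:redirect')  # assumed URL name
#
#         def get_redirect_mock_response(self, *args, **kwargs):
#             return {'status': '200'}, 'oauth_token=abc&oauth_token_secret=def'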
class OAuth2Test(OAuthTest):
def redirect(self):
response = self.client.post(self.get_redirect_url())
return response
@mock.patch('socialregistration.clients.oauth.OAuth2.request')
def callback(self, MockRequest):
MockRequest.side_effect = get_mock_func(self.get_callback_mock_response)
response = self.client.get(self.get_callback_url(), {'code': 'abc'})
return response
@mock.patch('socialregistration.clients.oauth.OAuth2.request')
def setup_callback(self, MockRequest):
MockRequest.side_effect = get_mock_func(self.get_setup_callback_mock_response)
response = self.client.get(self.get_setup_callback_url())
return response
class TestContextProcessors(TestCase):
def test_request_is_in_context(self):
self.assertTrue('django.core.context_processors.request' in settings.TEMPLATE_CONTEXT_PROCESSORS)
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stubs for File service."""
import base64
import datetime
import hashlib
import os
import random
import string
import StringIO
import tempfile
import time
from google.appengine.api import apiproxy_stub
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import blobstore as api_blobstore
from google.appengine.api.blobstore import blobstore_stub
from google.appengine.api.files import blobstore as files_blobstore
from google.appengine.api.files import file as files
from google.appengine.api.files import file_service_pb
from google.appengine.api.files import gs
from google.appengine.ext import blobstore
from google.appengine.runtime import apiproxy_errors
MAX_REQUEST_SIZE = 32 << 20
GS_INFO_KIND = blobstore_stub._GS_INFO_KIND
_now_function = datetime.datetime.now
def _to_seconds(datetime_obj):
return time.mktime(datetime_obj.timetuple())
def _random_string(length):
"""Generate a random string of given length."""
return ''.join(
random.choice(string.letters + string.digits) for _ in range(length))
def raise_error(error_code, error_detail=''):
"""Raise application error helper method."""
raise apiproxy_errors.ApplicationError(error_code, error_detail=error_detail)
_BLOBSTORE_DIRECTORY = files_blobstore._BLOBSTORE_DIRECTORY
_GS_PREFIX = gs._GS_PREFIX
_GS_UPLOAD_PREFIX = _GS_PREFIX + 'writable:'
class _GoogleStorageUpload(tuple):
"""Stores information about a writable Google Storage file."""
buf = property(lambda self: self[0])
content_type = property(lambda self: self[1])
key = property(lambda self: self[2])
class GoogleStorage(object):
"""Virtual google storage to be used by file api."""
def _Upload(self, buf, content_type, key):
return _GoogleStorageUpload([buf, content_type, key])
def __init__(self, blob_storage):
"""Constructor.
Args:
blob_storage:
apphosting.api.blobstore.blobstore_stub.BlobStorage instance.
"""
self.blob_storage = blob_storage
self.uploads = {}
self.finalized = set()
self.sequence_keys = {}
def has_upload(self, filename):
"""Checks if there is an upload at this filename."""
return filename in self.uploads
def finalize(self, filename):
"""Marks file as finalized."""
upload = self.uploads[filename]
self.finalized.add(filename)
upload.buf.seek(0)
self.blob_storage.StoreBlob(self.get_blob_key(upload.key), upload.buf)
del self.sequence_keys[filename]
encoded_key = blobstore.create_gs_key(upload.key)
file_info = datastore.Entity(GS_INFO_KIND,
name=encoded_key,
namespace='')
file_info['creation'] = _now_function()
file_info['filename'] = upload.key
file_info['size'] = upload.buf.len
file_info['content_type'] = upload.content_type
file_info['storage_key'] = self.get_blob_key(upload.key)
datastore.Put(file_info)
@staticmethod
def get_blob_key(key):
"""Converts a bigstore key into a base64 encoded blob key/filename."""
return base64.urlsafe_b64encode(key)
def is_finalized(self, filename):
"""Checks if file is already finalized."""
assert filename in self.uploads
return filename in self.finalized
def start_upload(self, request):
"""Starts a new upload based on the specified CreateRequest."""
mime_type = None
gs_filename = request.filename()
ignored_parameters = [
gs._CACHE_CONTROL_PARAMETER,
gs._CANNED_ACL_PARAMETER,
gs._CONTENT_DISPOSITION_PARAMETER,
gs._CONTENT_ENCODING_PARAMETER,
]
for param in request.parameters_list():
name = param.name()
if name == gs._MIME_TYPE_PARAMETER:
mime_type = param.value()
elif (name in ignored_parameters or
name.startswith(gs._USER_METADATA_PREFIX)):
pass
else:
raise_error(file_service_pb.FileServiceErrors.INVALID_PARAMETER)
if not mime_type:
raise_error(file_service_pb.FileServiceErrors.INVALID_PARAMETER)
elif not gs_filename:
raise_error(file_service_pb.FileServiceErrors.INVALID_PARAMETER)
random_str = ''.join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(64))
writable_name = '%s%s' % (
_GS_UPLOAD_PREFIX, base64.urlsafe_b64encode(random_str))
self.uploads[writable_name] = self._Upload(
StringIO.StringIO(), mime_type, gs_filename)
self.sequence_keys[writable_name] = None
datastore.Delete(
datastore.Key.from_path(GS_INFO_KIND,
blobstore.create_gs_key(gs_filename),
namespace=''))
return writable_name
def append(self, filename, data, sequence_key):
"""Appends data to the upload filename."""
assert not self.is_finalized(filename)
if sequence_key:
current_sequence_key = self.sequence_keys[filename]
if current_sequence_key and current_sequence_key >= sequence_key:
raise_error(file_service_pb.FileServiceErrors.SEQUENCE_KEY_OUT_OF_ORDER,
error_detail=current_sequence_key)
self.sequence_keys[filename] = sequence_key
self.uploads[filename].buf.write(data)
def stat(self, filename):
"""
Returns:
file info for a finalized file with given filename
"""
blob_key = blobstore.create_gs_key(filename)
try:
return datastore.Get(
datastore.Key.from_path(GS_INFO_KIND, blob_key, namespace=''))
except datastore_errors.EntityNotFoundError:
raise_error(file_service_pb.FileServiceErrors.EXISTENCE_ERROR,
filename)
def get_reader(self, filename):
try:
return self.blob_storage.OpenBlob(self.get_blob_key(filename))
except IOError:
return None
def listdir(self, request, response):
"""listdir.
Args:
request: ListDir RPC request.
response: ListDir RPC response.
Returns:
A list of fully qualified filenames under a certain path, sorted in
character order.
"""
path = request.path()
prefix = request.prefix() if request.has_prefix() else ''
q = datastore.Query(GS_INFO_KIND, namespace='')
fully_qualified_name = '/'.join([path, prefix])
if request.has_marker():
q['filename >'] = '/'.join([path, request.marker()])
else:
q['filename >='] = fully_qualified_name
if request.has_max_keys():
max_keys = request.max_keys()
else:
max_keys = 2**31-1
for gs_file_info in q.Get(max_keys):
filename = gs_file_info['filename']
if filename.startswith(fully_qualified_name):
response.add_filenames(filename)
else:
break
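# Illustrative lifecycle sketch (assumption, not taken from this stub): a
# writable Google Storage file passes through start_upload -> append ->
# finalize, after which stat() and get_reader() serve the finalized object:
#
#     storage = GoogleStorage(blob_storage)            # any BlobStorage stub
#     writable = storage.start_upload(create_request)  # a CreateRequest proto
#     storage.append(writable, 'payload', sequence_key=None)
#     storage.finalize(writable)
#     info = storage.stat('/gs/bucket/object')         # datastore entity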
class GoogleStorageFile(object):
"""File object for '/gs/' files."""
def __init__(self, open_request, file_storage):
self.filename = open_request.filename()
self.file_storage = file_storage
self.open_mode = open_request.open_mode()
content_type = open_request.content_type()
if self.is_appending:
if not self.filename.startswith(_GS_UPLOAD_PREFIX):
raise_error(file_service_pb.FileServiceErrors.INVALID_FILE_NAME)
elif not self.file_storage.has_upload(self.filename):
raise_error(file_service_pb.FileServiceErrors.EXISTENCE_ERROR)
elif self.file_storage.is_finalized(self.filename):
raise_error(file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'File is already finalized')
else:
if not self.filename.startswith(_GS_PREFIX):
raise_error(file_service_pb.FileServiceErrors.INVALID_FILE_NAME)
elif self.filename.startswith(_GS_UPLOAD_PREFIX):
raise_error(file_service_pb.FileServiceErrors.INVALID_FILE_NAME)
else:
self.buf = self.file_storage.get_reader(self.filename)
if not self.buf:
raise_error(file_service_pb.FileServiceErrors.EXISTENCE_ERROR)
if content_type != file_service_pb.FileContentType.RAW:
raise_error(file_service_pb.FileServiceErrors.WRONG_CONTENT_TYPE)
@property
def is_appending(self):
"""Checks if the file is opened for appending or reading."""
return self.open_mode == file_service_pb.OpenRequest.APPEND
def stat(self, request, response):
"""Fill response with file stat.
Current implementation only fills length, finalized, filename, and content
type. File must be opened in read mode before stat is called.
"""
file_info = self.file_storage.stat(self.filename)
file_stat = response.add_stat()
file_stat.set_filename(file_info['filename'])
file_stat.set_finalized(True)
file_stat.set_length(file_info['size'])
file_stat.set_ctime(_to_seconds(file_info['creation']))
file_stat.set_mtime(_to_seconds(file_info['creation']))
file_stat.set_content_type(file_service_pb.FileContentType.RAW)
response.set_more_files_found(False)
def read(self, request, response):
"""Copies up to max_bytes starting at pos into response from filename."""
if self.is_appending:
raise_error(file_service_pb.FileServiceErrors.WRONG_OPEN_MODE)
self.buf.seek(request.pos())
data = self.buf.read(request.max_bytes())
response.set_data(data)
def append(self, request, response):
"""Appends data to filename."""
if not self.is_appending:
raise_error(file_service_pb.FileServiceErrors.WRONG_OPEN_MODE)
self.file_storage.append(
self.filename, request.data(), request.sequence_key())
def finalize(self):
"""Finalize a file.
Copies temp file data to permanent location for reading.
"""
if not self.is_appending:
raise_error(file_service_pb.FileServiceErrors.WRONG_OPEN_MODE)
elif self.file_storage.is_finalized(self.filename):
raise_error(
file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'File is already finalized')
self.file_storage.finalize(self.filename)
class BlobstoreStorage(object):
"""Virtual file storage to be used by file api.
Abstracts away all aspects of logical and physical file organization of the
API.
"""
def __init__(self, blob_storage):
"""Constructor.
Args:
blob_storage: An instance of
apphosting.api.blobstore.blobstore_stub.BlobStorage to use for blob
integration.
"""
self.blob_keys = {}
self.blobstore_files = set()
self.finalized_files = set()
self.created_files = set()
self.data_files = {}
self.sequence_keys = {}
self.blob_storage = blob_storage
self.blob_content_types = {}
self.blob_file_names = {}
def finalize(self, filename):
"""Marks file as finalized."""
if self.is_finalized(filename):
raise_error(file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'File is already finalized')
self.finalized_files.add(filename)
def is_finalized(self, filename):
"""Checks if file is already finalized."""
return filename in self.finalized_files
def get_blob_key(self, ticket):
"""Gets blob key for blob creation ticket."""
return self.blob_keys.get(ticket)
def register_blob_key(self, ticket, blob_key):
"""Register blob key for a ticket."""
self.blob_keys[ticket] = blob_key
def has_blobstore_file(self, filename):
"""Checks if blobstore file was already created."""
return filename in self.blobstore_files
def add_blobstore_file(self, request):
"""Registers a created blob store file."""
mime_type = None
blob_filename = ''
for param in request.parameters_list():
name = param.name()
if name == files_blobstore._MIME_TYPE_PARAMETER:
mime_type = param.value()
elif name == files_blobstore._BLOBINFO_UPLOADED_FILENAME_PARAMETER:
blob_filename = param.value()
else:
raise_error(file_service_pb.FileServiceErrors.INVALID_PARAMETER)
if mime_type is None:
raise_error(file_service_pb.FileServiceErrors.INVALID_PARAMETER)
random_str = ''.join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(64))
filename = (_BLOBSTORE_DIRECTORY +
files._CREATION_HANDLE_PREFIX +
base64.urlsafe_b64encode(random_str))
self.blobstore_files.add(filename)
self.blob_content_types[filename] = mime_type
self.blob_file_names[filename] = blob_filename
return filename
def get_sequence_key(self, filename):
"""Get sequence key for a file."""
return self.sequence_keys.get(filename, '')
def set_sequence_key(self, filename, sequence_key):
"""Set sequence key for a file."""
self.sequence_keys[filename] = sequence_key
def stat(self, filename):
"""
Returns:
file info for a finalized file with given filename."""
blob_key = files_blobstore.get_blob_key(filename)
file_info = datastore.Get(
datastore.Key.from_path(api_blobstore.BLOB_INFO_KIND, str(blob_key),
namespace=''))
if file_info is None:
raise_error(
file_service_pb.FileServiceErrors.EXISTENCE_ERROR_MEATADATA_NOT_FOUND,
filename)
return file_info
def save_blob(self, filename, blob_key):
"""Save filename temp data to a blobstore under given key."""
f = self._get_data_file(filename)
f.seek(0)
self.blob_storage.StoreBlob(blob_key, f)
f.seek(0, os.SEEK_END)
size = f.tell()
f.close()
del self.data_files[filename]
return size
def _get_data_file(self, filename):
"""Get a temp data file for a file."""
if filename not in self.data_files:
f = tempfile.TemporaryFile()
self.data_files[filename] = f
return f
return self.data_files[filename]
def get_md5_from_blob(self, blobkey):
"""Get md5 hexdigest of the blobfile with blobkey."""
# Open the blob before entering the try block so the finally clause never
# references an unbound name if OpenBlob() raises.
f = self.blob_storage.OpenBlob(blobkey)
try:
file_md5 = hashlib.md5()
file_md5.update(f.read())
return file_md5.hexdigest()
finally:
f.close()
def append(self, filename, data):
"""Append data to file."""
self._get_data_file(filename).write(data)
def get_content_type(self, filename):
return self.blob_content_types[filename]
def get_blob_file_name(self, filename):
return self.blob_file_names[filename]
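# Illustrative sketch (assumption): for /blobstore/ files this class only
# tracks tickets, temp data and sequence keys; the blob itself is written by
# BlobstoreFile.finalize(), roughly:
#
#     storage = BlobstoreStorage(blob_storage)
#     name = storage.add_blobstore_file(create_request)  # a CreateRequest proto
#     storage.append(name, 'payload')
#     storage.finalize(name)  # marks finalized only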
class BlobstoreFile(object):
"""File object for generic /blobstore/ file."""
def __init__(self, open_request, file_storage):
"""Constructor.
Args:
open_request: An instance of open file request.
file_storage: An instance of BlobstoreStorage.
"""
self.filename = open_request.filename()
self.file_storage = file_storage
self.blob_reader = None
self.content_type = None
self.mime_content_type = None
open_mode = open_request.open_mode()
content_type = open_request.content_type()
if not self.filename.startswith(_BLOBSTORE_DIRECTORY):
if not self.file_storage.has_blobstore_file(self.filename):
raise_error(file_service_pb.FileServiceErrors.INVALID_FILE_NAME)
self.ticket = self.filename[len(_BLOBSTORE_DIRECTORY):]
if open_mode == file_service_pb.OpenRequest.APPEND:
if not self.file_storage.has_blobstore_file(self.filename):
raise_error(file_service_pb.FileServiceErrors.EXISTENCE_ERROR)
if self.file_storage.is_finalized(self.filename):
raise_error(file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'File is already finalized')
self.mime_content_type = self.file_storage.get_content_type(self.filename)
self.blob_file_name = self.file_storage.get_blob_file_name(self.filename)
else:
if self.ticket.startswith(files._CREATION_HANDLE_PREFIX):
blobkey = self.file_storage.get_blob_key(self.ticket)
if not blobkey:
raise_error(file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'Blobkey not found.')
else:
blobkey = self.ticket
blob_info = blobstore.BlobInfo.get(blobkey)
if not blob_info:
raise_error(file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'Blobinfo not found.')
self.blob_reader = blobstore.BlobReader(blob_info)
self.mime_content_type = blob_info.content_type
if content_type != file_service_pb.FileContentType.RAW:
raise_error(file_service_pb.FileServiceErrors.WRONG_CONTENT_TYPE)
@property
def is_appending(self):
"""Checks if the file is opened for appending or reading."""
return self.blob_reader is None
def stat(self, request, response):
"""Fill response with file stat.
Current implementation only fills length, finalized, filename, and content
type. File must be opened in read mode before stat is called.
"""
file_info = self.file_storage.stat(self.filename)
file_stat = response.add_stat()
file_stat.set_filename(self.filename)
file_stat.set_finalized(True)
file_stat.set_length(file_info['size'])
file_stat.set_ctime(_to_seconds(file_info['creation']))
file_stat.set_mtime(_to_seconds(file_info['creation']))
file_stat.set_content_type(file_service_pb.FileContentType.RAW)
response.set_more_files_found(False)
def read(self, request, response):
"""Read data from file
Args:
request: An instance of file_service_pb.ReadRequest.
response: An instance of file_service_pb.ReadResponse.
"""
if self.is_appending:
raise_error(file_service_pb.FileServiceErrors.WRONG_OPEN_MODE)
self.blob_reader.seek(request.pos())
response.set_data(self.blob_reader.read(request.max_bytes()))
def append(self, request, response):
"""Append data to file.
Args:
request: An instance of file_service_pb.AppendRequest.
response: An instance of file_service_pb.AppendResponse.
"""
sequence_key = request.sequence_key()
if sequence_key:
current_sequence_key = self.file_storage.get_sequence_key(self.filename)
if current_sequence_key and current_sequence_key >= sequence_key:
raise_error(file_service_pb.FileServiceErrors.SEQUENCE_KEY_OUT_OF_ORDER,
error_detail=current_sequence_key)
self.file_storage.set_sequence_key(self.filename, sequence_key)
self.file_storage.append(self.filename, request.data())
def finalize(self):
"""Finalize a file.
Copies temp file data to the blobstore.
"""
self.file_storage.finalize(self.filename)
blob_key = _random_string(64)
self.file_storage.register_blob_key(self.ticket, blob_key)
size = self.file_storage.save_blob(self.filename, blob_key)
blob_info = datastore.Entity(api_blobstore.BLOB_INFO_KIND,
name=str(blob_key), namespace='')
blob_info['content_type'] = self.mime_content_type
blob_info['creation'] = _now_function()
blob_info['filename'] = self.blob_file_name
blob_info['size'] = size
blob_info['creation_handle'] = self.ticket
blob_info['md5_hash'] = self.file_storage.get_md5_from_blob(blob_key)
datastore.Put(blob_info)
blob_file = datastore.Entity('__BlobFileIndex__',
name=self.ticket,
namespace='')
blob_file['blob_key'] = str(blob_key)
datastore.Put(blob_file)
class FileServiceStub(apiproxy_stub.APIProxyStub):
"""Python stub for file service."""
def __init__(self, blob_storage):
"""Constructor."""
super(FileServiceStub, self).__init__('file',
max_request_size=MAX_REQUEST_SIZE)
self.open_files = {}
self.file_storage = BlobstoreStorage(blob_storage)
self.gs_storage = GoogleStorage(blob_storage)
def _Dynamic_Create(self, request, response):
filesystem = request.filesystem()
if request.has_filename() and filesystem != gs._GS_FILESYSTEM:
raise_error(file_service_pb.FileServiceErrors.FILE_NAME_SPECIFIED)
if filesystem == files_blobstore._BLOBSTORE_FILESYSTEM:
response.set_filename(self.file_storage.add_blobstore_file(request))
elif filesystem == gs._GS_FILESYSTEM:
response.set_filename(self.gs_storage.start_upload(request))
else:
raise_error(file_service_pb.FileServiceErrors.UNSUPPORTED_FILE_SYSTEM)
def _Dynamic_Open(self, request, response):
"""Handler for Open RPC call."""
filename = request.filename()
if request.exclusive_lock() and filename in self.open_files:
raise_error(file_service_pb.FileServiceErrors.EXCLUSIVE_LOCK_FAILED)
if filename.startswith(_BLOBSTORE_DIRECTORY):
self.open_files[filename] = BlobstoreFile(request, self.file_storage)
elif filename.startswith(_GS_PREFIX):
self.open_files[filename] = GoogleStorageFile(request, self.gs_storage)
else:
raise_error(file_service_pb.FileServiceErrors.INVALID_FILE_NAME)
def _Dynamic_Close(self, request, response):
"""Handler for Close RPC call."""
filename = request.filename()
finalize = request.finalize()
if filename not in self.open_files:
raise_error(file_service_pb.FileServiceErrors.FILE_NOT_OPENED)
if finalize:
self.open_files[filename].finalize()
del self.open_files[filename]
def _Dynamic_Stat(self, request, response):
"""Handler for Stat RPC call."""
filename = request.filename()
if filename not in self.open_files:
raise_error(file_service_pb.FileServiceErrors.FILE_NOT_OPENED)
file = self.open_files[filename]
if file.is_appending:
raise_error(file_service_pb.FileServiceErrors.WRONG_OPEN_MODE)
file.stat(request, response)
def _Dynamic_Read(self, request, response):
"""Handler for Read RPC call."""
filename = request.filename()
if filename not in self.open_files:
raise_error(file_service_pb.FileServiceErrors.FILE_NOT_OPENED)
self.open_files[filename].read(request, response)
def _Dynamic_Append(self, request, response):
"""Handler for Append RPC call."""
filename = request.filename()
if filename not in self.open_files:
raise_error(file_service_pb.FileServiceErrors.FILE_NOT_OPENED)
self.open_files[filename].append(request, response)
def _Dynamic_GetCapabilities(self, request, response):
"""Handler for GetCapabilities RPC call."""
response.add_filesystem('blobstore')
response.add_filesystem('gs')
response.set_shuffle_available(False)
def _Dynamic_GetDefaultGsBucketName(self, request, response):
"""Handler for GetDefaultGsBucketName RPC call."""
response.set_default_gs_bucket_name('app_default_bucket')
def _Dynamic_ListDir(self, request, response):
"""Handler for ListDir RPC call.
Only for dev app server. See b/6761691.
"""
path = request.path()
if not path.startswith(_GS_PREFIX):
raise_error(file_service_pb.FileServiceErrors.UNSUPPORTED_FILE_SYSTEM)
self.gs_storage.listdir(request, response)
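# Registration sketch (assumption, mirroring how other dev-server stubs are
# wired up; not taken from this file):
#
#     from google.appengine.api import apiproxy_stub_map
#     apiproxy_stub_map.apiproxy.RegisterStub(
#         'file', FileServiceStub(blob_storage))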
|
|
#!/usr/bin/env python
"""
glext-emu - code generator scripts helper module
Copyright (c) 2013 Leszek Godlewski
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import os
import subprocess
import tempfile
cxx = 'gcc'
registry = [
'GL_ARB_multitexture',
'GLX_ARB_get_proc_address',
'GL_ARB_transpose_matrix',
'WGL_ARB_buffer_region',
'GL_ARB_multisample ',
'GLX_ARB_multisample ',
'WGL_ARB_multisample',
'GL_ARB_texture_env_add',
'GL_ARB_texture_cube_map',
'WGL_ARB_extensions_string',
'WGL_ARB_pixel_format',
'WGL_ARB_make_current_read',
'WGL_ARB_pbuffer',
'GL_ARB_texture_compression',
'GL_ARB_texture_border_clamp',
'GL_ARB_point_parameters',
'GL_ARB_vertex_blend',
'GL_ARB_matrix_palette',
'GL_ARB_texture_env_combine',
'GL_ARB_texture_env_crossbar',
'GL_ARB_texture_env_dot3',
'WGL_ARB_render_texture',
'GL_ARB_texture_mirrored_repeat',
'GL_ARB_depth_texture',
'GL_ARB_shadow',
'GL_ARB_shadow_ambient',
'GL_ARB_window_pos',
'GL_ARB_vertex_program',
'GL_ARB_fragment_program',
'GL_ARB_vertex_buffer_object ',
'GLX_ARB_vertex_buffer_object',
'GL_ARB_occlusion_query',
'GL_ARB_shader_objects',
'GL_ARB_vertex_shader',
'GL_ARB_fragment_shader',
'GL_ARB_shading_language_100',
'GL_ARB_texture_non_power_of_two',
'GL_ARB_point_sprite',
'GL_ARB_fragment_program_shadow',
'GL_ARB_draw_buffers',
'GL_ARB_texture_rectangle',
'GL_ARB_color_buffer_float ',
'WGL_ARB_pixel_format_float ',
'GLX_ARB_fbconfig_float',
'GL_ARB_half_float_pixel',
'GL_ARB_texture_float',
'GL_ARB_pixel_buffer_object',
'GL_ARB_depth_buffer_float',
'GL_ARB_draw_instanced',
'GL_ARB_framebuffer_object',
'GL_ARB_framebuffer_sRGB ',
'GLX_ARB_framebuffer_sRGB ',
'WGL_ARB_framebuffer_sRGB',
'GL_ARB_geometry_shader4',
'GL_ARB_half_float_vertex',
'GL_ARB_instanced_arrays',
'GL_ARB_map_buffer_range',
'GL_ARB_texture_buffer_object',
'GL_ARB_texture_compression_rgtc',
'GL_ARB_texture_rg',
'GL_ARB_vertex_array_object',
'WGL_ARB_create_context',
'GLX_ARB_create_context',
'GL_ARB_uniform_buffer_object',
'GL_ARB_compatibility',
'GL_ARB_copy_buffer',
'GL_ARB_shader_texture_lod',
'GL_ARB_depth_clamp',
'GL_ARB_draw_elements_base_vertex',
'GL_ARB_fragment_coord_conventions',
'GL_ARB_provoking_vertex',
'GL_ARB_seamless_cube_map',
'GL_ARB_sync',
'GL_ARB_texture_multisample',
'GL_ARB_vertex_array_bgra',
'GL_ARB_draw_buffers_blend',
'GL_ARB_sample_shading',
'GL_ARB_texture_cube_map_array',
'GL_ARB_texture_gather',
'GL_ARB_texture_query_lod',
'WGL_ARB_create_context_profile',
'GLX_ARB_create_context_profile',
'GL_ARB_shading_language_include',
'GL_ARB_texture_compression_bptc',
'GL_ARB_blend_func_extended',
'GL_ARB_explicit_attrib_location',
'GL_ARB_occlusion_query2',
'GL_ARB_sampler_objects',
'GL_ARB_shader_bit_encoding',
'GL_ARB_texture_rgb10_a2ui',
'GL_ARB_texture_swizzle',
'GL_ARB_timer_query',
'GL_ARB_vertex_type_2_10_10_10_rev',
'GL_ARB_draw_indirect',
'GL_ARB_gpu_shader5',
'GL_ARB_gpu_shader_fp64',
'GL_ARB_shader_subroutine',
'GL_ARB_tessellation_shader',
'GL_ARB_texture_buffer_object_rgb32',
'GL_ARB_transform_feedback2',
'GL_ARB_transform_feedback3',
'GL_ARB_ES2_compatibility',
'GL_ARB_get_program_binary',
'GL_ARB_separate_shader_objects',
'GL_ARB_shader_precision',
'GL_ARB_vertex_attrib_64bit',
'GL_ARB_viewport_array',
'GLX_ARB_create_context_robustness',
'WGL_ARB_create_context_robustness',
'GL_ARB_cl_event',
'GL_ARB_debug_output',
'GL_ARB_robustness',
'GL_ARB_shader_stencil_export',
'GL_ARB_base_instance',
'GL_ARB_shading_language_420pack',
'GL_ARB_transform_feedback_instanced',
'GL_ARB_compressed_texture_pixel_storage',
'GL_ARB_conservative_depth',
'GL_ARB_internalformat_query',
'GL_ARB_map_buffer_alignment',
'GL_ARB_shader_atomic_counters',
'GL_ARB_shader_image_load_store',
'GL_ARB_shading_language_packing',
'GL_ARB_texture_storage',
'GL_KHR_texture_compression_astc_hdr ',
'GL_KHR_texture_compression_astc_ldr',
'GL_KHR_debug',
'GL_ARB_arrays_of_arrays',
'GL_ARB_clear_buffer_object',
'GL_ARB_compute_shader',
'GL_ARB_copy_image',
'GL_ARB_texture_view',
'GL_ARB_vertex_attrib_binding',
'GL_ARB_robustness_isolation',
'GL_ARB_ES3_compatibility',
'GL_ARB_explicit_uniform_location',
'GL_ARB_fragment_layer_viewport',
'GL_ARB_framebuffer_no_attachments',
'GL_ARB_internalformat_query2',
'GL_ARB_invalidate_subdata',
'GL_ARB_multi_draw_indirect',
'GL_ARB_program_interface_query',
'GL_ARB_robust_buffer_access_behavior',
'GL_ARB_shader_image_size',
'GL_ARB_shader_storage_buffer_object',
'GL_ARB_stencil_texturing',
'GL_ARB_texture_buffer_range',
'GL_ARB_texture_query_levels',
'GL_ARB_texture_storage_multisample',
'GLX_ARB_robustness_application_isolation ',
'GLX_ARB_robustness_share_group_isolation',
'WGL_ARB_robustness_application_isolation ',
'WGL_ARB_robustness_share_group_isolation',
'GL_ARB_buffer_storage',
'GL_ARB_clear_texture',
'GL_ARB_enhanced_layouts',
'GL_ARB_multi_bind',
'GL_ARB_query_buffer_object',
'GL_ARB_texture_mirror_clamp_to_edge',
'GL_ARB_texture_stencil8',
'GL_ARB_vertex_type_10f_11f_11f_rev',
'GL_ARB_bindless_texture',
'GL_ARB_compute_variable_group_size',
'GL_ARB_indirect_parameters',
'GL_ARB_seamless_cubemap_per_texture',
'GL_ARB_shader_draw_parameters',
'GL_ARB_shader_group_vote',
'GL_ARB_sparse_texture',
# 'Vendor and EXT Extensions by number',  # registry section heading, not a preprocessor symbol
'GL_EXT_abgr',
'GL_EXT_blend_color',
'GL_EXT_polygon_offset',
'GL_EXT_texture',
'GL_EXT_texture3D',
'GL_SGIS_texture_filter4',
'GL_EXT_subtexture',
'GL_EXT_copy_texture',
'GL_EXT_histogram',
'GL_EXT_convolution',
'GL_SGI_color_matrix',
'GL_SGI_color_table',
'GL_SGIS_pixel_texture',
'GL_SGIX_pixel_texture',
'GL_SGIS_texture4D',
'GL_SGI_texture_color_table',
'GL_EXT_cmyka',
'GL_EXT_texture_object',
'GL_SGIS_detail_texture',
'GL_SGIS_sharpen_texture',
'GL_EXT_packed_pixels',
'GL_SGIS_texture_lod',
'GL_SGIS_multisample ',
'GLX_SGIS_multisample',
'GL_EXT_rescale_normal',
'GLX_EXT_visual_info',
'GL_EXT_vertex_array',
'GL_EXT_misc_attribute',
'GL_SGIS_generate_mipmap',
'GL_SGIX_clipmap',
'GL_SGIX_shadow',
'GL_SGIS_texture_edge_clamp',
'GL_SGIS_texture_border_clamp',
'GL_EXT_blend_minmax',
'GL_EXT_blend_subtract',
'GL_EXT_blend_logic_op',
'GLX_SGI_swap_control',
'GLX_SGI_video_sync',
'GLX_SGI_make_current_read',
'GLX_SGIX_video_source',
'GLX_EXT_visual_rating',
'GL_SGIX_interlace',
'GLX_EXT_import_context',
'GLX_SGIX_fbconfig',
'GLX_SGIX_pbuffer',
'GL_SGIS_texture_select',
'GL_SGIX_sprite',
'GL_SGIX_texture_multi_buffer',
'GL_EXT_point_parameters',
'GL_SGIX_instruments',
'GL_SGIX_texture_scale_bias',
'GL_SGIX_framezoom',
'GL_SGIX_tag_sample_buffer',
'GL_SGIX_reference_plane',
'GL_SGIX_flush_raster',
'GLX_SGI_cushion',
'GL_SGIX_depth_texture',
'GL_SGIS_fog_function',
'GL_SGIX_fog_offset',
'GL_HP_image_transform',
'GL_HP_convolution_border_modes',
'GL_SGIX_texture_add_env',
'GL_EXT_color_subtable',
'GLU_EXT_object_space_tess',
'GL_PGI_vertex_hints',
'GL_PGI_misc_hints',
'GL_EXT_paletted_texture',
'GL_EXT_clip_volume_hint',
'GL_SGIX_list_priority',
'GL_SGIX_ir_instrument1',
'GLX_SGIX_video_resize',
'GL_SGIX_texture_lod_bias',
'GLU_SGI_filter4_parameters',
'GLX_SGIX_dm_buffer',
'GL_SGIX_shadow_ambient',
'GLX_SGIX_swap_group',
'GLX_SGIX_swap_barrier',
'GL_EXT_index_texture',
'GL_EXT_index_material',
'GL_EXT_index_func',
'GL_EXT_index_array_formats',
'GL_EXT_compiled_vertex_array',
'GL_EXT_cull_vertex',
'GLU_EXT_nurbs_tessellator',
'GL_SGIX_ycrcb',
'GL_EXT_fragment_lighting',
'GL_IBM_rasterpos_clip',
'GL_HP_texture_lighting',
'GL_EXT_draw_range_elements',
'GL_WIN_phong_shading',
'GL_WIN_specular_fog',
'GLX_SGIS_color_range ',
'GL_SGIS_color_range',
'GL_EXT_light_texture',
'GL_SGIX_blend_alpha_minmax',
'GL_EXT_scene_marker ',
'GLX_EXT_scene_marker',
'GL_SGIX_pixel_texture_bits',
'GL_EXT_bgra',
'GL_SGIX_async',
'GL_SGIX_async_pixel',
'GL_SGIX_async_histogram',
'GL_INTEL_texture_scissor',
'GL_INTEL_parallel_arrays',
'GL_HP_occlusion_test',
'GL_EXT_pixel_transform',
'GL_EXT_pixel_transform_color_table',
'GL_EXT_shared_texture_palette',
'GLX_SGIS_blended_overlay',
'GL_EXT_separate_specular_color',
'GL_EXT_secondary_color',
'GL_EXT_texture_env',
'GL_EXT_texture_perturb_normal',
'GL_EXT_multi_draw_arrays ',
'GL_SUN_multi_draw_arrays',
'GL_EXT_fog_coord',
'GL_REND_screen_coordinates',
'GL_EXT_coordinate_frame',
'GL_EXT_texture_env_combine',
'GL_APPLE_specular_vector',
'GL_APPLE_transform_hint',
'GL_SUNX_constant_data',
'GL_SUN_global_alpha',
'GL_SUN_triangle_list',
'GL_SUN_vertex',
'WGL_EXT_display_color_table',
'WGL_EXT_extensions_string',
'WGL_EXT_make_current_read',
'WGL_EXT_pixel_format',
'WGL_EXT_pbuffer',
'WGL_EXT_swap_control',
'GL_EXT_blend_func_separate',
'GL_INGR_color_clamp',
'GL_INGR_interlace_read',
'GL_EXT_stencil_wrap',
'WGL_EXT_depth_float',
'GL_EXT_422_pixels',
'GL_NV_texgen_reflection',
'GL_SGIX_texture_range',
'GL_SUN_convolution_border_modes',
'GLX_SUN_get_transparent_index',
'GL_EXT_texture_env_add',
'GL_EXT_texture_lod_bias',
'GL_EXT_texture_filter_anisotropic',
'GL_EXT_vertex_weighting',
'GL_NV_light_max_exponent',
'GL_NV_vertex_array_range',
'GL_NV_register_combiners',
'GL_NV_fog_distance',
'GL_NV_texgen_emboss',
'GL_NV_blend_square',
'GL_NV_texture_env_combine4',
'GL_MESA_resize_buffers',
'GL_MESA_window_pos',
'GL_EXT_texture_compression_s3tc',
'GL_IBM_cull_vertex',
'GL_IBM_multimode_draw_arrays',
'GL_IBM_vertex_array_lists',
'GL_3DFX_texture_compression_FXT1',
'GL_3DFX_multisample',
'GL_3DFX_tbuffer',
'WGL_EXT_multisample ',
'GL_EXT_multisample',
'GL_SGIX_vertex_preclip ',
'GL_SGIX_vertex_preclip_hint',
'GL_SGIX_convolution_accuracy',
'GL_SGIX_resample',
'GL_SGIS_point_line_texgen',
'GL_SGIS_texture_color_mask',
'GLX_MESA_copy_sub_buffer',
'GLX_MESA_pixmap_colormap',
'GLX_MESA_release_buffers',
'GLX_MESA_set_3dfx_mode',
'GL_EXT_texture_env_dot3',
'GL_ATI_texture_mirror_once',
'GL_NV_fence',
'GL_IBM_static_data',
'GL_IBM_texture_mirrored_repeat',
'GL_NV_evaluators',
'GL_NV_packed_depth_stencil',
'GL_NV_register_combiners2',
'GL_NV_texture_compression_vtc',
'GL_NV_texture_rectangle',
'GL_NV_texture_shader',
'GL_NV_texture_shader2',
'GL_NV_vertex_array_range2',
'GL_NV_vertex_program',
'GLX_SGIX_visual_select_group',
'GL_SGIX_texture_coordinate_clamp',
'GLX_OML_swap_method',
'GLX_OML_sync_control',
'GL_OML_interlace',
'GL_OML_subsample',
'GL_OML_resample',
'WGL_OML_sync_control',
'GL_NV_copy_depth_to_color',
'GL_ATI_envmap_bumpmap',
'GL_ATI_fragment_shader',
'GL_ATI_pn_triangles',
'GL_ATI_vertex_array_object',
'GL_EXT_vertex_shader',
'GL_ATI_vertex_streams',
'WGL_I3D_digital_video_control',
'WGL_I3D_gamma',
'WGL_I3D_genlock',
'WGL_I3D_image_buffer',
'WGL_I3D_swap_frame_lock',
'WGL_I3D_swap_frame_usage',
'GL_ATI_element_array',
'GL_SUN_mesh_array',
'GL_SUN_slice_accum',
'GL_NV_multisample_filter_hint',
'GL_NV_depth_clamp',
'GL_NV_occlusion_query',
'GL_NV_point_sprite',
'WGL_NV_render_depth_texture',
'WGL_NV_render_texture_rectangle',
'GL_NV_texture_shader3',
'GL_NV_vertex_program1_1',
'GL_EXT_shadow_funcs',
'GL_EXT_stencil_two_side',
'GL_ATI_text_fragment_shader',
'GL_APPLE_client_storage',
'GL_APPLE_element_array',
'GL_APPLE_fence',
'GL_APPLE_vertex_array_object',
'GL_APPLE_vertex_array_range',
'GL_APPLE_ycbcr_422',
'GL_S3_s3tc',
'GL_ATI_draw_buffers',
'WGL_ATI_pixel_format_float',
'GL_ATI_texture_env_combine3',
'GL_ATI_texture_float',
'GL_NV_float_buffer ',
'WGL_NV_float_buffer',
'GL_NV_fragment_program',
'GL_NV_half_float',
'GL_NV_pixel_data_range',
'GL_NV_primitive_restart',
'GL_NV_texture_expand_normal',
'GL_NV_vertex_program2',
'GL_ATI_map_object_buffer',
'GL_ATI_separate_stencil',
'GL_ATI_vertex_attrib_array_object',
'GL_OES_byte_coordinates',
'GL_OES_fixed_point',
'GL_OES_single_precision',
'GL_OES_compressed_paletted_texture',
'GL_OES_read_format',
'GL_OES_query_matrix',
'GL_EXT_depth_bounds_test',
'GL_EXT_texture_mirror_clamp',
'GL_EXT_blend_equation_separate',
'GL_MESA_pack_invert',
'GL_MESA_ycbcr_texture',
'GL_EXT_pixel_buffer_object',
'GL_NV_fragment_program_option',
'GL_NV_fragment_program2',
'GL_NV_vertex_program2_option',
'GL_NV_vertex_program3',
'GLX_SGIX_hyperpipe',
'GLX_MESA_agp_offset',
'GL_EXT_texture_compression_dxt1',
'GL_EXT_framebuffer_object',
'GL_GREMEDY_string_marker',
'GL_EXT_packed_depth_stencil',
'WGL_3DL_stereo_control',
'GL_EXT_stencil_clear_tag',
'GL_EXT_texture_sRGB',
'GL_EXT_framebuffer_blit',
'GL_EXT_framebuffer_multisample',
'GL_MESAX_texture_stack',
'GL_EXT_timer_query',
'GL_EXT_gpu_program_parameters',
'GL_APPLE_flush_buffer_range',
'GL_NV_gpu_program4',
'GL_NV_geometry_program4',
'GL_EXT_geometry_shader4',
'GL_NV_vertex_program4',
'GL_EXT_gpu_shader4',
'GL_EXT_draw_instanced',
'GL_EXT_packed_float ',
'WGL_EXT_pixel_format_packed_float ',
'GLX_EXT_fbconfig_packed_float',
'GL_EXT_texture_array',
'GL_EXT_texture_buffer_object',
'GL_EXT_texture_compression_latc',
'GL_EXT_texture_compression_rgtc',
'GL_EXT_texture_shared_exponent',
'GL_NV_depth_buffer_float',
'GL_NV_fragment_program4',
'GL_NV_framebuffer_multisample_coverage',
'GL_EXT_framebuffer_sRGB ',
'GLX_EXT_framebuffer_sRGB ',
'WGL_EXT_framebuffer_sRGB',
'GL_NV_geometry_shader4',
'GL_NV_parameter_buffer_object',
'GL_EXT_draw_buffers2',
'GL_NV_transform_feedback',
'GL_EXT_bindable_uniform',
'GL_EXT_texture_integer',
'GLX_EXT_texture_from_pixmap',
'GL_GREMEDY_frame_terminator',
'GL_NV_conditional_render',
'GL_NV_present_video ',
'GLX_NV_present_video ',
'WGL_NV_present_video',
'GLX_NV_video_output',
'WGL_NV_video_output',
'GLX_NV_swap_group',
'WGL_NV_swap_group',
'GL_EXT_transform_feedback',
'GL_EXT_direct_state_access',
'GL_EXT_vertex_array_bgra',
'WGL_NV_gpu_affinity',
'GL_EXT_texture_swizzle',
'GL_NV_explicit_multisample',
'GL_NV_transform_feedback2',
'GL_ATI_meminfo',
'GL_AMD_performance_monitor',
'WGL_AMD_gpu_association',
'GL_AMD_texture_texture4',
'GL_AMD_vertex_shader_tessellator',
'GL_EXT_provoking_vertex',
'GL_EXT_texture_snorm',
'GL_AMD_draw_buffers_blend',
'GL_APPLE_texture_range',
'GL_APPLE_float_pixels',
'GL_APPLE_vertex_program_evaluators',
'GL_APPLE_aux_depth_stencil',
'GL_APPLE_object_purgeable',
'GL_APPLE_row_bytes',
'GL_APPLE_rgb_422',
'GL_NV_video_capture ',
'GLX_NV_video_capture ',
'WGL_NV_video_capture',
'GL_EXT_swap_control',
'GL_NV_copy_image ',
'WGL_NV_copy_image ',
'GLX_NV_copy_image',
'GL_EXT_separate_shader_objects',
'GL_NV_parameter_buffer_object2',
'GL_NV_shader_buffer_load',
'GL_NV_vertex_buffer_unified_memory',
'GL_NV_texture_barrier',
'GL_AMD_shader_stencil_export',
'GL_AMD_seamless_cubemap_per_texture',
'GLX_INTEL_swap_event',
'GL_AMD_conservative_depth',
'GL_EXT_shader_image_load_store',
'GL_EXT_vertex_attrib_64bit',
'GL_NV_gpu_program5',
'GL_NV_gpu_shader5',
'GL_NV_shader_buffer_store',
'GL_NV_tessellation_program5',
'GL_NV_vertex_attrib_integer_64bit',
'GL_NV_multisample_coverage',
'GL_AMD_name_gen_delete',
'GL_AMD_debug_output',
'GL_NV_vdpau_interop',
'GL_AMD_transform_feedback3_lines_triangles',
'GLX_AMD_gpu_association',
'GLX_EXT_create_context_es2_profile ',
'GLX_EXT_create_context_es_profile',
'WGL_EXT_create_context_es2_profile ',
'WGL_EXT_create_context_es_profile',
'GL_AMD_depth_clamp_separate',
'GL_EXT_texture_sRGB_decode',
'GL_NV_texture_multisample',
'GL_AMD_blend_minmax_factor',
'GL_AMD_sample_positions',
'GL_EXT_x11_sync_object',
'WGL_NV_DX_interop',
'GL_AMD_multi_draw_indirect',
'GL_EXT_framebuffer_multisample_blit_scaled',
'GL_NV_path_rendering',
'GL_AMD_pinned_memory',
'WGL_NV_DX_interop2',
'GL_AMD_stencil_operation_extended',
'GLX_EXT_swap_control_tear',
'WGL_EXT_swap_control_tear',
'GL_AMD_vertex_shader_viewport_index',
'GL_AMD_vertex_shader_layer',
'GL_NV_bindless_texture',
'GL_NV_shader_atomic_float',
'GL_AMD_query_buffer_object',
'GL_NV_compute_program5',
'GL_NV_shader_storage_buffer_object',
'GL_NV_shader_atomic_counters',
'GL_NV_deep_texture3D',
'GL_NVX_conditional_render',
'GL_AMD_sparse_texture',
'GLX_EXT_buffer_age',
'GL_AMD_shader_trinary_minmax',
'GL_INTEL_map_texture',
'GL_NV_draw_texture',
'GL_AMD_interleaved_elements',
'GL_NV_bindless_multi_draw_indirect',
'GL_NV_blend_equation_advanced ',
'GL_NV_blend_equation_advanced_coherent',
'GL_NV_gpu_program5_mem_extended',
'GL_AMD_shader_atomic_counter_ops',
'WGL_NV_delay_before_swap',
'GL_EXT_shader_integer_mix',
'GL_NVX_gpu_memory_info',
'GL_EXT_debug_label',
'GL_EXT_debug_marker',
'GL_INTEL_fragment_shader_ordering',
'GL_AMD_occlusion_query_event',
'GL_ARB_imaging',
'GL_SGIS_point_parameters',
'GL_SGIX_fragment_lighting',
'GL_SGIX_igloo_interface',
'GL_SGIX_polynomial_ffd',
'GL_SGIX_pixel_tiles',
'GL_INGR_blend_func_separate',
# not extensions, but also need to be spoofed to keep the output concise
'GL_VERSION_1_2',
'GL_VERSION_1_2_DEPRECATED',
'GL_VERSION_1_3',
'GL_VERSION_1_3_DEPRECATED',
'GL_VERSION_1_4',
'GL_VERSION_1_4_DEPRECATED',
'GL_VERSION_1_5',
'GL_VERSION_2_0',
'GL_VERSION_2_1',
'GL_VERSION_3_0',
'GL_VERSION_3_1',
'GL_VERSION_3_2',
'GL_VERSION_3_3',
'GL_VERSION_4_0',
'GL_VERSION_4_1',
'GL_VERSION_4_2',
'GL_VERSION_4_3',
'GL_VERSION_4_4'
]
# Calls C preprocessor on glext.h, filtering all extensions but the requested
# one and then loads the prototypes from the temporary file.
def load_prototypes(extension):
global cxx, registry
if (len(sys.argv) != 2):
print("Usage: {0} <path/to/glext.h>".format(sys.argv[0]))
exit(1)
GLAPI = 'GLEXTEMU_PROTOTYPE'
output = tempfile.mktemp()
args = [cxx, '-E', sys.argv[1], '-o', output, '-DGL_GLEXT_PROTOTYPES',
'-DGLAPI={0}'.format(GLAPI)]
# define the extension symbols to suppress emission of unwanted stuff
for ext in registry:
if (ext == extension):
continue
args.append("-D{0}".format(ext))
subprocess.call(args)
f = open(output, 'r')
prototypes = []
for line in f:
line = line.strip()
# cast away everything that doesn't start with our GLAPI string
if (not line.startswith(GLAPI)):
continue
# cut off our GLAPI string
line = line[len(GLAPI):].strip()
# this is a prototype we can use
prototypes.append(line)
# clean up
f.close()
os.remove(output)
commands = []
for p in prototypes:
entry = {}
# split commands into return type, command name and arguments
parts = p.split(None, 1)
entry['return_type'] = parts[0]
# handle pointer return types
while (parts[1][0] == '*'):
entry['return_type'] += '*'
parts[1] = parts[1][1:].strip()
arg_start = parts[1].find('(')
entry['command'] = parts[1][:arg_start].strip()[2:]
args = []
arg_pairs = parts[1][arg_start + 1:-2].split(',')
for a in arg_pairs:
a = a.strip()
# Skip parameterless prototypes such as "(void)", which would otherwise
# raise an IndexError when splitting the argument into type and name.
if a in ('', 'void'):
continue
arg = {}
arg_parts = a.rsplit(None, 1)
arg['type'] = arg_parts[0].strip()
while (arg_parts[1][0] == '*'):
arg['type'] += '*'
arg_parts[1] = arg_parts[1][1:].strip()
arg['name'] = arg_parts[1].strip()
args.append(arg)
entry['args'] = args
commands.append(entry)
return commands
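if __name__ == '__main__':
    # Minimal usage sketch (not part of the original helper module): invoked as
    # `python glextemu.py /path/to/glext.h`, this prints the entry points of
    # one arbitrarily chosen extension.
    for cmd in load_prototypes('GL_ARB_debug_output'):
        arg_list = ', '.join(
            '{0} {1}'.format(a['type'], a['name']) for a in cmd['args'])
        print('{0} gl{1}({2});'.format(
            cmd['return_type'], cmd['command'], arg_list))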
|
|
"""
These algorithms are concerned with finding corners in a path. A path is a
list of (x,y) points.
They return a list of corners which does not include the endpoints of the
path.
There are different criteria depending on the color/type of the line for
finding corners. For example, we can be stricter about finding corners in hv
lines than we can in ordinary straight lines. Thus this module has several
algorithms for finding corners.
"""
import itertools
import math
from core.topology import rdp, pairwise, is_horizontal, distance, quadrance
import core.topology as topology
def triplewise(iterable):
"""s -> (s0,s1,s2), (s1,s2,s3), (s2,s3,s4), ..."""
a, b, c = itertools.tee(iterable, 3)
next(b, None)
next(c, None)
next(c, None)
return itertools.izip(a, b, c)
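# Example (sketch): triplewise([1, 2, 3, 4, 5]) yields
# (1, 2, 3), (2, 3, 4), (3, 4, 5).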
def angle(a, b, c):
"""Returns the angle (in radians) created between 3 points. The angle at B
going from point A to point C. Result is always between 0 and pi.
"""
ab = distance(a, b)
bc = distance(b, c)
ac = distance(a, c)
d = (ab**2 + bc**2 - ac**2) / (2 * ab * bc)
d = min(1, max(d, -1))
return math.acos(d)
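# Example (sketch): a right-angle bend scores pi/2, e.g.
# angle((1, 0), (0, 0), (0, 1)) == math.pi / 2 (up to floating-point error).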
def find_corners_hv(path, epsilon=3):
corners = []
simplified_path = rdp(path, epsilon)
for (a, b, c) in triplewise(simplified_path):
if is_horizontal(a, b) != is_horizontal(b, c):
corners.append(b)
return corners
# def find_corners_straight(path, epsilon=3, angle_tolerance=):
# corners = []
# simplified_path = rdp(path, epsilon)
# for (a, b, c) in triplewise(simplified_path):
# if is_horizontal(a, b) != is_horizontal(b, c):
# corners.append(b)
# return corners
def add((ax,ay), (bx,by)):
return ((ax+bx), (ay+by))
def sub((ax,ay), (bx,by)):
return ((ax-bx), (ay-by))
def dot((ax,ay), (bx,by)):
return ax*bx + ay*by
def normalize((x,y)):
length = math.sqrt(x**2 + y**2)
return (x/length, y/length)
def is_colinear(points):
epsilon = .000001
direction = normalize(sub(points[1], points[0]))
for a, b in pairwise(points):
d = normalize(sub(b, a))
(x,y) = sub(direction, d)
if abs(x) > epsilon or abs(y) > epsilon:
return False
return True
def get_neighborhood(path, index, direction, neighborhood):
"""Finds the neighborhood of points around path[index].
That is, all the points that are within neighborhood distance (measured as
path length) from the point in question (path[index]). This includes the
point itself.
We look at the points on the path before the point (if direction is -1) or
after the point (if direction is 1), and we return the points that are
within neighborhood distance of the point.
"""
path_length = 0
result = [path[index]]
index += direction
while 0 <= index < len(path):
point = path[index]
prev_point = path[index - direction]
step_distance = distance(prev_point, point)
path_length += step_distance
if path_length > neighborhood:
break
result.append(point)
index += direction
return result
def get_tangent_direction(path, index, direction, neighborhood):
"""The tangent as normalized vector at the point at path[index].
This is determined by considering the points leading either into the point
(if direction is -1) or out of the point (if direction is 1).
We look at the points that are within the neighborhood of the point in
question. We fit a circle to these points and then get the tangent of the
circle at the point.
"""
origin = path[index]
points = get_neighborhood(path, index, direction, neighborhood)
if is_colinear(points):
# If they're colinear, then getting the tangent is easy. We just pick
# two points in the path to get the direction.
return normalize(sub(origin, points[1]))
(center, radius) = topology.fit_circle_to_points(points)
perpendicular = normalize(sub(center, origin))
(px, py) = perpendicular
# There are two options for tangent direction: a or b.
a = (-py, px)
b = (py, -px)
pa = add(origin, a)
pb = add(origin, b)
# We choose the one that is furthest from points.
a_total = 0
b_total = 0
for point in points:
a_total += quadrance(pa, point)
b_total += quadrance(pb, point)
if a_total > b_total:
return a
else:
return b
def corner_score(path, index, neighborhood):
"""The likelihood that the point at index along the path is a corner.
It is computed by determining the tangent vector coming in to the point
and the tangent vector coming out of the point, and then getting the angle
between these two vectors.
The corner score is the difference between pi (180 degrees) and this
angle. The higher the score, the more likely the point is a sharp corner.
"""
if index < 10 or index >= len(path) - 10:
# If there are fewer than 10 points before or after the point, we don't
# have good enough information to tell that it's a corner, so we
# return a corner score of 0.
return 0
before_tangent = get_tangent_direction(path, index, -1, neighborhood)
after_tangent = get_tangent_direction(path, index, 1, neighborhood)
a = angle(before_tangent, (0,0), after_tangent)
return math.pi - a
def extremeness_score(path, index, neighborhood):
"""How extreme is the point at path[index].
This is computed by looking at the neighborhood of points and measuring
how far, on balance, they lie to one side of the point.
The higher the score, the more extreme the point in question.
"""
origin = path[index]
before_points = get_neighborhood(path, index, -1, neighborhood)
after_points = get_neighborhood(path, index, 1, neighborhood)
# Ensure that before_points and after_points have the same number of
# points, otherwise the score will get skewed towards whichever side has
# more points!
length = min(len(before_points), len(after_points))
before_points = before_points[0:length]
after_points = after_points[0:length]
total = (0,0)
for point in before_points + after_points:
d = sub(point, origin)
if not (d[0] == 0 and d[1] == 0):
total = add(total, normalize(d))
(x,y) = total
return x**2 + y**2
def find_corners(path, neighborhood=22, angle_tolerance=math.pi/5):
corners = []
# for (index, point) in enumerate(path):
# if corner_score(path, index, neighborhood) > angle_tolerance:
# corners.append(point)
# return corners
corner_scores = []
extremeness_scores = []
for (index, point) in enumerate(path):
c = corner_score(path, index, neighborhood)
corner_scores.append(c)
if c > angle_tolerance:
e = extremeness_score(path, index, neighborhood)
extremeness_scores.append(e)
else:
extremeness_scores.append(0)
for (index, score) in enumerate(corner_scores):
if score > angle_tolerance:
# To count as a corner, the point's extremeness score needs to be
# the highest in its neighborhood.
before_points = get_neighborhood(path, index, -1, neighborhood)
after_points = get_neighborhood(path, index, 1, neighborhood)
start_index = index - len(before_points) + 1
end_index = index + len(after_points) - 1
extremeness = extremeness_scores[index]
is_highest = True
for neighbor_index in range(start_index, end_index+1):
neighbor_extremeness = extremeness_scores[neighbor_index]
if neighbor_extremeness > extremeness:
is_highest = False
break
if neighbor_extremeness == extremeness and neighbor_index < index:
# We tie break in favor of the earlier corner.
is_highest = False
break
if is_highest:
corners.append(path[index])
return corners
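if __name__ == '__main__':
    # Minimal sketch of driving the hv detector (assumes core.topology is
    # importable): an L-shaped path has exactly one corner, at the bend.
    l_path = ([(x, 0) for x in range(0, 50, 2)] +
              [(48, y) for y in range(2, 50, 2)])
    print(find_corners_hv(l_path))  # expected: [(48, 0)]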
|
|
# -*- coding: utf-8 -*-
r"""
The :mod:`pygsp.plotting` module implements functionality to plot PyGSP objects
with a `pyqtgraph <https://www.pyqtgraph.org>`_ or `matplotlib
<https://matplotlib.org>`_ drawing backend (which can be controlled by the
:data:`BACKEND` constant or individually for each plotting call).
Most users won't use this module directly.
Graphs (from :mod:`pygsp.graphs`) are to be plotted with
:meth:`pygsp.graphs.Graph.plot` and
:meth:`pygsp.graphs.Graph.plot_spectrogram`.
Filters (from :mod:`pygsp.filters`) are to be plotted with
:meth:`pygsp.filters.Filter.plot`.
.. data:: BACKEND
The default drawing backend to use if none are provided to the plotting
functions. Should be either ``'matplotlib'`` or ``'pyqtgraph'``. In general
pyqtgraph is better for interactive exploration while matplotlib is better
at generating figures to be included in papers or elsewhere.
"""
from __future__ import division
import functools
import numpy as np
from pygsp import utils
_logger = utils.build_logger(__name__)
BACKEND = 'matplotlib'
_qtg_widgets = []
_plt_figures = []
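# Example (sketch): the backend can be switched globally before any plot call,
# e.g.
#
#     from pygsp import graphs, plotting
#     plotting.BACKEND = 'pyqtgraph'
#     graphs.Logo().plot()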
def _import_plt():
try:
import matplotlib as mpl
from matplotlib import pyplot as plt
from mpl_toolkits import mplot3d
except Exception as e:
raise ImportError('Cannot import matplotlib. Choose another backend '
'or try to install it with '
'pip (or conda) install matplotlib. '
'Original exception: {}'.format(e))
return mpl, plt, mplot3d
def _import_qtg():
try:
import pyqtgraph as qtg
import pyqtgraph.opengl as gl
from pyqtgraph.Qt import QtGui
except Exception as e:
raise ImportError('Cannot import pyqtgraph. Choose another backend '
'or try to install it with '
'pip (or conda) install pyqtgraph. You will also '
'need PyQt5 (or PySide) and PyOpenGL. '
'Original exception: {}'.format(e))
return qtg, gl, QtGui
def _plt_handle_figure(plot):
r"""Handle the common work (creating an axis if not given, setting the
title) of all matplotlib plot commands."""
# Preserve documentation of plot.
@functools.wraps(plot)
def inner(obj, **kwargs):
# Create a figure and an axis if none were passed.
if kwargs['ax'] is None:
_, plt, _ = _import_plt()
fig = plt.figure()
global _plt_figures
_plt_figures.append(fig)
if (hasattr(obj, 'coords') and obj.coords.ndim == 2 and
obj.coords.shape[1] == 3):
kwargs['ax'] = fig.add_subplot(111, projection='3d')
else:
kwargs['ax'] = fig.add_subplot(111)
title = kwargs.pop('title')
plot(obj, **kwargs)
kwargs['ax'].set_title(title)
try:
fig.show(warn=False)
except NameError:
# No figure created, an axis was passed.
pass
return kwargs['ax'].figure, kwargs['ax']
return inner
def close_all():
r"""Close all opened windows."""
global _qtg_widgets
for widget in _qtg_widgets:
widget.close()
_qtg_widgets = []
global _plt_figures
for fig in _plt_figures:
_, plt, _ = _import_plt()
plt.close(fig)
_plt_figures = []
def show(*args, **kwargs):
r"""Show created figures, alias to ``plt.show()``.
By default, showing plots does not block the prompt.
Calling this function will block execution.
"""
_, plt, _ = _import_plt()
plt.show(*args, **kwargs)
def close(*args, **kwargs):
r"""Close last created figure, alias to ``plt.close()``."""
_, plt, _ = _import_plt()
plt.close(*args, **kwargs)
def _qtg_plot_graph(G, edges, vertex_size, title):
qtg, gl, QtGui = _import_qtg()
if G.coords.shape[1] == 2:
widget = qtg.GraphicsLayoutWidget()
view = widget.addViewBox()
view.setAspectLocked()
if edges:
pen = tuple(np.array(G.plotting['edge_color']) * 255)
else:
pen = None
adj = _get_coords(G, edge_list=True)
g = qtg.GraphItem(pos=G.coords, adj=adj, pen=pen,
size=vertex_size/10)
view.addItem(g)
elif G.coords.shape[1] == 3:
if not QtGui.QApplication.instance():
QtGui.QApplication([]) # We want only one application.
widget = gl.GLViewWidget()
widget.opts['distance'] = 10
if edges:
x, y, z = _get_coords(G)
pos = np.stack((x, y, z), axis=1)
g = gl.GLLinePlotItem(pos=pos, mode='lines',
color=G.plotting['edge_color'])
widget.addItem(g)
gp = gl.GLScatterPlotItem(pos=G.coords, size=vertex_size/3,
color=G.plotting['vertex_color'])
widget.addItem(gp)
widget.setWindowTitle(title)
widget.show()
global _qtg_widgets
_qtg_widgets.append(widget)
def _plot_filter(filters, n, eigenvalues, sum, labels, title, ax, **kwargs):
r"""Plot the spectral response of a filter bank.
Parameters
----------
n : int
Number of points where the filters are evaluated.
eigenvalues : boolean
Whether to show the eigenvalues of the graph Laplacian.
The eigenvalues should have been computed with
:meth:`~pygsp.graphs.Graph.compute_fourier_basis`.
By default, the eigenvalues are shown if they are available.
sum : boolean
Whether to plot the sum of the squared magnitudes of the filters.
Default False if there is only one filter in the bank, True otherwise.
labels : boolean
Whether to label the filters.
Default False if there is only one filter in the bank, True otherwise.
title : str
Title of the figure.
ax : :class:`matplotlib.axes.Axes`
Axes where to draw the graph. Optional, created if not passed.
kwargs : dict
Additional parameters passed to the matplotlib plot function.
Useful for example to change the linewidth, linestyle, or set a label.
Returns
-------
fig : :class:`matplotlib.figure.Figure`
The figure the plot belongs to. Only with the matplotlib backend.
ax : :class:`matplotlib.axes.Axes`
The axes the plot belongs to. Only with the matplotlib backend.
Notes
-----
This function is only implemented with the matplotlib backend.
Examples
--------
>>> import matplotlib
>>> G = graphs.Logo()
>>> mh = filters.MexicanHat(G)
>>> fig, ax = mh.plot()
"""
if eigenvalues is None:
eigenvalues = (filters.G._e is not None)
if sum is None:
sum = (filters.n_filters > 1)
if labels is None:
labels = (filters.n_filters > 1)
if title is None:
title = repr(filters)
return _plt_plot_filter(filters, n=n, eigenvalues=eigenvalues, sum=sum,
labels=labels, title=title, ax=ax, **kwargs)
@_plt_handle_figure
def _plt_plot_filter(filters, n, eigenvalues, sum, labels, ax, **kwargs):
x = np.linspace(0, filters.G.lmax, n)
params = dict(alpha=0.5)
params.update(kwargs)
if eigenvalues:
# Evaluate the filter bank at the eigenvalues to avoid plotting
# artifacts, for example when deltas are centered on the eigenvalues.
x = np.sort(np.concatenate([x, filters.G.e]))
y = filters.evaluate(x).T
lines = ax.plot(x, y, **params)
# TODO: plot highlighted eigenvalues
if sum:
line_sum, = ax.plot(x, np.sum(y**2, 1), 'k', **kwargs)
if labels:
for i, line in enumerate(lines):
line.set_label(fr'$g_{{{i}}}(\lambda)$')
if sum:
line_sum.set_label(fr'$\sum_i g_i^2(\lambda)$')
ax.legend()
if eigenvalues:
segs = np.empty((len(filters.G.e), 2, 2))
segs[:, 0, 0] = segs[:, 1, 0] = filters.G.e
segs[:, :, 1] = [0, 1]
mpl, _, _ = _import_plt()
ax.add_collection(mpl.collections.LineCollection(
segs, transform=ax.get_xaxis_transform(), zorder=0,
color=[0.9]*3, linewidth=1, label='eigenvalues')
)
# Plot dots where the evaluation matters.
y = filters.evaluate(filters.G.e).T
params.pop('label', None)
for i in range(y.shape[1]):
params.update(color=lines[i].get_color())
ax.plot(filters.G.e, y[:, i], '.', **params)
if sum:
params.update(color=line_sum.get_color())
ax.plot(filters.G.e, np.sum(y**2, 1), '.', **params)
ax.set_xlabel(r"laplacian's eigenvalues (graph frequencies) $\lambda$")
ax.set_ylabel(r'filter response $g(\lambda)$')
def _plot_graph(G, vertex_color, vertex_size, highlight,
edges, edge_color, edge_width,
indices, colorbar, limits, ax, title, backend):
r"""Plot a graph with signals as color or vertex size.
Parameters
----------
vertex_color : array_like or color
Signal to plot as vertex color (length is the number of vertices).
If None, vertex color is set to `graph.plotting['vertex_color']`.
Alternatively, a color can be set in any format accepted by matplotlib.
Each vertex color can by specified by an RGB(A) array of dimension
`n_vertices` x 3 (or 4).
vertex_size : array_like or int
Signal to plot as vertex size (length is the number of vertices).
Vertex size ranges from 0.5 to 2 times `graph.plotting['vertex_size']`.
If None, vertex size is set to `graph.plotting['vertex_size']`.
Alternatively, a size can be passed as an integer.
The pyqtgraph backend only accepts an integer size.
highlight : iterable
List of indices of vertices to be highlighted.
Useful for example to show where a filter was localized.
Only available with the matplotlib backend.
edges : bool
Whether to draw edges in addition to vertices.
Default to True if less than 10,000 edges to draw.
Note that drawing many edges can be slow.
edge_color : array_like or color
Signal to plot as edge color (length is the number of edges).
Edge color is given by `graph.plotting['edge_color']` and transparency
ranges from 0.2 to 0.9.
If None, edge color is set to `graph.plotting['edge_color']`.
Alternatively, a color can be set in any format accepted by matplotlib.
Each edge color can by specified by an RGB(A) array of dimension
`n_edges` x 3 (or 4).
Only available with the matplotlib backend.
edge_width : array_like or int
Signal to plot as edge width (length is the number of edges).
Edge width ranges from 0.5 to 2 times `graph.plotting['edge_width']`.
If None, edge width is set to `graph.plotting['edge_width']`.
Alternatively, a width can be passed as an integer.
Only available with the matplotlib backend.
indices : bool
Whether to print the node indices (in the adjacency / Laplacian matrix
and signal vectors) on top of each node.
Useful to locate a node of interest.
Only available with the matplotlib backend.
colorbar : bool
Whether to plot a colorbar indicating the signal's amplitude.
Only available with the matplotlib backend.
limits : [vmin, vmax]
Map colors from vmin to vmax.
Defaults to signal minimum and maximum value.
Only available with the matplotlib backend.
ax : :class:`matplotlib.axes.Axes`
Axes where to draw the graph. Optional, created if not passed.
Only available with the matplotlib backend.
title : str
Title of the figure.
backend: {'matplotlib', 'pyqtgraph', None}
Defines the drawing backend to use.
Defaults to :data:`pygsp.plotting.BACKEND`.
Returns
-------
fig : :class:`matplotlib.figure.Figure`
The figure the plot belongs to. Only with the matplotlib backend.
ax : :class:`matplotlib.axes.Axes`
The axes the plot belongs to. Only with the matplotlib backend.
Notes
-----
The orientation of directed edges is not shown. If edges exist in both
directions, they will be drawn on top of each other.
Examples
--------
>>> import matplotlib
>>> graph = graphs.Sensor(20, seed=42)
>>> graph.compute_fourier_basis(n_eigenvectors=4)
>>> _, _, weights = graph.get_edge_list()
>>> fig, ax = graph.plot(graph.U[:, 1], vertex_size=graph.dw,
... edge_color=weights)
>>> graph.plotting['vertex_size'] = 300
>>> graph.plotting['edge_width'] = 5
>>> graph.plotting['edge_style'] = '--'
>>> fig, ax = graph.plot(edge_width=weights, edge_color=(0, .8, .8, .5),
... vertex_color='black')
>>> fig, ax = graph.plot(vertex_size=graph.dw, indices=True,
... highlight=[17, 3, 16], edges=False)
"""
if not hasattr(G, 'coords') or G.coords is None:
raise AttributeError('Graph has no coordinate set. '
'Please run G.set_coordinates() first.')
check_2d_3d = (G.coords.ndim != 2) or (G.coords.shape[1] not in [2, 3])
if G.coords.ndim != 1 and check_2d_3d:
raise AttributeError('Coordinates should be in 1D, 2D or 3D space.')
if G.coords.shape[0] != G.N:
raise AttributeError('Graph needs G.N = {} coordinates.'.format(G.N))
if backend is None:
backend = BACKEND
def check_shape(signal, name, length, many=False):
if (signal.ndim == 0) or (signal.shape[0] != length):
txt = '{}: signal should have length {}.'
txt = txt.format(name, length)
raise ValueError(txt)
if (not many) and (signal.ndim != 1):
txt = '{}: can plot only one signal (not {}).'
txt = txt.format(name, signal.shape[1])
raise ValueError(txt)
def normalize(x):
"""Scale values in [intercept, 1]. Return 0.5 if constant.
The intercept is read from G.plotting['normalize_intercept'],
a value in [0, 1] (default 0.25).
"""
ptp = x.ptp()
if ptp == 0:
return np.full(x.shape, 0.5)
else:
intercept = G.plotting['normalize_intercept']
return (1. - intercept) * (x - x.min()) / ptp + intercept
def is_color(color):
if backend == 'matplotlib':
mpl, _, _ = _import_plt()
if mpl.colors.is_color_like(color):
return True # single color
try:
return all(map(mpl.colors.is_color_like, color)) # color list
except TypeError:
return False # e.g., color is an int
else:
return False # No support for pyqtgraph (yet).
if vertex_color is None:
limits = [0, 0]
colorbar = False
if backend == 'matplotlib':
vertex_color = (G.plotting['vertex_color'],)
elif is_color(vertex_color):
limits = [0, 0]
colorbar = False
else:
vertex_color = np.asanyarray(vertex_color).squeeze()
check_shape(vertex_color, 'Vertex color', G.n_vertices,
many=(G.coords.ndim == 1))
if vertex_size is None:
vertex_size = G.plotting['vertex_size']
elif not np.isscalar(vertex_size):
vertex_size = np.asanyarray(vertex_size).squeeze()
check_shape(vertex_size, 'Vertex size', G.n_vertices)
vertex_size = G.plotting['vertex_size'] * 4 * normalize(vertex_size)**2
if edges is None:
edges = G.Ne < 10e3
if edge_color is None:
edge_color = (G.plotting['edge_color'],)
elif not is_color(edge_color):
edge_color = np.asanyarray(edge_color).squeeze()
check_shape(edge_color, 'Edge color', G.n_edges)
edge_color = 0.9 * normalize(edge_color)
edge_color = [
np.tile(G.plotting['edge_color'][:3], [len(edge_color), 1]),
edge_color[:, np.newaxis],
]
edge_color = np.concatenate(edge_color, axis=1)
if edge_width is None:
edge_width = G.plotting['edge_width']
elif not np.isscalar(edge_width):
edge_width = np.array(edge_width).squeeze()
check_shape(edge_width, 'Edge width', G.n_edges)
edge_width = G.plotting['edge_width'] * 2 * normalize(edge_width)
if limits is None:
limits = [1.05*vertex_color.min(), 1.05*vertex_color.max()]
if title is None:
title = G.__repr__(limit=4)
if backend == 'pyqtgraph':
if vertex_color is None:
_qtg_plot_graph(G, edges=edges, vertex_size=vertex_size,
title=title)
else:
_qtg_plot_signal(G, signal=vertex_color, vertex_size=vertex_size,
edges=edges, limits=limits, title=title)
elif backend == 'matplotlib':
return _plt_plot_graph(G, vertex_color=vertex_color,
vertex_size=vertex_size, highlight=highlight,
edges=edges, indices=indices, colorbar=colorbar,
edge_color=edge_color, edge_width=edge_width,
limits=limits, ax=ax, title=title)
else:
raise ValueError('Unknown backend {}.'.format(backend))
@_plt_handle_figure
def _plt_plot_graph(G, vertex_color, vertex_size, highlight,
edges, edge_color, edge_width,
indices, colorbar, limits, ax):
mpl, plt, mplot3d = _import_plt()
if edges and (G.coords.ndim != 1): # No edges for 1D plots.
sources, targets, _ = G.get_edge_list()
edges = [
G.coords[sources],
G.coords[targets],
]
edges = np.stack(edges, axis=1)
if G.coords.shape[1] == 2:
LineCollection = mpl.collections.LineCollection
elif G.coords.shape[1] == 3:
LineCollection = mplot3d.art3d.Line3DCollection
ax.add_collection(LineCollection(
edges,
linewidths=edge_width,
colors=edge_color,
linestyles=G.plotting['edge_style'],
zorder=1,
))
try:
iter(highlight)
except TypeError:
highlight = [highlight]
coords_hl = G.coords[highlight]
if G.coords.ndim == 1:
ax.plot(G.coords, vertex_color, alpha=0.5)
ax.set_ylim(limits)
for coord_hl in coords_hl:
ax.axvline(x=coord_hl, color=G.plotting['highlight_color'],
linewidth=2)
else:
sc = ax.scatter(*G.coords.T,
c=vertex_color, s=vertex_size,
marker='o', linewidths=0, alpha=0.5, zorder=2,
vmin=limits[0], vmax=limits[1])
if np.isscalar(vertex_size):
size_hl = vertex_size
else:
size_hl = vertex_size[highlight]
ax.scatter(*coords_hl.T,
s=2*size_hl, zorder=3,
marker='o', c='None',
edgecolors=G.plotting['highlight_color'], linewidths=2)
if G.coords.shape[1] == 3:
try:
ax.view_init(elev=G.plotting['elevation'],
azim=G.plotting['azimuth'])
ax.dist = G.plotting['distance']
except KeyError:
pass
if G.coords.ndim != 1 and colorbar:
plt.colorbar(sc, ax=ax)
if indices:
for node in range(G.N):
ax.text(*tuple(G.coords[node]), # accommodate 2D and 3D
s=node,
color='white',
horizontalalignment='center',
verticalalignment='center')
def _qtg_plot_signal(G, signal, edges, vertex_size, limits, title):
qtg, gl, QtGui = _import_qtg()
if G.coords.shape[1] == 2:
widget = qtg.GraphicsLayoutWidget()
view = widget.addViewBox()
elif G.coords.shape[1] == 3:
if not QtGui.QApplication.instance():
QtGui.QApplication([]) # We want only one application.
widget = gl.GLViewWidget()
widget.opts['distance'] = 10
if edges:
if G.coords.shape[1] == 2:
adj = _get_coords(G, edge_list=True)
pen = tuple(np.array(G.plotting['edge_color']) * 255)
g = qtg.GraphItem(pos=G.coords, adj=adj, symbolBrush=None,
symbolPen=None, pen=pen)
view.addItem(g)
elif G.coords.shape[1] == 3:
x, y, z = _get_coords(G)
pos = np.stack((x, y, z), axis=1)
g = gl.GLLinePlotItem(pos=pos, mode='lines',
color=G.plotting['edge_color'])
widget.addItem(g)
pos = [1, 8, 24, 40, 56, 64]
color = np.array([[0, 0, 143, 255], [0, 0, 255, 255], [0, 255, 255, 255],
[255, 255, 0, 255], [255, 0, 0, 255], [128, 0, 0, 255]])
cmap = qtg.ColorMap(pos, color)
# Map the signal onto [1, 64] so it indexes the 64-entry colormap.
signal = 1 + 63 * (signal - limits[0]) / (limits[1] - limits[0])
if G.coords.shape[1] == 2:
gp = qtg.ScatterPlotItem(G.coords[:, 0],
G.coords[:, 1],
size=vertex_size/10,
brush=cmap.map(signal, 'qcolor'))
view.addItem(gp)
if G.coords.shape[1] == 3:
gp = gl.GLScatterPlotItem(pos=G.coords,
size=vertex_size/3,
color=cmap.map(signal, 'float'))
widget.addItem(gp)
widget.setWindowTitle(title)
widget.show()
global _qtg_widgets
_qtg_widgets.append(widget)
def _plot_spectrogram(G, node_idx):
r"""Plot the graph's spectrogram.
Parameters
----------
node_idx : ndarray
Order to sort the nodes in the spectrogram.
By default, does not reorder the nodes.
Notes
-----
This function is only implemented for the pyqtgraph backend at the moment.
Examples
--------
>>> G = graphs.Ring(15)
>>> G.plot_spectrogram()
"""
from pygsp import features
qtg, _, _ = _import_qtg()
if not hasattr(G, 'spectr'):
features.compute_spectrogram(G)
M = G.spectr.shape[1]
spectr = G.spectr[node_idx, :] if node_idx is not None else G.spectr
spectr = np.ravel(spectr)
min_spec, max_spec = spectr.min(), spectr.max()
pos = np.array([0., 0.25, 0.5, 0.75, 1.])
color = [[20, 133, 212, 255], [53, 42, 135, 255], [48, 174, 170, 255],
[210, 184, 87, 255], [249, 251, 14, 255]]
color = np.array(color, dtype=np.ubyte)
cmap = qtg.ColorMap(pos, color)
spectr = (spectr.astype(float) - min_spec) / (max_spec - min_spec)
widget = qtg.GraphicsLayoutWidget()
label = 'frequencies {}:{:.2f}:{:.2f}'.format(0, G.lmax/M, G.lmax)
v = widget.addPlot(labels={'bottom': 'nodes', 'left': label})
v.setAspectLocked()
spi = qtg.ScatterPlotItem(np.repeat(np.arange(G.N), M),
np.ravel(np.tile(np.arange(M), (1, G.N))),
pxMode=False,
symbol='s',
size=1,
brush=cmap.map(spectr, 'qcolor'))
v.addItem(spi)
widget.setWindowTitle("Spectrogram of {}".format(G.__repr__(limit=4)))
widget.show()
global _qtg_widgets
_qtg_widgets.append(widget)
def _get_coords(G, edge_list=False):
sources, targets, _ = G.get_edge_list()
if edge_list:
return np.stack((sources, targets), axis=1)
coords = [np.stack((G.coords[sources, d], G.coords[targets, d]), axis=0)
for d in range(G.coords.shape[1])]
if G.coords.shape[1] == 2:
return coords
elif G.coords.shape[1] == 3:
return [coord.reshape(-1, order='F') for coord in coords]
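# Hedged sketch (assumption, not in the original file): the two forms returned
# by _get_coords, as consumed by the pyqtgraph helpers above.
def _example_get_coords():
    from pygsp import graphs
    graph = graphs.Sensor(10, seed=42)
    adj = _get_coords(graph, edge_list=True)  # (n_edges, 2) vertex index pairs
    segments = _get_coords(graph)             # per-dimension 2 x n_edges coordinate stacks
    return adj, segments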
|
|
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import xrange
from kmip.core import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.factories.attribute_values import AttributeValueFactory
from kmip.core import enums
from kmip.core.enums import AttributeType
from kmip.core.enums import Tags
from kmip.core.enums import Types
from kmip.core.enums import CredentialType
from kmip.core.enums import RevocationReasonCode as RevocationReasonCodeEnum
from kmip.core.errors import ErrorStrings
from kmip.core.misc import KeyFormatType
from kmip.core.primitives import Struct
from kmip.core.primitives import TextString
from kmip.core.primitives import ByteString
from kmip.core.primitives import Integer
from kmip.core.primitives import Enumeration
from kmip.core.utils import BytearrayStream
# 2.1
# 2.1.1
class Attribute(Struct):
class AttributeName(TextString):
def __init__(self, value=None):
super(Attribute.AttributeName, self).__init__(
value, Tags.ATTRIBUTE_NAME)
def __eq__(self, other):
if isinstance(other, Attribute.AttributeName):
if self.value != other.value:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, Attribute.AttributeName):
return not (self == other)
else:
return NotImplemented
class AttributeIndex(Integer):
def __init__(self, value=None):
super(Attribute.AttributeIndex, self).__init__(
value, Tags.ATTRIBUTE_INDEX)
def __init__(self,
attribute_name=None,
attribute_index=None,
attribute_value=None):
super(Attribute, self).__init__(tag=Tags.ATTRIBUTE)
self.value_factory = AttributeValueFactory()
self.attribute_name = attribute_name
self.attribute_index = attribute_index
self.attribute_value = attribute_value
if attribute_value is not None:
attribute_value.tag = Tags.ATTRIBUTE_VALUE
def read(self, istream):
super(Attribute, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
# Read the name of the attribute
self.attribute_name = Attribute.AttributeName()
self.attribute_name.read(tstream)
# Read the attribute index if it is next
if self.is_tag_next(Tags.ATTRIBUTE_INDEX, tstream):
self.attribute_index = Attribute.AttributeIndex()
self.attribute_index.read(tstream)
# Lookup the attribute class that belongs to the attribute name
name = self.attribute_name.value
enum_name = name.replace('.', '_').replace(' ', '_').upper()
enum_type = None
try:
enum_type = AttributeType[enum_name]
except KeyError:
# Likely custom attribute, pass raw name string as attribute type
enum_type = name
value = self.value_factory.create_attribute_value(enum_type, None)
self.attribute_value = value
self.attribute_value.tag = Tags.ATTRIBUTE_VALUE
self.attribute_value.read(tstream)
self.is_oversized(tstream)
def write(self, ostream):
tstream = BytearrayStream()
self.attribute_name.write(tstream)
if self.attribute_index is not None:
self.attribute_index.write(tstream)
self.attribute_value.write(tstream)
# Write the length and value of the attribute
self.length = tstream.length()
super(Attribute, self).write(ostream)
ostream.write(tstream.buffer)
def __eq__(self, other):
if isinstance(other, Attribute):
if self.attribute_name != other.attribute_name:
return False
elif self.attribute_index != other.attribute_index:
return False
elif self.attribute_value != other.attribute_value:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
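# Hedged usage sketch (assumption, not part of the original file): building an
# Attribute by hand with the value factory; the attribute name and enum value
# below are illustrative.
def _example_build_attribute():
    factory = AttributeValueFactory()
    name = Attribute.AttributeName('Cryptographic Algorithm')
    value = factory.create_attribute_value(
        AttributeType.CRYPTOGRAPHIC_ALGORITHM,
        enums.CryptographicAlgorithm.AES)
    return Attribute(attribute_name=name, attribute_value=value)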
# 2.1.2
class Credential(Struct):
class CredentialType(Enumeration):
def __init__(self, value=None):
super(Credential.CredentialType, self).__init__(
CredentialType, value, Tags.CREDENTIAL_TYPE)
class UsernamePasswordCredential(Struct):
class Username(TextString):
def __init__(self, value=None):
super(Credential.UsernamePasswordCredential.Username,
self).__init__(
value, Tags.USERNAME)
class Password(TextString):
def __init__(self, value=None):
super(Credential.UsernamePasswordCredential.Password,
self).__init__(
value, Tags.PASSWORD)
def __init__(self, username=None, password=None):
super(Credential.UsernamePasswordCredential, self).__init__(
tag=Tags.CREDENTIAL_VALUE)
self.username = username
self.password = password
self.validate()
def read(self, istream):
super(Credential.UsernamePasswordCredential, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
# Read the username of the credential
self.username = self.Username()
self.username.read(tstream)
# Read the password if it is next
if self.is_tag_next(Tags.PASSWORD, tstream):
self.password = self.Password()
self.password.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
self.username.write(tstream)
if self.password is not None:
self.password.write(tstream)
# Write the length and value of the credential
self.length = tstream.length()
super(Credential.UsernamePasswordCredential, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
pass
class DeviceCredential(Struct):
class DeviceSerialNumber(TextString):
def __init__(self, value=None):
super(Credential.DeviceCredential.DeviceSerialNumber, self).\
__init__(value, Tags.DEVICE_SERIAL_NUMBER)
class Password(TextString):
def __init__(self, value=None):
super(Credential.DeviceCredential.Password, self).\
__init__(value, Tags.PASSWORD)
class DeviceIdentifier(TextString):
def __init__(self, value=None):
super(Credential.DeviceCredential.DeviceIdentifier, self).\
__init__(value, Tags.DEVICE_IDENTIFIER)
class NetworkIdentifier(TextString):
def __init__(self, value=None):
super(Credential.DeviceCredential.NetworkIdentifier, self).\
__init__(value, Tags.NETWORK_IDENTIFIER)
class MachineIdentifier(TextString):
def __init__(self, value=None):
super(Credential.DeviceCredential.MachineIdentifier, self).\
__init__(value, Tags.MACHINE_IDENTIFIER)
class MediaIdentifier(TextString):
def __init__(self, value=None):
super(Credential.DeviceCredential.MediaIdentifier, self).\
__init__(value, Tags.MEDIA_IDENTIFIER)
def __init__(self,
device_serial_number=None,
password=None,
device_identifier=None,
network_identifier=None,
machine_identifier=None,
media_identifier=None):
super(Credential.DeviceCredential, self).__init__(
tag=Tags.CREDENTIAL_VALUE)
self.device_serial_number = device_serial_number
self.password = password
self.device_identifier = device_identifier
self.network_identifier = network_identifier
self.machine_identifier = machine_identifier
self.media_identifier = media_identifier
def read(self, istream):
super(Credential.DeviceCredential, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
# Read the device serial number if it is next
if self.is_tag_next(Tags.DEVICE_SERIAL_NUMBER, tstream):
self.device_serial_number = self.DeviceSerialNumber()
self.device_serial_number.read(tstream)
# Read the password if it is next
if self.is_tag_next(Tags.PASSWORD, tstream):
self.password = self.Password()
self.password.read(tstream)
# Read the device identifier if it is next
if self.is_tag_next(Tags.DEVICE_IDENTIFIER, tstream):
self.device_identifier = self.DeviceIdentifier()
self.device_identifier.read(tstream)
# Read the network identifier if it is next
if self.is_tag_next(Tags.NETWORK_IDENTIFIER, tstream):
self.network_identifier = self.NetworkIdentifier()
self.network_identifier.read(tstream)
# Read the machine identifier if it is next
if self.is_tag_next(Tags.MACHINE_IDENTIFIER, tstream):
self.machine_identifier = self.MachineIdentifier()
self.machine_identifier.read(tstream)
# Read the media identifier if it is next
if self.is_tag_next(Tags.MEDIA_IDENTIFIER, tstream):
self.media_identifier = self.MediaIdentifier()
self.media_identifier.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
if self.device_serial_number is not None:
self.device_serial_number.write(tstream)
if self.password is not None:
self.password.write(tstream)
if self.device_identifier is not None:
self.device_identifier.write(tstream)
if self.network_identifier is not None:
self.network_identifier.write(tstream)
if self.machine_identifier is not None:
self.machine_identifier.write(tstream)
if self.media_identifier is not None:
self.media_identifier.write(tstream)
# Write the length and value of the credential
self.length = tstream.length()
super(Credential.DeviceCredential, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
pass
def __init__(self, credential_type=None, credential_value=None):
super(Credential, self).__init__(tag=Tags.CREDENTIAL)
self.credential_type = credential_type
self.credential_value = credential_value
def read(self, istream):
super(Credential, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
# Read the type of the credential
self.credential_type = self.CredentialType()
self.credential_type.read(tstream)
# Use the type to determine what credential value to read
if self.credential_type.value is CredentialType.USERNAME_AND_PASSWORD:
self.credential_value = self.UsernamePasswordCredential()
elif self.credential_type.value is CredentialType.DEVICE:
self.credential_value = self.DeviceCredential()
else:
# TODO (peter-hamilton) Use more descriptive error here
raise NotImplementedError()
self.credential_value.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
self.credential_type.write(tstream)
self.credential_value.write(tstream)
# Write the length and value of the credential
self.length = tstream.length()
super(Credential, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
pass
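# Hedged usage sketch (assumption): a username/password Credential as it would
# be attached to a KMIP request header; the user name and password are
# placeholders.
def _example_credential():
    value = Credential.UsernamePasswordCredential(
        username=Credential.UsernamePasswordCredential.Username('alice'),
        password=Credential.UsernamePasswordCredential.Password('secret'))
    return Credential(
        credential_type=Credential.CredentialType(
            CredentialType.USERNAME_AND_PASSWORD),
        credential_value=value)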
# 2.1.3
class KeyBlock(Struct):
class KeyCompressionType(Enumeration):
def __init__(self, value=None):
super(KeyBlock.KeyCompressionType, self).__init__(
enums.KeyCompressionType, value, Tags.KEY_COMPRESSION_TYPE)
def __init__(self,
key_format_type=None,
key_compression_type=None,
key_value=None,
cryptographic_algorithm=None,
cryptographic_length=None,
key_wrapping_data=None):
super(KeyBlock, self).__init__(Tags.KEY_BLOCK)
self.key_format_type = key_format_type
self.key_compression_type = key_compression_type
self.key_value = key_value
self.cryptographic_algorithm = cryptographic_algorithm
self.cryptographic_length = cryptographic_length
self.key_wrapping_data = key_wrapping_data
self.validate()
def read(self, istream):
super(KeyBlock, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
self.key_format_type = KeyFormatType()
self.key_format_type.read(tstream)
if self.is_tag_next(Tags.KEY_COMPRESSION_TYPE, tstream):
self.key_compression_type = KeyBlock.KeyCompressionType()
self.key_compression_type.read(tstream)
self.key_value = KeyValue()
self.key_value.read(tstream)
if self.is_tag_next(Tags.CRYPTOGRAPHIC_ALGORITHM, tstream):
self.cryptographic_algorithm = attributes.CryptographicAlgorithm()
self.cryptographic_algorithm.read(tstream)
if self.is_tag_next(Tags.CRYPTOGRAPHIC_LENGTH, tstream):
self.cryptographic_length = attributes.CryptographicLength()
self.cryptographic_length.read(tstream)
if self.is_tag_next(Tags.KEY_WRAPPING_DATA, tstream):
self.key_wrapping_data = KeyWrappingData()
self.key_wrapping_data.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
self.key_format_type.write(tstream)
if self.key_compression_type is not None:
self.key_compression_type.write(tstream)
self.key_value.write(tstream)
if self.cryptographic_algorithm is not None:
self.cryptographic_algorithm.write(tstream)
if self.cryptographic_length is not None:
self.cryptographic_length.write(tstream)
if self.key_wrapping_data is not None:
self.key_wrapping_data.write(tstream)
# Write the length and value of the key block
self.length = tstream.length()
super(KeyBlock, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
if self.key_format_type is not None:
if type(self.key_format_type) is not KeyFormatType:
member = 'KeyBlock.key_format_type'
exp_type = KeyFormatType
rcv_type = type(self.key_format_type)
msg = ErrorStrings.BAD_EXP_RECV.format(member, 'type',
exp_type, rcv_type)
raise TypeError(msg)
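# Hedged construction sketch (assumption): a raw symmetric KeyBlock carrying
# algorithm and length metadata; the 16 zero bytes stand in for real key
# material.
def _example_key_block():
    return KeyBlock(
        key_format_type=KeyFormatType(enums.KeyFormatType.RAW),
        key_value=KeyValue(key_material=KeyMaterial(b'\x00' * 16)),
        cryptographic_algorithm=attributes.CryptographicAlgorithm(
            enums.CryptographicAlgorithm.AES),
        cryptographic_length=attributes.CryptographicLength(128))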
# 2.1.4
class KeyMaterial(ByteString):
def __init__(self, value=None):
super(KeyMaterial, self).__init__(value, Tags.KEY_MATERIAL)
# TODO (peter-hamilton) Get rid of this and replace with a KeyMaterial factory.
class KeyMaterialStruct(Struct):
def __init__(self):
super(KeyMaterialStruct, self).__init__(Tags.KEY_MATERIAL)
self.data = BytearrayStream()
self.validate()
def read(self, istream):
super(KeyMaterialStruct, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
self.data = BytearrayStream(tstream.read())
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
tstream.write(self.data.buffer)
self.length = tstream.length()
super(KeyMaterialStruct, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# NOTE (peter-hamilton): Intentional pass, no way to validate data.
pass
class KeyValue(Struct):
def __init__(self,
key_material=None,
attributes=None):
super(KeyValue, self).__init__(Tags.KEY_VALUE)
if key_material is None:
self.key_material = KeyMaterial()
else:
self.key_material = key_material
if attributes is None:
self.attributes = list()
else:
self.attributes = attributes
self.validate()
def read(self, istream):
super(KeyValue, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
# TODO (peter-hamilton) Replace this with a KeyMaterial factory.
if self.is_type_next(Types.STRUCTURE, tstream):
self.key_material = KeyMaterialStruct()
self.key_material.read(tstream)
else:
self.key_material = KeyMaterial()
self.key_material.read(tstream)
while self.is_tag_next(Tags.ATTRIBUTE, tstream):
attribute = Attribute()
attribute.read(tstream)
self.attributes.append(attribute)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
self.key_material.write(tstream)
for attribute in self.attributes:
attribute.write(tstream)
self.length = tstream.length()
super(KeyValue, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Replace with check against KeyMaterial factory.
if not isinstance(self.key_material, KeyMaterial):
msg = "invalid key material"
msg += "; expected {0}, received {1}".format(
KeyMaterial, self.key_material)
raise TypeError(msg)
if isinstance(self.attributes, list):
for i in xrange(len(self.attributes)):
attribute = self.attributes[i]
if not isinstance(attribute, Attribute):
msg = "invalid attribute ({0} in list)".format(i)
msg += "; expected {0}, received {1}".format(
Attribute, attribute)
raise TypeError(msg)
else:
msg = "invalid attributes list"
msg += "; expected {0}, received {1}".format(
list, self.attributes)
raise TypeError(msg)
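# Hedged usage sketch (assumption): round-tripping a KeyValue through
# BytearrayStream objects, mirroring how the read/write methods above are
# driven by the surrounding structures.
def _example_key_value_roundtrip():
    original = KeyValue(key_material=KeyMaterial(b'\x00' * 16))
    encoded = BytearrayStream()
    original.write(encoded)
    decoded = KeyValue()
    decoded.read(BytearrayStream(encoded.buffer))
    return decoded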
# 2.1.5
class WrappingMethod(Enumeration):
def __init__(self, value=None):
super(WrappingMethod, self).__init__(
enums.WrappingMethod, value, Tags.WRAPPING_METHOD)
class EncodingOption(Enumeration):
def __init__(self, value=None):
super(EncodingOption, self).__init__(
enums.EncodingOption, value, Tags.ENCODING_OPTION)
class KeyInformation(Struct):
def __init__(self,
unique_identifier=None,
cryptographic_parameters=None,
tag=Tags.ENCRYPTION_KEY_INFORMATION):
super(KeyInformation, self).__init__(tag=tag)
self.unique_identifier = unique_identifier
self.cryptographic_parameters = cryptographic_parameters
self.validate()
def read(self, istream):
super(KeyInformation, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
self.unique_identifier = attributes.UniqueIdentifier()
self.unique_identifier.read(tstream)
if self.is_tag_next(Tags.CRYPTOGRAPHIC_PARAMETERS, tstream):
self.cryptographic_parameters = CryptographicParameters()
self.cryptographic_parameters.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
self.unique_identifier.write(tstream)
if self.cryptographic_parameters is not None:
self.cryptographic_parameters.write(tstream)
# Write the length and value of the key information
self.length = tstream.length()
super(KeyInformation, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
class EncryptionKeyInformation(KeyInformation):
def __init__(self,
unique_identifier=None,
cryptographic_parameters=None,
tag=Tags.ENCRYPTION_KEY_INFORMATION):
super(EncryptionKeyInformation, self).__init__(
unique_identifier, cryptographic_parameters, tag)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
class MACSignatureKeyInformation(KeyInformation):
def __init__(self,
unique_identifier=None,
cryptographic_parameters=None,
tag=Tags.MAC_SIGNATURE_KEY_INFORMATION):
super(MACSignatureKeyInformation, self).__init__(
unique_identifier, cryptographic_parameters, tag)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
class KeyWrappingData(Struct):
class MACSignature(ByteString):
def __init__(self, value=None):
super(KeyWrappingData.MACSignature, self).__init__(
value, Tags.MAC_SIGNATURE)
class IVCounterNonce(ByteString):
def __init__(self, value=None):
super(KeyWrappingData.IVCounterNonce, self).__init__(
value, Tags.IV_COUNTER_NONCE)
def __init__(self,
wrapping_method=None,
encryption_key_information=None,
mac_signature_key_information=None,
mac_signature=None,
iv_counter_nonce=None,
encoding_option=None):
super(KeyWrappingData, self).__init__(Tags.KEY_WRAPPING_DATA)
self.wrapping_method = wrapping_method
self.encryption_key_information = encryption_key_information
self.mac_signature_key_information = mac_signature_key_information
self.mac_signature = mac_signature
self.iv_counter_nonce = iv_counter_nonce
self.encoding_option = encoding_option
self.validate()
def read(self, istream):
super(KeyWrappingData, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
self.wrapping_method = WrappingMethod()
self.wrapping_method.read(tstream)
if self.is_tag_next(Tags.ENCRYPTION_KEY_INFORMATION, tstream):
self.encryption_key_information = EncryptionKeyInformation()
self.encryption_key_information.read(tstream)
if self.is_tag_next(Tags.MAC_SIGNATURE_KEY_INFORMATION, tstream):
self.mac_signature_key_information = MACSignatureKeyInformation()
self.mac_signature_key_information.read(tstream)
if self.is_tag_next(Tags.MAC_SIGNATURE, tstream):
self.mac_signature = KeyWrappingData.MACSignature()
self.mac_signature.read(tstream)
if self.is_tag_next(Tags.IV_COUNTER_NONCE, tstream):
self.iv_counter_nonce = KeyWrappingData.IVCounterNonce()
self.iv_counter_nonce.read(tstream)
if self.is_tag_next(Tags.ENCODING_OPTION, tstream):
self.encoding_option = EncodingOption()
self.encoding_option.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
# Write the contents of the key wrapping data
self.wrapping_method.write(tstream)
if self.encryption_key_information is not None:
self.encryption_key_information.write(tstream)
if self.mac_signature_key_information is not None:
self.mac_signature_key_information.write(tstream)
if self.mac_signature is not None:
self.mac_signature.write(tstream)
if self.iv_counter_nonce is not None:
self.iv_counter_nonce.write(tstream)
if self.encoding_option is not None:
self.encoding_option.write(tstream)
# Write the length and value of the key wrapping data
self.length = tstream.length()
super(KeyWrappingData, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation
pass
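# Hedged construction sketch (assumption): minimal KeyWrappingData for an
# encrypt-only wrap; the wrapping key identifier is a placeholder.
def _example_key_wrapping_data():
    return KeyWrappingData(
        wrapping_method=WrappingMethod(enums.WrappingMethod.ENCRYPT),
        encryption_key_information=EncryptionKeyInformation(
            unique_identifier=attributes.UniqueIdentifier('wrapping-key-id')))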
# 2.1.6
class KeyWrappingSpecification(Struct):
class AttributeName(TextString):
def __init__(self, value=None):
super(KeyWrappingSpecification.AttributeName, self).__init__(
value, Tags.ATTRIBUTE_NAME)
def __init__(self,
wrapping_method=None,
encryption_key_information=None,
mac_signature_key_information=None,
attribute_name=None,
encoding_option=None):
super(KeyWrappingSpecification, self).__init__(
tag=Tags.KEY_WRAPPING_SPECIFICATION)
self.wrapping_method = wrapping_method
self.encryption_key_information = encryption_key_information
self.mac_signature_key_information = mac_signature_key_information
self.attribute_name = attribute_name
self.encoding_option = encoding_option
def read(self, istream):
super(KeyWrappingSpecification, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
self.wrapping_method = WrappingMethod()
self.wrapping_method.read(tstream)
if self.is_tag_next(Tags.ENCRYPTION_KEY_INFORMATION, tstream):
self.encryption_key_information = EncryptionKeyInformation()
self.encryption_key_information.read(tstream)
if self.is_tag_next(Tags.MAC_SIGNATURE_KEY_INFORMATION, tstream):
self.mac_signature_key_information = MACSignatureKeyInformation()
self.mac_signature_key_information.read(tstream)
if self.is_tag_next(Tags.ATTRIBUTE_NAME, tstream):
self.attribute_name = KeyWrappingSpecification.AttributeName()
self.attribute_name.read(tstream)
if self.is_tag_next(Tags.ENCODING_OPTION, tstream):
self.encoding_option = EncodingOption()
self.encoding_option.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
# Write the contents of the key wrapping specification
self.wrapping_method.write(tstream)
if self.encryption_key_information is not None:
self.encryption_key_information.write(tstream)
if self.mac_signature_key_information is not None:
self.mac_signature_key_information.write(tstream)
if self.attribute_name is not None:
self.attribute_name.write(tstream)
if self.encoding_option is not None:
self.encoding_option.write(tstream)
# Write the length and value of the key wrapping specification
self.length = tstream.length()
super(KeyWrappingSpecification, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
# 2.1.8
class TemplateAttribute(Struct):
def __init__(self,
names=None,
attributes=None,
tag=Tags.TEMPLATE_ATTRIBUTE):
super(TemplateAttribute, self).__init__(tag)
if names is None:
self.names = list()
else:
self.names = names
if attributes is None:
self.attributes = list()
else:
self.attributes = attributes
self.validate()
def read(self, istream):
super(TemplateAttribute, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
self.names = list()
self.attributes = list()
# Read the names of the template attribute, 0 or more
while self.is_tag_next(Tags.NAME, tstream):
name = attributes.Name()
name.read(tstream)
self.names.append(name)
# Read the attributes of the template attribute, 0 or more
while self.is_tag_next(Tags.ATTRIBUTE, tstream):
attribute = Attribute()
attribute.read(tstream)
self.attributes.append(attribute)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
# Write the names and attributes of the template attribute
for name in self.names:
name.write(tstream)
for attribute in self.attributes:
attribute.write(tstream)
# Write the length and value of the template attribute
self.length = tstream.length()
super(TemplateAttribute, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
self.__validate()
def __validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
def __eq__(self, other):
if isinstance(other, TemplateAttribute):
if len(self.names) != len(other.names):
return False
if len(self.attributes) != len(other.attributes):
return False
for i in xrange(len(self.names)):
a = self.names[i]
b = other.names[i]
if a != b:
return False
for i in xrange(len(self.attributes)):
a = self.attributes[i]
b = other.attributes[i]
if a != b:
return False
return True
else:
return NotImplemented
class CommonTemplateAttribute(TemplateAttribute):
def __init__(self,
names=None,
attributes=None):
super(CommonTemplateAttribute, self).__init__(
names, attributes, Tags.COMMON_TEMPLATE_ATTRIBUTE)
class PrivateKeyTemplateAttribute(TemplateAttribute):
def __init__(self,
names=None,
attributes=None):
super(PrivateKeyTemplateAttribute, self).__init__(
names, attributes, Tags.PRIVATE_KEY_TEMPLATE_ATTRIBUTE)
class PublicKeyTemplateAttribute(TemplateAttribute):
def __init__(self,
names=None,
attributes=None):
super(PublicKeyTemplateAttribute, self).__init__(
names, attributes, Tags.PUBLIC_KEY_TEMPLATE_ATTRIBUTE)
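# Hedged usage sketch (assumption): a TemplateAttribute carrying a single
# Cryptographic Length attribute built through the value factory; the length
# of 128 bits is illustrative.
def _example_template_attribute():
    factory = AttributeValueFactory()
    length = Attribute(
        attribute_name=Attribute.AttributeName('Cryptographic Length'),
        attribute_value=factory.create_attribute_value(
            AttributeType.CRYPTOGRAPHIC_LENGTH, 128))
    return TemplateAttribute(attributes=[length])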
# 2.1.9
class ExtensionName(TextString):
"""
The name of an extended Object.
A part of ExtensionInformation, specifically identifying an Object that is
a custom vendor addition to the KMIP specification. See Section 2.1.9 of
the KMIP 1.1 specification for more information.
Attributes:
value: The string data representing the extension name.
"""
def __init__(self, value=''):
"""
Construct an ExtensionName object.
Args:
value (str): The string data representing the extension name.
Optional, defaults to the empty string.
"""
super(ExtensionName, self).__init__(value, Tags.EXTENSION_NAME)
class ExtensionTag(Integer):
"""
The tag of an extended Object.
A part of ExtensionInformation. See Section 2.1.9 of the KMIP 1.1
specification for more information.
Attributes:
value: The tag number identifying the extended object.
"""
def __init__(self, value=0):
"""
Construct an ExtensionTag object.
Args:
value (int): A number representing the extension tag. Often
displayed in hex format. Optional, defaults to 0.
"""
super(ExtensionTag, self).__init__(value, Tags.EXTENSION_TAG)
class ExtensionType(Integer):
"""
The type of an extended Object.
A part of ExtensionInformation, specifically identifying the type of the
Object in the specification extension. See Section 2.1.9 of the KMIP 1.1
specification for more information.
Attributes:
value: The type enumeration for the extended object.
"""
def __init__(self, value=None):
"""
Construct an ExtensionType object.
Args:
value (Types): A number representing a Types enumeration value,
indicating the type of the extended Object. Optional, defaults
to None.
"""
super(ExtensionType, self).__init__(value, Tags.EXTENSION_TYPE)
class ExtensionInformation(Struct):
"""
A structure describing Objects defined in KMIP specification extensions.
It is used specifically for Objects with Item Tag values in the Extensions
range and appears in responses to Query requests for server extension
information. See Sections 2.1.9 and 4.25 of the KMIP 1.1 specification for
more information.
Attributes:
extension_name: The name of the extended Object.
extension_tag: The tag of the extended Object.
extension_type: The type of the extended Object.
"""
def __init__(self, extension_name=None, extension_tag=None,
extension_type=None):
"""
Construct an ExtensionInformation object.
Args:
extension_name (ExtensionName): The name of the extended Object.
extension_tag (ExtensionTag): The tag of the extended Object.
extension_type (ExtensionType): The type of the extended Object.
"""
super(ExtensionInformation, self).__init__(Tags.EXTENSION_INFORMATION)
if extension_name is None:
self.extension_name = ExtensionName()
else:
self.extension_name = extension_name
self.extension_tag = extension_tag
self.extension_type = extension_type
self.validate()
def read(self, istream):
"""
Read the data encoding the ExtensionInformation object and decode it
into its constituent parts.
Args:
istream (Stream): A data stream containing encoded object data,
supporting a read method; usually a BytearrayStream object.
"""
super(ExtensionInformation, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
self.extension_name.read(tstream)
if self.is_tag_next(Tags.EXTENSION_TAG, tstream):
self.extension_tag = ExtensionTag()
self.extension_tag.read(tstream)
if self.is_tag_next(Tags.EXTENSION_TYPE, tstream):
self.extension_type = ExtensionType()
self.extension_type.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
"""
Write the data encoding the ExtensionInformation object to a stream.
Args:
ostream (Stream): A data stream in which to encode object data,
supporting a write method; usually a BytearrayStream object.
"""
tstream = BytearrayStream()
self.extension_name.write(tstream)
if self.extension_tag is not None:
self.extension_tag.write(tstream)
if self.extension_type is not None:
self.extension_type.write(tstream)
self.length = tstream.length()
super(ExtensionInformation, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
"""
Error check the attributes of the ExtensionInformation object.
"""
self.__validate()
def __validate(self):
if not isinstance(self.extension_name, ExtensionName):
msg = "invalid extension name"
msg += "; expected {0}, received {1}".format(
ExtensionName, self.extension_name)
raise TypeError(msg)
if self.extension_tag is not None:
if not isinstance(self.extension_tag, ExtensionTag):
msg = "invalid extension tag"
msg += "; expected {0}, received {1}".format(
ExtensionTag, self.extension_tag)
raise TypeError(msg)
if self.extension_type is not None:
if not isinstance(self.extension_type, ExtensionType):
msg = "invalid extension type"
msg += "; expected {0}, received {1}".format(
ExtensionType, self.extension_type)
raise TypeError(msg)
def __eq__(self, other):
if isinstance(other, ExtensionInformation):
if self.extension_name != other.extension_name:
return False
elif self.extension_tag != other.extension_tag:
return False
elif self.extension_type != other.extension_type:
return False
else:
return True
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, ExtensionInformation):
return not (self == other)
else:
return NotImplemented
def __repr__(self):
name = "extension_name={0}".format(repr(self.extension_name))
tag = "extension_tag={0}".format(repr(self.extension_tag))
typ = "extension_type={0}".format(repr(self.extension_type))
return "ExtensionInformation({0}, {1}, {2})".format(name, tag, typ)
def __str__(self):
return repr(self)
@classmethod
def create(cls, extension_name=None, extension_tag=None,
extension_type=None):
"""
Construct an ExtensionInformation object from provided extension
values.
Args:
extension_name (str): The name of the extension. Optional,
defaults to None.
extension_tag (int): The tag number of the extension. Optional,
defaults to None.
extension_type (int): The type index of the extension. Optional,
defaults to None.
Returns:
ExtensionInformation: The newly created set of extension
information.
Example:
>>> x = ExtensionInformation.create('extension', 1, 1)
>>> x.extension_name.value
ExtensionName(value='extension')
>>> x.extension_tag.value
ExtensionTag(value=1)
>>> x.extension_type.value
ExtensionType(value=1)
"""
extension_name = ExtensionName(extension_name)
extension_tag = ExtensionTag(extension_tag)
extension_type = ExtensionType(extension_type)
return ExtensionInformation(
extension_name=extension_name,
extension_tag=extension_tag,
extension_type=extension_type)
# 3.31, 9.1.3.2.19
class RevocationReasonCode(Enumeration):
def __init__(self, value=RevocationReasonCodeEnum.UNSPECIFIED):
super(RevocationReasonCode, self).__init__(
RevocationReasonCodeEnum, value=value,
tag=Tags.REVOCATION_REASON_CODE)
# 3.31
class RevocationReason(Struct):
"""
A structure describing the reason for a revocation operation.
See Section 3.31 of the KMIP 1.1 specification for more information.
Attributes:
code: The revocation reason code enumeration
message: An optional revocation message
"""
def __init__(self, code=None, message=None):
"""
Construct a RevocationReason object.
Args:
code (RevocationReasonCode): The revocation reason code.
message (string): An optional revocation message.
"""
super(RevocationReason, self).__init__(tag=Tags.REVOCATION_REASON)
if code is not None:
self.revocation_code = RevocationReasonCode(value=code)
else:
self.revocation_code = RevocationReasonCode()
if message is not None:
self.revocation_message = TextString(
value=message,
tag=Tags.REVOCATION_MESSAGE)
else:
self.revocation_message = None
self.validate()
def read(self, istream):
"""
Read the data encoding the RevocationReason object and decode it
into its constituent parts.
Args:
istream (Stream): A data stream containing encoded object data,
supporting a read method; usually a BytearrayStream object.
"""
super(RevocationReason, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
self.revocation_code = RevocationReasonCode()
self.revocation_code.read(tstream)
if self.is_tag_next(Tags.REVOCATION_MESSAGE, tstream):
self.revocation_message = TextString()
self.revocation_message.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
"""
Write the data encoding the RevocationReason object to a stream.
Args:
ostream (Stream): A data stream in which to encode object data,
supporting a write method; usually a BytearrayStream object.
"""
tstream = BytearrayStream()
self.revocation_code.write(tstream)
if self.revocation_message is not None:
self.revocation_message.write(tstream)
# Write the length and value
self.length = tstream.length()
super(RevocationReason, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
"""
validate the RevocationReason object
"""
if not isinstance(self.revocation_code, RevocationReasonCode):
msg = "RevocationReasonCode expected"
raise TypeError(msg)
if self.revocation_message is not None:
if not isinstance(self.revocation_message, TextString):
msg = "TextString expected"
raise TypeError(msg)
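# Hedged usage sketch (assumption): a revocation reason with both a code and a
# free-text message; the message text is a placeholder.
def _example_revocation_reason():
    return RevocationReason(
        code=RevocationReasonCodeEnum.KEY_COMPROMISE,
        message='suspected key compromise')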
|
|
"""Tests the substate_context
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import unittest
import threading
import multiprocessing
from six import StringIO
from pyexperiment import state
from pyexperiment.state_context import substate_context
from pyexperiment.state_context import thread_state_context
from pyexperiment.state_context import processing_state_context
from pyexperiment.utils.stdout_redirector import stdout_err_redirector
class TestSubStateContext(unittest.TestCase):
"""Test the substate_context
"""
def tearDown(self):
"""Teardown test fixture
"""
state.reset_instance()
def test_set_get_first_level(self):
"""Test setting, getting sub-state at the lowest level
"""
with substate_context('test'):
state['a'] = 123
self.assertEqual(state['a'], 123)
self.assertEqual(state['test.a'], 123)
self.assertRaises(KeyError, state.__getitem__, 'a')
def test_set_get_higher_levels(self):
"""Test setting, getting sub-state at the higher levels
"""
with substate_context('test'):
state['a.b'] = 123
state['c.d.e'] = 345
self.assertEqual(state['a.b'], 123)
self.assertEqual(state['c.d.e'], 345)
self.assertEqual(state['test.a.b'], 123)
self.assertEqual(state['test.c.d.e'], 345)
self.assertRaises(KeyError, state.__getitem__, 'a.b')
self.assertRaises(KeyError, state.__getitem__, 'c.d.e')
def test_global_state(self):
"""Test setting, getting global state in sub-state context
"""
with substate_context('test'):
state['a.b'] = 123
state['c.d.e'] = 345
state['__foo'] = 42
state['__bar.foo'] = 43
self.assertEqual(state['a.b'], 123)
self.assertEqual(state['c.d.e'], 345)
self.assertEqual(state['__foo'], 42)
self.assertEqual(state['__bar.foo'], 43)
self.assertEqual(state['test.a.b'], 123)
self.assertEqual(state['test.c.d.e'], 345)
self.assertEqual(state['__foo'], 42)
self.assertEqual(state['__bar.foo'], 43)
self.assertRaises(KeyError, state.__getitem__, 'a.b')
self.assertRaises(KeyError, state.__getitem__, 'c.d.e')
def test_get_section(self):
"""Test getting a section of the state
"""
with substate_context('test'):
state['a.a'] = 12
state['a.b'] = 13
self.assertIn('a', state)
self.assertIn('a.a', state)
self.assertIn('a.b', state)
self.assertEqual(state['a.a'], 12)
self.assertEqual(state['a.b'], 13)
def test_get_nonexisting(self):
"""Test getting an item of the state that does not exist
"""
with substate_context('test'):
self.assertRaises(KeyError, lambda: state['bla'])
def test_iterate(self):
"""Test iterating over sub state
"""
with substate_context('test'):
state['a'] = 1
state['b'] = 2
for elem in state:
if elem == 'a':
self.assertEqual(state[elem], 1)
elif elem == 'b':
self.assertEqual(state[elem], 2)
else:
assert False
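# Hedged sketch (assumption, mirrors the tests above): how substate_context
# scopes keys outside of a unittest; double-underscore keys bypass the prefix.
def _example_substate_usage():
    with substate_context('experiment1'):
        state['result'] = 42        # stored as 'experiment1.result'
        state['__global_flag'] = 1  # stored unprefixed at the global level
    return state['experiment1.result'], state['__global_flag']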
class TestThreadStateContext(unittest.TestCase):
"""Test the thread_state_context
"""
def tearDown(self):
"""Teardown test fixture
"""
state.reset_instance()
def test_basic_functionality(self):
"""Test setting, getting sub-state in 20 threads
"""
with thread_state_context():
def worker(i):
"""thread worker function"""
state[str(i)] = i
self.assertEqual(state[str(i)], i)
threads = []
for i in range(20):
thread = threading.Thread(target=worker, args=(i,))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
for i in range(len(threads)):
self.assertEqual(state[str(i)], i)
def test_delete_nonexisting(self):
"""Test deleting non-existing sub-state in threads
"""
with thread_state_context():
def worker():
"""thread worker function"""
def dell():
"""Test function"""
del state['foo']
self.assertRaises(KeyError, dell)
threads = []
for _ in range(20):
thread = threading.Thread(target=worker)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def test_after_exception(self):
"""Test setting, getting state after exception in threads
"""
state['a'] = 1
buf_out = StringIO()
buf_err = StringIO()
try:
with stdout_err_redirector(buf_out, buf_err):
with thread_state_context():
def worker():
"""thread worker function"""
raise RuntimeError
threads = []
for _ in range(20):
thread = threading.Thread(target=worker)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
raise RuntimeError
except RuntimeError:
pass
self.assertEqual(state['a'], 1)
def worker1(i):
"""Process worker function, needs to be defined at top level"""
state[str(i)] = i
return state[str(i)] == i
def worker2(i):
"""Process worker function, needs to be defined at top level"""
state[str(i)] = 'bla'
del state[str(i)]
def worker3(i):
"""Process worker function, needs to be defined at top level"""
try:
_ = state[str(i)]
except KeyError:
return True
return False
def worker4():
"""Process worker function, needs to be defined at top level"""
try:
state[[1, 2, 3]] = 12
except TypeError:
return True
return False
def worker5():
"""Process worker function, needs to be defined at top level"""
try:
del state['bla']
except KeyError:
return True
return False
class TestProcessingStateContext(unittest.TestCase):
"""Test the processing_state_context
"""
def tearDown(self):
"""Teardown test fixture
"""
state.reset_instance()
def test_basic_functionality(self):
"""Test setting, getting state in 4 processes
"""
with processing_state_context():
n_jobs = 2
pool = multiprocessing.Pool(processes=4)
results = []
for i in range(n_jobs):
results.append(pool.apply_async(worker1, (i,)))
pool.close()
pool.join()
for i in range(n_jobs):
self.assertTrue(results[i].get())
self.assertEqual(state[str(i)], i)
def test_deleting(self):
"""Test deleting state in 4 processes
"""
with processing_state_context():
n_jobs = 2
pool = multiprocessing.Pool(processes=4)
results = []
for i in range(n_jobs):
results.append(pool.apply_async(worker2, (i,)))
pool.close()
pool.join()
for i in range(n_jobs):
self.assertNotIn(str(i), state)
def test_raises_on_getting(self):
"""Test getting non-existing state in 4 processes
"""
with processing_state_context():
n_jobs = 200
pool = multiprocessing.Pool(processes=4)
results = []
for i in range(n_jobs):
results.append(pool.apply_async(worker3, (i,)))
pool.close()
pool.join()
for i in range(n_jobs):
self.assertTrue(results[i].get())
def test_raises_on_setting(self):
"""Test setting bad state in 4 processes
"""
with processing_state_context():
n_jobs = 200
pool = multiprocessing.Pool(processes=4)
results = []
for _ in range(n_jobs):
results.append(pool.apply_async(worker4))
pool.close()
pool.join()
for i in range(n_jobs):
self.assertTrue(results[i].get())
def test_raises_on_deleting(self):
"""Test deleting bad state in 4 processes
"""
with processing_state_context():
n_jobs = 200
pool = multiprocessing.Pool(processes=4)
results = []
for _ in range(n_jobs):
results.append(pool.apply_async(worker5))
pool.close()
pool.join()
for i in range(n_jobs):
self.assertTrue(results[i].get())
def test_after_exception(self):
"""Test deleting bad state in 4 processes
"""
state['a'] = 12
try:
with processing_state_context():
raise RuntimeError
except RuntimeError:
pass
self.assertEqual(state['a'], 12)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
#-*- coding:utf-8 -*-
import datetime
import os
import shutil
import sys
import yaml
def gen_report_header(car_type, protocol, output_dir):
"""
Generate the report protocol header (.h) file for the given car type and protocol.
"""
report_header_tpl_file = "template/report_protocol.h.tpl"
FMT = get_tpl_fmt(report_header_tpl_file)
report_header_file = output_dir + "%s.h" % protocol["name"]
with open(report_header_file, 'w') as h_fp:
fmt_val = {}
fmt_val["car_type_lower"] = car_type.lower()
fmt_val["car_type_upper"] = car_type.upper()
fmt_val["protocol_name_upper"] = protocol["name"].upper()
fmt_val["classname"] = protocol["name"].replace('_', '').capitalize()
func_declare_list = []
for var in protocol["vars"]:
fmt = """
// config detail: %s
%s %s(const std::uint8_t* bytes, const int32_t length) const;"""
returntype = var["type"]
if var["type"] == "enum":
returntype = protocol["name"].capitalize(
) + "::" + var["name"].capitalize() + "Type"
declare = fmt % (str(var), returntype, var["name"].lower())
func_declare_list.append(declare)
fmt_val["func_declare_list"] = "\n".join(func_declare_list)
h_fp.write(FMT % fmt_val)
def gen_report_cpp(car_type, protocol, output_dir):
"""
    generate the report protocol .cc file for one protocol from its template
"""
report_cpp_tpl_file = "template/report_protocol.cc.tpl"
FMT = get_tpl_fmt(report_cpp_tpl_file)
report_cpp_file = output_dir + "%s.cc" % protocol["name"]
with open(report_cpp_file, 'w') as fp:
fmt_val = {}
fmt_val["car_type_lower"] = car_type
fmt_val["protocol_name_lower"] = protocol["name"]
classname = protocol["name"].replace('_', '').capitalize()
fmt_val["classname"] = classname
protocol_id = int(protocol["id"].upper(), 16)
if protocol_id > 2048:
fmt_val["id_upper"] = gen_esd_can_extended(protocol["id"].upper())
else:
fmt_val["id_upper"] = protocol["id"].upper()
set_var_to_protocol_list = []
func_impl_list = []
for var in protocol["vars"]:
var["name"] = var["name"].lower()
returntype = var["type"]
if var["type"] == "enum":
returntype = protocol["name"].capitalize(
) + "::" + var["name"].capitalize() + "Type"
# gen func top
fmt = """
// config detail: %s
%s %s::%s(const std::uint8_t* bytes, int32_t length) const {"""
impl = fmt % (str(var), returntype, classname, var["name"])
byte_info = get_byte_info(var)
impl = impl + gen_parse_value_impl(var, byte_info)
impl = impl + gen_report_value_offset_precision(var, protocol)
impl = impl + "}"
func_impl_list.append(impl)
proto_set_fmt = " chassis->mutable_%s()->mutable_%s()->set_%s(%s(bytes, length));"
func_name = var["name"]
proto_set = proto_set_fmt % (car_type, protocol["name"], var["name"],
func_name)
set_var_to_protocol_list.append(proto_set)
fmt_val["set_var_to_protocol_list"] = "\n".join(
set_var_to_protocol_list)
fmt_val["func_impl_list"] = "\n".join(func_impl_list)
fp.write(FMT % fmt_val)
def gen_report_value_offset_precision(var, protocol):
"""
    generate the C++ statements that apply sign extension, precision and offset
    to the raw value x and return the result
"""
impl = ""
if var["is_signed_var"]:
fmt = "\n x <<= %d;\n x >>= %d;\n"
        # x is an int32_t var
shift_bit = 32 - var["len"]
impl = impl + fmt % (shift_bit, shift_bit)
returntype = var["type"]
if var["type"] == "enum":
returntype = protocol["name"].capitalize() + "::" + var["name"].capitalize(
) + "Type"
impl = impl + "\n " + returntype + " ret = "
if var["type"] == "enum":
impl = impl + " static_cast<" + returntype + ">(x);\n"
else:
impl = impl + "x"
if var["precision"] != 1.0:
impl = impl + " * %f" % var["precision"]
if var["offset"] != 0.0:
impl = impl + " + %f" % (var["offset"])
impl = impl + ";\n"
return impl + " return ret;\n"
def gen_parse_value_impl(var, byte_info):
"""
    generate the C++ statements that extract the raw value x of a variable
    from the CAN frame bytes
"""
impl = ""
fmt = "\n Byte t%d(bytes + %d);\n"
shift_bit = 0
for i in range(0, len(byte_info)):
info = byte_info[i]
impl = impl + fmt % (i, info["byte"])
if i == 0:
impl = impl + " int32_t x = t%d.get_byte(%d, %d);\n" %\
(i, info["start_bit"], info["len"])
elif i == 1:
impl = impl + " int32_t t = t%d.get_byte(%d, %d);\n x <<= %d;\n x |= t;\n" %\
(i, info["start_bit"], info["len"], info["len"])
else:
impl = impl + " t = t%d.get_byte(%d, %d);\n x <<= %d;\n x |= t;\n" %\
(i, info["start_bit"], info["len"], info["len"])
shift_bit = shift_bit + info["len"]
return impl
def gen_control_header(car_type, protocol, output_dir):
"""
    generate the control protocol .h file for one protocol from its template
"""
control_header_tpl_file = "template/control_protocol.h.tpl"
FMT = get_tpl_fmt(control_header_tpl_file)
control_header_file = output_dir + "%s.h" % protocol["name"]
with open(control_header_file, 'w') as h_fp:
fmt_val = {}
fmt_val["car_type_lower"] = car_type
fmt_val["car_type_upper"] = car_type.upper()
fmt_val["protocol_name_upper"] = protocol["name"].upper()
classname = protocol["name"].replace('_', '').capitalize()
fmt_val["classname"] = classname
declare_public_func_list = []
declare_private_func_list = []
declare_private_var_list = []
fmtpub = "\n // config detail: %s\n %s* set_%s(%s %s);"
fmtpri = "\n // config detail: %s\n void set_p_%s(uint8_t* data, %s %s);"
for var in protocol["vars"]:
returntype = var["type"]
if var["type"] == "enum":
returntype = protocol["name"].capitalize(
) + "::" + var["name"].capitalize() + "Type"
private_var = ""
public_func_declare = fmtpub % (str(var), classname,
var["name"].lower(), returntype,
var["name"].lower())
private_func_declare = fmtpri % (str(var), var["name"].lower(),
returntype, var["name"].lower())
private_var = " %s %s_;" % (returntype, var["name"].lower())
declare_private_var_list.append(private_var)
declare_public_func_list.append(public_func_declare)
declare_private_func_list.append(private_func_declare)
fmt_val["declare_public_func_list"] = "\n".join(
declare_public_func_list)
fmt_val["declare_private_func_list"] = "\n".join(
declare_private_func_list)
fmt_val["declare_private_var_list"] = "\n".join(
declare_private_var_list)
h_fp.write(FMT % fmt_val)
def get_byte_info(var):
"""
doc string: https://wenku.baidu.com/view/3fe9a7a4dd3383c4bb4cd293.html
    see that link for the difference between motorola and intel byte ordering
    return : the byte layout of the variable in the protocol, i.e. which bytes it
            occupies, how many bits of each byte it uses, and the start bit
            inside each byte
    to make parsing a value out of a CAN frame easy, byte_info is arranged
            from the msb byte to the lsb byte order
"""
bit = var["bit"]
byte_info = []
left_len = var["len"]
byte_idx = bit / 8
bit_start = bit % 8
if var["order"] == "motorola":
while left_len > 0:
info = {}
info["byte"] = byte_idx
info["len"] = min(bit_start + 1, left_len)
# start_bit is always the lowest bit
info["start_bit"] = bit_start - info["len"] + 1
byte_info.append(info)
left_len = left_len - info["len"]
byte_idx = byte_idx + 1
bit_start = 7
else:
while left_len > 0:
info = {}
info["byte"] = byte_idx
info["len"] = min(8 - bit_start, left_len)
info["start_bit"] = bit_start
byte_info.append(info)
left_len = left_len - info["len"]
byte_idx = byte_idx + 1
bit_start = 0
        # the intel loop above walks from the lsb byte up to the msb byte, so
        # reverse it here; byte_info must always run from the msb (most
        # significant bit) byte to the lsb byte, as gen_parse_value_impl expects
        # (the motorola loop already produces that order)
        byte_info.reverse()
return byte_info
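# Illustrative sketch (hypothetical signal, not taken from any real DBC): an
# intel-ordered variable with bit=0 and len=16 occupies bytes 0 and 1, and
# get_byte_info({"bit": 0, "len": 16, "order": "intel"}) returns, msb byte first,
#   [{"byte": 1, "len": 8, "start_bit": 0}, {"byte": 0, "len": 8, "start_bit": 0}]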
def gen_control_decode_offset_precision(var):
"""
    generate the C++ statements that bound the physical value and convert it
    back to the raw integer x (the inverse of offset and precision)
"""
impl = "\n"
range_info = get_range_info(var)
if var["type"] == "double":
if range_info["low"].find(".") == -1:
range_info["low"] = "%s.0" % range_info["low"]
if range_info["high"].find(".") == -1:
range_info["high"] = "%s.0" % range_info["high"]
if var["type"] != "enum" and var["type"] != "bool":
impl = impl + " %s = ProtocolData::BoundedValue(%s, %s, %s);\n" %\
(var["name"].lower(), range_info["low"],
range_info["high"], var["name"].lower())
impl = impl + " int x ="
if var["offset"] != 0.0:
impl = impl + " (%s - %f)" % (var["name"].lower(), var["offset"])
else:
impl = impl + " %s" % var["name"].lower()
if var["precision"] != 1.0:
impl = impl + " / %f" % var["precision"]
return impl + ";\n"
def gen_control_encode_one_byte_value_impl(var, byte_info):
"""
    the raw value here is only int or double; an int can hold the full range
    whether the variable is signed or unsigned
"""
fmt = """
Byte to_set(data + %d);
to_set.set_value(x, %d, %d);
"""
return fmt % (byte_info["byte"], byte_info["start_bit"], byte_info["len"])
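# Illustrative sketch (hypothetical layout): for a signal held entirely in byte 3,
# i.e. byte_info == {"byte": 3, "start_bit": 0, "len": 8}, the emitted C++ is
#   Byte to_set(data + 3);
#   to_set.set_value(x, 0, 8);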
def get_range_info(var):
"""
    parse the physical_range string "[low|high]" of a variable into a dict
    with "low" and "high" keys
"""
info = {}
if "physical_range" not in var.keys():
return info
items = var["physical_range"].split('|')
info["low"] = items[0].split('[')[1]
info["high"] = items[1].split(']')[0]
return info
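# Illustrative sketch: a var whose physical_range is "[-90|90]" yields
#   get_range_info(var) == {"low": "-90", "high": "90"}
# (both bounds stay strings; gen_control_decode_offset_precision appends ".0"
# to them for double typed vars when needed)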
def gen_control_encode_value_impl(var, byte_info):
"""
    generate the C++ statements that split the raw value x across several
    bytes of the CAN frame
"""
impl = " uint8_t t = 0;\n"
fmt = """
t = x & %s;
Byte to_set%d(data + %d);
to_set%d.set_value(t, %d, %d);
"""
shift_bit = 0
for i in range(0, len(byte_info)):
info = byte_info[i]
if i != 0:
impl = impl + " x >>= %d;\n" % shift_bit
mask_bit = "0x%X" % ((1 << info["len"]) - 1)
impl = impl + fmt % (mask_bit, i, info["byte"], i, info["start_bit"],
info["len"])
shift_bit = info["len"]
return impl
def gen_control_value_func_impl(classname, var, protocol):
"""
    generate the set_xxx and set_p_xxx implementations for one control variable
"""
impl = ""
if var["len"] > 32:
print "This generator not support big than four bytes var." + \
"protocol classname: %s, var_name:%s " % (
class_name, var["name"])
return impl
fmt = """
%(classname)s* %(classname)s::set_%(var_name)s(
%(var_type)s %(var_name)s) {
%(var_name)s_ = %(var_name)s;
return this;
}
// config detail: %(config)s
void %(classname)s::set_p_%(var_name)s(uint8_t* data,
%(var_type)s %(var_name)s) {"""
fmt_val = {}
fmt_val["classname"] = classname
fmt_val["var_name"] = var["name"].lower()
returntype = var["type"]
if var["type"] == "enum":
returntype = protocol["name"].capitalize() + "::" + var["name"].capitalize(
) + "Type"
fmt_val["var_type"] = returntype
fmt_val["config"] = str(var)
impl = impl + fmt % fmt_val
impl = impl + gen_control_decode_offset_precision(var)
# get lsb to msb order
byte_info = get_byte_info(var)
byte_info.reverse()
if len(byte_info) == 1:
impl = impl + gen_control_encode_one_byte_value_impl(var, byte_info[0])
else:
impl = impl + gen_control_encode_value_impl(var, byte_info)
return impl + "}\n"
def gen_control_cpp(car_type, protocol, output_dir):
"""
    generate the control protocol .cc file for one protocol from its template
"""
control_cpp_tpl_file = "template/control_protocol.cc.tpl"
FMT = get_tpl_fmt(control_cpp_tpl_file)
control_cpp_file = output_dir + "%s.cc" % protocol["name"]
with open(control_cpp_file, 'w') as fp:
fmt_val = {}
fmt_val["car_type_lower"] = car_type
fmt_val["protocol_name_lower"] = protocol["name"]
protocol_id = int(protocol["id"].upper(), 16)
if protocol_id > 2048:
fmt_val["id_upper"] = gen_esd_can_extended(protocol["id"].upper())
else:
fmt_val["id_upper"] = protocol["id"].upper()
classname = protocol["name"].replace('_', '').capitalize()
fmt_val["classname"] = classname
set_private_var_list = []
set_private_var_init_list = []
set_func_impl_list = []
for var in protocol["vars"]:
func_impl = gen_control_value_func_impl(classname, var, protocol)
set_func_impl_list.append(func_impl)
set_private_var = " set_p_%s(data, %s_);" % (var["name"].lower(),
var["name"].lower())
set_private_var_list.append(set_private_var)
init_val = "0"
if var["type"] == "double":
init_val = "0.0"
elif var["type"] == "bool":
init_val = "false"
elif var["type"] == "enum":
if 0 in var["enum"]:
init_val = protocol["name"].capitalize(
) + "::" + var["enum"][0].upper()
else:
init_val = protocol["name"].capitalize(
) + "::" + var["enum"].values()[0].upper()
set_private_var_init_list.append(" %s_ = %s;" %
(var["name"].lower(), init_val))
fmt_val["set_private_var_list"] = "\n".join(set_private_var_list)
fmt_val["set_private_var_init_list"] = "\n".join(
set_private_var_init_list)
fmt_val["set_func_impl_list"] = "\n".join(set_func_impl_list)
fp.write(FMT % fmt_val)
def get_tpl_fmt(tpl_file):
"""
get fmt from tpl_file
"""
with open(tpl_file, 'r') as tpl:
fmt = tpl.readlines()
fmt = "".join(fmt)
return fmt
def gen_build_file(car_type, work_dir):
"""
    generate the BUILD file for the generated protocol sources
"""
build_tpl_file = "template/protocol_BUILD.tpl"
fmt = get_tpl_fmt(build_tpl_file)
with open(work_dir + "BUILD", "w") as build_fp:
fmt_var = {}
fmt_var["car_type"] = car_type.lower()
build_fp.write(fmt % fmt_var)
def gen_protocols(protocol_conf_file, protocol_dir):
"""
    generate report and control protocol sources for every protocol in the conf
"""
print "Generating protocols"
if not os.path.exists(protocol_dir):
os.makedirs(protocol_dir)
with open(protocol_conf_file, 'r') as fp:
content = yaml.load(fp)
protocols = content["protocols"]
car_type = content["car_type"]
for p_name in protocols:
protocol = protocols[p_name]
if protocol["protocol_type"] == "report":
gen_report_header(car_type, protocol, protocol_dir)
gen_report_cpp(car_type, protocol, protocol_dir)
elif protocol["protocol_type"] == "control":
gen_control_header(car_type, protocol, protocol_dir)
gen_control_cpp(car_type, protocol, protocol_dir)
else:
print "Unknown protocol_type:%s" % protocol["protocol_type"]
gen_build_file(car_type, protocol_dir)
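# Illustrative sketch of the conf this function expects (field names inferred
# from the keys read above; a real conf may carry additional fields):
#
#   car_type: mycar
#   protocols:
#     some_status_101:
#       name: some_status_101
#       id: '101'
#       protocol_type: report        # or: control
#       vars:
#         - name: speed
#           type: double             # int / double / bool / enum
#           bit: 7
#           len: 16
#           order: motorola          # or: intel
#           is_signed_var: false
#           precision: 0.01
#           offset: 0.0
#           physical_range: '[0|655.35]'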
def gen_esd_can_extended(str_id):
    """
    convert a standard CAN id hex string into its ESD extended-frame hex form
    """
    int_id = int(str_id, 16)
    int_id &= 0x1FFFFFFF
    int_id |= 0x20000000
    return hex(int_id).replace('0x', '')
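# Illustrative sketch (hypothetical id): gen_esd_can_extended("18FF1234") masks
# the id to 29 bits, sets the ESD extended-frame flag 0x20000000 and returns
# "38ff1234".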
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Usage:\npython %s some_config.yml" % sys.argv[0]
sys.exit(0)
with open(sys.argv[1], 'r') as fp:
conf = yaml.load(fp)
protocol_conf = conf["protocol_conf"]
protocol_dir = conf["output_dir"] + "vehicle/" + conf["car_type"].lower(
) + "/protocol/"
    shutil.rmtree(protocol_dir, True)
    os.makedirs(protocol_dir)
gen_protocols(protocol_conf, protocol_dir)
|
|
from monsterrules.common import *
from data.models import Monster, Profile
from collections import OrderedDict
class CoreMonsterBuilder(MonsterBuilder):
"""A MonsterBuilder for the core rules.
Builds a monster based on the rules presented in the published version of
Dungeon World."""
def __init__(self):
self.monster = Monster()
self.monster.creation_rules = CoreMonsterBuilder.id
self.damage = DiceBuilder()
self.hp = 0
self.armor = 0
self.piercing = 0
self.weapon = ""
# Name to be displayed in UI
name = "Core"
# Unique ID to identify these rules
id = "core"
# Builder
# The build method returns the completed monster
def Build(self):
self.monster.damage = self.weapon +" ("+ self.damage.Build()+" damage"
if self.piercing > 0:
self.monster.damage += " "+str(self.piercing)+" piercing"
self.monster.damage += ")"
self.monster.hp = str(self.hp)+" HP"
self.monster.armor = str(self.armor)+" armor"
self.monster.creation_rules = CoreMonsterBuilder.id
return self.monster
# Deltas
# This class and the ___apply_delta method provide a simple pattern for
# declaring modifications to be made to the monster under construction
class CoreMonsterDelta(object):
"""A delta between the current state of a monster and a desired state."""
def __init__(self):
self.damage_die = 0
self.hp_bonus = 0
self.tags_to_add = []
self.damage_tags_to_add = []
self.damage_bonus = 0
self.armor_bonus = 0
self.best_damage = False
self.worst_damage = False
self.piercing = 0
self.die_size_increases = 0
self.die_size_decreases = 0
def SetDamageDie(self, dicesize):
self.damage_die = dicesize
return self
def AddHP(self, value):
self.hp_bonus += value
return self
def AddTag(self, tag):
if isinstance(tag, basestring):
self.tags_to_add.append(tag)
return self
def AddDamageTag(self, tag):
if isinstance(tag, basestring):
self.damage_tags_to_add.append(tag)
return self
def AddDamage(self, value):
self.damage_bonus += value
return self
def AddArmor(self, value):
self.armor_bonus += value
return self
def SetBest(self):
self.best_damage = True
return self
def SetWorst(self):
self.worst_damage = True
return self
def AddPiercing(self, value):
self.piercing += value
return self
def IncreaseDieSize(self):
self.die_size_increases += 1
return self
def DecreaseDieSize(self):
self.die_size_decreases += 1
return self
def ___apply_delta(self, delta):
"""Apply a delta object.
Args:
delta: the delta object to be applied"""
if delta.damage_die:
self.damage.SetDieSize(delta.damage_die)
self.hp += delta.hp_bonus
self.damage.AddBonus(delta.damage_bonus)
for tag in delta.tags_to_add:
if tag not in self.monster.tags:
self.monster.tags.append(tag)
for tag in delta.damage_tags_to_add:
if tag not in self.monster.damage_tags:
self.monster.damage_tags.append(tag)
self.armor += delta.armor_bonus
self.piercing += delta.piercing
if delta.best_damage:
self.damage.SetBest()
if delta.worst_damage:
self.damage.SetWorst()
for i in xrange(0, delta.die_size_increases):
self.damage.IncreaseDieSize()
for i in xrange(0, delta.die_size_decreases):
self.damage.DecreaseDieSize()
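  # Illustrative sketch: every Option below wraps a delta built fluently, e.g.
  #   CoreMonsterDelta().SetDamageDie(8).AddHP(6).AddTag("Group")
  # is the delta folded into the monster via ___apply_delta when the player
  # answers "In small groups".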
# Questions
@Question(0)
@Prompt("What is it called?")
@ExpectsShortText
@Required
def name(self, value):
self.monster.name = value
@Question(1)
@Prompt("What is it known to do?")
@Description("Write a monster move describing what it does.")
@ExpectsShortText
@Required
def firstMove(self, value):
self.monster.moves.append(value)
@Question(2)
@Prompt("What does it want that causes problems for others?")
@Description("This is its instinct. Write it as an intended action, like 'to destroy.'")
@ExpectsShortText
@Required
def instinct(self, value):
self.monster.instinct = value
organizationOptions = OrderedDict([
("In large groups" , Option(CoreMonsterDelta()
.SetDamageDie(6)
.AddHP(3)
.AddTag("Horde"))),
("In small groups" , Option(CoreMonsterDelta()
.SetDamageDie(8)
.AddHP(6)
.AddTag("Group"))),
("All by its lonesome" , Option(CoreMonsterDelta()
.SetDamageDie(10)
.AddHP(12)
.AddTag("Solitary"))),
])
@Question(3)
@Prompt("How does it usually hunt or fight?")
@ExpectsOne(organizationOptions)
@Required
def organization(self, value):
self.___apply_delta(value)
sizeOptions = OrderedDict([
("Smaller than a house cat" , Option(CoreMonsterDelta()
.AddTag("Tiny")
.AddDamage(-2)
.AddDamageTag("Hand"))),
("Halfling-esque" , Option(CoreMonsterDelta()
.AddTag("Small")
.AddDamageTag("Close"))),
("About human size" , Option(CoreMonsterDelta()
.AddDamageTag("Close"))),
("As big as a cart" , Option(CoreMonsterDelta()
.AddTag("Large")
.AddDamageTag(["Close", "Reach"])
.AddHP(4))),
("Much larger than a cart" , Option(CoreMonsterDelta()
.AddTag("Huge")
.AddDamageTag("Reach")
.AddHP(8)
.AddDamage(3))),
])
@Question(4)
@Prompt("How big is it?")
@ExpectsOne(sizeOptions)
@Required
def size(self, value):
self.___apply_delta(value)
defenseOptions = OrderedDict([
("Cloth or flesh", Option(CoreMonsterDelta()
.AddArmor(0))),
("Leathers or thick hide", Option(CoreMonsterDelta()
.AddArmor(1))),
("Mail or scales", Option(CoreMonsterDelta()
.AddArmor(2))),
("Plate or bone", Option(CoreMonsterDelta()
.AddArmor(3))),
("Permanent magical protection", Option(CoreMonsterDelta()
.AddTag("Magical")
.AddArmor(4))),
])
@Question(5)
@Prompt("What is its most important defense?")
@ExpectsOne(defenseOptions)
@Required
def defense(self, value):
self.___apply_delta(value)
reputationOptions = OrderedDict([
("Unrelenting strength" , Option(CoreMonsterDelta()
.AddDamage(2)
.AddDamageTag("Forceful"))),
("Skill in offense" , Option(CoreMonsterDelta()
.SetBest())),
("Skill in defense" , Option(CoreMonsterDelta()
.AddArmor(2))),
("Deft Strikes" , Option(CoreMonsterDelta()
.AddPiercing(1))),
("Uncanny endurance" , Option(CoreMonsterDelta()
.AddHP(4))),
("Deceit and trickery" , Option(CoreMonsterDelta()
.AddTag("Stealthy"))),
("A useful adaptation", Option(CoreMonsterDelta())),
("Divine power" , Option(CoreMonsterDelta()
.AddDamage(2)
.AddTag("Divine"))),
("Divine health" , Option(CoreMonsterDelta()
.AddHP(2)
.AddTag("Divine"))),
("Spells and magic" , Option(CoreMonsterDelta()
.AddTag("Magical"))),
])
@Question(6)
@Prompt("What is it known for?")
@ExpectsMultiple(reputationOptions)
def reputation(self, value):
self.___apply_delta(value)
@Question(1, reputationOptions["A useful adaptation"])
@Prompt("List the adaptiations")
@ExpectsShortText
def specialQualities(self, value):
for item in value.split(', '):
self.monster.special_qualities.append(item)
@Question(1, reputationOptions["Deceit and trickery"])
@Prompt("Write a move about dirty tricks")
@ExpectsShortText
def stealthmove(self, value):
self.monster.moves.append(value)
@Question(1, reputationOptions["Spells and magic"])
@Prompt("Write a move about its spells")
@ExpectsShortText
def spellmove(self, value):
self.monster.moves.append(value)
@Question(7)
@Prompt("What is its most common form of attack?")
@Description("Common answers include: a type of weapon, claws, a specific spell.")
@ExpectsShortText
@Required
def attack(self, value):
self.weapon = value
weaponModifierOptions = OrderedDict([
("Its armaments are vicious and obvious" , Option(CoreMonsterDelta()
.AddDamage(2))),
("It lets the monster keep others at bay" , Option(CoreMonsterDelta()
.AddDamageTag("Reach"))),
("Its armaments are small and weak" , Option(CoreMonsterDelta()
.DecreaseDieSize())),
("Its armaments can slice or pierce metal" , Option(CoreMonsterDelta()
.AddPiercing(1))),
("It can just tear metal apart" , Option(CoreMonsterDelta()
.AddPiercing(2))),
("Armor doesn't help with the damage it deals (due to magic, size, etc.)" , Option(CoreMonsterDelta()
.AddDamageTag("Ignores Armor"))),
("It can attack from a few paces" , Option(CoreMonsterDelta()
.AddDamageTag("Near"))),
("It can attack from anywhere it can see you" , Option(CoreMonsterDelta()
.AddDamageTag("Far"))),
])
@Question(8)
@Prompt("Which of these apply to its form of attack?")
@ExpectsMultiple(weaponModifierOptions)
def weaponModifier(self, value):
self.___apply_delta(value)
generalOptions = OrderedDict([
("It isn't dangerous because of the wounds it inflicts, but for other reasons", Option(CoreMonsterDelta()
.AddTag("Devious")
.DecreaseDieSize())),
("It organizes into larger groups that it can call on for support", Option(CoreMonsterDelta()
.AddTag("Organized"))),
("It's as smart as a human or thereabouts", Option(CoreMonsterDelta()
.AddTag("Intelligent"))),
("It actively defends itself with a shield or similar", Option(CoreMonsterDelta()
.AddArmor(1)
.AddTag("Cautious"))),
("It collects trinkets that humans would consider valuable (gold, gems, secrets)", Option(CoreMonsterDelta()
.AddTag("Hoarder"))),
("It's from beyond this world", Option(CoreMonsterDelta()
.AddTag("Planar"))),
("It's kept alive by something beyond simple biology", Option(CoreMonsterDelta()
.AddHP(4))),
("It was made by someone", Option(CoreMonsterDelta()
.AddTag("Construct"))),
("Its appearance is disturbing, terrible, or horrible", Option(CoreMonsterDelta()
.AddTag("Terrifying"))),
("It doesn't have organs or discernible anatomy", Option(CoreMonsterDelta()
.AddTag("Amorphous")
.AddHP(3)
.AddArmor(1))),
("It (or its species) is ancient, older than man, elves, and dwarves", Option(CoreMonsterDelta()
.IncreaseDieSize())),
("It abhors violence", Option(CoreMonsterDelta()
.SetWorst())),
])
@Question(9)
@Prompt("Which of these describe it?")
@ExpectsMultiple(generalOptions)
def general(self, value):
self.___apply_delta(value)
@Question(1, generalOptions["It isn't dangerous because of the wounds it inflicts, but for other reasons"])
@Prompt("Write a move about why it's dangerous")
@ExpectsShortText
def craftymove(self, value):
self.monster.moves.append(value)
@Question(1, generalOptions["It organizes into larger groups that it can call on for support"])
@Prompt("Write a move about calling on others for help")
@ExpectsShortText
def organizedmove(self, value):
self.monster.moves.append(value)
@Question(1, generalOptions["It's from beyond this world"])
@Prompt("Write a move about using its otherworldly knowledge and power")
@ExpectsShortText
def planarmove(self, value):
self.monster.moves.append(value)
@Question(1, generalOptions["It was made by someone"])
@Prompt("Give it a special quality or two about its construction or purpose")
@ExpectsShortText
def morespecialqualities(self, value):
for item in value.split(', '):
self.monster.special_qualities.append(item)
@Question(1, generalOptions["Its appearance is disturbing, terrible, or horrible"])
@Prompt("Write a special quality about why it's so horrendous")
@ExpectsShortText
def horriblequality(self, value):
for item in value.split(', '):
self.monster.special_qualities.append(item)
@Question(11)
@Prompt("Describe the monster:")
@ExpectsLongText
def describe(self, value):
self.monster.description = value
|
|
import json
import us
from django.shortcuts import render, get_object_or_404
from django.db.models import Count
from openstates.data.models import LegislativeSession, Person
from utils.common import abbr_to_jid, sessions_with_bills, states
from django.views.decorators.http import require_http_methods
from django.views.decorators.cache import never_cache
from django.contrib.auth.decorators import user_passes_test
from django.http import JsonResponse
from .unmatched import unmatched_to_deltas
from people_admin.models import (
UnmatchedName,
NameStatus,
DeltaSet,
PersonDelta,
PersonRetirement,
NewPerson,
)
from people_admin.git import delta_set_to_pr
MATCHER_PERM = "people_admin.can_match_names"
EDIT_PERM = "people_admin.can_edit"
RETIRE_PERM = "people_admin.can_retire"
def person_data(person):
"""similar to utils.people.person_as_dict but customized for editable fields"""
extras = {}
identifier_types = ("twitter", "facebook", "instagram", "youtube")
for identifier in person.identifiers.all():
for itype in identifier_types:
if identifier.scheme == itype:
extras[itype] = identifier.identifier
for off in person.offices.all():
if off.fax:
extras[off.classification + "_fax"] = off.fax
if off.voice:
extras[off.classification + "_voice"] = off.voice
if off.address:
extras[off.classification + "_address"] = off.address
return {
"id": person.id,
"name": person.name,
"title": person.current_role["title"],
"district": person.current_role["district"],
"party": person.primary_party,
"image": person.image,
"email": person.email,
**extras,
}
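# Illustrative sketch (hypothetical person, all values invented): the dict built
# above looks like
#   {"id": "ocd-person/1234...", "name": "Jane Doe", "title": "Senator",
#    "district": "4", "party": "Democratic", "image": "", "email": "",
#    "twitter": "janedoe", "capitol_voice": "555-555-0100",
#    "district_address": "1 Main St, Springfield"}
# where the trailing keys appear only when the matching identifier or office
# field exists.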
@user_passes_test(lambda u: u.has_perm(MATCHER_PERM) or u.has_perm(EDIT_PERM))
def jurisdiction_list(request):
state_people_data = {}
unmatched_by_state = dict(
UnmatchedName.objects.filter(status="U")
.values_list("session__jurisdiction__name")
.annotate(number=Count("id"))
)
for state in states + [us.unitedstatesofamerica]:
jid = abbr_to_jid(state.abbr)
current_people = [
person_data(p)
for p in Person.objects.filter(
current_jurisdiction_id=jid, current_role__isnull=False
).prefetch_related("offices")
]
photoless = 0
phoneless = 0
addressless = 0
for person in current_people:
if "image" not in person or person["image"] == "":
photoless += 1
elif "capitol_voice" not in person and "district_voice" not in person:
phoneless += 1
elif "capitol_address" not in person and "district_address" not in person:
addressless += 1
jurisdiction = "United States" if state.abbr == "US" else state.name
state_people_data[state.abbr.lower()] = {
"state": jurisdiction,
"unmatched": unmatched_by_state.get(state.name, 0),
"missing_photo": photoless,
"missing_phone": phoneless,
"missing_address": addressless,
}
return render(
request,
"people_admin/jurisdiction_list.html",
{"state_people_data": state_people_data},
)
@never_cache
@user_passes_test(lambda u: u.has_perm(EDIT_PERM))
def people_list(request, state):
jid = abbr_to_jid(state)
current_people = [
person_data(p)
for p in Person.objects.filter(
current_jurisdiction_id=jid, current_role__isnull=False
)
.order_by("family_name", "name")
.prefetch_related("identifiers", "offices")
]
context = {
"current_people": current_people,
}
return render(request, "people_admin/person_list.html", {"context": context})
@never_cache
@user_passes_test(lambda u: u.has_perm(MATCHER_PERM))
def people_matcher(request, state, session=None):
jid = abbr_to_jid(state)
all_sessions = sessions_with_bills(jid)
if session:
session = get_object_or_404(
LegislativeSession, identifier=session, jurisdiction_id=jid
)
unmatched = UnmatchedName.objects.filter(
session_id=session, status="U"
).order_by("-sponsorships_count")
else:
unmatched = UnmatchedName.objects.filter(
session__jurisdiction__id=jid, status="U"
).order_by("-sponsorships_count")
state_sponsors = Person.objects.filter(current_jurisdiction_id=jid)
unmatched_total = unmatched.count()
context = {
"state": state,
"session": session,
"all_sessions": all_sessions,
"unmatched": unmatched,
"state_sponsors": state_sponsors,
"unmatched_total": unmatched_total,
}
return render(request, "people_admin/people_matcher.html", context)
@user_passes_test(lambda u: u.has_perm(MATCHER_PERM))
@require_http_methods(["POST"])
def apply_match(request):
form_data = json.load(request)["match_data"]
button = form_data["button"]
match_id = form_data["matchedId"]
unmatched_id = form_data["unmatchedId"]
unmatched_name = get_object_or_404(UnmatchedName, pk=unmatched_id)
if button == "Match":
unmatched_name.matched_person_id = match_id
unmatched_name.status = NameStatus.MATCHED_PERSON
elif button == "Source Error":
unmatched_name.status = NameStatus.SOURCE_ERROR
elif button == "Ignore":
unmatched_name.status = NameStatus.IGNORED
else:
unmatched_name.status = NameStatus.UNMATCHED
unmatched_name.save()
return JsonResponse({"status": "success"})
@user_passes_test(lambda u: u.has_perm(RETIRE_PERM))
@require_http_methods(["POST"])
def apply_retirement(request):
retirement = json.load(request)
name = retirement["name"]
delta = DeltaSet.objects.create(
name=f"retire {name}",
created_by=request.user,
)
PersonRetirement.objects.create(
delta_set=delta,
person_id=retirement["id"],
date=retirement["retirementDate"],
reason=retirement["reason"] or "",
is_dead=retirement["isDead"],
is_vacant=retirement["vacantSeat"],
)
return JsonResponse({"status": "success"})
@user_passes_test(lambda u: u.has_perm(EDIT_PERM))
def new_legislator(request, state):
context = {
"state": state,
}
return render(request, "people_admin/new_person.html", {"context": context})
@user_passes_test(lambda u: u.has_perm(EDIT_PERM))
@require_http_methods(["POST"])
def apply_new_legislator(request):
addition = json.load(request)
    name = addition["name"]
delta = DeltaSet.objects.create(
name=f"add {name}",
created_by=request.user,
)
NewPerson.objects.create(
name=name,
delta_set=delta,
state=addition["state"],
district=addition["district"],
chamber=addition["chamber"],
)
return JsonResponse({"status": "success"})
@user_passes_test(lambda u: u.has_perm(EDIT_PERM))
@require_http_methods(["POST"])
def apply_bulk_edits(request):
edits = json.load(request)
delta = DeltaSet.objects.create(
name=f"edit by {request.user}",
created_by=request.user,
)
for person in edits:
updates = []
for key in person:
if key != "id":
change = {"action": "set", "key": key, "param": person[key]}
updates.append(change)
PersonDelta.objects.create(
delta_set=delta,
person_id=person["id"],
data_changes=updates,
)
return JsonResponse({"status": "success"})
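# Illustrative sketch (hypothetical payload): POSTing
#   [{"id": "ocd-person/1234", "email": "[email protected]", "twitter": "new_handle"}]
# creates one DeltaSet holding a single PersonDelta whose data_changes is
#   [{"action": "set", "key": "email", "param": "[email protected]"},
#    {"action": "set", "key": "twitter", "param": "new_handle"}]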
@never_cache
@user_passes_test(lambda u: u.has_perm(EDIT_PERM))
def create_delta_sets(request, state):
matches = unmatched_to_deltas(state)
name = f"{state.upper()} legislator matching"
delta = DeltaSet.objects.get(name=name, pr_status="N")
people_deltas = PersonDelta.objects.filter(delta_set_id=delta).order_by("person_id")
context = {
"people": people_deltas,
"matches": matches,
}
return render(request, "people_admin/deltasets.html", context)
@never_cache
@user_passes_test(lambda u: u.has_perm(EDIT_PERM))
@require_http_methods(["POST"])
def create_pr(request):
delta = json.load(request)["delta"]
ds = DeltaSet.objects.get(id=delta, pr_status="N")
print(f"creating {ds.id} | {ds.name} | {ds.created_by}")
ds.pr_url = delta_set_to_pr(ds)
ds.pr_status = "C"
ds.save()
return JsonResponse({"status": request})
|
|
#!/usr/bin/env python
import subprocess
import praw
import datetime
import pyperclip
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
challengePageSubmissionId = '8g7uir'
flaskport = 8990
thisMonthName = "May"
nextMonthName = "June"
readAllCommentsWhichCanBeSlower = False
sorryTooLateToSignUpReplyText = "Sorry, but the late signup grace period for " + thisMonthName + " is over, so you can't officially join this challenge. But feel free to follow along anyway, and comment all you want. And be sure to join us for the " + nextMonthName + " challenge. Signup posts for " + nextMonthName + " will begin during the last week of " + thisMonthName + "."
reinstatedReplyText = "OK, I've reinstated you. You should start showing up on the list again starting tomorrow."
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
submission = None
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
# New version of praw does not require explicit use of the OAuth2Util object. Presumably because reddit now REQUIRES oauth.
# o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionForRedditSession(redditSession):
# submission = redditSession.get_submission(submission_id=challengePageSubmissionId)
submission = redditSession.submission(id=challengePageSubmissionId)
if readAllCommentsWhichCanBeSlower:
submission.comments.replace_more(limit=None)
# submission.replace_more_comments(limit=None, threshold=0)
return submission
def getCommentsForSubmission(submission):
# return [comment for comment in praw.helpers.flatten_tree(submission.comments) if comment.__class__ == praw.models.Comment]
commentForest = submission.comments
# commentForest.replace_more(limit=None, threshold=0)
return [comment for comment in commentForest.list() if comment.__class__ == praw.models.Comment]
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
@app.route('/moderatechallenge.html')
def moderatechallenge():
currentDayOfMonthIndex = datetime.date.today().day
lateCheckinGracePeriodIsInEffect = currentDayOfMonthIndex <= 3
global commentHashesAndComments
global submission
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submission = getSubmissionForRedditSession(redditSession)
flat_comments = getCommentsForSubmission(submission)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
stringio.write(submission.title)
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplaytoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Copy display.py stdout to clipboard">')
stringio.write('<input type="submit" name="actiontotake" value="Automatically post display.py stdout">')
stringio.write('</form>')
stringio.write('<form action="updategooglechart.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="update-google-chart.py">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.fullname)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
participant = ParticipantCollection().participantNamed(authorName)
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName)
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
if participant.isStillIn:
stringio.write(' <small><font color="green">(still in)</font></small>')
else:
stringio.write(' <small><font color="red">(out)</font></small>')
if participant.hasCheckedIn:
stringio.write(' <small><font color="green">(checked in)</font></small>')
else:
stringio.write(' <small><font color="orange">(not checked in)</font></small>')
if participant.hasRelapsed:
stringio.write(' <small><font color="red">(relapsed)</font></small>')
else:
stringio.write(' <small><font color="green">(not relapsed)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
if lateCheckinGracePeriodIsInEffect:
stringio.write('<input type="submit" name="actiontotake" value="Checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin" style="color:white;background-color:green">')
else:
stringio.write('<input type="submit" name="actiontotake" value="Checkin" style="color:white;background-color:green">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Relapse" style="color:white;background-color:red">')
stringio.write('<input type="submit" name="actiontotake" value="Reinstate with automatic comment">')
stringio.write('<input type="submit" name="actiontotake" value="Reply with sorry-too-late comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
# stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Checkin':
print "checkin - " + username
subprocess.call(['./checkin.py', username])
comment.upvote()
retireCommentHash(commentHash)
if actionToTake == 'Signup and checkin':
print "signup and checkin - " + username
subprocess.call(['./signup-and-checkin.sh', username])
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == 'Relapse':
print "relapse - " + username
subprocess.call(['./relapse.py', username])
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == 'Reinstate with automatic comment':
print "reinstate - " + username
subprocess.call(['./reinstate.py', username])
comment.reply(reinstatedReplyText)
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == 'Reply with sorry-too-late comment':
print "reply with sorry-too-late comment - " + username
comment.reply(sorryTooLateToSignUpReplyText)
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
return Response("hello", mimetype='text/html')
@app.route('/copydisplaytoclipboard.html', methods=["POST"])
def copydisplaytoclipboard():
actionToTake = request.form["actiontotake"]
if actionToTake == 'Copy display.py stdout to clipboard':
subprocess.call(['./display.py'])
if actionToTake == 'Automatically post display.py stdout':
subprocess.call(['./display.py'])
submissionText = pyperclip.paste()
submission.edit(submissionText)
return Response("hello", mimetype='text/html')
@app.route('/updategooglechart.html', methods=["POST"])
def updategooglechart():
print "TODO: Copy display to clipboard"
subprocess.call(['./update-google-chart.py'])
return Response("hello", mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
|
|
# Natural Language Toolkit: CONLL Corpus Reader
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Steven Bird <[email protected]>
# Edward Loper <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Read CoNLL-style chunk fileids.
"""
import os
import codecs
import textwrap
from nltk.tree import Tree
from nltk.util import LazyMap, LazyConcatenation
from util import *
from api import *
class ConllCorpusReader(CorpusReader):
"""
A corpus reader for CoNLL-style files. These files consist of a
series of sentences, separated by blank lines. Each sentence is
encoded using a table (or I{grid}) of values, where each line
corresponds to a single word, and each column corresponds to an
annotation type. The set of columns used by CoNLL-style files can
vary from corpus to corpus; the C{ConllCorpusReader} constructor
therefore takes an argument, C{columntypes}, which is used to
specify the columns that are used by a given corpus.
@todo: Add support for reading from corpora where different
parallel files contain different columns.
@todo: Possibly add caching of the grid corpus view? This would
allow the same grid view to be used by different data access
methods (eg words() and parsed_sents() could both share the
same grid corpus view object).
@todo: Better support for -DOCSTART-. Currently, we just ignore
it, but it could be used to define methods that retrieve a
document at a time (eg parsed_documents()).
"""
#/////////////////////////////////////////////////////////////////
# Column Types
#/////////////////////////////////////////////////////////////////
WORDS = 'words' #: column type for words
POS = 'pos' #: column type for part-of-speech tags
TREE = 'tree' #: column type for parse trees
CHUNK = 'chunk' #: column type for chunk structures
NE = 'ne' #: column type for named entities
SRL = 'srl' #: column type for semantic role labels
IGNORE = 'ignore' #: column type for column that should be ignored
#: A list of all column types supported by the conll corpus reader.
COLUMN_TYPES = (WORDS, POS, TREE, CHUNK, NE, SRL, IGNORE)
#/////////////////////////////////////////////////////////////////
# Constructor
#/////////////////////////////////////////////////////////////////
def __init__(self, root, fileids, columntypes,
chunk_types=None, top_node='S', pos_in_tree=False,
srl_includes_roleset=True, encoding=None,
tree_class=Tree, tag_mapping_function=None):
for columntype in columntypes:
if columntype not in self.COLUMN_TYPES:
raise ValueError('Bad column type %r' % columntype)
if isinstance(chunk_types, basestring):
chunk_types = [chunk_types]
self._chunk_types = chunk_types
self._colmap = dict((c,i) for (i,c) in enumerate(columntypes))
self._pos_in_tree = pos_in_tree
self._top_node = top_node # for chunks
self._srl_includes_roleset = srl_includes_roleset
self._tree_class = tree_class
CorpusReader.__init__(self, root, fileids, encoding)
self._tag_mapping_function = tag_mapping_function
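    # Illustrative sketch (hypothetical corpus root and file name): a reader for
    # files holding word, POS and chunk columns could be built as
    #   reader = ConllCorpusReader('/corpora/mychunks', ['train.txt'],
    #                              ('words', 'pos', 'chunk'), chunk_types=['NP'])
    # after which reader.chunked_sents()[0] is a Tree rooted at the 'S' top node.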
#/////////////////////////////////////////////////////////////////
# Data Access Methods
#/////////////////////////////////////////////////////////////////
def raw(self, fileids=None):
if fileids is None: fileids = self._fileids
elif isinstance(fileids, basestring): fileids = [fileids]
return concat([self.open(f).read() for f in fileids])
def words(self, fileids=None):
self._require(self.WORDS)
return LazyConcatenation(LazyMap(self._get_words, self._grids(fileids)))
def sents(self, fileids=None):
self._require(self.WORDS)
return LazyMap(self._get_words, self._grids(fileids))
def tagged_words(self, fileids=None, simplify_tags=False):
self._require(self.WORDS, self.POS)
def get_tagged_words(grid):
return self._get_tagged_words(grid, simplify_tags)
return LazyConcatenation(LazyMap(get_tagged_words,
self._grids(fileids)))
def tagged_sents(self, fileids=None, simplify_tags=False):
self._require(self.WORDS, self.POS)
def get_tagged_words(grid):
return self._get_tagged_words(grid, simplify_tags)
return LazyMap(get_tagged_words, self._grids(fileids))
def chunked_words(self, fileids=None, chunk_types=None,
simplify_tags=False):
self._require(self.WORDS, self.POS, self.CHUNK)
if chunk_types is None: chunk_types = self._chunk_types
def get_chunked_words(grid): # capture chunk_types as local var
return self._get_chunked_words(grid, chunk_types, simplify_tags)
return LazyConcatenation(LazyMap(get_chunked_words,
self._grids(fileids)))
def chunked_sents(self, fileids=None, chunk_types=None,
simplify_tags=False):
self._require(self.WORDS, self.POS, self.CHUNK)
if chunk_types is None: chunk_types = self._chunk_types
def get_chunked_words(grid): # capture chunk_types as local var
return self._get_chunked_words(grid, chunk_types, simplify_tags)
return LazyMap(get_chunked_words, self._grids(fileids))
def parsed_sents(self, fileids=None, pos_in_tree=None, simplify_tags=False):
self._require(self.WORDS, self.POS, self.TREE)
if pos_in_tree is None: pos_in_tree = self._pos_in_tree
def get_parsed_sent(grid): # capture pos_in_tree as local var
return self._get_parsed_sent(grid, pos_in_tree, simplify_tags)
return LazyMap(get_parsed_sent, self._grids(fileids))
def srl_spans(self, fileids=None):
self._require(self.SRL)
return LazyMap(self._get_srl_spans, self._grids(fileids))
def srl_instances(self, fileids=None, pos_in_tree=None, flatten=True):
self._require(self.WORDS, self.POS, self.TREE, self.SRL)
if pos_in_tree is None: pos_in_tree = self._pos_in_tree
def get_srl_instances(grid): # capture pos_in_tree as local var
return self._get_srl_instances(grid, pos_in_tree)
result = LazyMap(get_srl_instances, self._grids(fileids))
if flatten: result = LazyConcatenation(result)
return result
def iob_words(self, fileids=None, simplify_tags=False):
"""
@return: a list of word/tag/IOB tuples
@rtype: C{list} of C{tuple}
@param fileids: the list of fileids that make up this corpus
@type fileids: C{None} or C{str} or C{list}
"""
self._require(self.WORDS, self.POS, self.CHUNK)
def get_iob_words(grid):
return self._get_iob_words(grid, simplify_tags)
return LazyConcatenation(LazyMap(get_iob_words, self._grids(fileids)))
def iob_sents(self, fileids=None, simplify_tags=False):
"""
@return: a list of lists of word/tag/IOB tuples
@rtype: C{list} of C{list}
@param fileids: the list of fileids that make up this corpus
@type fileids: C{None} or C{str} or C{list}
"""
self._require(self.WORDS, self.POS, self.CHUNK)
def get_iob_words(grid):
return self._get_iob_words(grid, simplify_tags)
return LazyMap(get_iob_words, self._grids(fileids))
#/////////////////////////////////////////////////////////////////
# Grid Reading
#/////////////////////////////////////////////////////////////////
def _grids(self, fileids=None):
# n.b.: we could cache the object returned here (keyed on
# fileids), which would let us reuse the same corpus view for
# different things (eg srl and parse trees).
return concat([StreamBackedCorpusView(fileid, self._read_grid_block,
encoding=enc)
for (fileid, enc) in self.abspaths(fileids, True)])
def _read_grid_block(self, stream):
grids = []
for block in read_blankline_block(stream):
block = block.strip()
if not block: continue
grid = [line.split() for line in block.split('\n')]
# If there's a docstart row, then discard. ([xx] eventually it
# would be good to actually use it)
if grid[0][self._colmap.get('words', 0)] == '-DOCSTART-':
del grid[0]
# Check that the grid is consistent.
for row in grid:
if len(row) != len(grid[0]):
raise ValueError('Inconsistent number of columns:\n%s'
% block)
grids.append(grid)
return grids
#/////////////////////////////////////////////////////////////////
# Transforms
#/////////////////////////////////////////////////////////////////
# given a grid, transform it into some representation (e.g.,
# a list of words or a parse tree).
def _get_words(self, grid):
return self._get_column(grid, self._colmap['words'])
def _get_tagged_words(self, grid, simplify_tags=False):
pos_tags = self._get_column(grid, self._colmap['pos'])
if simplify_tags:
pos_tags = [self._tag_mapping_function(t) for t in pos_tags]
return zip(self._get_column(grid, self._colmap['words']), pos_tags)
def _get_iob_words(self, grid, simplify_tags=False):
pos_tags = self._get_column(grid, self._colmap['pos'])
if simplify_tags:
pos_tags = [self._tag_mapping_function(t) for t in pos_tags]
return zip(self._get_column(grid, self._colmap['words']), pos_tags,
self._get_column(grid, self._colmap['chunk']))
def _get_chunked_words(self, grid, chunk_types, simplify_tags=False):
# n.b.: this method is very similar to conllstr2tree.
words = self._get_column(grid, self._colmap['words'])
pos_tags = self._get_column(grid, self._colmap['pos'])
if simplify_tags:
pos_tags = [self._tag_mapping_function(t) for t in pos_tags]
chunk_tags = self._get_column(grid, self._colmap['chunk'])
stack = [Tree(self._top_node, [])]
for (word, pos_tag, chunk_tag) in zip(words, pos_tags, chunk_tags):
if chunk_tag == 'O':
state, chunk_type = 'O', ''
else:
(state, chunk_type) = chunk_tag.split('-')
# If it's a chunk we don't care about, treat it as O.
if chunk_types is not None and chunk_type not in chunk_types:
state = 'O'
# Treat a mismatching I like a B.
if state == 'I' and chunk_type != stack[-1].node:
state = 'B'
            # For B or O: close any open chunk
if state in 'BO' and len(stack) == 2:
stack.pop()
# For B: start a new chunk.
if state == 'B':
new_chunk = Tree(chunk_type, [])
stack[-1].append(new_chunk)
stack.append(new_chunk)
# Add the word token.
stack[-1].append((word, pos_tag))
return stack[0]
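        # Illustrative sketch of the loop above: for rows whose chunk column
        # reads B-NP, I-NP, O, B-VP (with chunk_types covering NP and VP), the
        # first two words are grouped under a Tree('NP', ...) child of the 'S'
        # root, the O word stays a bare (word, pos) leaf, and the last word
        # opens a Tree('VP', ...) chunk; an I- tag whose type differs from the
        # open chunk is treated as a fresh B- tag.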
def _get_parsed_sent(self, grid, pos_in_tree, simplify_tags=False):
words = self._get_column(grid, self._colmap['words'])
pos_tags = self._get_column(grid, self._colmap['pos'])
if simplify_tags:
pos_tags = [self._tag_mapping_function(t) for t in pos_tags]
parse_tags = self._get_column(grid, self._colmap['tree'])
treestr = ''
for (word, pos_tag, parse_tag) in zip(words, pos_tags, parse_tags):
if word == '(': word = '-LRB-'
if word == ')': word = '-RRB-'
if pos_tag == '(': pos_tag = '-LRB-'
if pos_tag == ')': pos_tag = '-RRB-'
(left, right) = parse_tag.split('*')
right = right.count(')')*')' # only keep ')'.
treestr += '%s (%s %s) %s' % (left, pos_tag, word, right)
try:
tree = self._tree_class.parse(treestr)
except (ValueError, IndexError):
tree = self._tree_class.parse('(%s %s)' %
(self._top_node, treestr))
if not pos_in_tree:
for subtree in tree.subtrees():
for i, child in enumerate(subtree):
                    if (isinstance(child, Tree) and len(child)==1 and
isinstance(child[0], basestring)):
subtree[i] = (child[0], child.node)
return tree
def _get_srl_spans(self, grid):
"""
        list of lists of ((start, end), tag) tuples
"""
if self._srl_includes_roleset:
predicates = self._get_column(grid, self._colmap['srl']+1)
start_col = self._colmap['srl']+2
else:
predicates = self._get_column(grid, self._colmap['srl'])
start_col = self._colmap['srl']+1
# Count how many predicates there are. This tells us how many
# columns to expect for SRL data.
num_preds = len([p for p in predicates if p != '-'])
spanlists = []
for i in range(num_preds):
col = self._get_column(grid, start_col+i)
spanlist = []
stack = []
for wordnum, srl_tag in enumerate(col):
(left, right) = srl_tag.split('*')
for tag in left.split('('):
if tag:
stack.append((tag, wordnum))
for i in range(right.count(')')):
(tag, start) = stack.pop()
spanlist.append( ((start, wordnum+1), tag) )
spanlists.append(spanlist)
return spanlists
def _get_srl_instances(self, grid, pos_in_tree):
tree = self._get_parsed_sent(grid, pos_in_tree)
spanlists = self._get_srl_spans(grid)
if self._srl_includes_roleset:
predicates = self._get_column(grid, self._colmap['srl']+1)
rolesets = self._get_column(grid, self._colmap['srl'])
else:
predicates = self._get_column(grid, self._colmap['srl'])
rolesets = [None] * len(predicates)
instances = ConllSRLInstanceList(tree)
for wordnum, predicate in enumerate(predicates):
if predicate == '-': continue
# Decide which spanlist to use. Don't assume that they're
# sorted in the same order as the predicates (even though
# they usually are).
for spanlist in spanlists:
for (start, end), tag in spanlist:
if wordnum in range(start,end) and tag in ('V', 'C-V'):
break
else: continue
break
else:
raise ValueError('No srl column found for %r' % predicate)
instances.append(ConllSRLInstance(tree, wordnum, predicate,
rolesets[wordnum], spanlist))
return instances
#/////////////////////////////////////////////////////////////////
# Helper Methods
#/////////////////////////////////////////////////////////////////
def _require(self, *columntypes):
for columntype in columntypes:
if columntype not in self._colmap:
raise ValueError('This corpus does not contain a %s '
'column.' % columntype)
@staticmethod
def _get_column(grid, column_index):
return [grid[i][column_index] for i in range(len(grid))]
class ConllSRLInstance(object):
"""
An SRL instance from a CoNLL corpus, which identifies and
    provides labels for the arguments of a single verb.
"""
# [xx] add inst.core_arguments, inst.argm_arguments?
def __init__(self, tree, verb_head, verb_stem, roleset, tagged_spans):
self.verb = []
"""A list of the word indices of the words that compose the
verb whose arguments are identified by this instance.
This will contain multiple word indices when multi-word
verbs are used (e.g. 'turn on')."""
self.verb_head = verb_head
"""The word index of the head word of the verb whose arguments
are identified by this instance. E.g., for a sentence that
uses the verb 'turn on,' C{verb_head} will be the word index
of the word 'turn'."""
self.verb_stem = verb_stem
self.roleset = roleset
self.arguments = []
"""A list of C{(argspan, argid)} tuples, specifying the location
and type for each of the arguments identified by this
instance. C{argspan} is a tuple C{start, end}, indicating
that the argument consists of the C{words[start:end]}."""
self.tagged_spans = tagged_spans
"""A list of C{(span, id)} tuples, specifying the location and
type for each of the arguments, as well as the verb pieces,
that make up this instance."""
self.tree = tree
"""The parse tree for the sentence containing this instance."""
self.words = tree.leaves()
"""A list of the words in the sentence containing this
instance."""
# Fill in the self.verb and self.arguments values.
for (start, end), tag in tagged_spans:
if tag in ('V', 'C-V'):
self.verb += range(start, end)
else:
self.arguments.append( ((start, end), tag) )
def __repr__(self):
plural = len(self.arguments)!=1 and 's' or ''
return '<ConllSRLInstance for %r with %d argument%s>' % (
(self.verb_stem, len(self.arguments), plural))
def pprint(self):
verbstr = ' '.join(self.words[i][0] for i in self.verb)
hdr = 'SRL for %r (stem=%r):\n' % (verbstr, self.verb_stem)
s = ''
for i, word in enumerate(self.words):
if isinstance(word, tuple): word = word[0]
for (start, end), argid in self.arguments:
if i == start: s += '[%s ' % argid
if i == end: s += '] '
if i in self.verb: word = '<<%s>>' % word
s += word + ' '
return hdr + textwrap.fill(s.replace(' ]', ']'),
initial_indent=' ',
subsequent_indent=' ')
class ConllSRLInstanceList(list):
"""
Set of instances for a single sentence
"""
def __init__(self, tree, instances=()):
self.tree = tree
list.__init__(self, instances)
def __str__(self):
return self.pprint()
def pprint(self, include_tree=False):
# Sanity check: trees should be the same
for inst in self:
if inst.tree != self.tree:
raise ValueError('Tree mismatch!')
# If desired, add trees:
if include_tree:
words = self.tree.leaves()
pos = [None] * len(words)
synt = ['*'] * len(words)
self._tree2conll(self.tree, 0, words, pos, synt)
s = ''
for i in range(len(words)):
# optional tree columns
if include_tree:
s += '%-20s ' % words[i]
s += '%-8s ' % pos[i]
s += '%15s*%-8s ' % tuple(synt[i].split('*'))
# verb head column
for inst in self:
if i == inst.verb_head:
s += '%-20s ' % inst.verb_stem
break
else:
s += '%-20s ' % '-'
# Remaining columns: self
for inst in self:
argstr = '*'
for (start, end), argid in inst.tagged_spans:
if i==start: argstr = '(%s%s' % (argid, argstr)
if i==(end-1): argstr += ')'
s += '%-12s ' % argstr
s += '\n'
return s
def _tree2conll(self, tree, wordnum, words, pos, synt):
assert isinstance(tree, Tree)
if len(tree) == 1 and isinstance(tree[0], basestring):
pos[wordnum] = tree.node
assert words[wordnum] == tree[0]
return wordnum+1
elif len(tree) == 1 and isinstance(tree[0], tuple):
assert len(tree[0]) == 2
            words[wordnum], pos[wordnum] = tree[0]
return wordnum+1
else:
synt[wordnum] = '(%s%s' % (tree.node, synt[wordnum])
for child in tree:
wordnum = self._tree2conll(child, wordnum, words,
pos, synt)
synt[wordnum-1] += ')'
return wordnum
class ConllChunkCorpusReader(ConllCorpusReader):
"""
A ConllCorpusReader whose data file contains three columns: words,
pos, and chunk.
"""
def __init__(self, root, fileids, chunk_types, encoding=None,
tag_mapping_function=None):
ConllCorpusReader.__init__(
self, root, fileids, ('words', 'pos', 'chunk'),
chunk_types=chunk_types, encoding=encoding,
tag_mapping_function=tag_mapping_function)
|
|
#!/usr/bin/python
#
# Copyright (C) 2010 Leandro Lisboa Penz <[email protected]>
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
import re
import urllib
import base64
from xml.etree import ElementTree
try:
import httplib
except ImportError:
import http.client as httplib
PROGRAM_NAME = "atdtool"
PROGRAM_VERSION = "1.3.3"
def checkDocument(cfg, fd):
'''Invoke checkDocument service and return a list of errors.'''
server = re.sub(r'^https?://', '', cfg.server)
if cfg.atdlang != '':
server = cfg.atdlang + '.' + server
if cfg.server.startswith('https'):
service = httplib.HTTPSConnection(server, cfg.port)
else:
service = httplib.HTTPConnection(server, cfg.port)
headers = {"Content-Type": "application/x-www-form-urlencoded"}
if cfg.username:
headers["Authorization"] = "Basic %s" % \
(base64.b64encode("%s:%s" % (cfg.username, cfg.password)))
params = {'key': cfg.key, 'data': fd.read()}
if cfg.lang != '':
params['lang'] = cfg.lang
service.request(method='POST',
url='/checkDocument',
body=urllib.urlencode(params),
headers=headers)
response = service.getresponse()
if response.status != httplib.OK:
service.close()
raise Exception('Unexpected response code from AtD server %s: %d' %
(cfg.server, response.status))
    content = response.read()
    service.close()
    et = ElementTree.fromstring(content)
errs = et.findall('message')
if len(errs) > 0:
raise Exception('Server returned an error: %s' % errs[0].text)
return [Error(e) for e in et.findall('error')]
class Error(object):
'''Error objects.
>>> xmlstr = '<error>\
<string>sysexit</string>\
<description>Spelling</description>\
<precontext></precontext>\
<suggestions>\
<option>sexist</option>\
<option>systemic</option>\
<option>syenite</option>\
<option>seit</option>\
</suggestions>\
<type>spelling</type>\
</error>'
>>> et = ElementTree.fromstring(xmlstr)
>>> e = Error(et)
>>> import sys
>>> showerrs('', sys.stdout, [e])
:1:0: (?) Spelling ""
suggestions: sexist, systemic, syenite, seit
'''
def __init__(self, e):
self.string = e.find('string').text
self.description = e.find('description').text
self.precontext = e.find('precontext').text
self.type = e.find('type').text
self.url = ''
if not e.find('url') is None:
self.url = e.find('url').text
self.suggestions = []
if not e.find('suggestions') is None:
self.suggestions = [
o.text for o in e.find('suggestions').findall('option')
]
class FileWords:
'''Parser class, keeps line and column position.'''
def __init__(self, fd):
fd.seek(0)
self.re = re.compile('([^a-z0-9A-Z_-])')
self.skipre = re.compile('[^a-z0-9A-Z_-]+')
self.text = self.re.split(fd.read())
self.len = len(self.text)
self.reset()
def reset(self):
'''Goes to start of file.'''
self.i = 0
self.line = 1
self.col = 0
self.eof = False
def next(self):
'''Goes to next token.'''
if self.eof:
return
self.col = self.col + len(self.text[self.i])
self.i = self.i + 1
if self.i >= self.len:
self.eof = True
return
if self.text[self.i] == '\n':
self.line = self.line + 1
self.col = 0
def skipnw(self):
'''Skips non-word tokens.'''
while self.skipre.match(self.text[self.i]) or self.text[self.i] == '':
self.next()
def checkpos(self, words0):
'''Checks if words0 is in current position.'''
words = tuple(self.re.split(words0))
text = self.text
t = []
j = 0
w = ''
while len(t) < len(words):
if self.i + j == self.len:
self.eof = True
return False, ''
t.append(text[self.i + j])
w = w + text[self.i + j]
if self.i + j + 1 < self.len and text[self.i + j + 1] == '.':
t.append(t.pop() + text[self.i + j + 2])
w = w + '.' + text[self.i + j + 2]
j = j + 1
return tuple(t) == words, w
def goto(self, prec, words):
'''Goes to words preceded by prec;
returns False and stays at eof if not found.'''
found = False
w = ''
if prec:
target = prec
else:
target = words
while not self.eof and not found:
found, w = self.checkpos(target)
if not found:
self.next()
elif prec:
self.next()
self.skipnw()
found, w = self.checkpos(words)
if found:
self.words = w
return True
return False
def find(self, prec, words):
'''Tries to find words preceded by prec from current position,
then from start of file.'''
found = self.goto(prec, words)
if not found:
self.reset()
found = self.goto(prec, words)
return found
def showerrs(filename, fd, errs):
'''Shows the errors found, in the context of the file.'''
t = FileWords(fd)
for e in errs:
exactstr = ''
if not t.find(e.precontext, e.string):
exactstr = ' (?)'
print('%s:%d:%d:%s %s "%s"' %
(filename,
t.line,
t.col,
exactstr,
e.description,
t.words if hasattr(t, 'words') else ''))
if len(e.suggestions) > 0:
print(' suggestions: %s' % ', '.join(e.suggestions))
|
|
# -*- coding: UTF-8 -*-
import simplejson
from jukebox.jukebox_core.tests.api import ApiTestBase
class ApiQueueTest(ApiTestBase):
def testIndexEmpty(self):
result = simplejson.loads(
self.httpGet(
"/api/v1/queue"
).content
)
self.assertEquals(len(result["itemList"]), 0)
def testAddAndIndex(self):
song = self.addSong(artist=self.addArtist())
# check that song is not in queue
result = simplejson.loads(
self.httpGet(
"/api/v1/songs"
).content
)
self.assertEquals(len(result["itemList"]), 1)
self.assertEquals(result["itemList"][0]["id"], song.id)
self.assertFalse(result["itemList"][0]["queued"])
# add to queue
response = self.httpPost(
"/api/v1/queue",
{"id": song.id}
)
content = simplejson.loads(
response.content
)
self.assertEqual(response.status_code, 201)
self.assertEqual(content["id"], song.id)
# check queue
result = simplejson.loads(
self.httpGet(
"/api/v1/queue"
).content
)
self.assertEquals(len(result["itemList"]), 1)
self.assertEquals(result["itemList"][0]["id"], song.id)
# check that song is marked as queued
result = simplejson.loads(
self.httpGet(
"/api/v1/songs"
).content
)
self.assertEquals(len(result["itemList"]), 1)
self.assertEquals(result["itemList"][0]["id"], song.id)
self.assertTrue(result["itemList"][0]["queued"])
def testDeleteAndIndex(self):
song = self.addSong(artist=self.addArtist())
# add to queue
response = self.httpPost(
"/api/v1/queue",
{"id": song.id}
)
content = simplejson.loads(
response.content
)
self.assertEqual(response.status_code, 201)
self.assertEqual(content["id"], song.id)
# check queue
result = simplejson.loads(
self.httpGet(
"/api/v1/queue"
).content
)
self.assertEquals(len(result["itemList"]), 1)
self.assertEquals(result["itemList"][0]["id"], song.id)
# remove from queue
response = self.httpDelete(
"/api/v1/queue/" + str(song.id),
)
content = simplejson.loads(
response.content
)
self.assertEqual(response.status_code, 200)
self.assertEqual(content["id"], str(song.id))
# check queue
result = simplejson.loads(
self.httpGet(
"/api/v1/queue"
).content
)
self.assertEquals(len(result["itemList"]), 0)
def addToQueue(self, song):
return self.httpPost(
"/api/v1/queue",
{"id": song.id}
)
def testIndexOrderByTitle(self):
song_a = self.addSong(artist=self.addArtist(), title="A Title")
song_b = self.addSong(artist=self.addArtist(), title="B Title")
self.addToQueue(song_a)
self.addToQueue(song_b)
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?order_by=title"
).content
)
self.assertEquals(len(result["itemList"]), 2)
self.assertEquals(result["itemList"][0]["id"], song_a.id)
self.assertEquals(result["itemList"][1]["id"], song_b.id)
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?order_by=title&order_direction=desc"
).content
)
self.assertEquals(len(result["itemList"]), 2)
self.assertEquals(result["itemList"][0]["id"], song_b.id)
self.assertEquals(result["itemList"][1]["id"], song_a.id)
def testIndexOrderByArtist(self):
song_a = self.addSong(artist=self.addArtist(name="A Name"))
song_b = self.addSong(artist=self.addArtist(name="B Name"))
self.addToQueue(song_a)
self.addToQueue(song_b)
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?order_by=artist"
).content
)
self.assertEquals(len(result["itemList"]), 2)
self.assertEquals(result["itemList"][0]["id"], song_a.id)
self.assertEquals(result["itemList"][1]["id"], song_b.id)
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?order_by=artist&order_direction=desc"
).content
)
self.assertEquals(len(result["itemList"]), 2)
self.assertEquals(result["itemList"][0]["id"], song_b.id)
self.assertEquals(result["itemList"][1]["id"], song_a.id)
def testIndexOrderByAlbum(self):
album_a = self.addAlbum(title="A Title")
album_b = self.addAlbum(title="B Title")
song_a = self.addSong(artist=self.addArtist(), album=album_a)
song_b = self.addSong(artist=self.addArtist(), album=album_b)
self.addToQueue(song_a)
self.addToQueue(song_b)
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?order_by=album"
).content
)
self.assertEquals(len(result["itemList"]), 2)
self.assertEquals(result["itemList"][0]["id"], song_a.id)
self.assertEquals(result["itemList"][1]["id"], song_b.id)
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?order_by=album&order_direction=desc"
).content
)
self.assertEquals(len(result["itemList"]), 2)
self.assertEquals(result["itemList"][0]["id"], song_b.id)
self.assertEquals(result["itemList"][1]["id"], song_a.id)
def testIndexOrderByYear(self):
song_a = self.addSong(artist=self.addArtist(), year=2000)
song_b = self.addSong(artist=self.addArtist(), year=2001)
self.addToQueue(song_a)
self.addToQueue(song_b)
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?order_by=year"
).content
)
self.assertEquals(len(result["itemList"]), 2)
self.assertEquals(result["itemList"][0]["id"], song_a.id)
self.assertEquals(result["itemList"][1]["id"], song_b.id)
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?order_by=year&order_direction=desc"
).content
)
self.assertEquals(len(result["itemList"]), 2)
self.assertEquals(result["itemList"][0]["id"], song_b.id)
self.assertEquals(result["itemList"][1]["id"], song_a.id)
def testIndexOrderByGenre(self):
song_a = self.addSong(
artist=self.addArtist(),
genre=self.addGenre(name="A Genre")
)
song_b = self.addSong(
artist=self.addArtist(),
genre=self.addGenre(name="B Genre")
)
self.addToQueue(song_a)
self.addToQueue(song_b)
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?order_by=genre"
).content
)
self.assertEquals(len(result["itemList"]), 2)
self.assertEquals(result["itemList"][0]["id"], song_a.id)
self.assertEquals(result["itemList"][1]["id"], song_b.id)
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?order_by=genre&order_direction=desc"
).content
)
self.assertEquals(len(result["itemList"]), 2)
self.assertEquals(result["itemList"][0]["id"], song_b.id)
self.assertEquals(result["itemList"][1]["id"], song_a.id)
def testIndexOrderByCreated(self):
song_a = self.addSong(artist=self.addArtist())
song_b = self.addSong(artist=self.addArtist())
self.addToQueue(song_a)
self.addToQueue(song_b)
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?order_by=created"
).content
)
self.assertEquals(len(result["itemList"]), 2)
self.assertEquals(result["itemList"][0]["id"], song_a.id)
self.assertEquals(result["itemList"][1]["id"], song_b.id)
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?order_by=created&order_direction=desc"
).content
)
self.assertEquals(len(result["itemList"]), 2)
self.assertEquals(result["itemList"][0]["id"], song_b.id)
self.assertEquals(result["itemList"][1]["id"], song_a.id)
def testCount(self):
song_a = self.addSong(artist=self.addArtist())
song_b = self.addSong(artist=self.addArtist())
song_c = self.addSong(artist=self.addArtist())
self.addToQueue(song_a)
self.addToQueue(song_b)
self.addToQueue(song_c)
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?count=1"
).content
)
self.assertEquals(len(result["itemList"]), 1)
self.assertEquals(result["itemList"][0]["id"], song_a.id)
self.assertTrue(result["hasNextPage"])
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?count=3"
).content
)
self.assertEquals(len(result["itemList"]), 3)
self.assertEquals(result["itemList"][0]["id"], song_a.id)
self.assertEquals(result["itemList"][1]["id"], song_b.id)
self.assertEquals(result["itemList"][2]["id"], song_c.id)
self.assertFalse(result["hasNextPage"])
def testCountAndPage(self):
song_a = self.addSong(artist=self.addArtist())
song_b = self.addSong(artist=self.addArtist())
song_c = self.addSong(artist=self.addArtist())
self.addToQueue(song_a)
self.addToQueue(song_b)
self.addToQueue(song_c)
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?count=1&page=1"
).content
)
self.assertEquals(len(result["itemList"]), 1)
self.assertEquals(result["itemList"][0]["id"], song_a.id)
self.assertTrue(result["hasNextPage"])
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?count=1&page=2"
).content
)
self.assertEquals(len(result["itemList"]), 1)
self.assertEquals(result["itemList"][0]["id"], song_b.id)
self.assertTrue(result["hasNextPage"])
result = simplejson.loads(
self.httpGet(
"/api/v1/queue?count=1&page=3"
).content
)
self.assertEquals(len(result["itemList"]), 1)
self.assertEquals(result["itemList"][0]["id"], song_c.id)
self.assertFalse(result["hasNextPage"])
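# Illustrative sketch (not part of the test suite): the count/page behaviour
# asserted above boils down to slicing the queue and flagging whether more
# items remain.  The names here are made up for the demo.
def _demo_paginate(items, count, page=1):
    start = (page - 1) * count
    page_items = items[start:start + count]
    has_next_page = start + count < len(items)
    return page_items, has_next_page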
|
|
from __future__ import unicode_literals
import os
import re
from django.utils import six
from django.utils.six.moves import range
from reviewboard.diffviewer.processors import (filter_interdiff_opcodes,
post_process_filtered_equals)
class MoveRange(object):
"""Stores information on a move range.
This will store the start and end of the range, and all groups that
are a part of it.
"""
    def __init__(self, start, end, groups=None):
        self.start = start
        self.end = end
        self.groups = groups or []
@property
def last_group(self):
return self.groups[-1]
def add_group(self, group, group_index):
if self.groups[-1] != group:
self.groups.append((group, group_index))
def __repr__(self):
return '<MoveRange(%d, %d, %r)>' % (self.start, self.end, self.groups)
class DiffOpcodeGenerator(object):
ALPHANUM_RE = re.compile(r'\w')
WHITESPACE_RE = re.compile(r'\s')
MOVE_PREFERRED_MIN_LINES = 2
MOVE_MIN_LINE_LENGTH = 20
TAB_SIZE = 8
def __init__(self, differ, filediff=None, interfilediff=None):
self.differ = differ
self.filediff = filediff
self.interfilediff = interfilediff
def __iter__(self):
"""Returns opcodes from the differ with extra metadata.
This is a wrapper around a differ's get_opcodes function, which returns
extra metadata along with each range. That metadata includes
information on moved blocks of code and whitespace-only lines.
        This yields opcodes as tuples in the form of
(tag, i1, i2, j1, j2, meta).
"""
self.groups = []
self.removes = {}
self.inserts = []
# Run the opcodes through the chain.
opcodes = self.differ.get_opcodes()
opcodes = self._apply_processors(opcodes)
opcodes = self._generate_opcode_meta(opcodes)
opcodes = self._apply_meta_processors(opcodes)
self._group_opcodes(opcodes)
self._compute_moves()
for opcodes in self.groups:
yield opcodes
def _apply_processors(self, opcodes):
if self.interfilediff:
# Filter out any lines unrelated to these changes from the
# interdiff. This will get rid of any merge information.
opcodes = filter_interdiff_opcodes(opcodes, self.filediff.diff,
self.interfilediff.diff)
for opcode in opcodes:
yield opcode
def _generate_opcode_meta(self, opcodes):
for tag, i1, i2, j1, j2 in opcodes:
meta = {
# True if this chunk is only whitespace.
'whitespace_chunk': False,
# List of tuples (i, j), with whitespace changes.
'whitespace_lines': [],
}
if tag == 'replace':
# replace groups are good for whitespace only changes.
assert (i2 - i1) == (j2 - j1)
for i, j in zip(range(i1, i2), range(j1, j2)):
if (self.WHITESPACE_RE.sub('', self.differ.a[i]) ==
self.WHITESPACE_RE.sub('', self.differ.b[j])):
# Both original lines are equal when removing all
# whitespace, so include their original line number in
# the meta dict.
meta['whitespace_lines'].append((i + 1, j + 1))
# If all lines are considered to have only whitespace change,
# the whole chunk is considered a whitespace-only chunk.
if len(meta['whitespace_lines']) == (i2 - i1):
meta['whitespace_chunk'] = True
elif tag == 'equal':
for group in self._compute_chunk_indentation(i1, i2, j1, j2):
ii1, ii2, ij1, ij2, indentation_changes = group
if indentation_changes:
new_meta = dict({
'indentation_changes': indentation_changes,
}, **meta)
else:
new_meta = meta
yield tag, ii1, ii2, ij1, ij2, new_meta
continue
yield tag, i1, i2, j1, j2, meta
def _apply_meta_processors(self, opcodes):
if self.interfilediff:
# When filtering out opcodes, we may have converted chunks into
# "filtered-equal" chunks. This allowed us to skip any additional
# processing, particularly the indentation highlighting. It's
# now time to turn those back into "equal" chunks.
opcodes = post_process_filtered_equals(opcodes)
for opcode in opcodes:
yield opcode
def _group_opcodes(self, opcodes):
for group_index, group in enumerate(opcodes):
self.groups.append(group)
# Store delete/insert ranges for later lookup. We will be building
# keys that in most cases will be unique for the particular block
# of text being inserted/deleted. There is a chance of collision,
# so we store a list of matching groups under that key.
#
# Later, we will loop through the keys and attempt to find insert
# keys/groups that match remove keys/groups.
tag = group[0]
if tag in ('delete', 'replace'):
i1 = group[1]
i2 = group[2]
for i in range(i1, i2):
line = self.differ.a[i].strip()
if line:
self.removes.setdefault(line, []).append(
(i, group, group_index))
if tag in ('insert', 'replace'):
self.inserts.append(group)
def _compute_chunk_indentation(self, i1, i2, j1, j2):
# We'll be going through all the opcodes in this equals chunk and
# grouping with adjacent opcodes based on whether they have
# indentation changes or not. This allows us to keep the lines with
# indentation changes from being collapsed in the diff viewer.
indentation_changes = {}
prev_has_indent = False
prev_start_i = i1
prev_start_j = j1
for i, j in zip(range(i1, i2), range(j1, j2)):
old_line = self.differ.a[i]
new_line = self.differ.b[j]
new_indentation_changes = {}
indent_info = self._compute_line_indentation(old_line, new_line)
has_indent = indent_info is not None
if has_indent:
key = '%d-%d' % (i + 1, j + 1)
new_indentation_changes[key] = indent_info
if has_indent != prev_has_indent:
if prev_start_i != i or prev_start_j != j:
# Yield the previous group.
yield prev_start_i, i, prev_start_j, j, indentation_changes
# We have a new group. Set it up, starting with the current
# calculated state.
prev_start_i = i
prev_start_j = j
prev_has_indent = has_indent
indentation_changes = new_indentation_changes
elif has_indent:
indentation_changes.update(new_indentation_changes)
# Yield the last group, if we haven't already yielded it.
if prev_start_i != i2 or prev_start_j != j2:
yield prev_start_i, i2, prev_start_j, j2, indentation_changes
def _compute_line_indentation(self, old_line, new_line):
if old_line == new_line:
return None
old_line_stripped = old_line.lstrip()
new_line_stripped = new_line.lstrip()
# These are fake-equal. They really have some indentation changes.
# We want to mark those up.
#
# Our goal for this function from here on out is to figure out whether
# the new line has increased or decreased its indentation, and then
# to determine how much that has increased or decreased by.
#
        # Since we may be dealing with the addition or removal of tabs,
# we have some challenges here. We need to expand those tabs in
# order to determine if the new line is indented further or not,
# and then we need to figure out how much of the leading whitespace
# on either side represents new indentation levels.
#
# We do this by chopping off all leading whitespace and expanding
# any tabs, and then figuring out the total line lengths. That gives
# us a basis for comparison to determine whether we've indented
# or unindented.
#
# We can then later figure out exactly which indentation characters
# were added or removed, and then store that information.
old_line_indent_len = len(old_line) - len(old_line_stripped)
new_line_indent_len = len(new_line) - len(new_line_stripped)
old_line_indent = old_line[:old_line_indent_len]
new_line_indent = new_line[:new_line_indent_len]
norm_old_line_indent = old_line_indent.expandtabs(self.TAB_SIZE)
norm_new_line_indent = new_line_indent.expandtabs(self.TAB_SIZE)
norm_old_line_indent_len = len(norm_old_line_indent)
norm_new_line_indent_len = len(norm_new_line_indent)
norm_old_line_len = (norm_old_line_indent_len +
len(old_line_stripped))
norm_new_line_len = (norm_new_line_indent_len +
len(new_line_stripped))
line_len_diff = norm_new_line_len - norm_old_line_len
if line_len_diff == 0:
return None
# We know that a spacing change did take place. We need to figure
# out now how many characters of indentation were actually
# added or removed.
is_indent = (line_len_diff > 0)
if is_indent:
raw_indent_len = new_line_indent_len
else:
raw_indent_len = old_line_indent_len
# Figure out how many characters of indentation were in common
# at the end of the strings. We'll want to exclude these
# characters when showing indentation changes.
#
# This is the area after any new indentation. If the indentation
# style changed (such as going from tabs to spaces), then nothing
# will be in common.
#
# We figure out the common trailing indentation by reversing both
# strings and then finding the common prefix. We only care about
# the length, so we can throw the string away.
#
# It may seem odd that we're using os.path.commonprefix, but this
# isn't really limited to paths. Certainly not in our case. It's
# worth not re-implementing that logic.
raw_indent_len -= len(os.path.commonprefix([
old_line_indent[::-1],
new_line_indent[::-1],
]))
return (is_indent,
raw_indent_len,
abs(norm_old_line_indent_len - norm_new_line_indent_len))
def _compute_moves(self):
# We now need to figure out all the moved locations.
#
# At this point, we know all the inserted groups, and all the
# individually deleted lines. We'll be going through and finding
# consecutive groups of matching inserts/deletes that represent a
# move block.
#
# The algorithm will be documented as we go in the code.
#
# We start by looping through all the inserted groups.
for insert in self.inserts:
self._compute_move_for_insert(*insert)
def _compute_move_for_insert(self, itag, ii1, ii2, ij1, ij2, imeta):
# Store some state on the range we'll be working with inside this
# insert group.
#
# i_move_cur is the current location inside the insert group
# (from ij1 through ij2).
#
# i_move_range is the current range of consecutive lines that
# we'll use for a move. Each line in this range has a
# corresponding consecutive delete line.
#
# r_move_ranges represents deleted move ranges. The key is a
# string in the form of "{i1}-{i2}-{j1}-{j2}", with those
# positions taken from the remove group for the line. The value
# is an instance of MoveRange. The values in MoveRange are used to
# quickly locate deleted lines we've found that match the inserted
# lines, so we can assemble ranges later.
i_move_cur = ij1
i_move_range = MoveRange(i_move_cur, i_move_cur)
r_move_ranges = {} # key -> (start, end, group)
move_key = None
is_replace = (itag == 'replace')
# Loop through every location from ij1 through ij2 - 1 until we've
# reached the end.
while i_move_cur < ij2:
try:
iline = self.differ.b[i_move_cur].strip()
except IndexError:
iline = None
updated_range = False
if iline and iline in self.removes:
# The inserted line at this location has a corresponding
# removed line.
#
# If there's already some information on removed line ranges
# for this particular move block we're processing then we'll
# update the range.
#
# The way we do that is to find each removed line that matches
# this inserted line, and for each of those find out if there's
# an existing move range that the found removed line
# immediately follows. If there is, we update the existing
# range.
#
# If there isn't any move information for this line, we'll
# simply add it to the move ranges.
for ri, rgroup, rgroup_index in self.removes.get(iline, []):
r_move_range = r_move_ranges.get(move_key)
if not r_move_range or ri != r_move_range.end + 1:
# We either didn't have a previous range, or this
# group didn't immediately follow it, so we need
# to start a new one.
move_key = '%s-%s-%s-%s' % rgroup[1:5]
r_move_range = r_move_ranges.get(move_key)
if r_move_range:
# If the remove information for the line is next in
# the sequence for this calculated move range...
if ri == r_move_range.end + 1:
# This is part of the current range, so update
# the end of the range to include it.
r_move_range.end = ri
r_move_range.add_group(rgroup, rgroup_index)
updated_range = True
else:
# Check that this isn't a replace line that's just
# "replacing" itself (which would happen if it's just
# changing whitespace).
if not is_replace or i_move_cur - ij1 != ri - ii1:
# We don't have any move ranges yet, or we're done
# with the existing range, so it's time to build one
# based on any removed lines we find that match the
# inserted line.
r_move_ranges[move_key] = \
MoveRange(ri, ri, [(rgroup, rgroup_index)])
updated_range = True
if not updated_range and r_move_ranges:
# We didn't find a move range that this line is a part
# of, but we do have some existing move ranges stored.
#
                    # Since updated_range is not set, we'll be processing
# the known move ranges below. We'll actually want to
# re-check this line afterward, so that we can start a
# new move range after we've finished processing the
# current ones.
#
                    # To do that, just move i_move_cur back by one. That negates
# the increment below.
i_move_cur -= 1
move_key = None
elif iline == '' and move_key:
# This is a blank or whitespace-only line, which would not
# be in the list of removed lines above. We also have been
# working on a move range.
#
# At this point, the plan is to just attach this blank
# line onto the end of the last range being operated on.
#
# This blank line will help tie together adjacent move
# ranges. If it turns out to be a trailing line, it'll be
# stripped later in _determine_move_range.
r_move_range = r_move_ranges.get(move_key)
if r_move_range:
new_end_i = r_move_range.end + 1
if (new_end_i < len(self.differ.a) and
self.differ.a[new_end_i].strip() == ''):
# There was a matching blank line on the other end
# of the range, so we should feel more confident about
# adding the blank line here.
r_move_range.end = new_end_i
# It's possible that this blank line is actually an
# "equal" line. Though technically it didn't move,
# we're trying to create a logical, seamless move
# range, so we need to try to find that group and
                        # add it to the list of groups in the range, if it's
                        # not already there.
last_group, last_group_index = r_move_range.last_group
if new_end_i >= last_group[2]:
# This is in the next group, which hasn't been
# added yet. So add it.
cur_group_index = r_move_range.last_group[1] + 1
r_move_range.add_group(
self.groups[cur_group_index],
cur_group_index)
updated_range = True
i_move_cur += 1
if not updated_range or i_move_cur == ij2:
# We've reached the very end of the insert group. See if
# we have anything that looks like a move.
if r_move_ranges:
r_move_range = self._find_longest_move_range(r_move_ranges)
# If we have a move range, see if it's one we want to
# include or filter out. Some moves are not impressive
# enough to display. For example, a small portion of a
# comment, or whitespace-only changes.
r_move_range = self._determine_move_range(r_move_range)
if r_move_range:
# Rebuild the insert and remove ranges based on where
# we are now and which range we won.
#
# The new ranges will be actual lists of positions,
# rather than a beginning and end. These will be
# provided to the renderer.
#
# The ranges expected by the renderers are 1-based,
# whereas our calculations for this algorithm are
# 0-based, so we add 1 to the numbers.
#
# The upper boundaries passed to the range() function
# must actually be one higher than the value we want.
# So, for r_move_range, we actually increment by 2. We
# only increment i_move_cur by one, because i_move_cur
# already factored in the + 1 by being at the end of
# the while loop.
i_range = range(i_move_range.start + 1,
i_move_cur + 1)
r_range = range(r_move_range.start + 1,
r_move_range.end + 2)
moved_to_ranges = dict(zip(r_range, i_range))
for group, group_index in r_move_range.groups:
rmeta = group[-1]
rmeta.setdefault('moved-to', {}).update(
moved_to_ranges)
imeta.setdefault('moved-from', {}).update(
dict(zip(i_range, r_range)))
# Reset the state for the next range.
move_key = None
i_move_range = MoveRange(i_move_cur, i_move_cur)
r_move_ranges = {}
def _find_longest_move_range(self, r_move_ranges):
# Go through every range of lines we've found and find the longest.
#
# The longest move range wins. If we find two ranges that are equal,
# though, we'll ignore both. The idea is that if we have two identical
# moves, then it's probably common enough code that we don't want to
# show the move. An example might be some standard part of a comment
# block, with no real changes in content.
#
# Note that with the current approach, finding duplicate moves doesn't
# cause us to reset the winning range to the second-highest identical
# match. We may want to do that down the road, but it means additional
# state, and this is hopefully uncommon enough to not be a real
# problem.
r_move_range = None
for iter_move_range in six.itervalues(r_move_ranges):
if not r_move_range:
r_move_range = iter_move_range
else:
len1 = r_move_range.end - r_move_range.start
len2 = iter_move_range.end - iter_move_range.start
if len1 < len2:
r_move_range = iter_move_range
elif len1 == len2:
# If there are two that are the same, it may be common
# code that we don't want to see moves for. Comments,
# for example.
r_move_range = None
return r_move_range
def _determine_move_range(self, r_move_range):
"""Determines if a move range is valid and should be included.
This performs some tests to try to eliminate trivial changes that
shouldn't have moves associated.
Specifically, a move range is valid if it has at least one line
with alpha-numeric characters and is at least 4 characters long when
stripped.
If the move range is valid, any trailing whitespace-only lines will
be stripped, ensuring it covers only a valid range of content.
"""
if not r_move_range:
return None
end_i = r_move_range.end
lines = self.differ.a[r_move_range.start:end_i + 1]
new_end_i = None
valid = False
for i, line in enumerate(reversed(lines)):
line = line.strip()
if line:
if len(line) >= 4 and self.ALPHANUM_RE.search(line):
valid = True
if new_end_i is None or valid:
new_end_i = end_i - i
if valid:
break
# Accept this if there's more than one line or if the first
# line is long enough, in order to filter out small bits of garbage.
valid = (
valid and
(new_end_i - r_move_range.start + 1 >=
self.MOVE_PREFERRED_MIN_LINES or
len(self.differ.a[r_move_range.start].strip()) >=
self.MOVE_MIN_LINE_LENGTH))
if not valid:
return None
assert new_end_i is not None
return MoveRange(r_move_range.start, new_end_i, r_move_range.groups)
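# Illustrative sketch (not part of Review Board): the reversed-string
# commonprefix trick used in _compute_line_indentation to measure how much
# trailing indentation two lines share.  The indent strings are made up.
def _demo_common_trailing_indent(old_indent='\t    ', new_indent='        '):
    # Reversing both strings turns the shared *suffix* into a shared
    # *prefix*, which os.path.commonprefix can measure directly.
    return len(os.path.commonprefix([old_indent[::-1], new_indent[::-1]]))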
_generator = DiffOpcodeGenerator
def get_diff_opcode_generator_class():
"""Returns the DiffOpcodeGenerator class used for generating opcodes."""
return _generator
def set_diff_opcode_generator_class(renderer):
"""Sets the DiffOpcodeGenerator class used for generating opcodes."""
assert renderer
globals()['_generator'] = renderer
def get_diff_opcode_generator(*args, **kwargs):
"""Returns a DiffOpcodeGenerator instance used for generating opcodes."""
return _generator(*args, **kwargs)
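# Hypothetical usage sketch (not part of Review Board): driving the opcode
# generator with a stub differ.  Only the attributes this module actually
# touches (get_opcodes(), ``a`` and ``b``) are stubbed; real callers pass a
# proper differ instance instead.
class _StubDiffer(object):
    def __init__(self, a, b, opcodes):
        self.a = a
        self.b = b
        self._opcodes = opcodes
    def get_opcodes(self):
        return self._opcodes
def _demo_opcode_generator():
    differ = _StubDiffer(a=['foo\n'],
                         b=['foo\n', 'bar\n'],
                         opcodes=[('equal', 0, 1, 0, 1),
                                  ('insert', 1, 1, 1, 2)])
    # Each yielded group has the form (tag, i1, i2, j1, j2, meta).
    return list(get_diff_opcode_generator(differ))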
|
|
import os
import tempfile
import pytest
from pyjob.exception import PyJobError
from pyjob.script import (
LocalScriptCreator,
Script,
ScriptCollector,
ScriptProperty,
is_valid_script_path,
)
class TestScriptCollector(object):
def test_1(self):
sc = ScriptCollector([])
assert sc.scripts == []
def test_2(self):
sc = ScriptCollector(None)
assert sc.scripts == []
def test_3(self):
script = pytest.helpers.get_py_script(0, 1)
sc = ScriptCollector(script)
assert sc.scripts == [script]
def test_4(self):
script = pytest.helpers.get_py_script(0, 1)
script.write()
sc = ScriptCollector(script.path)
assert len(sc.scripts) == 1
assert isinstance(sc.scripts[0], Script)
pytest.helpers.unlink([script.path])
def test_5(self):
scripts = [pytest.helpers.get_py_script(i, 1) for i in range(2)]
sc = ScriptCollector(scripts)
assert sc.scripts == scripts
def test_6(self):
scripts = [pytest.helpers.get_py_script(i, 1) for i in range(2)]
[s.write() for s in scripts]
sc = ScriptCollector([s.path for s in scripts])
assert len(sc.scripts) == 2
assert all(isinstance(s, Script) for s in sc)
pytest.helpers.unlink([s.path for s in scripts])
def test_7(self):
with pytest.raises(PyJobError):
ScriptCollector([1])
def test_8(self):
with pytest.raises(ValueError):
ScriptCollector(["test"])
def test_9(self):
with pytest.raises(IOError):
ScriptCollector(["test.sh"])
def test_10(self):
scripts = [pytest.helpers.get_py_script(i, 1) for i in range(2)]
sc = ScriptCollector(scripts[:1])
sc.add(scripts[1:])
assert sc.scripts == scripts
def test_11(self):
sc = ScriptCollector([])
scripts = [pytest.helpers.get_py_script(i, 1) for i in range(2)]
sc.add(scripts)
assert sc.scripts == scripts
def test_12(self):
scripts = [pytest.helpers.get_py_script(i, 1) for i in range(2)]
sc = ScriptCollector(scripts)
sc.add([])
assert sc.scripts == scripts
def test_13(self):
scripts1 = [pytest.helpers.get_py_script(i, 1) for i in range(2)]
sc = ScriptCollector(scripts1)
assert sc.scripts == scripts1
scripts2 = [pytest.helpers.get_py_script(i, 1) for i in range(2)]
sc.scripts = scripts2
assert sc.scripts == scripts2
def test_14(self):
scripts1 = [pytest.helpers.get_py_script(i, 1) for i in range(2)]
sc = ScriptCollector(scripts1)
assert sc.scripts == scripts1
sc.scripts = []
assert sc.scripts == []
def test_15(self):
scripts = [pytest.helpers.get_py_script(i, 1) for i in range(2)]
sc = ScriptCollector(scripts)
assert sc.scripts == scripts
sc.dump()
paths = [s.path for s in scripts]
assert all(os.path.isfile(p) for p in paths)
pytest.helpers.unlink(paths)
def test_16(self):
with pytest.raises(ValueError):
Script(suffix=None)
def test_17(self):
with pytest.raises(ValueError):
Script(suffix="")
def test_18(self):
with pytest.raises(ValueError):
Script(suffix="x")
def test_19(self):
with pytest.raises(ValueError):
Script(suffix=",x")
def test_20(self):
script = Script()
script.append("test line")
script.content = ["what the hell"]
assert script == ["what the hell"]
def test_21(self):
script = Script()
script.append("test line")
content = ["what the hell"]
script.content = content
assert script == ["what the hell"]
assert script is not content
class TestScriptRead(object):
def test_read_1(self):
fh = tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".py")
fh.write('#!/usr/bin/env python\nprint("PyJob is cool!")\n')
fh.close()
script = Script.read(fh.name)
assert script.shebang == "#!/usr/bin/env python"
assert script.content == ['print("PyJob is cool!")']
pytest.helpers.unlink([fh.name])
def test_read_2(self):
fh = tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".py")
fh.write('print("PyJob is cool!")\n')
fh.close()
script = Script.read(fh.name)
assert script.shebang == ""
assert script.content == ['print("PyJob is cool!")']
pytest.helpers.unlink([fh.name])
def test_read_3(self):
fh = tempfile.NamedTemporaryFile(
mode="w", delete=False, suffix=ScriptProperty.SHELL.suffix
)
fh.close()
script = Script.read(fh.name)
assert script.shebang == ""
assert script.content == []
pytest.helpers.unlink([fh.name])
def test_read_4(self):
fh = tempfile.NamedTemporaryFile(
mode="w", delete=False, suffix=ScriptProperty.SHELL.suffix
)
fh.write(ScriptProperty.SHELL.shebang)
fh.close()
script = Script.read(fh.name)
assert script.shebang == ScriptProperty.SHELL.shebang
assert script.content == []
pytest.helpers.unlink([fh.name])
@pytest.mark.skipif(pytest.on_windows, reason="Unavailable on Windows")
def test_read_5(self):
fh = tempfile.NamedTemporaryFile(
mode="w", delete=False, suffix=ScriptProperty.SHELL.suffix
)
fh.write("\n" + ScriptProperty.SHELL.shebang)
fh.close()
script = Script.read(fh.name)
assert script.shebang == ""
assert script.content == ["", ScriptProperty.SHELL.shebang]
pytest.helpers.unlink([fh.name])
def test_read_6(self):
fh = tempfile.NamedTemporaryFile(
mode="w", delete=False, suffix=ScriptProperty.SHELL.suffix
)
fh.write("\n" + "")
fh.close()
script = Script.read(fh.name)
assert script.shebang == ""
assert script.content == [""]
pytest.helpers.unlink([fh.name])
@pytest.mark.skipif(pytest.on_windows, reason="Unavailable on Windows")
def test_read_7(self):
fh = tempfile.NamedTemporaryFile(
mode="w",
dir=".",
delete=True,
prefix="pyjob",
suffix=ScriptProperty.SHELL.suffix,
)
script = Script.read(fh.name)
fh.close()
assert script.directory == os.getcwd()
assert script.prefix == ""
assert script.stem[:5] == "pyjob"
assert script.suffix == ScriptProperty.SHELL.suffix
class TestIsValidScriptPath(object):
def test_is_valid_script_path_1(self):
fh = tempfile.NamedTemporaryFile(delete=True)
fh.close()
assert not is_valid_script_path(fh.name)
@pytest.mark.skipif(pytest.on_windows, reason="Unavailable on Windows")
def test_is_valid_script_path_2(self):
fh = tempfile.NamedTemporaryFile(delete=True)
assert not is_valid_script_path(fh.name)
fh.close()
def test_is_valid_script_path_3(self):
fh = tempfile.NamedTemporaryFile(delete=True)
os.chmod(fh.name, 0o777)
assert is_valid_script_path(fh.name)
fh.close()
class TestScriptProperty(object):
def test_1(self):
if pytest.on_windows:
assert ScriptProperty.PERL.shebang == ""
else:
assert ScriptProperty.PERL.shebang == "#!/usr/bin/env perl"
assert ScriptProperty.PERL.suffix == ".pl"
def test_2(self):
if pytest.on_windows:
assert ScriptProperty.PYTHON.shebang == ""
else:
assert ScriptProperty.PYTHON.shebang == "#!/usr/bin/env python"
assert ScriptProperty.PYTHON.suffix == ".py"
def test_3(self):
if pytest.on_windows:
assert ScriptProperty.SHELL.shebang == ""
assert ScriptProperty.SHELL.suffix == ".bat"
else:
assert ScriptProperty.SHELL.shebang == "#!/bin/bash"
assert ScriptProperty.SHELL.suffix == ".sh"
class TestLocalScriptCreator(object):
def __call__(self, option):
return self.example_function(option)
@staticmethod
def example_function(option):
cmd = ["echo {}".format(option)]
script = Script(directory=os.getcwd())
for c in cmd:
script.append(c)
return script
@pytest.mark.skipif(pytest.on_windows, reason="Unavailable on Windows")
def test_1(self):
nproc = 2
options = [1, 2, 3, 4, 5]
script_creator = LocalScriptCreator(
func=self, iterable=options, processes=nproc
)
assert script_creator.collector.scripts == [
["echo 1"],
["echo 2"],
["echo 3"],
["echo 4"],
["echo 5"],
]
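# Illustrative sketch (not part of the test suite): the Script round-trip the
# tests above exercise -- build a script, write it out and read it back.
# Only keyword arguments already exercised by the tests above are assumed.
def _demo_script_roundtrip():
    script = Script(directory=os.getcwd(), suffix=".py")
    script.append('print("hello")')
    script.write()
    reread = Script.read(script.path)
    os.unlink(script.path)
    return reread.content  # expected to equal ['print("hello")']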
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from requests import exceptions
import six
import yaml
from heat.common import exception
from heat.common import identifier
from heat.common import template_format
from heat.common import urlfetch
from heat.engine import resource
from heat.engine.resources.aws.cfn import stack as stack_res
from heat.engine import rsrc_defn
from heat.engine import stack as parser
from heat.engine import template
from heat.objects import resource_data as resource_data_object
from heat.tests import common
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
class NestedStackTest(common.HeatTestCase):
test_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: https://server.test/the.template
Parameters:
KeyName: foo
'''
nested_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
KeyName:
Type: String
Outputs:
Foo:
Value: bar
'''
update_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
KeyName:
Type: String
Outputs:
Bar:
Value: foo
'''
def setUp(self):
super(NestedStackTest, self).setUp()
self.patchobject(urlfetch, 'get')
def validate_stack(self, template):
t = template_format.parse(template)
stack = self.parse_stack(t)
res = stack.validate()
self.assertIsNone(res)
return stack
def parse_stack(self, t, data=None):
ctx = utils.dummy_context('test_username', 'aaaa', 'password')
stack_name = 'test_stack'
tmpl = template.Template(t)
stack = parser.Stack(ctx, stack_name, tmpl, adopt_stack_data=data)
stack.store()
return stack
@mock.patch.object(parser.Stack, 'root_stack_id')
@mock.patch.object(parser.Stack, 'total_resources')
def test_nested_stack_three_deep(self, tr, rsi):
root_template = '''
HeatTemplateFormatVersion: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth1.template'
'''
depth1_template = '''
HeatTemplateFormatVersion: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth2.template'
'''
depth2_template = '''
HeatTemplateFormatVersion: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth3.template'
Parameters:
KeyName: foo
'''
urlfetch.get.side_effect = [
depth1_template,
depth2_template,
self.nested_template]
rsi.return_value = '1234'
tr.return_value = 2
self.validate_stack(root_template)
calls = [mock.call('https://server.test/depth1.template'),
mock.call('https://server.test/depth2.template'),
mock.call('https://server.test/depth3.template')]
urlfetch.get.assert_has_calls(calls)
tr.assert_called_with('1234')
@mock.patch.object(parser.Stack, 'root_stack_id')
@mock.patch.object(parser.Stack, 'total_resources')
def test_nested_stack_six_deep(self, tr, rsi):
tmpl = '''
HeatTemplateFormatVersion: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth%i.template'
'''
root_template = tmpl % 1
depth1_template = tmpl % 2
depth2_template = tmpl % 3
depth3_template = tmpl % 4
depth4_template = tmpl % 5
depth5_template = tmpl % 6
depth5_template += '''
Parameters:
KeyName: foo
'''
urlfetch.get.side_effect = [
depth1_template,
depth2_template,
depth3_template,
depth4_template,
depth5_template,
self.nested_template]
rsi.return_value = '1234'
tr.return_value = 5
t = template_format.parse(root_template)
stack = self.parse_stack(t)
res = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('Recursion depth exceeds', six.text_type(res))
calls = [mock.call('https://server.test/depth1.template'),
mock.call('https://server.test/depth2.template'),
mock.call('https://server.test/depth3.template'),
mock.call('https://server.test/depth4.template'),
mock.call('https://server.test/depth5.template'),
mock.call('https://server.test/depth6.template')]
urlfetch.get.assert_has_calls(calls)
def test_nested_stack_four_wide(self):
root_template = '''
HeatTemplateFormatVersion: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth1.template'
Parameters:
KeyName: foo
Nested2:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth2.template'
Parameters:
KeyName: foo
Nested3:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth3.template'
Parameters:
KeyName: foo
Nested4:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth4.template'
Parameters:
KeyName: foo
'''
urlfetch.get.return_value = self.nested_template
self.validate_stack(root_template)
calls = [mock.call('https://server.test/depth1.template'),
mock.call('https://server.test/depth2.template'),
mock.call('https://server.test/depth3.template'),
mock.call('https://server.test/depth4.template')]
urlfetch.get.assert_has_calls(calls, any_order=True)
@mock.patch.object(parser.Stack, 'root_stack_id')
@mock.patch.object(parser.Stack, 'total_resources')
def test_nested_stack_infinite_recursion(self, tr, rsi):
tmpl = '''
HeatTemplateFormatVersion: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/the.template'
'''
urlfetch.get.return_value = tmpl
t = template_format.parse(tmpl)
stack = self.parse_stack(t)
rsi.return_value = '1234'
tr.return_value = 2
res = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('Recursion depth exceeds', six.text_type(res))
expected_count = cfg.CONF.get('max_nested_stack_depth') + 1
self.assertEqual(expected_count, urlfetch.get.call_count)
def test_child_params(self):
t = template_format.parse(self.test_template)
stack = self.parse_stack(t)
nested_stack = stack['the_nested']
nested_stack.properties.data[nested_stack.PARAMETERS] = {'foo': 'bar'}
self.assertEqual({'foo': 'bar'}, nested_stack.child_params())
def test_child_template_when_file_is_fetched(self):
urlfetch.get.return_value = 'template_file'
t = template_format.parse(self.test_template)
stack = self.parse_stack(t)
nested_stack = stack['the_nested']
with mock.patch('heat.common.template_format.parse') as mock_parse:
mock_parse.return_value = 'child_template'
self.assertEqual('child_template', nested_stack.child_template())
mock_parse.assert_called_once_with('template_file')
def test_child_template_when_fetching_file_fails(self):
urlfetch.get.side_effect = exceptions.RequestException()
t = template_format.parse(self.test_template)
stack = self.parse_stack(t)
nested_stack = stack['the_nested']
self.assertRaises(ValueError, nested_stack.child_template)
def test_child_template_when_io_error(self):
msg = 'Failed to retrieve template'
urlfetch.get.side_effect = urlfetch.URLFetchError(msg)
t = template_format.parse(self.test_template)
stack = self.parse_stack(t)
nested_stack = stack['the_nested']
self.assertRaises(ValueError, nested_stack.child_template)
class ResDataResource(generic_rsrc.GenericResource):
def handle_create(self):
self.data_set("test", 'A secret value', True)
class ResDataStackTest(common.HeatTestCase):
tmpl = '''
HeatTemplateFormatVersion: "2012-12-12"
Parameters:
KeyName:
Type: String
Resources:
res:
Type: "res.data.resource"
Outputs:
Foo:
Value: bar
'''
def setUp(self):
super(ResDataStackTest, self).setUp()
resource._register_class("res.data.resource", ResDataResource)
def create_stack(self, template):
t = template_format.parse(template)
stack = utils.parse_stack(t)
stack.create()
self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)
return stack
def test_res_data_delete(self):
stack = self.create_stack(self.tmpl)
res = stack['res']
stack.delete()
self.assertEqual((stack.DELETE, stack.COMPLETE), stack.state)
self.assertRaises(
exception.NotFound,
resource_data_object.ResourceData.get_val, res, 'test')
class NestedStackCrudTest(common.HeatTestCase):
nested_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
KeyName:
Type: String
Outputs:
Foo:
Value: bar
'''
def setUp(self):
super(NestedStackCrudTest, self).setUp()
self.ctx = utils.dummy_context('test_username', 'aaaa', 'password')
empty_template = {"HeatTemplateFormatVersion": "2012-12-12"}
self.stack = parser.Stack(self.ctx, 'test',
template.Template(empty_template))
self.stack.store()
self.patchobject(urlfetch, 'get', return_value=self.nested_template)
        self.nested_parsed = yaml.safe_load(self.nested_template)
self.nested_params = {"KeyName": "foo"}
self.defn = rsrc_defn.ResourceDefinition(
'test_t_res',
'AWS::CloudFormation::Stack',
{"TemplateURL": "https://server.test/the.template",
"Parameters": self.nested_params})
self.res = stack_res.NestedStack('test_t_res',
self.defn, self.stack)
self.assertIsNone(self.res.validate())
self.res._store()
def test_handle_create(self):
self.res.create_with_template = mock.Mock(return_value=None)
self.res.handle_create()
self.res.create_with_template.assert_called_once_with(
self.nested_parsed, self.nested_params, None, adopt_data=None)
def test_handle_adopt(self):
self.res.create_with_template = mock.Mock(return_value=None)
self.res.handle_adopt(resource_data={'resource_id': 'fred'})
self.res.create_with_template.assert_called_once_with(
self.nested_parsed, self.nested_params, None,
adopt_data={'resource_id': 'fred'})
def test_handle_update(self):
self.res.update_with_template = mock.Mock(return_value=None)
self.res.handle_update(self.defn, None, None)
self.res.update_with_template.assert_called_once_with(
self.nested_parsed, self.nested_params, None)
def test_handle_delete(self):
self.res.rpc_client = mock.MagicMock()
self.res.action = self.res.CREATE
self.res.nested = mock.MagicMock()
stack_identity = identifier.HeatIdentifier(
self.ctx.tenant_id,
self.res.physical_resource_name(),
self.res.resource_id)
self.res.nested().identifier.return_value = stack_identity
self.res.handle_delete()
self.res.rpc_client.return_value.delete_stack.assert_called_once_with(
self.ctx, self.res.nested().identifier())
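# Illustrative sketch (not part of the test suite): template_format.parse
# turns the YAML template snippets used above into plain dicts before they
# are wrapped in template.Template objects.
def _demo_parse_nested_template():
    parsed = template_format.parse(NestedStackCrudTest.nested_template)
    # expected keys: HeatTemplateFormatVersion, Outputs and Parameters
    return sorted(parsed)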
|
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# pylint: disable=maybe-no-member
"""Test request import and updates."""
from nose.plugins import skip
from ggrc import models
from ggrc.converters import errors
from integration.ggrc import converters
class TestRequestImport(converters.TestCase):
"""Basic Request import tests with.
This test suite should test new Request imports and updates. The main focus
of these tests is checking error messages for invalid state transitions.
"""
def setUp(self):
""" Set up for Request test cases """
converters.TestCase.setUp(self)
self.client.get("/login")
def _test_request_users(self, request, users):
""" Test that all users have correct roles on specified Request"""
verification_errors = ""
for user_name, expected_types in users.items():
try:
user = models.Person.query.filter_by(name=user_name).first()
rel = models.Relationship.find_related(request, user)
if expected_types:
self.assertNotEqual(
rel,
None,
"User {} is not mapped to {}".format(user.email, request.slug)
)
self.assertIn("AssigneeType", rel.relationship_attrs)
self.assertEqual(
set(rel.relationship_attrs[
"AssigneeType"].attr_value.split(",")),
expected_types
)
else:
self.assertEqual(
rel,
None,
"User {} is mapped to {}".format(user.email, request.slug)
)
except AssertionError as error:
verification_errors += "\n\nChecks for Users-Request mapping failed "\
"for user '{}' with:\n{}".format(user_name, str(error))
self.assertEqual(verification_errors, "", verification_errors)
def test_request_full_no_warnings(self):
""" Test full request import with no warnings
CSV sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=704933240&vpid=A7
"""
filename = "request_full_no_warnings.csv"
response = self.import_file(filename)
messages = ("block_errors", "block_warnings", "row_errors", "row_warnings")
for response_block in response:
for message in messages:
self.assertEqual(set(), set(response_block[message]))
# Test first request line in the CSV file
request_1 = models.Request.query.filter_by(slug="Request 1").first()
users = {
"user 1": {"Assignee"},
"user 2": {"Assignee", "Requester"},
"user 3": {"Requester", "Verifier"},
"user 4": {"Verifier"},
"user 5": {"Verifier"},
}
self._test_request_users(request_1, users)
self.assertEqual(request_1.status, models.Request.START_STATE)
self.assertEqual(request_1.request_type, "documentation")
# Test second request line in the CSV file
request_2 = models.Request.query.filter_by(slug="Request 2").first()
users = {
"user 1": {"Assignee"},
"user 2": {"Requester"},
"user 3": {"Verifier"},
"user 4": {},
"user 5": {},
}
self._test_request_users(request_2, users)
self.assertEqual(request_2.status, models.Request.PROGRESS_STATE)
self.assertEqual(request_2.request_type, "interview")
def test_request_import_states(self):
""" Test Request state imports
These tests are an intermediate part for zucchini release and will be
updated in the next release.
CSV sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=299569476
"""
self.import_file("request_full_no_warnings.csv")
response = self.import_file("request_update_intermediate.csv")
message_types = (
"block_errors",
"block_warnings",
"row_errors",
"row_warnings"
)
messages = {
"block_errors": set(),
"block_warnings": set(),
"row_errors": set(),
"row_warnings": set([
errors.REQUEST_INVALID_STATE.format(line=5),
errors.REQUEST_INVALID_STATE.format(line=6),
errors.REQUEST_INVALID_STATE.format(line=11),
errors.REQUEST_INVALID_STATE.format(line=12),
]),
}
for message_type in message_types:
self.assertEqual(len(set(response[0][message_type])),
len(response[0][message_type]))
self.assertEqual(set(response[0][message_type]), messages[message_type])
requests = {r.slug: r for r in models.Request.query.all()}
self.assertEqual(requests["Request 60"].status, models.Request.START_STATE)
self.assertEqual(requests["Request 61"].status,
models.Request.PROGRESS_STATE)
self.assertEqual(requests["Request 62"].status, models.Request.DONE_STATE)
self.assertEqual(requests["Request 63"].status,
models.Request.PROGRESS_STATE)
self.assertEqual(requests["Request 64"].status,
models.Request.PROGRESS_STATE)
self.assertEqual(requests["Request 3"].status,
models.Request.PROGRESS_STATE)
self.assertEqual(requests["Request 4"].status,
models.Request.PROGRESS_STATE)
@skip.SkipTest
def test_request_warnings_errors(self):
""" Test full request import with warnings and errors
CSV sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=889865936
"""
self.import_file("request_full_no_warnings.csv")
response = self.import_file("request_with_warnings_and_errors.csv")
message_types = (
"block_errors",
"block_warnings",
"row_errors",
"row_warnings"
)
messages = {
"block_errors": set([]),
"block_warnings": set([
errors.UNKNOWN_COLUMN.format(
line=2,
column_name="error description - non existing column will be "
"ignored"
),
errors.UNKNOWN_COLUMN.format(
line=2,
column_name="actual error ""message"
),
]),
"row_errors": set([
errors.UNKNOWN_OBJECT.format(
line=18,
object_type="Audit",
slug="not existing"
),
errors.DUPLICATE_VALUE_IN_CSV.format(
line_list="19, 21",
column_name="Code",
value="Request 22",
s="",
ignore_lines="21",
),
]),
"row_warnings": set([
errors.UNKNOWN_USER_WARNING.format(
line=14,
email="[email protected]",
),
errors.UNKNOWN_OBJECT.format(
line=14,
object_type="Project",
slug="proj-55"
),
]),
}
for message_type in message_types:
self.assertEqual(len(set(response[0][message_type])),
len(response[0][message_type]))
self.assertEqual(set(response[0][message_type]), messages[message_type])
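# Illustrative sketch (not part of the test suite): the AssigneeType check in
# _test_request_users reduces to a comma-split compared as a set, so the
# order of the roles in the attribute value does not matter.
def _demo_assignee_type_check(attr_value="Requester,Assignee"):
    expected_types = {"Assignee", "Requester"}
    return set(attr_value.split(",")) == expected_types  # -> True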
|
|
# Copyright 2013, Big Switch Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: KC Wang
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators
from openstack_dashboard import api
port_validator = validators.validate_port_or_colon_separated_port_range
LOG = logging.getLogger(__name__)
class UpdateRule(forms.SelfHandlingForm):
name = forms.CharField(max_length=80, label=_("Name"), required=False)
description = forms.CharField(
required=False,
max_length=80, label=_("Description"))
protocol = forms.ChoiceField(
label=_("Protocol"), required=False,
help_text=_('Protocol for the firewall rule'))
action = forms.ChoiceField(
label=_("Action"), required=False,
help_text=_('Action for the firewall rule'))
source_ip_address = forms.IPField(
label=_("Source IP Address/Subnet"),
version=forms.IPv4 | forms.IPv6,
required=False, mask=True,
help_text=_('Source IP address or subnet'))
destination_ip_address = forms.IPField(
label=_('Destination IP Address/Subnet'),
version=forms.IPv4 | forms.IPv6,
required=False, mask=True,
help_text=_('Destination IP address or subnet'))
source_port = forms.CharField(
max_length=80,
label=_("Source Port/Port Range"),
required=False,
validators=[port_validator],
help_text=_('Source port (integer in [1, 65535] or range in a:b)'))
destination_port = forms.CharField(
max_length=80,
label=_("Destination Port/Port Range"),
required=False,
validators=[port_validator],
help_text=_('Destination port (integer in [1, 65535] or range'
' in a:b)'))
shared = forms.BooleanField(label=_("Shared"), required=False)
enabled = forms.BooleanField(label=_("Enabled"), required=False)
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(UpdateRule, self).__init__(request, *args, **kwargs)
protocol = kwargs['initial']['protocol'].upper()
action = kwargs['initial']['action'].upper()
protocol_choices = [(protocol, protocol)]
for tup in [('TCP', _('TCP')), ('UDP', _('UDP')), ('ICMP', _('ICMP'))]:
if tup[0] != protocol:
protocol_choices.append(tup)
self.fields['protocol'].choices = protocol_choices
action_choices = [(action, action)]
for tup in [('ALLOW', _('ALLOW')), ('DENY', _('DENY'))]:
if tup[0] != action:
action_choices.append(tup)
self.fields['action'].choices = action_choices
def handle(self, request, context):
rule_id = self.initial['rule_id']
name_or_id = context.get('name') or rule_id
for f in ['source_ip_address', 'destination_ip_address',
'source_port', 'destination_port']:
if not context[f]:
context[f] = None
try:
rule = api.fwaas.rule_update(request, rule_id, **context)
msg = _('Rule %s was successfully updated.') % name_or_id
LOG.debug(msg)
messages.success(request, msg)
return rule
except Exception as e:
msg = (_('Failed to update rule %(name)s: %(reason)s') %
{'name': name_or_id, 'reason': e})
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class UpdatePolicy(forms.SelfHandlingForm):
name = forms.CharField(max_length=80, label=_("Name"), required=False)
description = forms.CharField(required=False,
max_length=80, label=_("Description"))
shared = forms.BooleanField(label=_("Shared"), required=False)
audited = forms.BooleanField(label=_("Audited"), required=False)
failure_url = 'horizon:project:firewalls:index'
def handle(self, request, context):
policy_id = self.initial['policy_id']
name_or_id = context.get('name') or policy_id
try:
policy = api.fwaas.policy_update(request, policy_id, **context)
msg = _('Policy %s was successfully updated.') % name_or_id
LOG.debug(msg)
messages.success(request, msg)
return policy
except Exception as e:
msg = _('Failed to update policy %(name)s: %(reason)s') % {
'name': name_or_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class UpdateFirewall(forms.SelfHandlingForm):
name = forms.CharField(max_length=80,
label=_("Name"),
required=False)
description = forms.CharField(max_length=80,
label=_("Description"),
required=False)
firewall_policy_id = forms.ChoiceField(label=_("Policy"))
admin_state_up = forms.BooleanField(label=_("Admin State Up"),
required=False)
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(UpdateFirewall, self).__init__(request, *args, **kwargs)
try:
tenant_id = self.request.user.tenant_id
policies = api.fwaas.policy_list(request, tenant_id=tenant_id)
policies = sorted(policies, key=lambda policy: policy.name)
except Exception:
exceptions.handle(request,
_('Unable to retrieve policy list.'))
policies = []
policy_id = kwargs['initial']['firewall_policy_id']
policy_name = [p.name for p in policies if p.id == policy_id][0]
firewall_policy_id_choices = [(policy_id, policy_name)]
for p in policies:
if p.id != policy_id:
p.set_id_as_name_if_empty()
firewall_policy_id_choices.append((p.id, p.name))
self.fields['firewall_policy_id'].choices = firewall_policy_id_choices
def handle(self, request, context):
firewall_id = self.initial['firewall_id']
name_or_id = context.get('name') or firewall_id
try:
firewall = api.fwaas.firewall_update(request, firewall_id,
**context)
msg = _('Firewall %s was successfully updated.') % name_or_id
LOG.debug(msg)
messages.success(request, msg)
return firewall
except Exception as e:
msg = _('Failed to update firewall %(name)s: %(reason)s') % {
'name': name_or_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class InsertRuleToPolicy(forms.SelfHandlingForm):
firewall_rule_id = forms.ChoiceField(label=_("Insert Rule"))
insert_before = forms.ChoiceField(label=_("Before"),
required=False)
insert_after = forms.ChoiceField(label=_("After"),
required=False)
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(InsertRuleToPolicy, self).__init__(request, *args, **kwargs)
tenant_id = self.request.user.tenant_id
try:
all_rules = api.fwaas.rule_list(request, tenant_id=tenant_id)
for r in all_rules:
r.set_id_as_name_if_empty()
all_rules = sorted(all_rules, key=lambda rule: rule.name)
available_rules = [r for r in all_rules
if not r.firewall_policy_id]
current_rules = []
for r in kwargs['initial']['firewall_rules']:
r_obj = [rule for rule in all_rules if r == rule.id][0]
current_rules.append(r_obj)
available_choices = [(r.id, r.name) for r in available_rules]
current_choices = [(r.id, r.name) for r in current_rules]
except Exception as e:
msg = _('Failed to retrieve available rules: %s') % e
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
self.fields['firewall_rule_id'].choices = available_choices
self.fields['insert_before'].choices = [('', '')] + current_choices
self.fields['insert_after'].choices = [('', '')] + current_choices
def handle(self, request, context):
policy_id = self.initial['policy_id']
policy_name_or_id = self.initial['name'] or policy_id
try:
insert_rule_id = context['firewall_rule_id']
insert_rule = api.fwaas.rule_get(request, insert_rule_id)
body = {'firewall_rule_id': insert_rule_id,
'insert_before': context['insert_before'],
'insert_after': context['insert_after']}
policy = api.fwaas.policy_insert_rule(request, policy_id, **body)
msg = _('Rule %(rule)s was successfully inserted to policy '
'%(policy)s.') % {
'rule': insert_rule.name or insert_rule.id,
'policy': policy_name_or_id}
LOG.debug(msg)
messages.success(request, msg)
return policy
except Exception as e:
msg = _('Failed to insert rule to policy %(name)s: %(reason)s') % {
'name': policy_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class RemoveRuleFromPolicy(forms.SelfHandlingForm):
firewall_rule_id = forms.ChoiceField(label=_("Remove Rule"))
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(RemoveRuleFromPolicy, self).__init__(request, *args, **kwargs)
tenant_id = request.user.tenant_id
try:
all_rules = api.fwaas.rule_list(request, tenant_id=tenant_id)
for r in all_rules:
r.set_id_as_name_if_empty()
current_rules = []
for r in kwargs['initial']['firewall_rules']:
r_obj = [rule for rule in all_rules if r == rule.id][0]
current_rules.append(r_obj)
current_choices = [(r.id, r.name) for r in current_rules]
except Exception as e:
msg = _('Failed to retrieve current rules in policy %(name)s: '
'%(reason)s') % {'name': self.initial['name'], 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
self.fields['firewall_rule_id'].choices = current_choices
def handle(self, request, context):
policy_id = self.initial['policy_id']
policy_name_or_id = self.initial['name'] or policy_id
try:
remove_rule_id = context['firewall_rule_id']
remove_rule = api.fwaas.rule_get(request, remove_rule_id)
body = {'firewall_rule_id': remove_rule_id}
policy = api.fwaas.policy_remove_rule(request, policy_id, **body)
msg = _('Rule %(rule)s was successfully removed from policy '
'%(policy)s.') % {
'rule': remove_rule.name or remove_rule.id,
'policy': policy_name_or_id}
LOG.debug(msg)
messages.success(request, msg)
return policy
except Exception as e:
msg = _('Failed to remove rule from policy %(name)s: '
'%(reason)s') % {'name': self.initial['name'],
'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
|
|
import time
import numpy as np
try:
import matplotlib.pyplot as plt
has_mpl = True
except ImportError:
has_mpl = False
print 'Warning: findlines.py: No matplotlib found'
print ' Some plotting routines are disabled'
#from sigma_clip import sigma_clipping
# TBD: Should this be separated in code that requires sherpa and code that does not?
import sherpa.astro.ui as ui
import shmodelshelper as smh
def maximum_filter_noscipy(input, size):
'''reimplement scipy.ndimage.maximum_filter1d
    This implementation is in pure python for compatibility in case
    scipy is not available. The scipy version is written in C and should
    be faster for larger arrays.
    This procedure implements only a subset of the options of the
    scipy version.
Calculate a one-dimensional maximum filter along a 1-d array.
Parameters
----------
input : array-like
input array to filter
size : int
length along which to calculate 1D maximum
'''
if input.ndim != 1:
raise ValueError('Input array must have exactly one dimension')
maxfilter = np.zeros_like(input)
for i in range(input.size):
        # np.max (not the binary ufunc np.maximum) returns the maximum of the slice
        maxfilter[i] = np.max(input[max(0, i-size):min(i+size, input.size)])
return maxfilter
try:
from scipy.ndimage import maximum_filter1d
except ImportError:
maximum_filter1d = maximum_filter_noscipy
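# Illustrative usage sketch (not part of the original module): with the pure
# python fallback above, the window for index i is
# input[max(0, i-size):min(i+size, len(input))], so for example
#
#     y = np.array([0., 1., 0., 3., 2., 0.])
#     maximum_filter_noscipy(y, 2)   # -> array([1., 1., 3., 3., 3., 3.])
#
# scipy's maximum_filter1d uses a slightly different window convention and may
# differ near the edges; findlines() below only compares the filtered array
# against the original to flag local maxima, so either definition is adequate.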
def findlines(x, y, fwhm, smoothwindow = 'hanning', sigma_threshold = 3.):
'''
    Several things are tried here and I am not quite sure yet what will turn out to be useful
    - smoothing: show real peaks and not just noise
    - maximum_filter == array: will find the peaks
    - sigma_clipping: are the peaks large enough to be relevant?
Parameters
----------
x : ndarray
x values, e.g. wavelength
y : ndarray
y values, e.g. flux or res_flux / error
fwhm : float
estimate for FWHM of lines. Used as smoothing scale
smoothwindow : string or None
        if `smoothwindow` is one of `['flat', 'hanning', 'hamming',
        'bartlett', 'blackman']` a corresponding window function
        will be used to smooth the signal before line detection.
Returns
-------
peaks : ndarray
index numbers for peaks found
'''
fwhminpix = int(fwhm / np.diff(x).mean())
if smoothwindow is not None:
#print smoothwindow
#print fwhminpix
y = smooth(y, window_len = 3*fwhminpix, window = smoothwindow)
maxindex = (maximum_filter1d(y, max(fwhminpix,3)) == y)
maxindex = maxindex & (y > (y.mean() + sigma_threshold * y.std()))
# sigma_clipping works only if there is plenty of continuum
#clipped_y = sigma_clipping(y, threshold = sigma_threshold)
    # believe only peaks which are so large that they get clipped by sigma_clipping
#maxindex = maxindex & (clipped_y.mask == False)
return np.flatnonzero(maxindex)
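# Illustrative sketch with synthetic data (assumed, not from the original
# module): detect a single emission line on a noisy flat continuum; the exact
# indices returned depend on the noise realisation.
#
#     x = np.linspace(6500., 6600., 2000)
#     flux = np.random.normal(1., 0.05, x.size)
#     flux += 5. * np.exp(-0.5 * ((x - 6563.) / 0.5) ** 2)
#     peaks = findlines(x, flux, fwhm=1.2)
#     print x[peaks]   # wavelengths of the detected peaks, near 6563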
def smooth(x,window_len=11, window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
    in the beginning and end parts of the output signal.
Parameters
----------
x: ndarray
the input signal
window_len: integer , optional
The dimension of the smoothing window; should be an odd integer
window: string, optional
The type of window from `['flat', 'hanning', 'hamming', 'bartlett',
'blackman']`. A 'flat' window will produce a moving average
smoothing.
Returns
-------
y : ndarray
the smoothed signal
    example:
    t = np.arange(-2, 2, 0.1)
    x = np.sin(t) + np.random.randn(len(t)) * 0.1
    y = smooth(x)
See also
--------
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
window could accept even number
from http://www.scipy.org/Cookbook/SignalSmooth
"""
if x.ndim != 1:
raise ValueError, "smooth only accepts 1 dimension arrays."
if x.size < window_len:
raise ValueError, "Input vector needs to be bigger than window size."
if window_len<3:
return x
# make it an odd number, so that reflection of values is same on each side
if np.mod(window_len,2) != 1:
window_len +=1
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
s=np.r_[x[(window_len-1)/2:0:-1],x,x[-1:-window_len/2:-1]]
#print(len(s))
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
        w = getattr(np, window)(window_len)  # e.g. np.hanning(window_len); avoids eval
y=np.convolve(w/w.sum(),s,mode='valid')
return y
smoothwindow = 'hanning'
sigma_threshold = 2.
def mainloop(mymodel, fwhm, id = None, maxiter = 5, mindist = 0., do_plots = 0):
if id is None:
id = ui.get_default_id()
data = ui.get_data(id)
wave = data.get_indep()[0]
error = data.get_error()[0]
    # model could have been initialized with arbitrary values
ui.fit(id)
for i in range(maxiter):
oldmodel = smh.get_model_parts(id)
res_flux = ui.get_resid_plot(id).y
if smoothwindow is not None:
fwhminpix = int(fwhm / np.diff(wave).mean())
y = smooth(res_flux/error, window_len = 3*fwhminpix, window = smoothwindow)
else:
y = res_flux/error
peaks = findlines(wave, y, fwhm, smoothwindow = None, sigma_threshold = sigma_threshold)
if has_mpl and (do_plots > 2):
plt.figure()
plt.plot(wave, res_flux/error, 's')
for pos in mymodel.line_value_list('pos'):
plt.plot([pos, pos], plt.ylim(),'k:')
for peak in peaks:
plt.plot([wave[peak], wave[peak]], plt.ylim())
plt.plot(wave, y)
plt.draw()
for peak in peaks:
if (len(mymodel.line_value_list('pos')) == 0) or (min(np.abs(mymodel.line_value_list('pos') - wave[peak])) >= mindist):
mymodel.add_line(**mymodel.guess(wave, smooth(res_flux, window_len = 3*fwhminpix, window = smoothwindow), peak, fwhm = fwhm))
newmodel = smh.get_model_parts(id)
print 'Iteration {0:3n}: {1:3n} lines added'.format(i, len(newmodel) - len(oldmodel))
if set(newmodel) == set(oldmodel):
print 'No new lines added this step - fitting finished'
break
# Now do the fitting in Sherpa
#ui.set_method('simplex')
ui.fit(id)
#ui.set_method('moncar')
#ui.fit(id)
if has_mpl and (do_plots > 0):
if do_plots > 1:
plt.figure()
else:
plt.clf()
ui.plot_fit(id)
for pos in mymodel.line_value_list('pos'):
plt.plot([pos, pos], plt.ylim(),'k:')
for peak in peaks:
plt.plot([wave[peak], wave[peak]], plt.ylim())
plt.plot(wave, res_flux)
plt.draw()
else:
print 'Max number of iterations reached'
#model.cleanup() #remove lines running to 0 etc.
return mymodel
|
|
"""Helper classes for Google Assistant integration."""
from asyncio import gather
from collections.abc import Mapping
import logging
import pprint
from typing import List, Optional
from aiohttp.web import json_response
from homeassistant.core import Context, callback, HomeAssistant, State
from homeassistant.helpers.event import async_call_later
from homeassistant.components import webhook
from homeassistant.const import (
CONF_NAME,
STATE_UNAVAILABLE,
ATTR_SUPPORTED_FEATURES,
ATTR_DEVICE_CLASS,
CLOUD_NEVER_EXPOSED_ENTITIES,
)
from . import trait
from .const import (
DOMAIN,
DOMAIN_TO_GOOGLE_TYPES,
CONF_ALIASES,
ERR_FUNCTION_NOT_SUPPORTED,
DEVICE_CLASS_TO_GOOGLE_TYPES,
CONF_ROOM_HINT,
)
from .error import SmartHomeError
SYNC_DELAY = 15
_LOGGER = logging.getLogger(__name__)
class AbstractConfig:
"""Hold the configuration for Google Assistant."""
_unsub_report_state = None
def __init__(self, hass):
"""Initialize abstract config."""
self.hass = hass
self._google_sync_unsub = None
self._local_sdk_active = False
@property
def enabled(self):
"""Return if Google is enabled."""
return False
@property
def agent_user_id(self):
"""Return Agent User Id to use for query responses."""
return None
@property
def entity_config(self):
"""Return entity config."""
return {}
@property
def secure_devices_pin(self):
"""Return entity config."""
return None
@property
def is_reporting_state(self):
"""Return if we're actively reporting states."""
return self._unsub_report_state is not None
@property
def is_local_sdk_active(self):
"""Return if we're actively accepting local messages."""
return self._local_sdk_active
@property
def should_report_state(self):
"""Return if states should be proactively reported."""
# pylint: disable=no-self-use
return False
@property
def local_sdk_webhook_id(self):
"""Return the local SDK webhook ID.
Return None to disable the local SDK.
"""
return None
@property
def local_sdk_user_id(self):
"""Return the user ID to be used for actions received via the local SDK."""
raise NotImplementedError
def should_expose(self, state) -> bool:
"""Return if entity should be exposed."""
raise NotImplementedError
def should_2fa(self, state):
"""If an entity should have 2FA checked."""
# pylint: disable=no-self-use
return True
async def async_report_state(self, message):
"""Send a state report to Google."""
raise NotImplementedError
def async_enable_report_state(self):
"""Enable proactive mode."""
# Circular dep
from .report_state import async_enable_report_state
if self._unsub_report_state is None:
self._unsub_report_state = async_enable_report_state(self.hass, self)
def async_disable_report_state(self):
"""Disable report state."""
if self._unsub_report_state is not None:
self._unsub_report_state()
self._unsub_report_state = None
async def async_sync_entities(self):
"""Sync all entities to Google."""
# Remove any pending sync
if self._google_sync_unsub:
self._google_sync_unsub()
self._google_sync_unsub = None
return await self._async_request_sync_devices()
async def _schedule_callback(self, _now):
"""Handle a scheduled sync callback."""
self._google_sync_unsub = None
await self.async_sync_entities()
@callback
def async_schedule_google_sync(self):
"""Schedule a sync."""
if self._google_sync_unsub:
self._google_sync_unsub()
self._google_sync_unsub = async_call_later(
self.hass, SYNC_DELAY, self._schedule_callback
)
async def _async_request_sync_devices(self) -> int:
"""Trigger a sync with Google.
Return value is the HTTP status code of the sync request.
"""
raise NotImplementedError
async def async_deactivate_report_state(self):
"""Turn off report state and disable further state reporting.
Called when the user disconnects their account from Google.
"""
@callback
def async_enable_local_sdk(self):
"""Enable the local SDK."""
webhook_id = self.local_sdk_webhook_id
if webhook_id is None:
return
webhook.async_register(
self.hass, DOMAIN, "Local Support", webhook_id, self._handle_local_webhook
)
self._local_sdk_active = True
@callback
def async_disable_local_sdk(self):
"""Disable the local SDK."""
if not self._local_sdk_active:
return
webhook.async_unregister(self.hass, self.local_sdk_webhook_id)
self._local_sdk_active = False
async def _handle_local_webhook(self, hass, webhook_id, request):
"""Handle an incoming local SDK message."""
from . import smart_home
payload = await request.json()
if _LOGGER.isEnabledFor(logging.DEBUG):
_LOGGER.debug("Received local message:\n%s\n", pprint.pformat(payload))
if not self.enabled:
return json_response(smart_home.turned_off_response(payload))
result = await smart_home.async_handle_message(
self.hass, self, self.local_sdk_user_id, payload
)
if _LOGGER.isEnabledFor(logging.DEBUG):
_LOGGER.debug("Responding to local message:\n%s\n", pprint.pformat(result))
return json_response(result)
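# Illustrative sketch (hypothetical, not part of this integration): a concrete
# config only needs to fill in the abstract pieces of AbstractConfig above.
#
#     class ExampleConfig(AbstractConfig):
#         """Expose everything, accept syncs, no local SDK."""
#
#         @property
#         def enabled(self):
#             return True
#
#         def should_expose(self, state) -> bool:
#             return True
#
#         async def _async_request_sync_devices(self) -> int:
#             return 200  # pretend Google accepted the SYNC request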
class RequestData:
"""Hold data associated with a particular request."""
def __init__(
self,
config: AbstractConfig,
user_id: str,
request_id: str,
devices: Optional[List[dict]],
):
"""Initialize the request data."""
self.config = config
self.request_id = request_id
self.context = Context(user_id=user_id)
self.devices = devices
def get_google_type(domain, device_class):
"""Google type based on domain and device class."""
typ = DEVICE_CLASS_TO_GOOGLE_TYPES.get((domain, device_class))
return typ if typ is not None else DOMAIN_TO_GOOGLE_TYPES[domain]
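# For example, get_google_type("switch", "outlet") would prefer a matching
# (domain, device_class) entry if DEVICE_CLASS_TO_GOOGLE_TYPES defines one and
# otherwise falls back to DOMAIN_TO_GOOGLE_TYPES["switch"] (example key only).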
class GoogleEntity:
"""Adaptation of Entity expressed in Google's terms."""
def __init__(self, hass: HomeAssistant, config: AbstractConfig, state: State):
"""Initialize a Google entity."""
self.hass = hass
self.config = config
self.state = state
self._traits = None
@property
def entity_id(self):
"""Return entity ID."""
return self.state.entity_id
@callback
def traits(self):
"""Return traits for entity."""
if self._traits is not None:
return self._traits
state = self.state
domain = state.domain
features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
self._traits = [
Trait(self.hass, state, self.config)
for Trait in trait.TRAITS
if Trait.supported(domain, features, device_class)
]
return self._traits
@callback
def should_expose(self):
"""If entity should be exposed."""
return self.config.should_expose(self.state)
@callback
def is_supported(self) -> bool:
"""Return if the entity is supported by Google."""
return self.state.state != STATE_UNAVAILABLE and bool(self.traits())
@callback
def might_2fa(self) -> bool:
"""Return if the entity might encounter 2FA."""
state = self.state
domain = state.domain
features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
return any(
trait.might_2fa(domain, features, device_class) for trait in self.traits()
)
async def sync_serialize(self):
"""Serialize entity for a SYNC response.
https://developers.google.com/actions/smarthome/create-app#actiondevicessync
"""
state = self.state
entity_config = self.config.entity_config.get(state.entity_id, {})
name = (entity_config.get(CONF_NAME) or state.name).strip()
domain = state.domain
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
traits = self.traits()
device_type = get_google_type(domain, device_class)
device = {
"id": state.entity_id,
"name": {"name": name},
"attributes": {},
"traits": [trait.name for trait in traits],
"willReportState": self.config.should_report_state,
"type": device_type,
}
# use aliases
aliases = entity_config.get(CONF_ALIASES)
if aliases:
device["name"]["nicknames"] = aliases
if self.config.is_local_sdk_active:
device["otherDeviceIds"] = [{"deviceId": self.entity_id}]
device["customData"] = {
"webhookId": self.config.local_sdk_webhook_id,
"httpPort": self.hass.config.api.port,
"httpSSL": self.hass.config.api.use_ssl,
"proxyDeviceId": self.config.agent_user_id,
}
for trt in traits:
device["attributes"].update(trt.sync_attributes())
room = entity_config.get(CONF_ROOM_HINT)
if room:
device["roomHint"] = room
return device
dev_reg, ent_reg, area_reg = await gather(
self.hass.helpers.device_registry.async_get_registry(),
self.hass.helpers.entity_registry.async_get_registry(),
self.hass.helpers.area_registry.async_get_registry(),
)
entity_entry = ent_reg.async_get(state.entity_id)
if not (entity_entry and entity_entry.device_id):
return device
device_entry = dev_reg.devices.get(entity_entry.device_id)
if not (device_entry and device_entry.area_id):
return device
area_entry = area_reg.areas.get(device_entry.area_id)
if area_entry and area_entry.name:
device["roomHint"] = area_entry.name
return device
@callback
def query_serialize(self):
"""Serialize entity for a QUERY response.
https://developers.google.com/actions/smarthome/create-app#actiondevicesquery
"""
state = self.state
if state.state == STATE_UNAVAILABLE:
return {"online": False}
attrs = {"online": True}
for trt in self.traits():
deep_update(attrs, trt.query_attributes())
return attrs
@callback
def reachable_device_serialize(self):
"""Serialize entity for a REACHABLE_DEVICE response."""
return {"verificationId": self.entity_id}
async def execute(self, data, command_payload):
"""Execute a command.
https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute
"""
command = command_payload["command"]
params = command_payload.get("params", {})
challenge = command_payload.get("challenge", {})
executed = False
for trt in self.traits():
if trt.can_execute(command, params):
await trt.execute(command, data, params, challenge)
executed = True
break
if not executed:
raise SmartHomeError(
ERR_FUNCTION_NOT_SUPPORTED,
f"Unable to execute {command} for {self.state.entity_id}",
)
@callback
def async_update(self):
"""Update the entity with latest info from Home Assistant."""
self.state = self.hass.states.get(self.entity_id)
if self._traits is None:
return
for trt in self._traits:
trt.state = self.state
def deep_update(target, source):
"""Update a nested dictionary with another nested dictionary."""
for key, value in source.items():
if isinstance(value, Mapping):
target[key] = deep_update(target.get(key, {}), value)
else:
target[key] = value
return target
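# A quick sketch of the merge semantics (values are illustrative):
#
#     deep_update({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3})
#     # -> {"a": {"b": 1, "c": 2}, "d": 3}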
@callback
def async_get_entities(hass, config) -> List[GoogleEntity]:
"""Return all entities that are supported by Google."""
entities = []
for state in hass.states.async_all():
if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
continue
entity = GoogleEntity(hass, config, state)
if entity.is_supported():
entities.append(entity)
return entities
|
|
# -*- coding: utf-8 -*-
"""
Test cases related to SAX I/O
"""
import unittest, sys, os.path
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.insert(0, this_dir) # needed for Py3
from common_imports import HelperTestCase, doctest, make_doctest, BytesIO, _bytes
from lxml import sax
from xml.dom import pulldom
class ETreeSaxTestCase(HelperTestCase):
def test_etree_sax_simple(self):
tree = self.parse('<a>ab<b/>ba</a>')
xml_out = self._saxify_serialize(tree)
self.assertEqual(_bytes('<a>ab<b/>ba</a>'),
xml_out)
def test_etree_sax_double(self):
tree = self.parse('<a>ab<b>bb</b>ba</a>')
xml_out = self._saxify_serialize(tree)
self.assertEqual(_bytes('<a>ab<b>bb</b>ba</a>'),
xml_out)
def test_etree_sax_comment(self):
tree = self.parse('<a>ab<!-- TEST -->ba</a>')
xml_out = self._saxify_serialize(tree)
self.assertEqual(_bytes('<a>abba</a>'),
xml_out)
def test_etree_sax_pi(self):
tree = self.parse('<a>ab<?this and that?>ba</a>')
xml_out = self._saxify_serialize(tree)
self.assertEqual(_bytes('<a>ab<?this and that?>ba</a>'),
xml_out)
def test_etree_sax_comment_root(self):
tree = self.parse('<!-- TEST --><a>ab</a>')
xml_out = self._saxify_serialize(tree)
self.assertEqual(_bytes('<a>ab</a>'),
xml_out)
def test_etree_sax_pi_root(self):
tree = self.parse('<?this and that?><a>ab</a>')
xml_out = self._saxify_serialize(tree)
self.assertEqual(_bytes('<?this and that?><a>ab</a>'),
xml_out)
def test_etree_sax_attributes(self):
tree = self.parse('<a aa="5">ab<b b="5"/>ba</a>')
xml_out = self._saxify_serialize(tree)
self.assertEqual(_bytes('<a aa="5">ab<b b="5"/>ba</a>'),
xml_out)
def test_etree_sax_ns1(self):
tree = self.parse('<a xmlns="bla">ab<b>bb</b>ba</a>')
new_tree = self._saxify_unsaxify(tree)
root = new_tree.getroot()
self.assertEqual('{bla}a',
root.tag)
self.assertEqual('{bla}b',
root[0].tag)
def test_etree_sax_ns2(self):
tree = self.parse('<a xmlns="blaA">ab<b:b xmlns:b="blaB">bb</b:b>ba</a>')
new_tree = self._saxify_unsaxify(tree)
root = new_tree.getroot()
self.assertEqual('{blaA}a',
root.tag)
self.assertEqual('{blaB}b',
root[0].tag)
def test_sax_to_pulldom(self):
tree = self.parse('<a xmlns="blaA">ab<b:b xmlns:b="blaB">bb</b:b>ba</a>')
handler = pulldom.SAX2DOM()
sax.saxify(tree, handler)
dom = handler.document
self.assertEqual('a',
dom.firstChild.localName)
self.assertEqual('blaA',
dom.firstChild.namespaceURI)
children = dom.firstChild.childNodes
self.assertEqual('ab',
children[0].nodeValue)
self.assertEqual('blaB',
children[1].namespaceURI)
self.assertEqual('ba',
children[2].nodeValue)
def test_element_sax(self):
tree = self.parse('<a><b/></a>')
a = tree.getroot()
b = a[0]
xml_out = self._saxify_serialize(a)
self.assertEqual(_bytes('<a><b/></a>'),
xml_out)
xml_out = self._saxify_serialize(b)
self.assertEqual(_bytes('<b/>'),
xml_out)
def test_element_sax_ns(self):
tree = self.parse('<a:a xmlns:a="blaA"><b/></a:a>')
a = tree.getroot()
b = a[0]
new_tree = self._saxify_unsaxify(a)
root = new_tree.getroot()
self.assertEqual('{blaA}a',
root.tag)
self.assertEqual('b',
root[0].tag)
new_tree = self._saxify_unsaxify(b)
root = new_tree.getroot()
self.assertEqual('b',
root.tag)
self.assertEqual(0,
len(root))
def test_etree_sax_handler_default_ns(self):
handler = sax.ElementTreeContentHandler()
handler.startDocument()
handler.startPrefixMapping(None, 'blaA')
handler.startElementNS(('blaA', 'a'), 'a', {})
handler.startPrefixMapping(None, 'blaB')
handler.startElementNS(('blaB', 'b'), 'b', {})
handler.endElementNS( ('blaB', 'b'), 'b')
handler.endPrefixMapping(None)
handler.startElementNS(('blaA', 'c'), 'c', {})
handler.endElementNS( ('blaA', 'c'), 'c')
handler.endElementNS( ('blaA', 'a'), 'a')
handler.endPrefixMapping(None)
handler.endDocument()
new_tree = handler.etree
root = new_tree.getroot()
self.assertEqual('{blaA}a',
root.tag)
self.assertEqual('{blaB}b',
root[0].tag)
self.assertEqual('{blaA}c',
root[1].tag)
def test_etree_sax_handler_default_ns_None(self):
handler = sax.ElementTreeContentHandler()
handler.startDocument()
handler.startPrefixMapping(None, 'blaA')
handler.startElementNS((None, 'a'), 'a', {})
handler.startPrefixMapping(None, 'blaB')
handler.startElementNS((None, 'b'), 'b', {})
handler.endElementNS( (None, 'b'), 'b')
handler.endPrefixMapping(None)
handler.startElementNS((None, 'c'), 'c', {})
handler.endElementNS( (None, 'c'), 'c')
handler.endElementNS( (None, 'a'), 'a')
handler.endPrefixMapping(None)
handler.endDocument()
new_tree = handler.etree
root = new_tree.getroot()
self.assertEqual('{blaA}a',
root.tag)
self.assertEqual('{blaB}b',
root[0].tag)
self.assertEqual('{blaA}c',
root[1].tag)
def test_etree_sax_redefine_ns(self):
handler = sax.ElementTreeContentHandler()
handler.startDocument()
handler.startPrefixMapping('ns', 'blaA')
handler.startElementNS(('blaA', 'a'), 'ns:a', {})
handler.startPrefixMapping('ns', 'blaB')
handler.startElementNS(('blaB', 'b'), 'ns:b', {})
handler.endElementNS( ('blaB', 'b'), 'ns:b')
handler.endPrefixMapping('ns')
handler.startElementNS(('blaA', 'c'), 'ns:c', {})
handler.endElementNS( ('blaA', 'c'), 'ns:c')
handler.endElementNS( ('blaA', 'a'), 'ns:a')
handler.endPrefixMapping('ns')
handler.endDocument()
new_tree = handler.etree
root = new_tree.getroot()
self.assertEqual('{blaA}a',
root.tag)
self.assertEqual('{blaB}b',
root[0].tag)
self.assertEqual('{blaA}c',
root[1].tag)
def test_etree_sax_no_ns(self):
handler = sax.ElementTreeContentHandler()
handler.startDocument()
handler.startElement('a', {})
handler.startElement('b', {})
handler.endElement('b')
handler.startElement('c') # with empty attributes
handler.endElement('c')
handler.endElement('a')
handler.endDocument()
new_tree = handler.etree
root = new_tree.getroot()
self.assertEqual('a', root.tag)
self.assertEqual('b', root[0].tag)
self.assertEqual('c', root[1].tag)
def test_etree_sax_no_ns_attributes(self):
handler = sax.ElementTreeContentHandler()
handler.startDocument()
handler.startElement('a', {"attr_a1": "a1"})
handler.startElement('b', {"attr_b1": "b1"})
handler.endElement('b')
handler.endElement('a')
handler.endDocument()
new_tree = handler.etree
root = new_tree.getroot()
self.assertEqual('a', root.tag)
self.assertEqual('b', root[0].tag)
self.assertEqual('a1', root.attrib["attr_a1"])
self.assertEqual('b1', root[0].attrib["attr_b1"])
def test_etree_sax_ns_attributes(self):
handler = sax.ElementTreeContentHandler()
handler.startDocument()
self.assertRaises(ValueError,
handler.startElement,
'a', {"blaA:attr_a1": "a1"}
)
def test_etree_sax_error(self):
handler = sax.ElementTreeContentHandler()
handler.startDocument()
handler.startElement('a')
self.assertRaises(sax.SaxError, handler.endElement, 'b')
def test_etree_sax_error2(self):
handler = sax.ElementTreeContentHandler()
handler.startDocument()
handler.startElement('a')
handler.startElement('b')
self.assertRaises(sax.SaxError, handler.endElement, 'a')
def _saxify_unsaxify(self, saxifiable):
handler = sax.ElementTreeContentHandler()
sax.ElementTreeProducer(saxifiable, handler).saxify()
return handler.etree
def _saxify_serialize(self, tree):
new_tree = self._saxify_unsaxify(tree)
f = BytesIO()
new_tree.write(f)
return f.getvalue().replace(_bytes('\n'), _bytes(''))
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ETreeSaxTestCase)])
suite.addTests(
[make_doctest('../../../doc/sax.txt')])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
|
|
# Copyright (C) 2013-2015 MetaMorph Software, Inc
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# Author: Di Yao ([email protected])
# Name: CreateCsysFromStep.py
# Description: Contains classes and functions to create an xml file conforming to the CADDatumEditor schema.
# Calls StepFileUtility to parse a STEP file to Coordinate System objects.
# Populates the xml file with Coordinate System objects.
#
# Version: 0.1 (11/15/2012) - first draft
# 0.2 (11/29/2012) - formatted xml to look pretty using minidom
# bug fix so <AddCoordinateSystems> is before <AddDatums>
# bug fix so <Origin> is before <XVector> and <YVector>
# 0.3 (12/11/2012) - schema change: Type, Format and File attributes to <CADComponent>
# changed cmd line arguments to -i, -m, and -t
# 0.4 (12/20/2012) - DirectoryPath attribute changed to "." from "C:\Temp\scratch\2012_10_02\DeleteDatums"
import os
import sys
import xml.etree.cElementTree as ET
import logging
import StepFileUtility
from time import gmtime, strftime
from xml.dom import minidom #for making output xml prettier
def setup_logger():
logger = logging.getLogger('root')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('Log_CreateCsy.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
return logger
module_logger = setup_logger()
class CADDatumXmlHandler:
def __init__(self, part_name, part_step_file, part_type, csys):
self._filename = part_name + '_CSYS.xml'
self._coordinate_system = csys
self._logger = logging.getLogger('root.XmlHandler')
self._part_name = part_name
self._part_type = part_type
self._part_step_file = part_step_file
def Deserialize(self):
try:
import shutil
shutil.copyfile(self._filename, 'ORIGINAL_' + self._filename)
count = 0
tree = ET.ElementTree(file=self._filename)
for elem in tree.iter(tag='CADComponent'):
                count += 1
if count > 1:
self._logger.error('%d CADComponents found in xml file, should only have 1' %count)
sys.exit(0)
for component in tree.iter(tag='CADComponent'):
found_add_csys = False
#print component.tag, component.attrib
for add in component.iterfind('Add'):
for add_coord in add.iterfind('AddCoordinateSystems'):
self.SerializeCoordinateSystems(add_coord)
found_add_csys = True
if found_add_csys == False: #create a new AddCoordinateSystems node
add_coord = ET.Element('AddCoordinateSystems')
add.insert(0, add_coord)
self.SerializeCoordinateSystems(add_coord)
#No pretty print of xml
#tree.write(self._filename,
# encoding='utf-8')
except Exception, inst:
self._logger.error('Unexpected error in handling %s: %s' %(self._filename, str(inst)))
return
def Serialize(self):
root = ET.Element('CADDatumEditor')
root.set('xmlns:' + 'xsi', 'http://www.w3.org/2001/XMLSchema-instance')
root.set('xsi:' + 'noNamespaceSchemaLocation', 'CADDatumEditor.xsd')
cad_components_node = ET.SubElement(root, 'CADComponents')
libraries_node = ET.SubElement(root, 'Libraries')
library_node = ET.SubElement(libraries_node, 'Library')
library_node.attrib['DirectoryPath'] = '.'
library_node.attrib['ID'] = 'Lib1'
cad_component_node = ET.SubElement(cad_components_node, 'CADComponent')
cad_component_node.attrib['LibraryID'] = 'Lib1'
cad_component_node.attrib['Type'] = self._part_type
cad_component_node.attrib['Name'] = self._part_name
cad_component_node.attrib['File'] = self._part_step_file
cad_component_node.attrib['Format'] = 'STEP'
add_node = ET.SubElement(cad_component_node, 'Add')
add_csys_node = ET.SubElement(add_node, 'AddCoordinateSystems')
self.SerializeCoordinateSystems(add_csys_node)
import shutil
if os.path.exists(self._filename):
shutil.copyfile(self._filename, 'ORIGINAL_' + self._filename)
rough_string = ET.tostring(root, 'utf-8')
reparsed = minidom.parseString(rough_string)
f=open(self._filename, 'w')
f.write(reparsed.toprettyxml(indent=" "))
def SerializeCoordinateSystems(self, add_csys_main_node):
import decimal
for key, item in self._coordinate_system.items():
add_coord = ET.SubElement(add_csys_main_node, 'AddCoordinateSystem')
add_coord.attrib['ReplaceIfExists'] = 'False'
add_coord.attrib['CoordinateSystemName'] = 'CSY_%d' %key
origin = None
if item._x is not None:
if item._x._origin is not None:
origin = item._x._origin
xvector = ET.SubElement(add_coord, 'XVector')
xvector.attrib['X'] = "%0.19f" %item._x._vector[0]
xvector.attrib['Y'] = "%0.19f" %item._x._vector[1]
xvector.attrib['Z'] = "%0.19f" %item._x._vector[2]
if item._y is not None:
if item._y._origin is not None:
                    origin = item._y._origin
yvector = ET.SubElement(add_coord, 'YVector')
yvector.attrib['X'] = "%0.19f" %item._y._vector[0]
yvector.attrib['Y'] = "%0.19f" %item._y._vector[1]
yvector.attrib['Z'] = "%0.19f" %item._y._vector[2]
if origin is not None:
origin_node = ET.Element('Origin')
origin_node.attrib['X'] = "%0.19f" %origin[0]
origin_node.attrib['Y'] = "%0.19f" %origin[1]
origin_node.attrib['Z'] = "%0.19f" %origin[2]
add_coord.insert(0, origin_node)
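# For reference, each coordinate system serialized above becomes a fragment of
# roughly this shape (attribute values are placeholders; real output uses the
# "%0.19f" format):
#
#     <AddCoordinateSystem ReplaceIfExists="False" CoordinateSystemName="CSY_1">
#         <Origin X="0.0" Y="0.0" Z="0.0"/>
#         <XVector X="1.0" Y="0.0" Z="0.0"/>
#         <YVector X="0.0" Y="1.0" Z="0.0"/>
#     </AddCoordinateSystem>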
if __name__ == '__main__':
from optparse import OptionParser
import os.path
usage = "usage: %prog [options] arg1"
cmd_line_parser = OptionParser(usage=usage)
cmd_line_parser.add_option("-i", "",
action="store", type='string', dest="csys_file",
help="Name of coordinate system STEP file")
cmd_line_parser.add_option("-m", "",
action="store", type='string', dest="model_step_file",
help="Name of component step file")
cmd_line_parser.add_option("-t", "",
action="store", type='string', dest="component_type",
help="Type of component (Part/Assembly)")
(options, args) = cmd_line_parser.parse_args()
entity_name = ''
if not os.path.exists(options.csys_file):
module_logger.debug('Csys STEP file does not exist: {0}'.format(options.csys_file))
sys.exit(0)
path, filename = os.path.split(options.model_step_file)
filename, ext = os.path.splitext(filename)
print filename
simple_parser = StepFileUtility.SimpleStepParser(options.csys_file)
coordinate_system_populator = StepFileUtility.PopulateCoordinateInfo(simple_parser)
csys_map = coordinate_system_populator.create_coordinate_vector()
xml_handler = CADDatumXmlHandler(filename, options.model_step_file, options.component_type, csys_map)
xml_handler.Serialize()
|
|
from ee.cli.plugins.stack import EEStackController
from ee.core.fileutils import EEFileUtils
from ee.core.mysql import *
from ee.core.shellexec import *
from ee.core.variables import EEVariables
from ee.cli.plugins.sitedb import *
from ee.core.aptget import EEAptGet
from ee.core.git import EEGit
from ee.core.logging import Log
from ee.core.services import EEService
import subprocess
from subprocess import CalledProcessError
import os
import random
import string
import sys
import getpass
import glob
import re
import platform
class SiteError(Exception):
"""Custom Exception Occured when setting up site"""
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
def pre_run_checks(self):
# Check nginx configuration
Log.info(self, "Running pre-update checks, please wait...")
try:
Log.debug(self, "checking NGINX configuration ...")
FNULL = open('/dev/null', 'w')
ret = subprocess.check_call(["nginx", "-t"], stdout=FNULL,
stderr=subprocess.STDOUT)
except CalledProcessError as e:
Log.debug(self, "{0}".format(str(e)))
raise SiteError("nginx configuration check failed.")
def check_domain_exists(self, domain):
if getSiteInfo(self, domain):
return True
else:
return False
def setupdomain(self, data):
ee_domain_name = data['site_name']
ee_site_webroot = data['webroot'] if 'webroot' in data.keys() else ''
# Check if nginx configuration already exists
# if os.path.isfile('/etc/nginx/sites-available/{0}'
# .format(ee_domain_name)):
# raise SiteError("nginx configuration already exists for site")
Log.info(self, "Setting up NGINX configuration \t", end='')
# write nginx config for file
try:
ee_site_nginx_conf = open('/etc/nginx/sites-available/{0}'
.format(ee_domain_name), encoding='utf-8',
mode='w')
self.app.render((data), 'virtualconf.mustache',
out=ee_site_nginx_conf)
ee_site_nginx_conf.close()
except IOError as e:
Log.debug(self, "{0}".format(e))
raise SiteError("create nginx configuration failed for site")
except Exception as e:
Log.debug(self, "{0}".format(e))
raise SiteError("create nginx configuration failed for site")
finally:
# Check nginx -t and return status over it
try:
Log.debug(self, "Checking generated nginx conf, please wait...")
FNULL = open('/dev/null', 'w')
ret = subprocess.check_call(["nginx", "-t"], stdout=FNULL,
stderr=subprocess.STDOUT)
Log.info(self, "[" + Log.ENDC + "Done" + Log.OKBLUE + "]")
except CalledProcessError as e:
Log.debug(self, "{0}".format(str(e)))
Log.info(self, "[" + Log.ENDC + Log.FAIL + "Fail"
+ Log.OKBLUE + "]")
raise SiteError("created nginx configuration failed for site."
" check with `nginx -t`")
    # create symbolic link for the site's nginx configuration
EEFileUtils.create_symlink(self, ['/etc/nginx/sites-available/{0}'
.format(ee_domain_name),
'/etc/nginx/sites-enabled/{0}'
.format(ee_domain_name)])
if 'proxy' in data.keys() and data['proxy']:
return
# Creating htdocs & logs directory
Log.info(self, "Setting up webroot \t\t", end='')
try:
if not os.path.exists('{0}/htdocs'.format(ee_site_webroot)):
os.makedirs('{0}/htdocs'.format(ee_site_webroot))
if not os.path.exists('{0}/logs'.format(ee_site_webroot)):
os.makedirs('{0}/logs'.format(ee_site_webroot))
if not os.path.exists('{0}/conf/nginx'.format(ee_site_webroot)):
os.makedirs('{0}/conf/nginx'.format(ee_site_webroot))
EEFileUtils.create_symlink(self, ['/var/log/nginx/{0}.access.log'
.format(ee_domain_name),
'{0}/logs/access.log'
.format(ee_site_webroot)])
EEFileUtils.create_symlink(self, ['/var/log/nginx/{0}.error.log'
.format(ee_domain_name),
'{0}/logs/error.log'
.format(ee_site_webroot)])
except Exception as e:
Log.debug(self, "{0}".format(e))
raise SiteError("setup webroot failed for site")
finally:
# TODO Check if directories are setup
if (os.path.exists('{0}/htdocs'.format(ee_site_webroot)) and
os.path.exists('{0}/logs'.format(ee_site_webroot))):
Log.info(self, "[" + Log.ENDC + "Done" + Log.OKBLUE + "]")
else:
Log.info(self, "[" + Log.ENDC + "Fail" + Log.OKBLUE + "]")
raise SiteError("setup webroot failed for site")
def setupdatabase(self, data):
ee_domain_name = data['site_name']
ee_random = (''.join(random.sample(string.ascii_uppercase +
string.ascii_lowercase + string.digits, 15)))
ee_replace_dot = ee_domain_name.replace('.', '_')
prompt_dbname = self.app.config.get('mysql', 'db-name')
prompt_dbuser = self.app.config.get('mysql', 'db-user')
ee_mysql_grant_host = self.app.config.get('mysql', 'grant-host')
ee_db_name = ''
ee_db_username = ''
ee_db_password = ''
if prompt_dbname == 'True' or prompt_dbname == 'true':
try:
ee_db_name = input('Enter the MySQL database name [{0}]: '
.format(ee_replace_dot))
except EOFError as e:
Log.debug(self, "{0}".format(e))
raise SiteError("Unable to input database name")
if not ee_db_name:
ee_db_name = ee_replace_dot
if prompt_dbuser == 'True' or prompt_dbuser == 'true':
try:
ee_db_username = input('Enter the MySQL database user name [{0}]: '
.format(ee_replace_dot))
ee_db_password = getpass.getpass(prompt='Enter the MySQL database'
' password [{0}]: '
.format(ee_random))
except EOFError as e:
Log.debug(self, "{0}".format(e))
raise SiteError("Unable to input database credentials")
if not ee_db_username:
ee_db_username = ee_replace_dot
if not ee_db_password:
ee_db_password = ee_random
if len(ee_db_username) > 16:
Log.debug(self, 'Autofix MySQL username (ERROR 1470 (HY000)),'
' please wait')
ee_db_username = (ee_db_name[0:6] + generate_random())
# create MySQL database
Log.info(self, "Setting up database\t\t", end='')
Log.debug(self, "Creating database {0}".format(ee_db_name))
try:
if EEMysql.check_db_exists(self, ee_db_name):
Log.debug(self, "Database already exists, Updating DB_NAME .. ")
ee_db_name = (ee_db_name[0:6] + generate_random())
ee_db_username = (ee_db_name[0:6] + generate_random())
except MySQLConnectionError as e:
raise SiteError("MySQL Connectivity problem occured")
try:
EEMysql.execute(self, "create database `{0}`"
.format(ee_db_name))
except StatementExcecutionError as e:
Log.info(self, "[" + Log.ENDC + Log.FAIL + "Failed" + Log.OKBLUE + "]")
raise SiteError("create database execution failed")
# Create MySQL User
Log.debug(self, "Creating user {0}".format(ee_db_username))
Log.debug(self, "create user `{0}`@`{1}` identified by ''"
.format(ee_db_username, ee_mysql_grant_host))
try:
EEMysql.execute(self,
"create user `{0}`@`{1}` identified by '{2}'"
.format(ee_db_username, ee_mysql_grant_host,
ee_db_password), log=False)
except StatementExcecutionError as e:
Log.info(self, "[" + Log.ENDC + Log.FAIL + "Failed" + Log.OKBLUE + "]")
raise SiteError("creating user failed for database")
# Grant permission
Log.debug(self, "Setting up user privileges")
try:
EEMysql.execute(self,
"grant all privileges on `{0}`.* to `{1}`@`{2}`"
.format(ee_db_name,
ee_db_username, ee_mysql_grant_host))
except StatementExcecutionError as e:
Log.info(self, "[" + Log.ENDC + Log.FAIL + "Failed" + Log.OKBLUE + "]")
SiteError("grant privileges to user failed for database ")
Log.info(self, "[" + Log.ENDC + "Done" + Log.OKBLUE + "]")
data['ee_db_name'] = ee_db_name
data['ee_db_user'] = ee_db_username
data['ee_db_pass'] = ee_db_password
data['ee_db_host'] = EEVariables.ee_mysql_host
return(data)
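# Note: setupdatabase() returns the same `data` dict with the generated
# credentials filled in ('ee_db_name', 'ee_db_user', 'ee_db_pass',
# 'ee_db_host'); setupwordpress() below reads these keys when writing
# wp-config.php.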
def setupwordpress(self, data):
ee_domain_name = data['site_name']
ee_site_webroot = data['webroot']
prompt_wpprefix = self.app.config.get('wordpress', 'prefix')
ee_wp_user = self.app.config.get('wordpress', 'user')
ee_wp_pass = self.app.config.get('wordpress', 'password')
ee_wp_email = self.app.config.get('wordpress', 'email')
# Random characters
ee_random = (''.join(random.sample(string.ascii_uppercase +
string.ascii_lowercase + string.digits, 15)))
ee_wp_prefix = ''
# ee_wp_user = ''
# ee_wp_pass = ''
if 'wp-user' in data.keys() and data['wp-user']:
ee_wp_user = data['wp-user']
if 'wp-email' in data.keys() and data['wp-email']:
ee_wp_email = data['wp-email']
if 'wp-pass' in data.keys() and data['wp-pass']:
ee_wp_pass = data['wp-pass']
Log.info(self, "Downloading Wordpress \t\t", end='')
EEFileUtils.chdir(self, '{0}/htdocs/'.format(ee_site_webroot))
try:
if EEShellExec.cmd_exec(self, "wp --allow-root core"
" download"):
pass
else:
Log.info(self, "[" + Log.ENDC + Log.FAIL + "Fail" + Log.OKBLUE + "]")
raise SiteError("download wordpress core failed")
except CommandExecutionError as e:
Log.info(self, "[" + Log.ENDC + Log.FAIL + "Fail" + Log.OKBLUE + "]")
        raise SiteError("download wordpress core failed")
Log.info(self, "[" + Log.ENDC + "Done" + Log.OKBLUE + "]")
if not (data['ee_db_name'] and data['ee_db_user'] and data['ee_db_pass']):
data = setupdatabase(self, data)
if prompt_wpprefix == 'True' or prompt_wpprefix == 'true':
try:
ee_wp_prefix = input('Enter the WordPress table prefix [wp_]: ')
while not re.match('^[A-Za-z0-9_]*$', ee_wp_prefix):
Log.warn(self, "table prefix can only "
"contain numbers, letters, and underscores")
ee_wp_prefix = input('Enter the WordPress table prefix [wp_]: '
)
except EOFError as e:
Log.debug(self, "{0}".format(e))
raise SiteError("input table prefix failed")
if not ee_wp_prefix:
ee_wp_prefix = 'wp_'
# Modify wp-config.php & move outside the webroot
EEFileUtils.chdir(self, '{0}/htdocs/'.format(ee_site_webroot))
Log.debug(self, "Setting up wp-config file")
if not data['multisite']:
Log.debug(self, "Generating wp-config for WordPress Single site")
Log.debug(self, "bash -c \"php {0} --allow-root "
.format(EEVariables.ee_wpcli_path)
+ "core config "
+ "--dbname=\'{0}\' --dbprefix=\'{1}\' --dbuser=\'{2}\' "
"--dbhost=\'{3}\' "
.format(data['ee_db_name'], ee_wp_prefix,
data['ee_db_user'], data['ee_db_host'])
+ "--dbpass= "
"--extra-php<<PHP \n {1}\nPHP\""
.format(data['ee_db_pass'],
"\n\ndefine(\'WP_DEBUG\', false);"))
try:
if EEShellExec.cmd_exec(self, "bash -c \"php {0} --allow-root"
.format(EEVariables.ee_wpcli_path)
+ " core config "
+ "--dbname=\'{0}\' --dbprefix=\'{1}\' "
"--dbuser=\'{2}\' --dbhost=\'{3}\' "
.format(data['ee_db_name'], ee_wp_prefix,
data['ee_db_user'], data['ee_db_host']
)
+ "--dbpass=\'{0}\' "
"--extra-php<<PHP \n {1} {redissalt}\nPHP\""
.format(data['ee_db_pass'],
"\n\ndefine(\'WP_DEBUG\', false);",
redissalt="\n\ndefine( \'WP_CACHE_KEY_SALT\', \'{0}:\' );"
.format(ee_domain_name) if data['wpredis']
else ''),
log=False
):
pass
else :
raise SiteError("generate wp-config failed for wp single site")
except CommandExecutionError as e:
raise SiteError("generate wp-config failed for wp single site")
else:
Log.debug(self, "Generating wp-config for WordPress multisite")
Log.debug(self, "bash -c \"php {0} --allow-root "
.format(EEVariables.ee_wpcli_path)
+ "core config "
+ "--dbname=\'{0}\' --dbprefix=\'{1}\' --dbhost=\'{2}\' "
.format(data['ee_db_name'], ee_wp_prefix, data['ee_db_host'])
+ "--dbuser=\'{0}\' --dbpass= "
"--extra-php<<PHP \n {2} {3} {4}\nPHP\""
.format(data['ee_db_user'], data['ee_db_pass'],
"\ndefine(\'WP_ALLOW_MULTISITE\', "
"true);",
"\ndefine(\'WPMU_ACCEL_REDIRECT\',"
" true);",
"\n\ndefine(\'WP_DEBUG\', false);"))
try:
if EEShellExec.cmd_exec(self, "bash -c \"php {0} --allow-root"
.format(EEVariables.ee_wpcli_path)
+ " core config "
+ "--dbname=\'{0}\' --dbprefix=\'{1}\' "
"--dbhost=\'{2}\' "
.format(data['ee_db_name'], ee_wp_prefix,
data['ee_db_host'])
+ "--dbuser=\'{0}\' --dbpass=\'{1}\' "
"--extra-php<<PHP \n {2} {3} {4} {redissalt}\nPHP\""
.format(data['ee_db_user'],
data['ee_db_pass'],
"\ndefine(\'WP_ALLOW_MULTISITE\', "
"true);",
"\ndefine(\'WPMU_ACCEL_REDIRECT\',"
" true);",
"\n\ndefine(\'WP_DEBUG\', false);",
redissalt="\n\ndefine( \'WP_CACHE_KEY_SALT\', \'{0}:\' );"
.format(ee_domain_name) if data['wpredis']
else ''),
log=False
):
pass
else:
raise SiteError("generate wp-config failed for wp multi site")
except CommandExecutionError as e:
raise SiteError("generate wp-config failed for wp multi site")
#EEFileUtils.mvfile(self, os.getcwd()+'/wp-config.php',
# os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
try:
import shutil
Log.debug(self, "Moving file from {0} to {1}".format(os.getcwd()+'/wp-config.php',os.path.abspath(os.path.join(os.getcwd(), os.pardir))))
shutil.move(os.getcwd()+'/wp-config.php',os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
except Exception as e:
Log.error(self, 'Unable to move file from {0} to {1}'
.format(os.getcwd()+'/wp-config.php', os.path.abspath(os.path.join(os.getcwd(), os.pardir))),False)
raise SiteError("Unable to move wp-config.php")
if not ee_wp_user:
ee_wp_user = EEVariables.ee_user
while not ee_wp_user:
Log.warn(self, "Username can have only alphanumeric"
"characters, spaces, underscores, hyphens,"
"periods and the @ symbol.")
try:
ee_wp_user = input('Enter WordPress username: ')
except EOFError as e:
Log.debug(self, "{0}".format(e))
raise SiteError("input wordpress username failed")
if not ee_wp_pass:
ee_wp_pass = ee_random
if not ee_wp_email:
ee_wp_email = EEVariables.ee_email
while not ee_wp_email:
try:
ee_wp_email = input('Enter WordPress email: ')
except EOFError as e:
Log.debug(self, "{0}".format(e))
raise SiteError("input wordpress username failed")
try:
while not re.match(r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$",
ee_wp_email):
Log.info(self, "EMail not Valid in config, "
"Please provide valid email id")
ee_wp_email = input("Enter your email: ")
except EOFError as e:
Log.debug(self, "{0}".format(e))
raise SiteError("input WordPress user email failed")
Log.debug(self, "Setting up WordPress tables")
if not data['multisite']:
Log.debug(self, "Creating tables for WordPress Single site")
Log.debug(self, "php {0} --allow-root core install "
.format(EEVariables.ee_wpcli_path)
+ "--url=\'{0}\' --title=\'{0}\' --admin_name=\'{1}\' "
.format(data['www_domain'], ee_wp_user)
+ "--admin_password= --admin_email=\'{1}\'"
.format(ee_wp_pass, ee_wp_email))
try:
if EEShellExec.cmd_exec(self, "php {0} --allow-root core "
.format(EEVariables.ee_wpcli_path)
+ "install --url=\'{0}\' --title=\'{0}\' "
"--admin_name=\'{1}\' "
.format(data['www_domain'], ee_wp_user)
+ "--admin_password=\'{0}\' "
"--admin_email=\'{1}\'"
.format(ee_wp_pass, ee_wp_email),
log=False):
pass
else:
raise SiteError("setup wordpress tables failed for single site")
except CommandExecutionError as e:
raise SiteError("setup wordpress tables failed for single site")
else:
Log.debug(self, "Creating tables for WordPress multisite")
Log.debug(self, "php {0} --allow-root "
.format(EEVariables.ee_wpcli_path)
+ "core multisite-install "
"--url=\'{0}\' --title=\'{0}\' --admin_name=\'{1}\' "
.format(data['www_domain'], ee_wp_user)
+ "--admin_password= --admin_email=\'{1}\' "
"{subdomains}"
.format(ee_wp_pass, ee_wp_email,
subdomains='--subdomains'
if not data['wpsubdir'] else ''))
try:
if EEShellExec.cmd_exec(self, "php {0} --allow-root "
.format(EEVariables.ee_wpcli_path)
+ "core multisite-install "
"--url=\'{0}\' --title=\'{0}\' "
"--admin_name=\'{1}\' "
.format(data['www_domain'], ee_wp_user)
+ "--admin_password=\'{0}\' "
"--admin_email=\'{1}\' "
"{subdomains}"
.format(ee_wp_pass, ee_wp_email,
subdomains='--subdomains'
if not data['wpsubdir'] else ''),
log=False):
pass
else:
raise SiteError("setup wordpress tables failed for wp multi site")
except CommandExecutionError as e:
raise SiteError("setup wordpress tables failed for wp multi site")
Log.debug(self, "Updating WordPress permalink")
try:
EEShellExec.cmd_exec(self, " php {0} --allow-root "
.format(EEVariables.ee_wpcli_path)
+ "rewrite structure "
"/%year%/%monthnum%/%day%/%postname%/")
except CommandExecutionError as e:
raise SiteError("Update wordpress permalinks failed")
"""Install nginx-helper plugin """
installwp_plugin(self, 'nginx-helper', data)
if data['wpfc']:
plugin_data = '{"log_level":"INFO","log_filesize":5,"enable_purge":1,"enable_map":0,"enable_log":0,"enable_stamp":0,"purge_homepage_on_new":1,"purge_homepage_on_edit":1,"purge_homepage_on_del":1,"purge_archive_on_new":1,"purge_archive_on_edit":0,"purge_archive_on_del":0,"purge_archive_on_new_comment":0,"purge_archive_on_deleted_comment":0,"purge_page_on_mod":1,"purge_page_on_new_comment":1,"purge_page_on_deleted_comment":1,"cache_method":"enable_fastcgi","purge_method":"get_request","redis_hostname":"127.0.0.1","redis_port":"6379","redis_prefix":"nginx-cache:"}'
setupwp_plugin(self, 'nginx-helper', 'rt_wp_nginx_helper_options', plugin_data, data)
elif data['wpredis']:
plugin_data = '{"log_level":"INFO","log_filesize":5,"enable_purge":1,"enable_map":0,"enable_log":0,"enable_stamp":0,"purge_homepage_on_new":1,"purge_homepage_on_edit":1,"purge_homepage_on_del":1,"purge_archive_on_new":1,"purge_archive_on_edit":0,"purge_archive_on_del":0,"purge_archive_on_new_comment":0,"purge_archive_on_deleted_comment":0,"purge_page_on_mod":1,"purge_page_on_new_comment":1,"purge_page_on_deleted_comment":1,"cache_method":"enable_redis","purge_method":"get_request","redis_hostname":"127.0.0.1","redis_port":"6379","redis_prefix":"nginx-cache:"}'
setupwp_plugin(self, 'nginx-helper', 'rt_wp_nginx_helper_options', plugin_data, data)
"""Install Wp Super Cache"""
if data['wpsc']:
installwp_plugin(self, 'wp-super-cache', data)
"""Install Redis Cache"""
if data['wpredis']:
installwp_plugin(self, 'redis-cache', data)
"""Install W3 Total Cache"""
if data['w3tc'] or data['wpfc']:
installwp_plugin(self, 'w3-total-cache', data)
wp_creds = dict(wp_user=ee_wp_user, wp_pass=ee_wp_pass,
wp_email=ee_wp_email)
return(wp_creds)
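# Illustrative sketch (not part of the original source): the wp-cli "core install"
# invocation above is a single shell string built by concatenation; this helper shows
# the assembled shape. All argument values are hypothetical placeholders.
def _example_wp_core_install_cmd(wpcli_path, domain, user, password, email):
    return ("php {0} --allow-root core install "
            "--url='{1}' --title='{1}' --admin_name='{2}' "
            "--admin_password='{3}' --admin_email='{4}'"
            .format(wpcli_path, domain, user, password, email))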
def setupwordpressnetwork(self, data):
ee_site_webroot = data['webroot']
EEFileUtils.chdir(self, '{0}/htdocs/'.format(ee_site_webroot))
Log.info(self, "Setting up WordPress Network \t", end='')
try:
if EEShellExec.cmd_exec(self, 'wp --allow-root core multisite-convert'
' --title=\'{0}\' {subdomains}'
.format(data['www_domain'],
subdomains='--subdomains'
if not data['wpsubdir'] else '')):
pass
else:
Log.info(self, "[" + Log.ENDC + Log.FAIL + "Fail" + Log.OKBLUE + "]")
raise SiteError("setup wordpress network failed")
except CommandExecutionError as e:
Log.info(self, "[" + Log.ENDC + Log.FAIL + "Fail" + Log.OKBLUE + "]")
raise SiteError("setup wordpress network failed")
Log.info(self, "[" + Log.ENDC + "Done" + Log.OKBLUE + "]")
def installwp_plugin(self, plugin_name, data):
ee_site_webroot = data['webroot']
Log.info(self, "Installing plugin {0}, please wait..."
.format(plugin_name))
EEFileUtils.chdir(self, '{0}/htdocs/'.format(ee_site_webroot))
try:
EEShellExec.cmd_exec(self, "php {0} plugin "
.format(EEVariables.ee_wpcli_path)
+ "--allow-root install "
"{0}".format(plugin_name))
except CommandExecutionError as e:
raise SiteError("plugin installation failed")
try:
EEShellExec.cmd_exec(self, "php {0} plugin "
.format(EEVariables.ee_wpcli_path)
+ "--allow-root activate "
"{0} {na}"
.format(plugin_name,
na='--network' if data['multisite']
else ''
))
except CommandExecutionError as e:
raise SiteError("plugin activation failed")
return 1
def uninstallwp_plugin(self, plugin_name, data):
ee_site_webroot = data['webroot']
Log.debug(self, "Uninstalling plugin {0}, please wait..."
.format(plugin_name))
EEFileUtils.chdir(self, '{0}/htdocs/'.format(ee_site_webroot))
Log.info(self, "Uninstalling plugin {0}, please wait..."
.format(plugin_name))
try:
EEShellExec.cmd_exec(self, "php {0} plugin "
.format(EEVariables.ee_wpcli_path)
+ "--allow-root deactivate "
"{0}".format(plugin_name))
EEShellExec.cmd_exec(self, "php {0} plugin "
.format(EEVariables.ee_wpcli_path)
+ "--allow-root uninstall "
"{0}".format(plugin_name))
except CommandExecutionError as e:
raise SiteError("plugin uninstall failed")
def setupwp_plugin(self, plugin_name, plugin_option, plugin_data, data):
ee_site_webroot = data['webroot']
Log.info(self, "Setting plugin {0}, please wait..."
.format(plugin_name))
EEFileUtils.chdir(self, '{0}/htdocs/'.format(ee_site_webroot))
if not data['multisite']:
try:
EEShellExec.cmd_exec(self, "php {0} "
.format(EEVariables.ee_wpcli_path)
+ "--allow-root option update "
"{0} \'{1}\' --format=json".format(plugin_option, plugin_data))
except CommandExecutionError as e:
raise SiteError("plugin setup failed")
else:
try:
EEShellExec.cmd_exec(self, "php {0} "
.format(EEVariables.ee_wpcli_path)
+ "--allow-root network meta update 1 "
"{0} \'{1}\' --format=json"
.format(plugin_option, plugin_data
))
except CommandExecutionError as e:
raise SiteError("plugin setup failed")
def setwebrootpermissions(self, webroot):
Log.debug(self, "Setting up permissions")
try:
EEFileUtils.chown(self, webroot, EEVariables.ee_php_user,
EEVariables.ee_php_user, recursive=True)
except Exception as e:
Log.debug(self, str(e))
raise SiteError("problem occured while settingup webroot permissions")
def sitebackup(self, data):
ee_site_webroot = data['webroot']
backup_path = ee_site_webroot + '/backup/{0}'.format(EEVariables.ee_date)
if not EEFileUtils.isexist(self, backup_path):
EEFileUtils.mkdir(self, backup_path)
Log.info(self, "Backup location : {0}".format(backup_path))
EEFileUtils.copyfile(self, '/etc/nginx/sites-available/{0}'
.format(data['site_name']), backup_path)
if data['currsitetype'] in ['html', 'php', 'proxy', 'mysql']:
Log.info(self, "Backing up Webroot \t\t", end='')
EEFileUtils.mvfile(self, ee_site_webroot + '/htdocs', backup_path)
Log.info(self, "[" + Log.ENDC + "Done" + Log.OKBLUE + "]")
configfiles = glob.glob(ee_site_webroot + '/*-config.php')
# if configfiles and EEFileUtils.isexist(self, configfiles[0]):
# ee_db_name = (EEFileUtils.grep(self, configfiles[0],
# 'DB_NAME').split(',')[1]
# .split(')')[0].strip().replace('\'', ''))
if data['ee_db_name']:
Log.info(self, 'Backing up database \t\t', end='')
try:
if not EEShellExec.cmd_exec(self, "mysqldump {0} > {1}/{0}.sql"
.format(data['ee_db_name'],
backup_path)):
Log.info(self,
"[" + Log.ENDC + Log.FAIL + "Fail" + Log.OKBLUE + "]")
raise SiteError("mysqldump failed to backup database")
except CommandExecutionError as e:
Log.info(self, "[" + Log.ENDC + "Fail" + Log.OKBLUE + "]")
raise SiteError("mysqldump failed to backup database")
Log.info(self, "[" + Log.ENDC + "Done" + Log.OKBLUE + "]")
# move wp-config.php/ee-config.php to backup
if data['currsitetype'] in ['mysql', 'proxy']:
EEFileUtils.mvfile(self, configfiles[0], backup_path)
else:
EEFileUtils.copyfile(self, configfiles[0], backup_path)
def site_package_check(self, stype):
apt_packages = []
packages = []
stack = EEStackController()
stack.app = self.app
if stype in ['html', 'proxy', 'php', 'mysql', 'wp', 'wpsubdir',
'wpsubdomain']:
Log.debug(self, "Setting apt_packages variable for Nginx")
# Check if server has nginx-custom package
if not EEAptGet.is_installed(self, 'nginx-custom'):
# check if Server has nginx-plus installed
if EEAptGet.is_installed(self, 'nginx-plus'):
# do something
# do post nginx installation configuration
Log.info(self, "NGINX PLUS Detected ...")
apt = ["nginx-plus"] + EEVariables.ee_nginx
#apt_packages = apt_packages + EEVariables.ee_nginx
stack.post_pref(apt, packages)
else:
apt_packages = apt_packages + EEVariables.ee_nginx
else:
# Fix for Nginx white screen of death
if not EEFileUtils.grep(self, '/etc/nginx/fastcgi_params',
'SCRIPT_FILENAME'):
with open('/etc/nginx/fastcgi_params', encoding='utf-8',
mode='a') as ee_nginx:
ee_nginx.write('fastcgi_param \tSCRIPT_FILENAME '
'\t$request_filename;\n')
if stype in ['php', 'mysql', 'wp', 'wpsubdir', 'wpsubdomain']:
Log.debug(self, "Setting apt_packages variable for PHP")
if not EEAptGet.is_installed(self, 'php5-fpm'):
apt_packages = apt_packages + EEVariables.ee_php
if stype in ['mysql', 'wp', 'wpsubdir', 'wpsubdomain']:
Log.debug(self, "Setting apt_packages variable for MySQL")
if not EEShellExec.cmd_exec(self, "mysqladmin ping"):
apt_packages = apt_packages + EEVariables.ee_mysql
packages = packages + [["https://raw.githubusercontent.com/"
"major/MySQLTuner-perl/master/"
"mysqltuner.pl", "/usr/bin/mysqltuner",
"MySQLTuner"]]
if stype in ['php', 'mysql', 'wp', 'wpsubdir', 'wpsubdomain']:
Log.debug(self, "Setting apt_packages variable for Postfix")
if not EEAptGet.is_installed(self, 'postfix'):
apt_packages = apt_packages + EEVariables.ee_postfix
if stype in ['wp', 'wpsubdir', 'wpsubdomain']:
Log.debug(self, "Setting packages variable for WP-CLI")
if not EEShellExec.cmd_exec(self, "which wp"):
packages = packages + [["https://github.com/wp-cli/wp-cli/"
"releases/download/v{0}/"
"wp-cli-{0}.phar"
.format(EEVariables.ee_wp_cli),
"/usr/bin/wp", "WP-CLI"]]
if self.app.pargs.wpredis:
Log.debug(self, "Setting apt_packages variable for redis")
if not EEAptGet.is_installed(self, 'redis-server'):
apt_packages = apt_packages + EEVariables.ee_redis
if os.path.isfile("/etc/nginx/nginx.conf") and (not
os.path.isfile("/etc/nginx/common/redis.conf")):
data = dict()
Log.debug(self, 'Writing the nginx configuration to '
'file /etc/nginx/common/redis.conf')
ee_nginx = open('/etc/nginx/common/redis.conf',
encoding='utf-8', mode='w')
self.app.render((data), 'redis.mustache',
out=ee_nginx)
ee_nginx.close()
if os.path.isfile("/etc/nginx/nginx.conf") and (not
os.path.isfile("/etc/nginx/common/redis-hhvm.conf")):
data = dict()
Log.debug(self, 'Writing the nginx configuration to '
'file /etc/nginx/common/redis-hhvm.conf')
ee_nginx = open('/etc/nginx/common/redis-hhvm.conf',
encoding='utf-8', mode='w')
self.app.render((data), 'redis-hhvm.mustache',
out=ee_nginx)
ee_nginx.close()
if os.path.isfile("/etc/nginx/conf.d/upstream.conf"):
if not EEFileUtils.grep(self, "/etc/nginx/conf.d/"
"upstream.conf",
"redis"):
with open("/etc/nginx/conf.d/upstream.conf",
"a") as redis_file:
redis_file.write("upstream redis {\n"
" server 127.0.0.1:6379;\n"
" keepalive 10;\n}")
if os.path.isfile("/etc/nginx/nginx.conf") and (not
os.path.isfile("/etc/nginx/conf.d/redis.conf")):
with open("/etc/nginx/conf.d/redis.conf", "a") as redis_file:
redis_file.write("# Log format Settings\n"
"log_format rt_cache_redis '$remote_addr $upstream_response_time $srcache_fetch_status [$time_local] '\n"
"'$http_host \"$request\" $status $body_bytes_sent '\n"
"'\"$http_referer\" \"$http_user_agent\"';\n")
if self.app.pargs.hhvm:
if platform.architecture()[0] == '32bit':
Log.error(self, "HHVM is not supported by 32bit system")
Log.debug(self, "Setting apt_packages variable for HHVM")
if not EEAptGet.is_installed(self, 'hhvm'):
apt_packages = apt_packages + EEVariables.ee_hhvm
if os.path.isdir("/etc/nginx/common") and (not
os.path.isfile("/etc/nginx/common/php-hhvm.conf")):
data = dict()
Log.debug(self, 'Writing the nginx configuration to '
'file /etc/nginx/common/php-hhvm.conf')
ee_nginx = open('/etc/nginx/common/php-hhvm.conf',
encoding='utf-8', mode='w')
self.app.render((data), 'php-hhvm.mustache',
out=ee_nginx)
ee_nginx.close()
Log.debug(self, 'Writing the nginx configuration to '
'file /etc/nginx/common/w3tc-hhvm.conf')
ee_nginx = open('/etc/nginx/common/w3tc-hhvm.conf',
encoding='utf-8', mode='w')
self.app.render((data), 'w3tc-hhvm.mustache', out=ee_nginx)
ee_nginx.close()
Log.debug(self, 'Writing the nginx configuration to '
'file /etc/nginx/common/wpfc-hhvm.conf')
ee_nginx = open('/etc/nginx/common/wpfc-hhvm.conf',
encoding='utf-8', mode='w')
self.app.render((data), 'wpfc-hhvm.mustache',
out=ee_nginx)
ee_nginx.close()
Log.debug(self, 'Writing the nginx configuration to '
'file /etc/nginx/common/wpsc-hhvm.conf')
ee_nginx = open('/etc/nginx/common/wpsc-hhvm.conf',
encoding='utf-8', mode='w')
self.app.render((data), 'wpsc-hhvm.mustache',
out=ee_nginx)
ee_nginx.close()
if os.path.isfile("/etc/nginx/conf.d/upstream.conf"):
if not EEFileUtils.grep(self, "/etc/nginx/conf.d/upstream.conf",
"hhvm"):
with open("/etc/nginx/conf.d/upstream.conf", "a") as hhvm_file:
hhvm_file.write("upstream hhvm {\nserver 127.0.0.1:8000;\n"
"server 127.0.0.1:9000 backup;\n}\n")
# Check if Nginx is already installed and Pagespeed config there or not
# If not then copy pagespeed config
if self.app.pargs.pagespeed:
if (os.path.isfile('/etc/nginx/nginx.conf') and
(not os.path.isfile('/etc/nginx/conf.d/pagespeed.conf'))):
# Pagespeed configuration
data = dict()
Log.debug(self, 'Writing the Pagespeed Global '
'configuration to file /etc/nginx/conf.d/'
'pagespeed.conf')
ee_nginx = open('/etc/nginx/conf.d/pagespeed.conf',
encoding='utf-8', mode='w')
self.app.render((data), 'pagespeed-global.mustache',
out=ee_nginx)
ee_nginx.close()
return(stack.install(apt_packages=apt_packages, packages=packages,
disp_msg=False))
def updatewpuserpassword(self, ee_domain, ee_site_webroot):
ee_wp_user = ''
ee_wp_pass = ''
EEFileUtils.chdir(self, '{0}/htdocs/'.format(ee_site_webroot))
# Check if ee_domain is wordpress install
try:
is_wp = EEShellExec.cmd_exec(self, "wp --allow-root core"
" version")
except CommandExecutionError as e:
raise SiteError("is wordpress site? check command failed ")
# Exit if ee_domain is not wordpress install
if not is_wp:
Log.error(self, "{0} does not seem to be a WordPress site"
.format(ee_domain))
try:
ee_wp_user = input("Provide WordPress user name [admin]: ")
except Exception as e:
Log.debug(self, "{0}".format(e))
Log.error(self, "\nCould not update password")
if ee_wp_user == "?":
Log.info(self, "Fetching WordPress user list")
try:
EEShellExec.cmd_exec(self, "wp --allow-root user list "
"--fields=user_login | grep -v user_login")
except CommandExecutionError as e:
raise SiteError("fetch wp userlist command failed")
if not ee_wp_user:
ee_wp_user = 'admin'
try:
is_user_exist = EEShellExec.cmd_exec(self, "wp --allow-root user list "
"--fields=user_login | grep {0}$ "
.format(ee_wp_user))
except CommandExecutionError as e:
raise SiteError("if wp user exists check command failed")
if is_user_exist:
try:
ee_wp_pass = getpass.getpass(prompt="Provide password for "
"{0} user: "
.format(ee_wp_user))
while not ee_wp_pass:
ee_wp_pass = getpass.getpass(prompt="Provide password for "
"{0} user: "
.format(ee_wp_user))
except Exception as e:
Log.debug(self, "{0}".format(e))
raise SiteError("failed to read password input ")
try:
EEShellExec.cmd_exec(self, "wp --allow-root user update {0}"
" --user_pass={1}"
.format(ee_wp_user, ee_wp_pass))
except CommandExecutionError as e:
raise SiteError("wp user password update command failed")
Log.info(self, "Password updated successfully")
else:
Log.error(self, "Invalid WordPress user {0} for {1}."
.format(ee_wp_user, ee_domain))
def display_cache_settings(self, data):
if data['wpsc']:
if data['multisite']:
Log.info(self, "Configure WPSC:"
"\t\thttp://{0}/wp-admin/network/settings.php?"
"page=wpsupercache"
.format(data['site_name']))
else:
Log.info(self, "Configure WPSC:"
"\t\thttp://{0}/wp-admin/options-general.php?"
"page=wpsupercache"
.format(data['site_name']))
if data['wpredis']:
if data['multisite']:
Log.info(self, "Configure redis-cache:"
"\thttp://{0}/wp-admin/network/settings.php?"
"page=redis-cache".format(data['site_name']))
else:
Log.info(self, "Configure redis-cache:"
"\thttp://{0}/wp-admin/options-general.php?"
"page=redis-cache".format(data['site_name']))
Log.info(self, "Object Cache:\t\tEnable")
if data['wpfc'] or data['w3tc']:
if data['multisite']:
Log.info(self, "Configure W3TC:"
"\t\thttp://{0}/wp-admin/network/admin.php?"
"page=w3tc_general".format(data['site_name']))
else:
Log.info(self, "Configure W3TC:"
"\t\thttp://{0}/wp-admin/admin.php?"
"page=w3tc_general".format(data['site_name']))
if data['wpfc']:
Log.info(self, "Page Cache:\t\tDisable")
elif data['w3tc']:
Log.info(self, "Page Cache:\t\tDisk Enhanced")
Log.info(self, "Database Cache:\t\tMemcached")
Log.info(self, "Object Cache:\t\tMemcached")
Log.info(self, "Browser Cache:\t\tDisable")
def logwatch(self, logfiles):
import zlib
import base64
import time
from ee.core import logwatch
def callback(filename, lines):
for line in lines:
if line.find(':::') == -1:
print(line)
else:
data = line.split(':::')
try:
print(data[0], data[1],
zlib.decompress(base64.decodestring(data[2])))
except Exception as e:
Log.info(time.time(),
'caught exception rendering a new log line in %s'
% filename)
l = logwatch.LogWatcher(logfiles, callback)
l.loop()
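# Illustrative sketch (not part of the original source): builds a ':::'-separated log
# line of the shape the callback above expects (third field is base64-encoded zlib
# data). The field values are hypothetical.
def _example_logwatch_line():
    import base64
    import zlib
    payload = base64.b64encode(zlib.compress(b"GET / HTTP/1.1 200"))
    return ":::".join(["access.log", "example.com", payload.decode("ascii")])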
def detSitePar(opts):
"""
Takes dictionary of parsed arguments
1.returns sitetype and cachetype
2. raises RuntimeError when wrong combination is used like
"--wp --wpsubdir" or "--html --wp"
"""
sitetype, cachetype = '', ''
typelist = list()
cachelist = list()
for key, val in opts.items():
if val and key in ['html', 'php', 'mysql', 'wp',
'wpsubdir', 'wpsubdomain']:
typelist.append(key)
elif val and key in ['wpfc', 'wpsc', 'w3tc', 'wpredis']:
cachelist.append(key)
if len(typelist) > 1 or len(cachelist) > 1:
if len(cachelist) > 1:
raise RuntimeError("Could not determine cache type.Multiple cache parameter entered")
elif False not in [x in ('php','mysql','html') for x in typelist]:
sitetype = 'mysql'
if not cachelist:
cachetype = 'basic'
else:
cachetype = cachelist[0]
elif False not in [x in ('php','mysql') for x in typelist]:
sitetype = 'mysql'
if not cachelist:
cachetype = 'basic'
else:
cachetype = cachelist[0]
elif False not in [x in ('html','mysql') for x in typelist]:
sitetype = 'mysql'
if not cachelist:
cachetype = 'basic'
else:
cachetype = cachelist[0]
elif False not in [x in ('php','html') for x in typelist]:
sitetype = 'php'
if not cachelist:
cachetype = 'basic'
else:
cachetype = cachelist[0]
elif False not in [x in ('wp','wpsubdir') for x in typelist]:
sitetype = 'wpsubdir'
if not cachelist:
cachetype = 'basic'
else:
cachetype = cachelist[0]
elif False not in [x in ('wp','wpsubdomain') for x in typelist]:
sitetype = 'wpsubdomain'
if not cachelist:
cachetype = 'basic'
else:
cachetype = cachelist[0]
else:
raise RuntimeError("could not determine site and cache type")
else:
if not typelist and not cachelist:
sitetype = None
cachetype = None
elif (not typelist) and cachelist:
sitetype = 'wp'
cachetype = cachelist[0]
elif typelist and (not cachelist):
sitetype = typelist[0]
cachetype = 'basic'
else:
sitetype = typelist[0]
cachetype = cachelist[0]
return (sitetype, cachetype)
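# Illustrative usage sketch (not part of the original source): what detSitePar()
# returns for two hypothetical parsed-argument dictionaries.
def _example_detSitePar():
    opts = {'html': False, 'php': False, 'mysql': False, 'wp': True,
            'wpsubdir': False, 'wpsubdomain': False,
            'wpfc': True, 'wpsc': False, 'w3tc': False, 'wpredis': False}
    assert detSitePar(opts) == ('wp', 'wpfc')
    # "--php --mysql" collapses to a mysql site with the basic cache
    opts.update(wp=False, wpfc=False, php=True, mysql=True)
    assert detSitePar(opts) == ('mysql', 'basic')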
def generate_random():
ee_random10 = (''.join(random.sample(string.ascii_uppercase +
string.ascii_lowercase + string.digits, 10)))
return ee_random10
def deleteDB(self, dbname, dbuser, dbhost, exit=True):
try:
# Check if Database exists
try:
if EEMysql.check_db_exists(self, dbname):
# Drop database if exists
Log.debug(self, "dropping database `{0}`".format(dbname))
EEMysql.execute(self,
"drop database `{0}`".format(dbname),
errormsg='Unable to drop database {0}'
.format(dbname))
except StatementExcecutionError as e:
Log.debug(self, "drop database failed")
Log.info(self, "Database {0} not dropped".format(dbname))
except MySQLConnectionError as e:
Log.debug(self, "Mysql Connection problem occured")
if dbuser != 'root':
Log.debug(self, "dropping user `{0}`".format(dbuser))
try:
EEMysql.execute(self,
"drop user `{0}`@`{1}`"
.format(dbuser, dbhost))
except StatementExcecutionError as e:
Log.debug(self, "drop database user failed")
Log.info(self, "Database {0} not dropped".format(dbuser))
try:
EEMysql.execute(self, "flush privileges")
except StatementExcecutionError as e:
Log.debug(self, "drop database failed")
Log.info(self, "Database {0} not dropped".format(dbname))
except Exception as e:
Log.error(self, "Error occured while deleting database", exit)
def deleteWebRoot(self, webroot):
# do some preprocessing before proceeding
webroot = webroot.strip()
if (webroot == "/var/www/" or webroot == "/var/www"
or webroot == "/var/www/.." or webroot == "/var/www/."):
Log.debug(self, "Tried to remove {0}, but didn't remove it"
.format(webroot))
return False
if os.path.isdir(webroot):
Log.debug(self, "Removing {0}".format(webroot))
EEFileUtils.rm(self, webroot)
return True
else:
Log.debug(self, "{0} does not exist".format(webroot))
return False
def removeNginxConf(self, domain):
if os.path.isfile('/etc/nginx/sites-available/{0}'
.format(domain)):
Log.debug(self, "Removing Nginx configuration")
EEFileUtils.rm(self, '/etc/nginx/sites-enabled/{0}'
.format(domain))
EEFileUtils.rm(self, '/etc/nginx/sites-available/{0}'
.format(domain))
EEService.reload_service(self, 'nginx')
EEGit.add(self, ["/etc/nginx"],
msg="Deleted {0} "
.format(domain))
def doCleanupAction(self, domain='', webroot='', dbname='', dbuser='',
dbhost=''):
"""
Removes the nginx configuration and database for the domain provided.
doCleanupAction(self, domain='sitename', webroot='',
dbname='', dbuser='', dbhost='')
"""
if domain:
if os.path.isfile('/etc/nginx/sites-available/{0}'
.format(domain)):
removeNginxConf(self, domain)
if webroot:
deleteWebRoot(self, webroot)
if dbname:
if not dbuser:
raise SiteError("dbuser not provided")
if not dbhost:
raise SiteError("dbhost not provided")
deleteDB(self, dbname, dbuser, dbhost)
def operateOnPagespeed(self, data):
ee_domain_name = data['site_name']
ee_site_webroot = data['webroot']
if data['pagespeed'] is True:
if not os.path.isfile("{0}/conf/nginx/pagespeed.conf.disabled"
.format(ee_site_webroot)):
Log.debug(self, 'Writing the Pagespeed common '
'configuration to file {0}/conf/nginx/'
'pagespeed.conf'.format(ee_site_webroot))
ee_nginx = open('{0}/conf/nginx/pagespeed.conf'
.format(ee_site_webroot), encoding='utf-8',
mode='w')
self.app.render((data), 'pagespeed-common.mustache',
out=ee_nginx)
ee_nginx.close()
else:
EEFileUtils.mvfile(self, "{0}/conf/nginx/pagespeed.conf.disabled"
.format(ee_site_webroot),
'{0}/conf/nginx/pagespeed.conf'
.format(ee_site_webroot))
elif data['pagespeed'] is False:
if os.path.isfile("{0}/conf/nginx/pagespeed.conf"
.format(ee_site_webroot)):
EEFileUtils.mvfile(self, "{0}/conf/nginx/pagespeed.conf"
.format(ee_site_webroot),
'{0}/conf/nginx/pagespeed.conf.disabled'
.format(ee_site_webroot))
# Add nginx conf folder into GIT
EEGit.add(self, ["{0}/conf/nginx".format(ee_site_webroot)],
msg="Adding Pagespeed config of site: {0}"
.format(ee_domain_name))
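# Illustrative sketch (not part of the original source): operateOnPagespeed() above
# toggles caching by renaming pagespeed.conf <-> pagespeed.conf.disabled under the
# site webroot (a fresh pagespeed.conf is only rendered when no disabled copy exists).
# The webroot value is hypothetical.
def _example_pagespeed_rename(webroot, enable):
    active = "{0}/conf/nginx/pagespeed.conf".format(webroot)
    disabled = active + ".disabled"
    return (disabled, active) if enable else (active, disabled)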
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from typing import cast
from pyspark.rdd import PythonEvalType
from pyspark.sql import Row
from pyspark.sql.functions import (
array,
explode,
col,
lit,
mean,
sum,
udf,
pandas_udf,
PandasUDFType,
)
from pyspark.sql.types import ArrayType, TimestampType
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import (
ReusedSQLTestCase,
have_pandas,
have_pyarrow,
pandas_requirement_message,
pyarrow_requirement_message,
)
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.testing import assert_frame_equal
@unittest.skipIf(
not have_pandas or not have_pyarrow,
cast(str, pandas_requirement_message or pyarrow_requirement_message),
)
class GroupedAggPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
return (
self.spark.range(10)
.toDF("id")
.withColumn("vs", array([lit(i * 1.0) + col("id") for i in range(20, 30)]))
.withColumn("v", explode(col("vs")))
.drop("vs")
.withColumn("w", lit(1.0))
)
@property
def python_plus_one(self):
@udf("double")
def plus_one(v):
assert isinstance(v, (int, float))
return float(v + 1)
return plus_one
@property
def pandas_scalar_plus_two(self):
@pandas_udf("double", PandasUDFType.SCALAR)
def plus_two(v):
assert isinstance(v, pd.Series)
return v + 2
return plus_two
@property
def pandas_agg_mean_udf(self):
@pandas_udf("double", PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_sum_udf(self):
@pandas_udf("double", PandasUDFType.GROUPED_AGG)
def sum(v):
return v.sum()
return sum
@property
def pandas_agg_weighted_mean_udf(self):
import numpy as np
@pandas_udf("double", PandasUDFType.GROUPED_AGG)
def weighted_mean(v, w):
return np.average(v, weights=w)
return weighted_mean
def test_manual(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
mean_udf = self.pandas_agg_mean_udf
mean_arr_udf = pandas_udf(
self.pandas_agg_mean_udf.func,
ArrayType(self.pandas_agg_mean_udf.returnType),
self.pandas_agg_mean_udf.evalType,
)
result1 = (
df.groupby("id")
.agg(sum_udf(df.v), mean_udf(df.v), mean_arr_udf(array(df.v)))
.sort("id")
)
expected1 = self.spark.createDataFrame(
[
[0, 245.0, 24.5, [24.5]],
[1, 255.0, 25.5, [25.5]],
[2, 265.0, 26.5, [26.5]],
[3, 275.0, 27.5, [27.5]],
[4, 285.0, 28.5, [28.5]],
[5, 295.0, 29.5, [29.5]],
[6, 305.0, 30.5, [30.5]],
[7, 315.0, 31.5, [31.5]],
[8, 325.0, 32.5, [32.5]],
[9, 335.0, 33.5, [33.5]],
],
["id", "sum(v)", "avg(v)", "avg(array(v))"],
)
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_basic(self):
df = self.data
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
# Groupby one column and aggregate one UDF with literal
result1 = df.groupby("id").agg(weighted_mean_udf(df.v, lit(1.0))).sort("id")
expected1 = df.groupby("id").agg(mean(df.v).alias("weighted_mean(v, 1.0)")).sort("id")
assert_frame_equal(expected1.toPandas(), result1.toPandas())
# Groupby one expression and aggregate one UDF with literal
result2 = df.groupby((col("id") + 1)).agg(weighted_mean_udf(df.v, lit(1.0))).sort(df.id + 1)
expected2 = (
df.groupby((col("id") + 1))
.agg(mean(df.v).alias("weighted_mean(v, 1.0)"))
.sort(df.id + 1)
)
assert_frame_equal(expected2.toPandas(), result2.toPandas())
# Groupby one column and aggregate one UDF without literal
result3 = df.groupby("id").agg(weighted_mean_udf(df.v, df.w)).sort("id")
expected3 = df.groupby("id").agg(mean(df.v).alias("weighted_mean(v, w)")).sort("id")
assert_frame_equal(expected3.toPandas(), result3.toPandas())
# Groupby one expression and aggregate one UDF without literal
result4 = (
df.groupby((col("id") + 1).alias("id")).agg(weighted_mean_udf(df.v, df.w)).sort("id")
)
expected4 = (
df.groupby((col("id") + 1).alias("id"))
.agg(mean(df.v).alias("weighted_mean(v, w)"))
.sort("id")
)
assert_frame_equal(expected4.toPandas(), result4.toPandas())
def test_unsupported_types(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(NotImplementedError, "not supported"):
pandas_udf(
lambda x: x, ArrayType(ArrayType(TimestampType())), PandasUDFType.GROUPED_AGG
)
with QuietTest(self.sc):
with self.assertRaisesRegex(NotImplementedError, "not supported"):
@pandas_udf("mean double, std double", PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return v.mean(), v.std()
with QuietTest(self.sc):
with self.assertRaisesRegex(NotImplementedError, "not supported"):
@pandas_udf(ArrayType(TimestampType()), PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v): # noqa: F811
return {v.mean(): v.std()}
def test_alias(self):
df = self.data
mean_udf = self.pandas_agg_mean_udf
result1 = df.groupby("id").agg(mean_udf(df.v).alias("mean_alias"))
expected1 = df.groupby("id").agg(mean(df.v).alias("mean_alias"))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
"""
Test mixing group aggregate pandas UDF with sql expression.
"""
df = self.data
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF with sql expression
result1 = df.groupby("id").agg(sum_udf(df.v) + 1).sort("id")
expected1 = df.groupby("id").agg(sum(df.v) + 1).sort("id")
# Mix group aggregate pandas UDF with sql expression (order swapped)
result2 = df.groupby("id").agg(sum_udf(df.v + 1)).sort("id")
expected2 = df.groupby("id").agg(sum(df.v + 1)).sort("id")
# Wrap group aggregate pandas UDF with two sql expressions
result3 = df.groupby("id").agg(sum_udf(df.v + 1) + 2).sort("id")
expected3 = df.groupby("id").agg(sum(df.v + 1) + 2).sort("id")
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
def test_mixed_udfs(self):
"""
Test mixing group aggregate pandas UDF with python UDF and scalar pandas UDF.
"""
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF and python UDF
result1 = df.groupby("id").agg(plus_one(sum_udf(df.v))).sort("id")
expected1 = df.groupby("id").agg(plus_one(sum(df.v))).sort("id")
# Mix group aggregate pandas UDF and python UDF (order swapped)
result2 = df.groupby("id").agg(sum_udf(plus_one(df.v))).sort("id")
expected2 = df.groupby("id").agg(sum(plus_one(df.v))).sort("id")
# Mix group aggregate pandas UDF and scalar pandas UDF
result3 = df.groupby("id").agg(sum_udf(plus_two(df.v))).sort("id")
expected3 = df.groupby("id").agg(sum(plus_two(df.v))).sort("id")
# Mix group aggregate pandas UDF and scalar pandas UDF (order swapped)
result4 = df.groupby("id").agg(plus_two(sum_udf(df.v))).sort("id")
expected4 = df.groupby("id").agg(plus_two(sum(df.v))).sort("id")
# Wrap group aggregate pandas UDF with two python UDFs and use python UDF in groupby
result5 = (
df.groupby(plus_one(df.id)).agg(plus_one(sum_udf(plus_one(df.v)))).sort("plus_one(id)")
)
expected5 = (
df.groupby(plus_one(df.id)).agg(plus_one(sum(plus_one(df.v)))).sort("plus_one(id)")
)
# Wrap group aggregate pandas UDF with two scalar pandas UDFs and use scalar pandas
# UDF in groupby
result6 = (
df.groupby(plus_two(df.id)).agg(plus_two(sum_udf(plus_two(df.v)))).sort("plus_two(id)")
)
expected6 = (
df.groupby(plus_two(df.id)).agg(plus_two(sum(plus_two(df.v)))).sort("plus_two(id)")
)
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
assert_frame_equal(expected5.toPandas(), result5.toPandas())
assert_frame_equal(expected6.toPandas(), result6.toPandas())
def test_multiple_udfs(self):
"""
Test multiple group aggregate pandas UDFs in one agg function.
"""
df = self.data
mean_udf = self.pandas_agg_mean_udf
sum_udf = self.pandas_agg_sum_udf
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
result1 = (
df.groupBy("id")
.agg(mean_udf(df.v), sum_udf(df.v), weighted_mean_udf(df.v, df.w))
.sort("id")
.toPandas()
)
expected1 = (
df.groupBy("id")
.agg(mean(df.v), sum(df.v), mean(df.v).alias("weighted_mean(v, w)"))
.sort("id")
.toPandas()
)
assert_frame_equal(expected1, result1)
def test_complex_groupby(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
# groupby one expression
result1 = df.groupby(df.v % 2).agg(sum_udf(df.v))
expected1 = df.groupby(df.v % 2).agg(sum(df.v))
# empty groupby
result2 = df.groupby().agg(sum_udf(df.v))
expected2 = df.groupby().agg(sum(df.v))
# groupby one column and one sql expression
result3 = df.groupby(df.id, df.v % 2).agg(sum_udf(df.v)).orderBy(df.id, df.v % 2)
expected3 = df.groupby(df.id, df.v % 2).agg(sum(df.v)).orderBy(df.id, df.v % 2)
# groupby one python UDF
result4 = df.groupby(plus_one(df.id)).agg(sum_udf(df.v)).sort("plus_one(id)")
expected4 = df.groupby(plus_one(df.id)).agg(sum(df.v)).sort("plus_one(id)")
# groupby one scalar pandas UDF
result5 = df.groupby(plus_two(df.id)).agg(sum_udf(df.v)).sort("sum(v)")
expected5 = df.groupby(plus_two(df.id)).agg(sum(df.v)).sort("sum(v)")
# groupby one expression and one python UDF
result6 = (
df.groupby(df.v % 2, plus_one(df.id))
.agg(sum_udf(df.v))
.sort(["(v % 2)", "plus_one(id)"])
)
expected6 = (
df.groupby(df.v % 2, plus_one(df.id)).agg(sum(df.v)).sort(["(v % 2)", "plus_one(id)"])
)
# groupby one expression and one scalar pandas UDF
result7 = (
df.groupby(df.v % 2, plus_two(df.id))
.agg(sum_udf(df.v))
.sort(["sum(v)", "plus_two(id)"])
)
expected7 = (
df.groupby(df.v % 2, plus_two(df.id)).agg(sum(df.v)).sort(["sum(v)", "plus_two(id)"])
)
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
assert_frame_equal(expected5.toPandas(), result5.toPandas())
assert_frame_equal(expected6.toPandas(), result6.toPandas())
assert_frame_equal(expected7.toPandas(), result7.toPandas())
def test_complex_expressions(self):
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Test complex expressions with sql expression, python UDF and
# group aggregate pandas UDF
result1 = (
df.withColumn("v1", plus_one(df.v))
.withColumn("v2", df.v + 2)
.groupby(df.id, df.v % 2)
.agg(
sum_udf(col("v")),
sum_udf(col("v1") + 3),
sum_udf(col("v2")) + 5,
plus_one(sum_udf(col("v1"))),
sum_udf(plus_one(col("v2"))),
)
.sort(["id", "(v % 2)"])
.toPandas()
.sort_values(by=["id", "(v % 2)"])
)
expected1 = (
df.withColumn("v1", df.v + 1)
.withColumn("v2", df.v + 2)
.groupby(df.id, df.v % 2)
.agg(
sum(col("v")),
sum(col("v1") + 3),
sum(col("v2")) + 5,
plus_one(sum(col("v1"))),
sum(plus_one(col("v2"))),
)
.sort(["id", "(v % 2)"])
.toPandas()
.sort_values(by=["id", "(v % 2)"])
)
# Test complex expressions with sql expression, scalar pandas UDF and
# group aggregate pandas UDF
result2 = (
df.withColumn("v1", plus_one(df.v))
.withColumn("v2", df.v + 2)
.groupby(df.id, df.v % 2)
.agg(
sum_udf(col("v")),
sum_udf(col("v1") + 3),
sum_udf(col("v2")) + 5,
plus_two(sum_udf(col("v1"))),
sum_udf(plus_two(col("v2"))),
)
.sort(["id", "(v % 2)"])
.toPandas()
.sort_values(by=["id", "(v % 2)"])
)
expected2 = (
df.withColumn("v1", df.v + 1)
.withColumn("v2", df.v + 2)
.groupby(df.id, df.v % 2)
.agg(
sum(col("v")),
sum(col("v1") + 3),
sum(col("v2")) + 5,
plus_two(sum(col("v1"))),
sum(plus_two(col("v2"))),
)
.sort(["id", "(v % 2)"])
.toPandas()
.sort_values(by=["id", "(v % 2)"])
)
# Test sequential groupby aggregate
result3 = (
df.groupby("id")
.agg(sum_udf(df.v).alias("v"))
.groupby("id")
.agg(sum_udf(col("v")))
.sort("id")
.toPandas()
)
expected3 = (
df.groupby("id")
.agg(sum(df.v).alias("v"))
.groupby("id")
.agg(sum(col("v")))
.sort("id")
.toPandas()
)
assert_frame_equal(expected1, result1)
assert_frame_equal(expected2, result2)
assert_frame_equal(expected3, result3)
def test_retain_group_columns(self):
with self.sql_conf({"spark.sql.retainGroupColumns": False}):
df = self.data
sum_udf = self.pandas_agg_sum_udf
result1 = df.groupby(df.id).agg(sum_udf(df.v))
expected1 = df.groupby(df.id).agg(sum(df.v))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_array_type(self):
df = self.data
array_udf = pandas_udf(lambda x: [1.0, 2.0], "array<double>", PandasUDFType.GROUPED_AGG)
result1 = df.groupby("id").agg(array_udf(df["v"]).alias("v2"))
self.assertEqual(result1.first()["v2"], [1.0, 2.0])
def test_invalid_args(self):
df = self.data
plus_one = self.python_plus_one
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegex(AnalysisException, "nor.*aggregate function"):
df.groupby(df.id).agg(plus_one(df.v)).collect()
with QuietTest(self.sc):
with self.assertRaisesRegex(
AnalysisException, "aggregate function.*argument.*aggregate function"
):
df.groupby(df.id).agg(mean_udf(mean_udf(df.v))).collect()
with QuietTest(self.sc):
with self.assertRaisesRegex(
AnalysisException, "mixture.*aggregate function.*group aggregate pandas UDF"
):
df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect()
def test_register_vectorized_udf_basic(self):
sum_pandas_udf = pandas_udf(
lambda v: v.sum(), "integer", PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
)
self.assertEqual(sum_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
group_agg_pandas_udf = self.spark.udf.register("sum_pandas_udf", sum_pandas_udf)
self.assertEqual(group_agg_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
q = "SELECT sum_pandas_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
actual = sorted(map(lambda r: r[0], self.spark.sql(q).collect()))
expected = [1, 5]
self.assertEqual(actual, expected)
def test_grouped_with_empty_partition(self):
data = [Row(id=1, x=2), Row(id=1, x=3), Row(id=2, x=4)]
expected = [Row(id=1, sum=5), Row(id=2, sum=4)]
num_parts = len(data) + 1
df = self.spark.createDataFrame(self.sc.parallelize(data, numSlices=num_parts))
f = pandas_udf(lambda x: x.sum(), "int", PandasUDFType.GROUPED_AGG)
result = df.groupBy("id").agg(f(df["x"]).alias("sum")).collect()
self.assertEqual(result, expected)
def test_grouped_without_group_by_clause(self):
@pandas_udf("double", PandasUDFType.GROUPED_AGG)
def max_udf(v):
return v.max()
df = self.spark.range(0, 100)
self.spark.udf.register("max_udf", max_udf)
with self.tempView("table"):
df.createTempView("table")
agg1 = df.agg(max_udf(df["id"]))
agg2 = self.spark.sql("select max_udf(id) from table")
assert_frame_equal(agg1.toPandas(), agg2.toPandas())
def test_no_predicate_pushdown_through(self):
# SPARK-30921: We should not pushdown predicates of PythonUDFs through Aggregate.
import numpy as np
@pandas_udf("float", PandasUDFType.GROUPED_AGG)
def mean(x):
return np.mean(x)
df = self.spark.createDataFrame([Row(id=1, foo=42), Row(id=2, foo=1), Row(id=2, foo=2)])
agg = df.groupBy("id").agg(mean("foo").alias("mean"))
filtered = agg.filter(agg["mean"] > 40.0)
assert filtered.collect()[0]["mean"] == 42.0
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_grouped_agg import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = ""
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "runipy-"
cfg.versionfile_source = "runipy/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
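# Illustrative usage sketch (not part of versioneer): run_command() returns the
# stripped stdout of the first command in the list that can be spawned, or None when
# nothing could be run.
def _example_run_command():
    out = run_command(["git"], ["--version"], verbose=False)
    return out  # e.g. "git version 2.x.y", or None if git is unavailable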
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
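# Illustrative sketch (not part of versioneer): expected PEP 440 renderings for two
# hypothetical "pieces" dicts of the shape produced by git_pieces_from_vcs().
def _example_render_pep440():
    clean = {"closest-tag": "1.2", "distance": 0, "dirty": False, "short": "abc1234"}
    assert render_pep440(clean) == "1.2"
    ahead_dirty = {"closest-tag": "1.2", "distance": 3, "dirty": True, "short": "abc1234"}
    assert render_pep440(ahead_dirty) == "1.2+3.gabc1234.dirty"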
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always --long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
|
|
# config.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
"""Module containing module parser implementation able to properly read and write
configuration files"""
import re
try:
import ConfigParser as cp
except ImportError:
# PY3
import configparser as cp
import inspect
import logging
import abc
import os
from git.odict import OrderedDict
from git.util import LockFile
from git.compat import (
string_types,
FileType,
defenc,
force_text,
with_metaclass,
PY3
)
__all__ = ('GitConfigParser', 'SectionConstraint')
log = logging.getLogger('git.config')
log.addHandler(logging.NullHandler())
class MetaParserBuilder(abc.ABCMeta):
"""Utlity class wrapping base-class methods into decorators that assure read-only properties"""
def __new__(metacls, name, bases, clsdict):
"""
Equip all base-class methods with a needs_values decorator, and all non-const methods
with a set_dirty_and_flush_changes decorator in addition to that."""
kmm = '_mutating_methods_'
if kmm in clsdict:
mutating_methods = clsdict[kmm]
for base in bases:
methods = (t for t in inspect.getmembers(base, inspect.isroutine) if not t[0].startswith("_"))
for name, method in methods:
if name in clsdict:
continue
method_with_values = needs_values(method)
if name in mutating_methods:
method_with_values = set_dirty_and_flush_changes(method_with_values)
# END mutating methods handling
clsdict[name] = method_with_values
# END for each name/method pair
# END for each base
# END if mutating methods configuration is set
new_type = super(MetaParserBuilder, metacls).__new__(metacls, name, bases, clsdict)
return new_type
def needs_values(func):
"""Returns method assuring we read values (on demand) before we try to access them"""
def assure_data_present(self, *args, **kwargs):
self.read()
return func(self, *args, **kwargs)
# END wrapper method
assure_data_present.__name__ = func.__name__
return assure_data_present
def set_dirty_and_flush_changes(non_const_func):
"""Return method that checks whether given non constant function may be called.
If so, the instance will be set dirty.
Additionally, we flush the changes right to disk"""
def flush_changes(self, *args, **kwargs):
rval = non_const_func(self, *args, **kwargs)
self._dirty = True
self.write()
return rval
# END wrapper method
flush_changes.__name__ = non_const_func.__name__
return flush_changes
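# Illustrative sketch (not part of GitPython): demonstrates, on a dummy stand-in
# class, how needs_values() and set_dirty_and_flush_changes() stack around an
# inherited mutating method (read on demand, then mark dirty and write back).
def _example_decorator_stacking():
    class _DummyParser(object):
        def __init__(self):
            self._dirty = False
            self.reads = 0
            self.writes = 0

        def read(self):
            self.reads += 1

        def write(self):
            self.writes += 1

        def set(self, section, option, value):
            return (section, option, value)

    wrapped = set_dirty_and_flush_changes(needs_values(_DummyParser.set))
    parser = _DummyParser()
    wrapped(parser, "core", "bare", "false")
    assert parser.reads == 1 and parser.writes == 1 and parser._dirty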
class SectionConstraint(object):
"""Constrains a ConfigParser to only option commands which are constrained to
always use the section we have been initialized with.
It supports all ConfigParser methods that operate on an option"""
__slots__ = ("_config", "_section_name")
_valid_attrs_ = ("get_value", "set_value", "get", "set", "getint", "getfloat", "getboolean", "has_option",
"remove_section", "remove_option", "options")
def __init__(self, config, section):
self._config = config
self._section_name = section
def __del__(self):
# Yes, for some reason, we have to call it explicitly for it to work in PY3 !
# Apparently __del__ doesn't get called anymore if refcount becomes 0
# Ridiculous ... .
self._config.release()
def __getattr__(self, attr):
if attr in self._valid_attrs_:
return lambda *args, **kwargs: self._call_config(attr, *args, **kwargs)
return super(SectionConstraint, self).__getattribute__(attr)
def _call_config(self, method, *args, **kwargs):
"""Call the configuration at the given method which must take a section name
as first argument"""
return getattr(self._config, method)(self._section_name, *args, **kwargs)
@property
def config(self):
"""return: Configparser instance we constrain"""
return self._config
def release(self):
"""Equivalent to GitConfigParser.release(), which is called on our underlying parser instance"""
return self._config.release()
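# Illustrative usage sketch (not part of GitPython); the path and section below are
# hypothetical. SectionConstraint pre-binds the section name, so the whitelisted
# calls above forward to the wrapped parser:
#
#   cfg = GitConfigParser("/path/to/.gitconfig", read_only=True)
#   user = SectionConstraint(cfg, 'user')
#   user.get('name')    # equivalent to cfg.get('user', 'name')
#   user.release()      # release explicitly (PY3 does not guarantee __del__)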
class GitConfigParser(with_metaclass(MetaParserBuilder, cp.RawConfigParser, object)):
"""Implements specifics required to read git style configuration files.
This variation behaves much like the git.config command such that the configuration
will be read on demand based on the filepath given during initialization.
The changes will automatically be written once the instance goes out of scope, but
can be triggered manually as well.
The configuration file will be locked if you intend to change values preventing other
instances to write concurrently.
:note:
The config is case-sensitive even when queried, hence section and option names
must match perfectly."""
#{ Configuration
# The lock type determines the type of lock to use in new configuration readers.
# They must be compatible to the LockFile interface.
# A suitable alternative would be the BlockingLockFile
t_lock = LockFile
re_comment = re.compile(r'^\s*[#;]')
#} END configuration
OPTCRE = re.compile(
r'\s*(?P<option>[^:=\s][^:=]*)' # very permissive, including leading whitespace
r'\s*(?P<vi>[:=])\s*' # any number of space/tab,
# followed by separator
# (either : or =), followed
# by any # space/tab
r'(?P<value>.*)$' # everything up to eol
)
# list of RawConfigParser methods able to change the instance
_mutating_methods_ = ("add_section", "remove_section", "remove_option", "set")
def __init__(self, file_or_files, read_only=True, merge_includes=True):
"""Initialize a configuration reader to read the given file_or_files and to
possibly allow changes to it by setting read_only False
:param file_or_files:
A single file path or file objects or multiple of these
:param read_only:
If True, the ConfigParser may only read the data, but not change it.
If False, only a single file path or file object may be given. We will write back the changes
when they happen, or when the ConfigParser is released. This will not happen if other
configuration files have been included
:param merge_includes: if True, we will read files mentioned in [include] sections and merge their
contents into ours. This makes it impossible to write back an individual configuration file.
            Thus, if you want to modify a single configuration file, turn this off to leave the original
dataset unaltered when reading it."""
cp.RawConfigParser.__init__(self, dict_type=OrderedDict)
# Used in python 3, needs to stay in sync with sections for underlying implementation to work
if not hasattr(self, '_proxies'):
self._proxies = self._dict()
self._file_or_files = file_or_files
self._read_only = read_only
self._dirty = False
self._is_initialized = False
self._merge_includes = merge_includes
self._lock = None
if not read_only:
if isinstance(file_or_files, (tuple, list)):
raise ValueError(
"Write-ConfigParsers can operate on a single file only, multiple files have been passed")
# END single file check
if not isinstance(file_or_files, string_types):
file_or_files = file_or_files.name
# END get filename from handle/stream
# initialize lock base - we want to write
self._lock = self.t_lock(file_or_files)
self._lock._obtain_lock()
# END read-only check
def __del__(self):
"""Write pending changes if required and release locks"""
# NOTE: only consistent in PY2
self.release()
def release(self):
"""Flush changes and release the configuration write lock. This instance must not be used anymore afterwards.
In Python 3, it's required to explicitly release locks and flush changes, as __del__ is not called
deterministically anymore."""
# checking for the lock here makes sure we do not raise during write()
        # in case an invalid parser was created that could not get a lock
if self.read_only or (self._lock and not self._lock._has_lock()):
return
try:
try:
self.write()
except IOError:
log.error("Exception during destruction of GitConfigParser", exc_info=True)
except ReferenceError:
# This happens in PY3 ... and usually means that some state cannot be written
# as the sections dict cannot be iterated
                # Usually when shutting down the interpreter, don't know how to fix this
pass
finally:
self._lock._release_lock()
def optionxform(self, optionstr):
"""Do not transform options in any way when writing"""
return optionstr
def _read(self, fp, fpname):
"""A direct copy of the py2.4 version of the super class's _read method
to assure it uses ordered dicts. Had to change one line to make it work.
        Future versions have this fixed, but in fact it's quite embarrassing for the
        guys not to have done it right in the first place !
Removed big comments to make it more compact.
Made sure it ignores initial whitespace as git uses tabs"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
is_multi_line = False
e = None # None, or an exception
def string_decode(v):
if v[-1] == '\\':
v = v[:-1]
# end cut trailing escapes to prevent decode error
if PY3:
return v.encode(defenc).decode('unicode_escape')
else:
return v.decode('string_escape')
# end
# end
while True:
# we assume to read binary !
line = fp.readline().decode(defenc)
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or self.re_comment.match(line):
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# is it a section header?
mo = self.SECTCRE.match(line.strip())
if not is_multi_line and mo:
sectname = mo.group('header').strip()
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == cp.DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict((('__name__', sectname),))
self._sections[sectname] = cursect
self._proxies[sectname] = None
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise cp.MissingSectionHeaderError(fpname, lineno, line)
# an option line?
elif not is_multi_line:
mo = self.OPTCRE.match(line)
if mo:
# We might just have handled the last line, which could contain a quotation we want to remove
optname, vi, optval = mo.group('option', 'vi', 'value')
if vi in ('=', ':') and ';' in optval and not optval.strip().startswith('"'):
pos = optval.find(';')
if pos != -1 and optval[pos - 1].isspace():
optval = optval[:pos]
optval = optval.strip()
if optval == '""':
optval = ''
# end handle empty string
optname = self.optionxform(optname.rstrip())
if len(optval) > 1 and optval[0] == '"' and optval[-1] != '"':
is_multi_line = True
optval = string_decode(optval[1:])
# end handle multi-line
cursect[optname] = optval
else:
if not e:
e = cp.ParsingError(fpname)
e.append(lineno, repr(line))
continue
else:
line = line.rstrip()
if line.endswith('"'):
is_multi_line = False
line = line[:-1]
# end handle quotations
cursect[optname] += string_decode(line)
# END parse section or option
# END while reading
# if any parsing errors occurred, raise an exception
if e:
raise e
def _has_includes(self):
return self._merge_includes and self.has_section('include')
def read(self):
"""Reads the data stored in the files we have been initialized with. It will
ignore files that cannot be read, possibly leaving an empty configuration
:return: Nothing
:raise IOError: if a file cannot be handled"""
if self._is_initialized:
return
self._is_initialized = True
if not isinstance(self._file_or_files, (tuple, list)):
files_to_read = [self._file_or_files]
else:
files_to_read = list(self._file_or_files)
# end assure we have a copy of the paths to handle
seen = set(files_to_read)
num_read_include_files = 0
while files_to_read:
file_path = files_to_read.pop(0)
fp = file_path
close_fp = False
# assume a path if it is not a file-object
if not hasattr(fp, "seek"):
try:
fp = open(file_path, 'rb')
close_fp = True
except IOError:
continue
# END fp handling
try:
self._read(fp, fp.name)
finally:
if close_fp:
fp.close()
# END read-handling
# Read includes and append those that we didn't handle yet
# We expect all paths to be normalized and absolute (and will assure that is the case)
if self._has_includes():
for _, include_path in self.items('include'):
if include_path.startswith('~'):
include_path = os.path.expanduser(include_path)
if not os.path.isabs(include_path):
if not close_fp:
continue
# end ignore relative paths if we don't know the configuration file path
assert os.path.isabs(file_path), "Need absolute paths to be sure our cycle checks will work"
include_path = os.path.join(os.path.dirname(file_path), include_path)
# end make include path absolute
include_path = os.path.normpath(include_path)
if include_path in seen or not os.access(include_path, os.R_OK):
continue
seen.add(include_path)
files_to_read.append(include_path)
num_read_include_files += 1
# each include path in configuration file
# end handle includes
# END for each file object to read
# If there was no file included, we can safely write back (potentially) the configuration file
        # without altering its meaning
if num_read_include_files == 0:
self._merge_includes = False
# end
def _write(self, fp):
"""Write an .ini-format representation of the configuration state in
git compatible format"""
def write_section(name, section_dict):
fp.write(("[%s]\n" % name).encode(defenc))
for (key, value) in section_dict.items():
if key != "__name__":
fp.write(("\t%s = %s\n" % (key, self._value_to_string(value).replace('\n', '\n\t'))).encode(defenc))
# END if key is not __name__
# END section writing
if self._defaults:
write_section(cp.DEFAULTSECT, self._defaults)
for name, value in self._sections.items():
write_section(name, value)
def items(self, section_name):
""":return: list((option, value), ...) pairs of all items in the given section"""
return [(k, v) for k, v in super(GitConfigParser, self).items(section_name) if k != '__name__']
@needs_values
def write(self):
"""Write changes to our file, if there are changes at all
:raise IOError: if this is a read-only writer instance or if we could not obtain
a file lock"""
self._assure_writable("write")
if not self._dirty:
return
if isinstance(self._file_or_files, (list, tuple)):
raise AssertionError("Cannot write back if there is not exactly a single file to write to, have %i files"
% len(self._file_or_files))
# end assert multiple files
if self._has_includes():
log.debug("Skipping write-back of confiuration file as include files were merged in." +
"Set merge_includes=False to prevent this.")
return
# end
fp = self._file_or_files
close_fp = False
# we have a physical file on disk, so get a lock
if isinstance(fp, string_types + (FileType, )):
self._lock._obtain_lock()
# END get lock for physical files
if not hasattr(fp, "seek"):
fp = open(self._file_or_files, "wb")
close_fp = True
else:
fp.seek(0)
# make sure we do not overwrite into an existing file
if hasattr(fp, 'truncate'):
fp.truncate()
# END
# END handle stream or file
# WRITE DATA
try:
self._write(fp)
finally:
if close_fp:
fp.close()
# END data writing
# we do not release the lock - it will be done automatically once the
# instance vanishes
def _assure_writable(self, method_name):
if self.read_only:
raise IOError("Cannot execute non-constant method %s.%s" % (self, method_name))
def add_section(self, section):
"""Assures added options will stay in order"""
return super(GitConfigParser, self).add_section(section)
@property
def read_only(self):
""":return: True if this instance may change the configuration file"""
return self._read_only
def get_value(self, section, option, default=None):
"""
:param default:
If not None, the given default value will be returned in case
the option did not exist
:return: a properly typed value, either int, float or string
:raise TypeError: in case the value could not be understood
Otherwise the exceptions known to the ConfigParser will be raised."""
try:
valuestr = self.get(section, option)
except Exception:
if default is not None:
return default
raise
types = (int, float)
for numtype in types:
try:
val = numtype(valuestr)
# truncated value ?
if val != float(valuestr):
continue
return val
except (ValueError, TypeError):
continue
# END for each numeric type
# try boolean values as git uses them
vl = valuestr.lower()
if vl == 'false':
return False
if vl == 'true':
return True
if not isinstance(valuestr, string_types):
raise TypeError("Invalid value type: only int, long, float and str are allowed", valuestr)
return valuestr
def _value_to_string(self, value):
if isinstance(value, (int, float, bool)):
return str(value)
return force_text(value)
@needs_values
@set_dirty_and_flush_changes
def set_value(self, section, option, value):
"""Sets the given option in section to the given value.
        It will create the section if required, and, unlike the default
        ConfigParser 'set' method, will not throw.
:param section: Name of the section in which the option resides or should reside
:param option: Name of the options whose value to set
:param value: Value to set the option to. It must be a string or convertible
to a string
:return: this instance"""
if not self.has_section(section):
self.add_section(section)
self.set(section, option, self._value_to_string(value))
return self
def rename_section(self, section, new_name):
"""rename the given section to new_name
        :raise ValueError: if section doesn't exist
:raise ValueError: if a section with new_name does already exist
:return: this instance
"""
if not self.has_section(section):
raise ValueError("Source section '%s' doesn't exist" % section)
if self.has_section(new_name):
raise ValueError("Destination section '%s' already exists" % new_name)
super(GitConfigParser, self).add_section(new_name)
for k, v in self.items(section):
self.set(new_name, k, self._value_to_string(v))
# end for each value to copy
# This call writes back the changes, which is why we don't have the respective decorator
self.remove_section(section)
return self
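# A minimal round-trip sketch (the file path is hypothetical): write a value with
# a non-read-only parser, release it to flush changes and drop the lock, then
# read the value back with a read-only parser; "false" comes back as boolean False.
def _example_config_round_trip(path="example.gitconfig"):
    writer = GitConfigParser(path, read_only=False)
    try:
        writer.set_value("core", "filemode", "false")  # creates the section if needed and writes back
    finally:
        writer.release()
    reader = GitConfigParser(path, read_only=True)
    try:
        return reader.get_value("core", "filemode")
    finally:
        reader.release()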
|
|
"""
Parsers are used to parse the content of incoming HTTP requests.
They give us a generic way of being able to handle various media types
on the request, such as form content or json encoded data.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.core.files.uploadhandler import StopFutureHandlers
from django.http import QueryDict
from django.http.multipartparser import MultiPartParser as DjangoMultiPartParser
from django.http.multipartparser import MultiPartParserError, parse_header, ChunkIter
from rest_framework.compat import etree, six, yaml
from rest_framework.exceptions import ParseError
from rest_framework import renderers
import json
import datetime
import decimal
class DataAndFiles(object):
def __init__(self, data, files):
self.data = data
self.files = files
class BaseParser(object):
"""
All parsers should extend `BaseParser`, specifying a `media_type`
attribute, and overriding the `.parse()` method.
"""
media_type = None
def parse(self, stream, media_type=None, parser_context=None):
"""
Given a stream to read from, return the parsed representation.
Should return parsed data, or a `DataAndFiles` object consisting of the
parsed data and files.
"""
raise NotImplementedError(".parse() must be overridden.")
class JSONParser(BaseParser):
"""
Parses JSON-serialized data.
"""
media_type = 'application/json'
renderer_class = renderers.UnicodeJSONRenderer
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as JSON and returns the resulting data.
"""
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
try:
data = stream.read().decode(encoding)
return json.loads(data)
except ValueError as exc:
raise ParseError('JSON parse error - %s' % six.text_type(exc))
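# A minimal usage sketch: JSONParser reads a byte stream and decodes it with the
# encoding taken from parser_context (passed explicitly here so no Django
# settings are required for the example).
def _example_parse_json():
    import io
    stream = io.BytesIO(b'{"answer": 42}')
    return JSONParser().parse(stream, parser_context={'encoding': 'utf-8'})  # -> {'answer': 42}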
class YAMLParser(BaseParser):
"""
Parses YAML-serialized data.
"""
media_type = 'application/yaml'
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as YAML and returns the resulting data.
"""
assert yaml, 'YAMLParser requires pyyaml to be installed'
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
try:
data = stream.read().decode(encoding)
return yaml.safe_load(data)
except (ValueError, yaml.parser.ParserError) as exc:
            raise ParseError('YAML parse error - %s' % six.text_type(exc))
class FormParser(BaseParser):
"""
Parser for form data.
"""
media_type = 'application/x-www-form-urlencoded'
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as a URL encoded form,
and returns the resulting QueryDict.
"""
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
data = QueryDict(stream.read(), encoding=encoding)
return data
class MultiPartParser(BaseParser):
"""
Parser for multipart form data, which may include file data.
"""
media_type = 'multipart/form-data'
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as a multipart encoded form,
and returns a DataAndFiles object.
`.data` will be a `QueryDict` containing all the form parameters.
`.files` will be a `QueryDict` containing all the form files.
"""
parser_context = parser_context or {}
request = parser_context['request']
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
meta = request.META.copy()
meta['CONTENT_TYPE'] = media_type
upload_handlers = request.upload_handlers
try:
parser = DjangoMultiPartParser(meta, stream, upload_handlers, encoding)
data, files = parser.parse()
return DataAndFiles(data, files)
except MultiPartParserError as exc:
raise ParseError('Multipart form parse error - %s' % str(exc))
class XMLParser(BaseParser):
"""
XML parser.
"""
media_type = 'application/xml'
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as XML and returns the resulting data.
"""
assert etree, 'XMLParser requires defusedxml to be installed'
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
parser = etree.DefusedXMLParser(encoding=encoding)
try:
tree = etree.parse(stream, parser=parser, forbid_dtd=True)
except (etree.ParseError, ValueError) as exc:
            raise ParseError('XML parse error - %s' % six.text_type(exc))
data = self._xml_convert(tree.getroot())
return data
def _xml_convert(self, element):
"""
convert the xml `element` into the corresponding python object
"""
children = list(element)
if len(children) == 0:
return self._type_convert(element.text)
else:
            # if the first child tag is 'list-item', all children are list items
if children[0].tag == "list-item":
data = []
for child in children:
data.append(self._xml_convert(child))
else:
data = {}
for child in children:
data[child.tag] = self._xml_convert(child)
return data
def _type_convert(self, value):
"""
        Converts the value returned by the XML parser into the equivalent
        Python type
"""
if value is None:
return value
try:
return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
except ValueError:
pass
try:
return int(value)
except ValueError:
pass
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
pass
return value
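# A minimal usage sketch (assumes defusedxml is installed, as the parser itself
# requires): leaf text is converted by _type_convert, so numeric strings come
# back as int or decimal.Decimal values.
def _example_parse_xml():
    import io
    stream = io.BytesIO(b'<root><a>1</a><b>2.5</b></root>')
    return XMLParser().parse(stream, parser_context={'encoding': 'utf-8'})
    # -> {'a': 1, 'b': decimal.Decimal('2.5')}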
class FileUploadParser(BaseParser):
"""
Parser for file upload data.
"""
media_type = '*/*'
def parse(self, stream, media_type=None, parser_context=None):
"""
Treats the incoming bytestream as a raw file upload and returns
        a `DataAndFiles` object.
`.data` will be None (we expect request body to be a file content).
`.files` will be a `QueryDict` containing one 'file' element.
"""
parser_context = parser_context or {}
request = parser_context['request']
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
meta = request.META
upload_handlers = request.upload_handlers
filename = self.get_filename(stream, media_type, parser_context)
# Note that this code is extracted from Django's handling of
# file uploads in MultiPartParser.
content_type = meta.get('HTTP_CONTENT_TYPE',
meta.get('CONTENT_TYPE', ''))
try:
content_length = int(meta.get('HTTP_CONTENT_LENGTH',
meta.get('CONTENT_LENGTH', 0)))
except (ValueError, TypeError):
content_length = None
# See if the handler will want to take care of the parsing.
for handler in upload_handlers:
result = handler.handle_raw_input(None,
meta,
content_length,
None,
encoding)
if result is not None:
return DataAndFiles(None, {'file': result[1]})
# This is the standard case.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
chunk_size = min([2 ** 31 - 4] + possible_sizes)
chunks = ChunkIter(stream, chunk_size)
counters = [0] * len(upload_handlers)
for handler in upload_handlers:
try:
handler.new_file(None, filename, content_type,
content_length, encoding)
except StopFutureHandlers:
break
for chunk in chunks:
for i, handler in enumerate(upload_handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk, counters[i])
counters[i] += chunk_length
if chunk is None:
break
for i, handler in enumerate(upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
return DataAndFiles(None, {'file': file_obj})
raise ParseError("FileUpload parse error - "
"none of upload handlers can handle the stream")
def get_filename(self, stream, media_type, parser_context):
"""
        Detects the uploaded file name. First searches for a 'filename' URL kwarg.
        Then tries to parse the Content-Disposition header.
"""
try:
return parser_context['kwargs']['filename']
except KeyError:
pass
try:
meta = parser_context['request'].META
disposition = parse_header(meta['HTTP_CONTENT_DISPOSITION'])
return disposition[1]['filename']
except (AttributeError, KeyError):
pass
|
|
import warnings
import numpy as np
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.exceptions import ConvergenceWarning
from scipy.optimize import minimize, OptimizeWarning
from skgstat.util import shannon_entropy
def even_width_lags(distances, n, maxlag):
"""Even lag edges
Calculate the lag edges for a given amount of bins using the same lag
step width for all bins.
.. versionchanged:: 0.3.8
        Function returns `None` as second value to indicate that
        the number of lag classes was not changed.
Parameters
----------
distances : numpy.array
Flat numpy array representing the upper triangle of
the distance matrix.
n : integer
Amount of lag classes to find
maxlag : integer, float
Limit the last lag class to this separating distance.
Returns
-------
bin_edges : numpy.ndarray
The **upper** bin edges of the lag classes
"""
# maxlags larger than the maximum separating distance will be ignored
if maxlag is None or maxlag > np.nanmax(distances):
maxlag = np.nanmax(distances)
return np.linspace(0, maxlag, n + 1)[1:], None
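# Illustrative sketch with synthetic distances: five even-width classes up to a
# maxlag of 80 give upper edges at 16, 32, 48, 64 and 80; the second return
# value is None because the number of lag classes is unchanged.
def _example_even_width_lags():
    d = np.linspace(0, 100, 500)
    edges, n_new = even_width_lags(d, 5, maxlag=80)
    return edges, n_new  # (array([16., 32., 48., 64., 80.]), None)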
def uniform_count_lags(distances, n, maxlag):
"""Uniform lag counts
Calculate the lag edges for a given amount of bins with the same amount
of observations in each lag class. The lag step width will be variable.
.. versionchanged:: 0.3.8
        Function returns `None` as second value to indicate that
        the number of lag classes was not changed.
Parameters
----------
distances : numpy.array
Flat numpy array representing the upper triangle of
the distance matrix.
n : integer
Amount of lag classes to find
maxlag : integer, float
Limit the last lag class to this separating distance.
Returns
-------
bin_edges : numpy.ndarray
The **upper** bin edges of the lag classes
"""
# maxlags larger than the maximum separating distance will be ignored
if maxlag is None or maxlag > np.nanmax(distances):
maxlag = np.nanmax(distances)
    # filter for distances <= maxlag
d = distances[np.where(distances <= maxlag)]
return np.fromiter(
(np.nanpercentile(d, (i / n) * 100) for i in range(1, n + 1)),
dtype=float
), None
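# Illustrative sketch: with 100 equally frequent distances 1..100, four
# uniform-count classes hold ~25 distances each, so the upper edges fall at the
# 25th, 50th, 75th and 100th percentiles of the distance distribution.
def _example_uniform_count_lags():
    d = np.arange(1, 101, dtype=float)
    edges, n_new = uniform_count_lags(d, 4, maxlag=None)
    return edges, n_new  # edges -> [25.75, 50.5, 75.25, 100.0]; n_new is None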
def auto_derived_lags(distances, method_name, maxlag):
"""Derive bins automatically
.. versionadded:: 0.3.8
Uses `histogram_bin_edges <numpy.histogram_bin_edges>` to derive the
lag classes automatically. Supports any method supported by
`histogram_bin_edges <numpy.histogram_bin_edges>`. It is recommended
to use `'sturges'`, `'doane'` or `'fd'`.
Parameters
----------
distances : numpy.array
Flat numpy array representing the upper triangle of
the distance matrix.
maxlag : integer, float
Limit the last lag class to this separating distance.
method_name : str
Any method supported by
`histogram_bin_edges <numpy.histogram_bin_edges>`
Returns
-------
bin_edges : numpy.ndarray
The **upper** bin edges of the lag classes
See Also
--------
numpy.histogram_bin_edges
"""
# maxlags larger than maximum separating distance will be ignored
if maxlag is None or maxlag > np.nanmax(distances):
maxlag = np.nanmax(distances)
    # filter for distances <= maxlag
d = distances[np.where(distances <= maxlag)]
# calculate the edges
edges = np.histogram_bin_edges(d, bins=method_name)[1:]
return edges, len(edges)
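# Illustrative sketch: with 'sturges' the number of lag classes is derived from
# the sample size, so the function also returns the new class count.
def _example_auto_derived_lags():
    d = np.linspace(0, 50, 200)
    edges, n_classes = auto_derived_lags(d, 'sturges', maxlag=None)
    return edges, n_classes  # n_classes == len(edges), here 9 for 200 distances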
def kmeans(distances, n, maxlag, binning_random_state=42, **kwargs):
"""
.. versionadded:: 0.3.9
Clustering of pairwise separating distances between locations up to
maxlag. The lag class edges are formed equidistant from each cluster
    center. Note: this does not necessarily result in equidistant lag classes.
Parameters
----------
distances : numpy.array
Flat numpy array representing the upper triangle of
the distance matrix.
n : integer
Amount of lag classes to find
maxlag : integer, float
Limit the last lag class to this separating distance.
Returns
-------
bin_edges : numpy.ndarray
The **upper** bin edges of the lag classes
See Also
--------
sklearn.cluster.KMeans
Note
----
The :class:`KMeans <sklearn.cluster.KMeans>` that is used under the hood is not
a deterministic algorithm, as the starting cluster centroids are seeded
    randomly. This can yield slightly different results on each run.
Thus, for this application, the random_state on KMeans is fixed to a
specific value. You can change the seed by passing another seed to
:class:`Variogram <skgstat.Variogram>` as `binning_random_state`.
"""
# maxlags larger than maximum separating distance will be ignored
if maxlag is None or maxlag > np.nanmax(distances):
maxlag = np.nanmax(distances)
    # filter for distances <= maxlag
d = distances[np.where(distances <= maxlag)]
    # filter the sklearn convergence warning, because working with
# undefined state in binning does not make any sense
with warnings.catch_warnings():
warnings.filterwarnings('error')
# cluster the filtered distances
try:
km = KMeans(n_clusters=n, random_state=binning_random_state).fit(d.reshape(-1, 1))
except ConvergenceWarning:
raise ValueError("KMeans failed to converge. Maybe you need to use a different n_lags.")
# get the centers
_centers = np.sort(km.cluster_centers_.flatten())
# build the upper edges
bounds = zip([0] + list(_centers)[:-1], _centers)
edges = np.fromiter(((low + up) / 2 for low, up in bounds), dtype=float)
return edges, None
def ward(distances, n, maxlag, **kwargs):
"""
.. versionadded:: 0.3.9
Clustering of pairwise separating distances between locations up to
maxlag. The lag class edges are formed equidistant from each cluster
    center. Note: this does not necessarily result in equidistant lag classes.
    The clustering is done by merging pairs of clusters that minimize the
    variance for the merged clusters, until `n` clusters are found.
Parameters
----------
distances : numpy.array
Flat numpy array representing the upper triangle of
the distance matrix.
n : integer
Amount of lag classes to find
maxlag : integer, float
Limit the last lag class to this separating distance.
Returns
-------
bin_edges : numpy.ndarray
The **upper** bin edges of the lag classes
See Also
--------
    sklearn.cluster.AgglomerativeClustering
"""
# maxlags larger than maximum separating distance will be ignored
if maxlag is None or maxlag > np.nanmax(distances):
maxlag = np.nanmax(distances)
    # filter for distances <= maxlag
d = distances[np.where(distances <= maxlag)]
# cluster the filtered distances
w = AgglomerativeClustering(linkage='ward', n_clusters=n).fit(d.reshape(-1, 1))
# get the aggregation function
if kwargs.get('binning_agg_func', False) == 'median':
agg = np.median
else:
agg = np.mean
# get the centers
_centers = np.sort([agg(d[np.where(w.labels_ == i)[0]]) for i in np.unique(w.labels_)])
# build the upper edges
bounds = zip([0] + list(_centers)[:-1], _centers)
edges = np.fromiter(((low + up) / 2 for low, up in bounds), dtype=float)
return edges, None
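# Illustrative sketch: hierarchical (Ward) clustering of synthetic distances into
# n clusters; each upper edge lies midway between consecutive cluster centres
# (the first between 0 and the smallest centre).
def _example_ward_lags():
    d = np.linspace(1, 60, 120)
    edges, n_new = ward(d, 3, maxlag=None)
    return edges, n_new  # three upper edges, n_new is None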
def stable_entropy_lags(distances, n, maxlag, **kwargs):
"""
    .. versionadded:: 0.4.0
Optimizes the lag class edges for `n` lag classes.
    The algorithm minimizes the difference in Shannon entropy
    between the lag classes. Consequently, the final
lag classes should be of comparable uncertainty.
Parameters
----------
distances : numpy.array
Flat numpy array representing the upper triangle of
the distance matrix.
n : integer
Amount of lag classes to find
maxlag : integer, float
Limit the last lag class to this separating distance.
Keyword Arguments
-----------------
binning_maxiter : int
Maximum iterations before the optimization is stopped,
if the lag edges do not converge.
binning_entropy_bins : int, str
Binning method for calculating the shannon entropy
on each iteration.
Returns
-------
bin_edges : numpy.ndarray
The **upper** bin edges of the lag classes
"""
# maxlags larger than maximum separating distance will be ignored
if maxlag is None or maxlag > np.nanmax(distances):
maxlag = np.nanmax(distances)
    # filter for distances <= maxlag
d = distances[np.where(distances <= maxlag)]
# create a global binning and initial guess
bins = np.histogram_bin_edges(d, bins=kwargs.get('binning_entropy_bins', 'sqrt'))
initial_guess = np.linspace(0, np.nanmax(d), n + 1)[1:]
# define the loss function
def loss(edges):
# get the shannon entropy for the current binning
h = np.ones(len(edges) - 1) * 9999
for i, bnd in enumerate(zip(edges, edges[1:])):
l, u = bnd
x = d[np.where((d >= l) & (d < u))[0]]
if len(x) == 0:
continue
else:
h[i] = shannon_entropy(x, bins)
        # return the sum of absolute entropy differences between the bins
return np.sum(np.abs(np.diff(h)))
# minimize the loss function
opt = dict(maxiter=kwargs.get('binning_maxiter', 5000))
res = minimize(loss, initial_guess, method='Nelder-Mead', options=opt)
if res.success:
return res.x, None
else: # pragma: no cover
raise OptimizeWarning("Failed to find optimal lag classes.")
|
|
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from panda3d.core import *
from panda3d.direct import *
from math import *
import math
from direct.fsm.FSM import FSM
from toontown.minigame import ArrowKeys
from direct.showbase import PythonUtil
from direct.task import Task
from direct.distributed.ClockDelta import *
import BuildGeometry
from toontown.golf import GolfGlobals
import random, time
def scalp(vec, scal):
    vec0 = vec[0] * scal
    vec1 = vec[1] * scal
    vec2 = vec[2] * scal
    # return the scaled vector; rebinding the local name 'vec' alone has no effect on the caller
    return Vec3(vec0, vec1, vec2)
def length(vec):
return sqrt(vec[0] ** 2 + vec[1] ** 2 + vec[2] ** 2)
class PhysicsWorldBase:
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPhysicsWorld')
def __init__(self, canRender = 0):
self.canRender = canRender
self.world = OdeWorld()
self.space = OdeSimpleSpace()
self.contactgroup = OdeJointGroup()
self.bodyList = []
self.geomList = []
self.massList = []
self.rayList = []
self.showContacts = 0
self.jointMarkers = []
self.jointMarkerCount = 64
self.meshDataList = []
self.geomDataList = []
self.commonObjectInfoDict = {}
self.maxColCount = 0
if self.canRender:
self.odePandaRelationList = self.bodyList
self.root = render.attachNewNode('physics root node')
else:
self.root = NodePath('physics root node')
self.placerNode = self.root.attachNewNode('Placer')
self.subPlacerNode = self.placerNode.attachNewNode('Placer Sub Node')
self.commonObjectDict = {}
self.commonId = 0
self.worldAttach = self.root.attachNewNode('physics geom attach point')
self.timingCycleLength = 10.0
self.timingCycleOffset = 0.0
self.timingSimTime = 0.0
self.FPS = 90.0
self.refFPS = 60.0
self.DTAStep = 1.0 / self.FPS
self.refCon = 1.2
self.collisionEventName = 'ode-collision-%d' % id(self)
self.space.setCollisionEvent(self.collisionEventName)
self.accept(self.collisionEventName, self.__collisionHandler)
def delete(self):
self.notify.debug('Max Collision Count was %s' % self.maxColCount)
self.stopSim()
self.commonObjectDict = None
if self.canRender:
for pair in self.odePandaRelationList:
pair[0].removeNode()
pair[1].destroy()
self.odePandaRelationList = None
else:
for body in self.bodyList:
body[1].destroy()
self.bodyList = None
for mass in self.massList:
mass = None
for geom in self.geomList:
geom.destroy()
geom = None
for ray in self.rayList:
ray.destroy()
ray = None
self.placerNode.removeNode()
self.root.removeNode()
for marker in self.jointMarkers:
marker.removeNode()
self.jointMarkers = None
for data in self.geomDataList:
data.destroy()
for data in self.meshDataList:
data.destroy()
self.floor.destroy()
self.floor = None
self.contactgroup.empty()
self.world.destroy()
self.space.destroy()
self.world = None
self.space = None
self.ignore(self.collisionEventName)
def setupSimulation(self):
self.world.setAutoDisableFlag(0)
self.world.setAutoDisableLinearThreshold(0.15)
self.world.setAutoDisableAngularThreshold(0.15)
self.world.setAutoDisableSteps(2)
self.world.setGravity(0, 0, -25)
self.world.setErp(0.8)
self.world.setCfm(1e-05)
self.world.initSurfaceTable(5)
self.world.setSurfaceEntry(0, 0, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(1, 1, 1500, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.001 / self.refCon)
self.world.setSurfaceEntry(2, 2, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(0, 2, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(0, 3, 150, 0.0, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(1, 3, 150, 0.0, 99.1, 0.9, 1e-05, 0.0, 1.0 / self.refCon)
self.world.setSurfaceEntry(2, 3, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(3, 3, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(4, 4, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(1, 4, 150, 0.0, 99.1, 0.9, 1e-05, 0.0, 0.001 / self.refCon)
self.world.setSurfaceEntry(pos1=0, pos2=1, mu=80, bounce=0.15, bounce_vel=0.1, soft_erp=0.9, soft_cfm=1e-05, slip=0.0, dampen=0.35 / self.refCon)
self.world.setSurfaceEntry(pos1=2, pos2=1, mu=1500, bounce=0.9, bounce_vel=0.01, soft_erp=0.9, soft_cfm=1e-05, slip=0.0, dampen=0.001 / self.refCon)
self.floor = OdePlaneGeom(self.space, Vec4(0.0, 0.0, 1.0, -20.0))
self.floor.setCollideBits(BitMask32(0))
self.floor.setCategoryBits(BitMask32(3840))
self.space.setAutoCollideWorld(self.world)
self.space.setAutoCollideJointGroup(self.contactgroup)
self.world.setQuickStepNumIterations(8)
self.DTA = 0.0
self.frameCounter = 0
if self.canRender:
for count in xrange(self.jointMarkerCount):
testMarker = render.attachNewNode('Joint Marker')
ballmodel = loader.loadModel('phase_3/models/misc/sphere')
ballmodel.reparentTo(testMarker)
ballmodel.setScale(0.1)
testMarker.setPos(0.0, 0.0, -100.0)
self.jointMarkers.append(testMarker)
def setTimingCycleLength(self, time):
self.timingCycleLength = time
def getTimingCycleLength(self):
return self.timingCycleLength
def getCycleTime(self, doprint = 0):
cycleTime = (globalClock.getRealTime() + self.timingCycleOffset) % self.timingCycleLength
if doprint:
print 'Get Cycle Time %s' % cycleTime
return cycleTime
def setTimeIntoCycle(self, time, doprint = 0):
trueCycleTime = globalClock.getRealTime() % self.timingCycleLength
self.timingCycleOffset = time - trueCycleTime
if doprint:
self.notify.debug('Set Cycle Time %s' % self.timingCycleOffset)
self.notify.debug('SET cycle time %s' % ((globalClock.getRealTime() + self.timingCycleOffset) % self.timingCycleLength))
def getSimCycleTime(self):
return
return self.timingSimTime % self.timingCycleLength
def startSim(self):
taskMgr.add(self.__simulationTask, 'simulation task')
def stopSim(self):
taskMgr.remove('simulation task')
def __simulationTask(self, task):
self.DTA += globalClock.getDt()
self.frameCounter += 1
if self.frameCounter >= 10:
self.frameCounter = 0
startTime = globalClock.getRealTime()
colCount = 0
while self.DTA >= self.DTAStep:
self.DTA -= self.DTAStep
self.preStep()
self.simulate()
self.postStep()
if self.canRender:
self.placeBodies()
if self.frameCounter == 0:
endTime = globalClock.getRealTime() - startTime
return task.cont
def __collisionHandler(self, entry):
self.colEntries.append(entry)
def simulate(self):
self.colEntries = []
self.space.autoCollide()
# We need the callbacks processed now, before we try to look at colEntries, so:
eventMgr.doEvents()
self.colCount = len(self.colEntries)
if self.maxColCount < self.colCount:
self.maxColCount = self.colCount
self.notify.debug('New Max Collision Count %s' % self.maxColCount)
self.world.quickStep(self.DTAStep)
for bodyPair in self.bodyList:
self.world.applyDampening(self.DTAStep, bodyPair[1])
self.contactgroup.empty()
self.commonObjectControl()
self.timingSimTime = self.timingSimTime + self.DTAStep
def placeBodies(self):
for pair in self.odePandaRelationList:
pandaNodePathGeom = pair[0]
odeBody = pair[1]
if pandaNodePathGeom:
pandaNodePathGeom.setPos(odeBody.getPosition())
rotation = odeBody.getRotation() * (180.0 / math.pi)
pandaNodePathGeom.setQuat(Quat(odeBody.getQuaternion()[0], odeBody.getQuaternion()[1], odeBody.getQuaternion()[2], odeBody.getQuaternion()[3]))
def preStep(self):
pass
def postStep(self):
if self.showContacts and self.canRender:
for count in xrange(self.jointMarkerCount):
pandaNodePathGeom = self.jointMarkers[count]
if count < self.colCount:
pandaNodePathGeom.setPos(self.space.getContactData(count * 3 + 0), self.space.getContactData(count * 3 + 1), self.space.getContactData(count * 3 + 2))
else:
pandaNodePathGeom.setPos(0.0, 0.0, -100.0)
def commonObjectControl(self):
time = self.getCycleTime()
for key in self.commonObjectDict:
if key not in self.commonObjectInfoDict:
self.commonObjectInfoDict[key] = None
entry = self.commonObjectDict[key]
if entry[1] in [2, 4]:
type = entry[1]
body = entry[2]
motor = entry[3]
timeData = entry[4]
forceData = entry[5]
eventData = entry[6]
model = entry[7]
force = 0.0
for index in xrange(len(timeData)):
                    if (index == len(timeData) - 1 and timeData[index] < time) or (timeData[index] < time and timeData[index + 1] > time):
force = forceData[index]
event = eventData[index]
if event != self.commonObjectInfoDict[key]:
self.commonObjectEvent(key, model, type, force, event)
self.commonObjectInfoDict[key] = event
motor.setParamVel(force)
return
def commonObjectEvent(self, key, model, type, force, event):
self.notify.debug('commonObjectForceEvent %s %s %s %s %s' % (key,
model,
type,
force,
event))
def getCommonObjectData(self):
objectStream = [(0,
0,
self.getCycleTime(),
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0)]
for key in self.commonObjectDict:
objectPair = self.commonObjectDict[key]
object = objectPair[2]
pos3 = object.getPosition()
quat4 = object.getQuaternion()
anV3 = object.getAngularVel()
lnV3 = object.getLinearVel()
data = (objectPair[0],
objectPair[1],
pos3[0],
pos3[1],
pos3[2],
quat4[0],
quat4[1],
quat4[2],
quat4[3],
anV3[0],
anV3[1],
anV3[2],
lnV3[0],
lnV3[1],
lnV3[2])
objectStream.append(data)
if len(objectStream) <= 1:
data = (0, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
objectStream.append(data)
return objectStream
def useCommonObjectData(self, objectData, enable = 1):
if not objectData:
return
if objectData[1][1] == 99:
return
time = objectData[0]
self.setTimeIntoCycle(time[2])
if time[2] > self.timingCycleLength:
pass
for dataIndex in xrange(1, len(objectData)):
data = objectData[dataIndex]
commonObject = self.commonObjectDict[data[0]]
commonObject[2].setPosition(data[2], data[3], data[4])
commonObject[2].setQuaternion(Quat(data[5], data[6], data[7], data[8]))
commonObject[2].setAngularVel(data[9], data[10], data[11])
commonObject[2].setLinearVel(data[12], data[13], data[14])
if enable:
commonObject[2].enable()
else:
commonObject[2].disable()
def createCommonObject(self, type, commonId, pos, hpr, sizeX = 0, sizeY = 0, moveDistance = 0):
if commonId == None:
commonId = self.commonId
self.commonId += 1
vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]))
vHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
rHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
if type == 0:
model, box = self.createBox(self.world, self.space, 10.0, 5.0, 5.0, 5.0)
box.setPosition(vPos)
self.placerNode.setHpr(vHpr)
box.setQuaternion(self.placerNode.getQuat())
self.commonObjectDict[commonId] = (commonId, type, box)
elif type == 1:
model, cross = self.createCross(self.world, self.space, 1.0, 3.0, 12.0, 2.0, 2)
motor = OdeHingeJoint(self.world)
cross.setPosition(vPos)
cross.setQuaternion(self.placerNode.getQuat())
ourAxis = render.getRelativeVector(self.placerNode, Vec3(0, 0, 1))
motor.setParamVel(1.5)
motor.setParamFMax(500000000.0)
boxsize = Vec3(1.0, 1.0, 1.0)
motor.attachBody(cross, 0)
motor.setAnchor(vPos)
motor.setAxis(ourAxis)
self.cross = cross
cross.enable()
self.commonObjectDict[commonId] = (commonId, type, cross)
elif type == 2:
ourAxis = render.getRelativeVector(self.placerNode, Vec3(0, 0, 1))
model, box = self.createBox(self.world, self.space, 10.0, 5.0, 5.0, 5.0, 2)
box.setPosition(vPos)
box.setQuaternion(self.placerNode.getQuat())
motor = OdeSliderJoint(self.world)
motor.attachBody(box, 0)
motor.setAxis(ourAxis)
motor.setParamVel(3.0)
motor.setParamFMax(5000000.0)
motor.setParamHiStop(10.0)
motor.setParamLoStop(-10.0)
timeData = (0.0, 5.0)
forceData = (3.0, -3.0)
eventData = (1, 2)
self.commonObjectDict[commonId] = (commonId,
type,
box,
motor,
timeData,
forceData,
eventData,
model)
elif type == 3:
vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]))
vHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
self.subPlacerNode.setPos(0, 0, 0)
if self.canRender:
myModel = loader.loadModel('phase_6/models/golf/golf_windmill_b')
else:
myModel = loader.loadModel('phase_6/models/golf/golf_windmill_b.bam')
myModel.reparentTo(self.root)
myModel.setPos(vPos)
myModel.setHpr(vHpr)
millFan = myModel.find('**/windmillFan0')
millBase = myModel.find('**/arm')
rod = myModel.find('**/rod')
rod.wrtReparentTo(millBase)
self.windmillFanNodePath = millFan
self.windmillBaseNodePath = millBase
millData = OdeTriMeshData(millBase)
millGeom = OdeTriMeshGeom(self.space, millData)
self.meshDataList.append(millData)
millGeom.setPosition(self.subPlacerNode.getPos(self.root))
millGeom.setQuaternion(self.subPlacerNode.getQuat())
millGeom.setCollideBits(BitMask32(251658240))
millGeom.setCategoryBits(BitMask32(8388608))
self.space.setCollideId(millGeom, 8)
vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]) + 5)
vHpr = Vec3(float(hpr[0]), float(hpr[1] + 90), float(hpr[2]) - 90)
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
self.subPlacerNode.setPos(-1, 0, 0.0)
model, cross = self.createPinWheel(self.world, self.space, 10.0, 1.6, 4.0, 0.6, 5, 3.7, 1.2, 1, millFan, (0, 0, 90), (-4.6, -0.5, -0.25), 20)
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
self.subPlacerNode.setPos(-1, 0, 0.0)
motor = OdeHingeJoint(self.world)
cross.setPosition(self.subPlacerNode.getPos(self.root))
cross.setQuaternion(self.placerNode.getQuat())
ourAxis = self.root.getRelativeVector(self.subPlacerNode, Vec3(0, 0, 1))
motor.setParamVel(1.0)
motor.setParamFMax(50000.0)
boxsize = Vec3(1.0, 1.0, 1.0)
motor.attachBody(cross, 0)
motor.setAnchor(self.subPlacerNode.getPos(self.root))
motor.setAxis(ourAxis)
self.cross = cross
cross.enable()
self.commonObjectDict[commonId] = (commonId, type, cross)
elif type == 4:
ourAxis = self.root.getRelativeVector(self.placerNode, Vec3(0, 1, 0))
model, box = self.createBox(self.world, self.space, 50.0, sizeX, sizeY, 1.0, 2)
box.setPosition(vPos)
box.setQuaternion(self.placerNode.getQuat())
motor = OdeSliderJoint(self.world)
motor.attachBody(box, 0)
motor.setAxis(ourAxis)
motor.setParamVel(moveDistance / 4.0)
motor.setParamFMax(25000.0)
motor.setParamHiStop(moveDistance)
motor.setParamLoStop(0)
timeData = (0.0, 1.0, 5.0, 6.0)
forceData = (-moveDistance / 4.0,
moveDistance / 4.0,
moveDistance / 4.0,
-moveDistance / 4.0)
eventData = (-1, 1, -2, 2)
radius = moveDistance + sizeY * 0.5
self.commonObjectDict[commonId] = (commonId,
type,
box,
motor,
timeData,
forceData,
eventData,
model,
radius)
return [type,
commonId,
(pos[0], pos[1], pos[2]),
(hpr[0], hpr[1], hpr[2]),
sizeX,
sizeY,
moveDistance]
def createSphere(self, world, space, density, radius, ballIndex = None):
self.notify.debug('create sphere index %s' % ballIndex)
body = OdeBody(world)
M = OdeMass()
M.setSphere(density, radius)
body.setMass(M)
body.setPosition(0, 0, -100)
geom = OdeSphereGeom(space, radius)
self.space.setSurfaceType(geom, 1)
self.notify.debug('collide ID is %s' % self.space.setCollideId(geom, 42))
self.massList.append(M)
self.geomList.append(geom)
if ballIndex == 1:
self.notify.debug('1')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
elif ballIndex == 2:
self.notify.debug('2')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
elif ballIndex == 3:
self.notify.debug('3')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
elif ballIndex == 4:
self.notify.debug('4')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
else:
geom.setCollideBits(BitMask32(4294967295L))
geom.setCategoryBits(BitMask32(4294967295L))
geom.setBody(body)
if self.notify.getDebug():
self.notify.debug('golf ball geom id')
geom.write()
self.notify.debug(' -')
self.notify.debug('Collide Bits %s' % geom.getCollideBits())
if self.canRender:
testball = render.attachNewNode('Ball Holder')
ballmodel = loader.loadModel('phase_6/models/golf/golf_ball')
ballmodel.reparentTo(testball)
ballmodel.setColor(*GolfGlobals.PlayerColors[ballIndex - 1])
testball.setPos(0, 0, -100)
self.odePandaRelationList.append((testball, body))
else:
testball = None
self.bodyList.append((None, body))
return (testball, body, geom)
def createBox(self, world, space, density, lx, ly, lz, colOnlyBall = 0):
body = OdeBody(self.world)
M = OdeMass()
M.setSphere(density, 0.3 * (lx + ly + lz))
body.setMass(M)
boxsize = Vec3(lx, ly, lz)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 7)
self.massList.append(M)
self.geomList.append(geom)
        if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
if self.canRender:
color = random.choice([Vec4(1.0, 0.0, 0.5, 1.0), Vec4(0.5, 0.5, 1.0, 1.0), Vec4(0.5, 1.0, 0.5, 1.0)])
boxsize = Vec3(lx, ly, lz)
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(self.worldAttach, lx, ly, lz, color, 1)
boxNodePathGeom.setPos(0, 0, -100)
self.odePandaRelationList.append((boxNodePathGeom, body))
else:
boxNodePathGeom = None
self.bodyList.append((None, body))
return (boxNodePathGeom, body)
def createCross(self, world, space, density, lx, ly, lz, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None):
body = OdeBody(self.world)
M = OdeMass()
M.setBox(density, lx, ly, lz)
body.setMass(M)
body.setFiniteRotationMode(1)
boxsize = Vec3(lx, ly, lz)
boxsize2 = Vec3(ly, lx, lz)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 13)
geom2 = OdeBoxGeom(space, boxsize2)
geom2.setBody(body)
self.space.setSurfaceType(geom2, 0)
self.space.setCollideId(geom2, 26)
self.massList.append(M)
self.geomList.append(geom)
self.geomList.append(geom2)
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(251658240))
geom2.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(0))
geom2.setCategoryBits(BitMask32(0))
if self.canRender:
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(self.worldAttach, lx, ly, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom.setPos(0, 0, -100)
boxNodePathGeom2, t1, t2 = BuildGeometry.addBoxGeom(boxNodePathGeom, ly, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom2.setPos(0, 0, 0)
if attachedGeo:
attachedGeo.reparentTo(boxNodePathGeom)
attachedGeo.setHpr(0, 0, 90)
attachedGeo.setPos(-4.8, 0, -2.0)
self.odePandaRelationList.append((boxNodePathGeom, body))
else:
boxNodePathGeom = None
self.bodyList.append((None, body))
return (boxNodePathGeom, body)
def createCross2(self, world, space, density, lx, ly, lz, latSlide, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None):
body = OdeBody(self.world)
M = OdeMass()
M.setBox(density, lx, ly, lz)
body.setMass(M)
body.setFiniteRotationMode(1)
boxsize = Vec3(lx, ly * 0.5, lz)
boxsize2 = Vec3(ly * 0.5, lx, lz)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
geom.setOffsetPosition(-latSlide, ly * 0.25, 0)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 13)
geom2 = OdeBoxGeom(space, boxsize2)
geom2.setBody(body)
geom2.setOffsetPosition(ly * 0.25, latSlide, 0)
self.space.setSurfaceType(geom2, 0)
self.space.setCollideId(geom2, 13)
geom3 = OdeBoxGeom(space, boxsize)
geom3.setBody(body)
geom3.setOffsetPosition(latSlide, -ly * 0.25, 0)
self.space.setSurfaceType(geom3, 0)
self.space.setCollideId(geom3, 13)
geom4 = OdeBoxGeom(space, boxsize2)
geom4.setBody(body)
geom4.setOffsetPosition(-ly * 0.25, -latSlide, 0)
self.space.setSurfaceType(geom4, 0)
self.space.setCollideId(geom4, 13)
self.massList.append(M)
self.geomList.append(geom)
self.geomList.append(geom2)
self.geomList.append(geom3)
self.geomList.append(geom4)
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(251658240))
geom2.setCategoryBits(BitMask32(0))
geom3.setCollideBits(BitMask32(251658240))
geom3.setCategoryBits(BitMask32(0))
geom4.setCollideBits(BitMask32(251658240))
geom4.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(0))
geom2.setCategoryBits(BitMask32(0))
geom3.setCollideBits(BitMask32(0))
geom3.setCategoryBits(BitMask32(0))
geom4.setCollideBits(BitMask32(0))
geom4.setCategoryBits(BitMask32(0))
if self.canRender:
someNodePathGeom = render.attachNewNode('pinwheel')
if attachedGeo:
attachedGeo.reparentTo(someNodePathGeom)
attachedGeo.setHpr(aHPR[0], aHPR[1], aHPR[2])
attachedGeo.setPos(aPos[0], aPos[1], aPos[2])
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom.setPos(-latSlide, ly * 0.25, 0)
boxNodePathGeom2, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, ly * 0.5, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom2.setPos(ly * 0.25, latSlide, 0)
boxNodePathGeom3, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom3.setPos(latSlide, -ly * 0.25, 0)
boxNodePathGeom4, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, ly * 0.5, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom4.setPos(-ly * 0.25, -latSlide, 0)
self.odePandaRelationList.append((someNodePathGeom, body))
else:
someNodePathGeom = None
self.bodyList.append((None, body))
return (someNodePathGeom, body)
def createPinWheel(self, world, space, density, lx, ly, lz, numBoxes, disV, disH, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None, offRot = 0):
body = OdeBody(self.world)
M = OdeMass()
M.setBox(density, lx, ly, lz)
body.setMass(M)
body.setFiniteRotationMode(1)
boxsize = Vec3(lx, ly * 0.5, lz)
boxsize2 = Vec3(ly * 0.5, lx, lz)
self.massList.append(M)
self.placerNode.setPos(0, 0, 0)
self.placerNode.setHpr(0, 0, 0)
self.subPlacerNode.setHpr(0, 0, 0)
self.subPlacerNode.setPos(disH, disV, 0)
if self.canRender:
someNodePathGeom = render.attachNewNode('pinwheel')
else:
someNodePathGeom = self.root.attachNewNode('pinwheel')
for num in xrange(numBoxes):
spin = 360.0 * float(num) / float(numBoxes) + float(offRot)
self.placerNode.setH(spin)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
geom.setOffsetPosition(self.subPlacerNode.getPos(self.root))
geom.setOffsetQuaternion(self.subPlacerNode.getQuat(self.root))
self.geomList.append(geom)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 13)
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
if not attachedGeo:
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom.setPos(self.subPlacerNode.getPos(self.root))
boxNodePathGeom.setHpr(self.subPlacerNode.getHpr(self.root))
if attachedGeo and self.canRender:
attachedGeo.reparentTo(someNodePathGeom)
attachedGeo.setHpr(aHPR[0], aHPR[1], aHPR[2])
attachedGeo.setPos(aPos[0], aPos[1], aPos[2])
if self.canRender:
self.odePandaRelationList.append((someNodePathGeom, body))
else:
someNodePathGeom = None
self.bodyList.append((None, body))
return (someNodePathGeom, body)
def attachMarker(self, body):
if self.canRender:
testMarker = render.attachNewNode('Joint Marker')
ballmodel = loader.loadModel('models/misc/sphere')
ballmodel.reparentTo(testMarker)
ballmodel.setScale(0.25)
testMarker.setPos(0.0, 0.0, -100.0)
self.odePandaRelationList.append((testMarker, body))
|
|
"""Extract reference documentation from the NumPy source tree.
"""
import inspect
import textwrap
import re
import pydoc
from warnings import warn
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
    except ImportError:
from io import StringIO
class Reader(object):
"""A line-based string reader.
"""
def __init__(self, data):
"""
Parameters
----------
data : str
           String with lines separated by '\\n'.
"""
if isinstance(data, list):
self._str = data
else:
self._str = data.split('\n') # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ''
def seek_next_non_empty_line(self):
for l in self[self._l:]:
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start:self._l]
self._l += 1
if self.eof():
return self[start:self._l + 1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
def peek(self, n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ''
def is_empty(self):
return not ''.join(self._str).strip()
class NumpyDocString(object):
def __init__(self, docstring, config={}):
docstring = textwrap.dedent(docstring).split('\n')
self._doc = Reader(docstring)
self._parsed_data = {
'Signature': '',
'Summary': [''],
'Extended Summary': [],
'Parameters': [],
'Returns': [],
'Raises': [],
'Warns': [],
'Other Parameters': [],
'Attributes': [],
'Methods': [],
'See Also': [],
'Notes': [],
'Warnings': [],
'References': '',
'Examples': '',
'index': {}
}
self._parse()
def __getitem__(self, key):
return self._parsed_data[key]
def __setitem__(self, key, val):
if key not in self._parsed_data:
warn("Unknown section %s" % key)
else:
self._parsed_data[key] = val
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith('.. index::'):
return True
l2 = self._doc.peek(1).strip() # ---------- or ==========
return l2.startswith('-' * len(l1)) or l2.startswith('=' * len(l1))
def _strip(self, doc):
i = 0
j = 0
for i, line in enumerate(doc):
if line.strip():
break
for j, line in enumerate(doc[::-1]):
if line.strip():
break
return doc[i:len(doc) - j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += ['']
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
else:
yield name, self._strip(data[2:])
def _parse_param_list(self, content):
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
params.append((arg_name, arg_type, desc))
return params
_name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
g = m.groups()
if g[1] is None:
return g[3], None
else:
return g[2], g[1]
raise ValueError("%s is not a item name" % text)
def push_item(name, rest):
if not name:
return
name, role = parse_item_name(name)
items.append((name, list(rest), role))
del rest[:]
current_func = None
rest = []
for line in content:
if not line.strip():
continue
m = self._name_rgx.match(line)
if m and line[m.end():].strip().startswith(':'):
push_item(current_func, rest)
current_func, line = line[:m.end()], line[m.end():]
rest = [line.split(':', 1)[1].strip()]
if not rest[0]:
rest = []
elif not line.startswith(' '):
push_item(current_func, rest)
current_func = None
if ',' in line:
for func in line.split(','):
push_item(func, [])
elif line.strip():
current_func = line
elif current_func is not None:
rest.append(line.strip())
push_item(current_func, rest)
return items
def _parse_index(self, section, content):
"""
.. index: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split('::')
if len(section) > 1:
out['default'] = strip_each_in(section[1].split(','))[0]
for line in content:
line = line.split(':')
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(','))
return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
if self._is_at_section():
return
summary = self._doc.read_to_next_empty_line()
summary_str = " ".join([s.strip() for s in summary]).strip()
if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
self['Signature'] = summary_str
if not self._is_at_section():
self['Summary'] = self._doc.read_to_next_empty_line()
else:
self['Summary'] = summary
if not self._is_at_section():
self['Extended Summary'] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
self._parse_summary()
for (section, content) in self._read_sections():
if not section.startswith('..'):
section = ' '.join([s.capitalize()
for s in section.split(' ')])
if section in ('Parameters', 'Attributes', 'Methods',
'Returns', 'Raises', 'Warns'):
self[section] = self._parse_param_list(content)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
elif section == 'See Also':
self['See Also'] = self._parse_see_also(content)
else:
self[section] = content
# string conversion routines
def _str_header(self, name, symbol='-'):
return [name, len(name) * symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
if self['Signature']:
return [self['Signature'].replace('*', r'\*')] + ['']
else:
return ['']
def _str_summary(self):
if self['Summary']:
return self['Summary'] + ['']
else:
return []
def _str_extended_summary(self):
if self['Extended Summary']:
return self['Extended Summary'] + ['']
else:
return []
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
for param, param_type, desc in self[name]:
out += ['%s : %s' % (param, param_type)]
out += self._str_indent(desc)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += ['']
return out
def _str_see_also(self, func_role):
if not self['See Also']:
return []
out = []
out += self._str_header("See Also")
last_had_desc = True
for func, desc, role in self['See Also']:
if role:
link = ':%s:`%s`' % (role, func)
elif func_role:
link = ':%s:`%s`' % (func_role, func)
else:
link = "`%s`_" % func
if desc or last_had_desc:
out += ['']
out += [link]
else:
out[-1] += ", %s" % link
if desc:
out += self._str_indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
return out
def __str__(self, func_role=''):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_section('Warnings')
out += self._str_see_also(func_role)
for s in ('Notes', 'References', 'Examples'):
out += self._str_section(s)
for param_list in ('Attributes', 'Methods'):
out += self._str_param_list(param_list)
out += self._str_index()
return '\n'.join(out)
def indent(str, indent=4):
indent_str = ' ' * indent
if str is None:
return indent_str
lines = str.split('\n')
return '\n'.join(indent_str + l for l in lines)
def dedent_lines(lines):
"""Deindent a list of lines maximally"""
return textwrap.dedent("\n".join(lines)).split("\n")
def header(text, style='-'):
return text + '\n' + style * len(text) + '\n'
class FunctionDoc(NumpyDocString):
def __init__(self, func, role='func', doc=None, config={}):
self._f = func
self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
raise ValueError("No function or docstring given")
doc = inspect.getdoc(func) or ''
NumpyDocString.__init__(self, doc)
if not self['Signature'] and func is not None:
func, func_name = self.get_func()
try:
# try to read signature
argspec = inspect.getargspec(func)
argspec = inspect.formatargspec(*argspec)
argspec = argspec.replace('*', r'\*')
signature = '%s%s' % (func_name, argspec)
except TypeError as e:
signature = '%s()' % func_name
self['Signature'] = signature
def get_func(self):
func_name = getattr(self._f, '__name__', self.__class__.__name__)
if inspect.isclass(self._f):
func = getattr(self._f, '__call__', self._f.__init__)
else:
func = self._f
return func, func_name
def __str__(self):
out = ''
func, func_name = self.get_func()
signature = self['Signature'].replace('*', r'\*')
roles = {'func': 'function',
'meth': 'method'}
if self._role:
if self._role not in roles:
print("Warning: invalid role %s" % self._role)
out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''),
func_name)
out += super(FunctionDoc, self).__str__(func_role=self._role)
return out
class ClassDoc(NumpyDocString):
def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
config=None):
if not inspect.isclass(cls) and cls is not None:
raise ValueError("Expected a class or None, but got %r" % cls)
self._cls = cls
if modulename and not modulename.endswith('.'):
modulename += '.'
self._mod = modulename
if doc is None:
if cls is None:
raise ValueError("No class or documentation string given")
doc = pydoc.getdoc(cls)
NumpyDocString.__init__(self, doc)
if config is not None and config.get('show_class_members', True):
if not self['Methods']:
self['Methods'] = [(name, '', '')
for name in sorted(self.methods)]
if not self['Attributes']:
self['Attributes'] = [(name, '', '')
for name in sorted(self.properties)]
@property
def methods(self):
if self._cls is None:
return []
return [name for name, func in inspect.getmembers(self._cls)
if not name.startswith('_') and callable(func)]
@property
def properties(self):
if self._cls is None:
return []
return [name for name, func in inspect.getmembers(self._cls)
if not name.startswith('_') and func is None]
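# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). Assuming this module
# is importable as `docscrape` and that NumpyDocString.__init__ (defined above
# this excerpt) accepts a raw docstring, typical use looks like:
#
#     from docscrape import NumpyDocString, FunctionDoc
#
#     nds = NumpyDocString(some_function.__doc__)
#     nds['Parameters']   # list of (name, type, description) tuples
#     nds['See Also']     # list of (name, description, role) tuples
#     str(nds)            # re-rendered docstring built by the _str_* helpers
#
#     FunctionDoc(some_function)  # additionally derives a Signature via inspect
#
# `some_function` is a placeholder here, not a name from this module.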
|
|
# -*- coding: utf-8 -*-
from os import path
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except ImportError:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.storage import Storage
from s3 import *
THEME = "DRRPP"
# =============================================================================
class index():
""" Custom Home Page """
def __call__(self):
request = current.request
response = current.response
view = path.join(request.folder, "private", "templates",
THEME, "views", "index.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
# Show full width instead of login box if user is logged in
if current.auth.is_logged_in():
grid = "grid_12"
else:
grid = "grid_8"
latest_projects = DIV(_id="front-latest-body",
_class="%s alpha" % grid)
lappend = latest_projects.append
db = current.db
s3db = current.s3db
table = s3db.project_project
table_drrpp = s3db.project_drrpp
query = (table.deleted != True) & \
(table.approved_by != None)
rows = db(query).select(table.id,
table.name,
table_drrpp.activities,
table.organisation_id,
table.start_date,
left=table_drrpp.on(table.id == table_drrpp.project_id),
limitby=(0, 3))
project_ids = [r.project_project.id for r in rows]
ltable = s3db.project_location
gtable = s3db.gis_location
query = (ltable.deleted != True) & \
(ltable.project_id == table.id) & \
(gtable.id == ltable.location_id) & \
(gtable.level == "L0")
locations = db(query).select(ltable.project_id,
gtable.L0)
odd = True
for row in rows:
countries = [l.gis_location.L0 for l in locations if l.project_location.project_id == row.project_project.id]
location = ", ".join(countries)
if odd:
_class = "front-latest-item odd %s alpha" % grid
else:
_class = "front-latest-item even %s alpha" % grid
card = DIV(DIV(A(row.project_project.name,
_href=URL(c="project", f="project", args=[row.project_project.id])),
_class="front-latest-title %s" % grid,
),
DIV("Lead Organization: %s" % s3db.org_organisation_represent(row.project_project.organisation_id),
_class="front-latest-desc %s" % grid,
),
DIV(SPAN("Start Date: %s" % row.project_project.start_date,
_class="front-latest-info-date"),
SPAN("Countries: %s" % location,
_class="front-latest-info-location"),
_class="front-latest-info %s" % grid,
),
DIV(row.project_drrpp.activities or "",
_class="front-latest-desc %s" % grid,
),
_class=_class,
)
lappend(card)
odd = not odd
login = current.auth.login(inline=True)
appname = request.application
s3 = response.s3
if current.session.s3.debug:
s3.scripts.append("/%s/static/themes/DRRPP/js/slides.jquery.js" % appname)
else:
s3.scripts.append("/%s/static/themes/DRRPP/js/slides.min.jquery.js" % appname)
s3.jquery_ready.append('''
$('#slides').slides({
play:8000,
animationStart:function(current){
$('.caption').animate({
bottom:-35
},100);
},
animationComplete:function(current){
$('.caption').animate({
bottom:0
},200);
},
slidesLoaded:function() {
$('.caption').animate({
bottom:0
},200);
}
})''')
return dict(title = "Home",
form = login,
latest_projects = latest_projects,
)
# =============================================================================
class register():
""" Custom Registration Page """
def __call__(self):
request = current.request
response = current.response
view = path.join(request.folder, "private", "templates",
THEME, "views", "register.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
T = current.T
auth = current.auth
_settings = auth.settings
# Default the profile language to the one currently active
table = _settings.table_user
table.language.default = T.accepted_language
# Combo box for Organisation
table.organisation_id.widget = S3OrganisationAutocompleteWidget()
table.organisation_id.requires = IS_COMBO_BOX("org_organisation",
current.s3db.org_organisation_id.attr.requires)
# Custom onaccept to process custom fields
_settings.register_onaccept = register_onaccept
# Build the registration form
form = auth.register(js_validation=False)
# Set the formstyle
# @ToDo: Update to the fact that Auth now uses formstyle & use s3_addrow to add new rows
_form = form[0]
_form[-1] = TR(TD(_class="w2p_fl"),
TD(_class="w2p_fc"),
TD(INPUT(_type="submit",
_value=T("Register")),
_class="w2p_fw"),
_id="submit_record_row"
)
_form[0] = TR(TD(SPAN(" *", _class="req"),
_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("First Name")),
_id="auth_user_first_name__label",
_for="auth_user_first_name"),
_class="w2p_fc"),
TD(INPUT(_id="auth_user_first_name",
_class="string",
_type="text",
_name="first_name",
_size="62"),
_class="w2p_fw"),
_id="auth_user_first_name_row"
)
_form[1] = TR(TD(SPAN(" *", _class="req"),
_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("Last Name")),
_id="auth_user_last_name__label",
_for="auth_user_last_name"),
_class="w2p_fc"),
TD(INPUT(_id="auth_user_last_name",
_class="string",
_type="text",
_name="last_name",
_size="62"),
_class="w2p_fw"),
_id="auth_user_last_name_row"
)
_form[2] = TR(TD(_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("Organization")),
_id="auth_user_organisation_id__label",
_for="auth_user_organisation_id"),
_class="w2p_fc"),
TD(form.custom.widget.organisation_id,
_class="w2p_fw"),
_id="auth_user_organisation_id_row"
)
_form[3] = TR(TD(SPAN(" *", _class="req"),
_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("E-Mail")),
_id="auth_user_email__label",
_for="auth_user_email"),
_class="w2p_fc"),
TD(INPUT(_id="auth_user_email",
_class="string",
_type="text",
_name="email",
_size="62"),
_class="w2p_fw"),
_id="auth_user_email_row"
)
_form[4] = TR(TD(SPAN(" *", _class="req"),
_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("Password")),
_id="auth_user_password__label",
_for="auth_user_password"),
_class="w2p_fc"),
TD(INPUT(_id="auth_user_password",
_type="password",
_name="password",
_class="password",
),
_class="w2p_fw"),
_id="auth_user_password_row"
)
_form[5] = TR(TD(SPAN(" *", _class="req"),
_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("Verify Password")),
_id="auth_user_password_two__label",
_for="auth_user_password_two"),
_class="w2p_fc"),
TD(INPUT(_id="auth_user_password_two",
_type="password",
_name="password_two",
_class="password",
),
_class="w2p_fw"),
_id="auth_user_password_two_row"
)
# Add custom fields
append = _form[2].append
append(
TR(TD(SPAN(" *", _class="req"),
_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("Role")),
_id="auth_user_position__label",
_for="auth_user_position"),
_class="w2p_fc"),
TD(SELECT(OPTION(_value=""),
OPTION(T("Practitioner"),
_value="1"),
OPTION(T("Consultant"),
_value="2"),
OPTION(T("Researcher"),
_value="3"),
OPTION(T("Academic"),
_value="4"),
OPTION(T("Student"),
_value="5"),
_name="position",
_id="auth_user_position",
_class="integer"
),
_class="w2p_fw"),
_id="auth_user_position_row"
)
)
append(
TR(TD(SPAN(" *", _class="req"),
DIV(_rel="If you do not specify an organisation, please enter your reason for using the DRR Project Portal.",
_class="labeltip"),
_class="w2p_fl"),
TD(LABEL(DIV("%s: " % T("Reason")),
_id="auth_user_reason__label",
_for="auth_user_reason"),
_class="w2p_fc"),
TD(TEXTAREA(_id="auth_user_reason",
_class="text",
_name="reason",
_rows="10",
_cols="50",
),
_class="w2p_fw"),
_id="auth_user_reason_row"
)
)
# Add client-side validation
s3 = response.s3
appname = request.application
if s3.debug:
s3.scripts.append("/%s/static/scripts/jquery.pstrength.2.1.0.js" % appname)
s3.scripts.append("/%s/static/scripts/jquery.validate.js" % appname)
else:
s3.scripts.append("/%s/static/scripts/jquery.pstrength.2.1.0.min.js" % appname)
s3.scripts.append("/%s/static/scripts/jquery.validate.min.js" % appname)
s3.jquery_ready.append("".join(('''
$('.auth_register').validate({
errorClass:'req',
rules:{
first_name:{
required:true
},
last_name:{
required:true
},
position:{
required:true,
},
reason:{
required:true,
},
email:{
required:true,
email:true
},
password:{
required:true
},
password_two:{
required:true,
equalTo:'.password:first'
}
},
messages:{
first_name:"''', str(T("Enter your first name")), '''",
last_name:"''', str(T("Enter your last name")), '''",
position:"''', str(T("Select your role")), '''",
reason:"''', str(T("Enter a reason")), '''",
password:{
required:"''', str(T("Provide a password")), '''"
},
password_two:{
required:"''', str(T("Repeat your password")), '''",
equalTo:"''', str(T("Enter the same password as above")), '''"
},
email:{
required:"''', str(T("Please enter a valid email address")), '''",
email:"''', str(T("Please enter a valid email address")), '''"
}
},
errorPlacement:function(error,element){
error.appendTo(element.parent())
},
submitHandler:function(form){
form.submit()
}
})
$('.password:first').pstrength({minchar:''', str(_settings.password_min_length), ''',minchar_label:"''', str(T("The minimum number of characters is ")), '''"})
$('.labeltip').cluetip({activation:'hover',position:'mouse',sticky:false,showTitle:false,local:true})''')))
response.title = T("DRRPP - Register")
return dict(form=form)
# -----------------------------------------------------------------------------
def register_onaccept(form):
""" Tasks to be performed after a new user registers """
# Process Custom Fields
vars = form.request_vars
position = vars.get("position", "")
reason = vars.get("reason", "")
id = form.vars.id
db = current.db
table = db.auth_user
db(table.id == form.vars.id).update(
comments = "%s | %s" % (position, reason)
)
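# (Hedged sketch) e.g. position="3" and reason="research access" leave the new
# auth_user record with comments == "3 | research access".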
# =============================================================================
class contact():
""" Contact Form """
def __call__(self):
request = current.request
response = current.response
view = path.join(request.folder, "private", "templates",
THEME, "views", "contact.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
if request.env.request_method == "POST":
# Process Form
vars = request.post_vars
result = current.msg.send_email(
#to=current.deployment_settings.get_mail_approver(),
to="[email protected]",
subject=vars.subject,
message=vars.message,
reply_to=vars.address,
)
if result:
response.confirmation = "Thank you for your message - we'll be in touch shortly"
#T = current.T
# form = FORM(TABLE(
# TR(LABEL("Your name:",
# SPAN(" *", _class="req"),
# _for="name")),
# TR(INPUT(_name="name", _type="text", _size=62, _maxlength="255")),
# TR(LABEL("Your e-mail address:",
# SPAN(" *", _class="req"),
# _for="address")),
# TR(INPUT(_name="address", _type="text", _size=62, _maxlength="255")),
# TR(LABEL("Subject:",
# SPAN(" *", _class="req"),
# _for="subject")),
# TR(INPUT(_name="subject", _type="text", _size=62, _maxlength="255")),
# TR(LABEL("Message:",
# SPAN(" *", _class="req"),
# _for="name")),
# TR(TEXTAREA(_name="message", _class="resizable", _rows=5, _cols=62)),
# TR(INPUT(_type="submit", _value="Send e-mail")),
# ),
# _id="contact-form"
# )
s3 = response.s3
if s3.cdn:
if s3.debug:
s3.scripts.append("http://ajax.aspnetcdn.com/ajax/jquery.validate/1.9/jquery.validate.js")
else:
s3.scripts.append("http://ajax.aspnetcdn.com/ajax/jquery.validate/1.9/jquery.validate.min.js")
else:
if s3.debug:
s3.scripts.append("/%s/static/scripts/jquery.validate.js" % request.application)
else:
s3.scripts.append("/%s/static/scripts/jquery.validate.min.js" % request.application)
s3.jquery_ready.append(
'''$('#contact-form').validate({
errorClass:'req',
rules:{
name:{
required:true
},
subject:{
required:true
},
message:{
required:true
},
address: {
required:true,
email:true
}
},
messages:{
name:"Enter your name",
subject:"Enter a subject",
message:"Enter a message",
address:{
required:"Please enter a valid email address",
email:"Please enter a valid email address"
}
},
errorPlacement:function(error,element){
error.appendTo(element.parents('tr').prev().children())
},
submitHandler:function(form){
form.submit()
}
})''')
response.title = "Contact | DRR Project Portal"
return dict(
#form=form
)
# =============================================================================
class about():
"""
Custom About page
"""
def __call__(self):
response = current.response
request = current.request
T = current.T
view = path.join(request.folder, "private", "templates",
THEME, "views", "about.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
response.title = T("About")
return dict(
title=T("About"),
)
# =============================================================================
class admin():
"""
Custom Admin Index Page
"""
def __call__(self):
auth = current.auth
s3_has_role = auth.s3_has_role
system_roles = auth.get_system_roles()
ADMIN = system_roles.ADMIN
ORG_ADMIN = system_roles.ORG_ADMIN
if s3_has_role(ADMIN) or s3_has_role(ORG_ADMIN):
response = current.response
request = current.request
T = current.T
view = path.join(request.folder, "private", "templates",
THEME, "views", "admin.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
response.title = T("Administration Panel")
panel_list = [A(T("Verify Users"),
_href = URL(c="admin", f = "user")
),
A(T("User List (Excel)"),
_href = URL(c="admin", f = "user.xls")
),
A(T("Manage Administrators"),
_href = URL(c="admin", f = "role", args = [1,"users"])
),
A(T("Manage Organization Contacts"),
_href = URL(c="admin", f = "role", args = [6,"users"])
),
A(T("Manage Organizations"),
_href = URL(c="org", f = "organisation")
),
A(T("Approve Projects"),
_href = URL(c="project", f = "project", args = "review")
),
A(T("Approve Frameworks"),
_href = URL(c="project", f = "framework", args = "review")
),
A(T("Approve Organisations"),
_href = URL(c="org", f = "organisation", args = "review")
),
A(T("Edit Countries and Administrative Areas"),
_href = URL(c="gis", f = "location")
),
A(T("Edit Hazards"),
_href = URL(c="project", f = "hazard")
),
A(T("Edit Themes"),
_href = URL(c="project", f = "theme")
),
]
return dict(item = UL(*panel_list,
_id = "admin_panel_list") )
else:
redirect(URL(c="default", f="index"))
# =============================================================================
class analysis():
"""
Custom page for Project Analysis
"""
def __call__(self):
response = current.response
request = current.request
T = current.T
view = path.join(request.folder, "private", "templates",
THEME, "views", "analysis.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
response.title = T("Project Analysis")
return dict(
title=T("Project Analysis"),
)
# =============================================================================
class get_started():
"""
Custom page
"""
def __call__(self):
response = current.response
request = current.request
T = current.T
view = path.join(request.folder, "private", "templates",
THEME, "views", "get_started.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
response.title = T("Get Started")
return dict(
)
# =============================================================================
class login():
"""
Custom Login page
"""
def __call__(self):
response = current.response
request = current.request
T = current.T
view = path.join(request.folder, "private", "templates",
THEME, "views", "login.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
response.title = T("Login")
return dict(
form = current.auth.login()
)
# =============================================================================
class mypage():
"""
Custom page for a User to manage their Saved Search & Subscriptions
@todo: SavedSearch deprecated,
re-implement with saved filters / S3Notify
"""
def __call__(self):
auth = current.auth
#if not auth.is_logged_in():
response = current.response
request = current.request
T = current.T
view = path.join(request.folder, "private", "templates",
THEME, "views", "mypage.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
response.title = T("My Page")
return dict(
title=T("My Page"),
)
#else:
# person_id = auth.s3_logged_in_person()
# redirect(URL(c="pr", f="person", args=[person_id, "saved_search"]))
# =============================================================================
class organisations():
"""
Custom page to show 2 dataTables on a single page:
* Regional Organisations
* Committees, Forums, Mechanism, Meetings and Networks
"""
def __call__(self):
#T = current.T
request = current.request
response = current.response
response.title = "DRR Projects Portal - Regional Organizations"
view = path.join(request.folder, "private", "templates",
THEME, "views", "organisations.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
s3 = response.s3
s3["dataTable_sDom"] = 'ripl<"dataTable_table"t>p'
tables = []
table = request.vars.get("table", None)
if table is None:
# HTML call
if s3.debug:
append = s3.scripts.append
appname = request.application
append("/%s/static/scripts/jquery.dataTables.js" % appname)
append("/%s/static/scripts/jquery.dataTables.fnSetFilteringDelay.js" % appname)
append("/%s/static/scripts/jquery.dataTables.sortFunctions.js" % appname)
append("/%s/static/scripts/S3/s3.dataTables.multi.js" % appname)
else:
s3.scripts.append("/%s/static/scripts/S3/s3.dataTables.multi.min.js" % request.application)
s3.js_global.append('''S3.dataTablesInstances=[]''')
s3request, list_fields = self._regional()
tables.append(self._table("regional", s3request.resource, list_fields))
s3request, list_fields = self._groups()
tables.append(self._table("groups", s3request.resource, list_fields))
else:
# AJAX call
if table == "groups":
s3request, list_fields = self._groups()
elif table == "regional":
s3request, list_fields = self._regional()
current.s3db.configure(s3request.resource.tablename,
list_fields = list_fields)
return s3request()
return dict(tables=tables)
# -------------------------------------------------------------------------
@staticmethod
def _regional():
"""
Regional Organisations
- Filtered subset of Organisations
"""
T = current.T
s3request = s3_request("org", "organisation", extension="aadata")
# (FS("project.id") != None) & \
f = (FS("organisation_type.name").anyof(["Regional Organisation",
"Regional Office",
"Regional Center"]))
s3request.resource.add_filter(f)
list_fields = ["id",
"name",
"acronym",
(T("Type"), "organisation_organisation_type.organisation_type_id"),
"website",
"region_id",
"year",
(T("Notes"), "comments"),
]
return (s3request, list_fields)
# -------------------------------------------------------------------------
@staticmethod
def _groups():
"""
Committees/Mechanisms/Forums & Networks
- Filtered subset of Organisations
"""
T = current.T
s3db = current.s3db
table = s3db.org_organisation
table.address = Field.Method("address",
s3db.org_organisation_address)
s3request = s3_request("org", "organisation", extension="aadata")
#(FS("project.id") != None) & \
f = (FS("organisation_type.name").anyof(["Committees/Mechanism/Forum",
"Network"]))
s3request.resource.add_filter(f)
list_fields = ["id",
"name",
"acronym",
(T("Type"), "organisation_organisation_type.organisation_type_id"),
"year",
(T("Address"), "address"),
(T("Notes"), "comments"),
]
return (s3request, list_fields)
# -------------------------------------------------------------------------
@staticmethod
def _table(name, resource, field_list, limit=10, orderby="name"):
""" Generate a datatable in the organisations custom page """
data = resource.select(field_list,
limit=None,
orderby=orderby,
count=True,
represent=True)
rfields = data["rfields"]
records = data["rows"]
numrows = len(records)
rows = []
cols = []
for rfield in rfields:
colname = rfield.colname
cols.append({"name": colname, "label": rfield.label})
for i in xrange(numrows):
if len(rows) == i:
rows.append([])
rows[i].append(records[i][colname])
options = json.dumps({
"iDisplayLength": limit,
"iDeferLoading": data["numrows"],
"bProcessing": True,
#"bServerSide": True,
#"sAjaxSource": "/%s/default/index/organisations/?table=%s" % (current.request.application, name),
"aoColumnDefs": [{"bVisible": False,
"aTargets": [0]
}],
"aoColumns": [{"sName": col["name"]} for col in cols],
"sDom": 'rifpl<"dataTable_table"t>p'
})
script = '''S3.dataTablesInstances.push({'options':%s})''' % XML(options)
current.response.s3.js_global.append(script)
table = Storage(cols=cols,
rows=rows,
#options=options,
)
return table
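# (Hedged sketch, column names are indicative only) The Storage returned for
# the "regional" table above looks roughly like
#   Storage(cols=[{"name": "org_organisation.id", "label": "Id"}, ...],
#           rows=[[1, "Some Org", ...], ...])
# while the matching options dict is pushed onto S3.dataTablesInstances for
# s3.dataTables.multi.js to initialise on the client.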
# END =========================================================================
|
|
import argparse
import asyncio
import logging.config
import multiprocessing.connection
import operator
import os
import signal
import sys
import time
from functools import partial, reduce
from pathlib import Path
from urllib.parse import splittype # type: ignore
from urllib.request import urlopen
from . import utils
from .core import command
from .core.config import Config
from .core.context import Context, GroupResolver
from .core.plugin import Plugin, search_plugins
parser = argparse.ArgumentParser(prefix_chars='-+')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
'+g',
'++groups',
nargs='+',
action='append',
metavar='GROUP',
help='Run groups',
)
group.add_argument(
'-g',
'--groups',
nargs='*',
action='append',
dest='exclude_groups',
metavar='GROUP',
help='Run all groups except the listed ones',
)
parser.add_argument('--multiprocessing', action='store_true')
parser.add_argument('-i', '--interact', action='store_true')
parser.add_argument('-I', '--interact-kernel', action='store_true')
parser.add_argument('-l', '--logging', help='logging level')
parser.add_argument('--shutdown-timeout', type=float, default=60)
PROMPT = "======== Running aioworkers ========\n" \
"(Press CTRL+C to quit)"
class PidFileType(argparse.FileType):
def __call__(self, string):
f = super().__call__(string)
with f:
f.write(str(os.getpid()))
return f
parser.add_argument(
'--pid-file',
help='Process ID file',
type=PidFileType('w'),
)
try:
import uvloop
except ImportError:
pass
else:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
context = Context(Config())
def main(
*config_files,
args=None,
config_dirs=(),
commands=(),
config_dict=None,
):
cwd = os.getcwd()
if cwd not in sys.path:
sys.path.insert(0, cwd)
context.config.search_dirs.extend(config_dirs)
if not commands:
p = Path(sys.argv[0])
if __package__ in (p.parent.name, p.name):
commands += (__name__,)
elif p.name.startswith('__'):
commands += (p.parent.name,)
else:
commands += (p.name,)
plugins = search_plugins()
plugins.extend(search_plugins(*commands, force=True))
for i in plugins:
i.add_arguments(parser)
if args is None:
args, argv = parser.parse_known_args()
cmds = list(commands)
while argv and not argv[0].startswith('-'):
cmds.append(argv.pop(0))
if getattr(args, 'config', None):
config_files += tuple(args.config)
if getattr(args, 'config_stdin', None):
assert (
not args.interact
), 'Can not be used --config-stdin with --interact'
config_dict = utils.load_from_fd(sys.stdin.buffer)
if args.logging:
logging.basicConfig(level=args.logging.upper())
else:
cmds, argv = list(commands), []
config = context.config
plugins.extend(search_plugins(*cmds))
for p in plugins:
args, argv = p.parse_known_args(args=argv, namespace=args)
config.load(*p.configs)
config.update(p.get_config())
cmds = [cmd for cmd in cmds if cmd not in sys.modules]
config.load(*config_files)
config_dict and config.update(config_dict)
def sum_g(list_groups):
if list_groups:
return set(reduce(operator.add, list_groups))
run = partial(
loop_run,
group_resolver=GroupResolver(
include=sum_g(args.groups),
exclude=sum_g(args.exclude_groups),
all_groups=args.exclude_groups is not None,
default=True,
),
cmds=cmds,
argv=argv,
ns=args,
prompt=PROMPT,
)
try:
if args.multiprocessing:
with context.processes():
print(PROMPT)
logger = multiprocessing.get_logger()
processes = process_iter(config.get('processes', {}))
for p in processes:
logger.info('Create process %s', p['name'])
p['process'] = create_process(p)
while True:
multiprocessing.connection.wait(
map(lambda x: x['process'].sentinel, processes),
)
for p in processes:
proc = p['process'] # type: multiprocessing.Process
if not proc.is_alive():
logger.critical('Recreate process %s', p['name'])
p['process'] = create_process(p)
time.sleep(1)
elif args.interact:
from .core.interact import shell
args.print = lambda *args: None
os.environ['AIOWORKERS_MODE'] = 'console'
shell(run)
elif args.interact_kernel:
from .core.interact import kernel
kernel(run)
else:
run()
except KeyboardInterrupt:
pass
finally:
for p in multiprocessing.active_children():
os.kill(p.pid, signal.SIGTERM)
t = time.monotonic()
sentinels = [p.sentinel for p in multiprocessing.active_children()]
while sentinels and time.monotonic() - t < args.shutdown_timeout:
multiprocessing.connection.wait(sentinels)
sentinels = [p.sentinel for p in multiprocessing.active_children()]
while multiprocessing.active_children():
print('killall children')
for p in multiprocessing.active_children():
os.kill(p.pid, signal.SIGKILL)
time.sleep(0.3)
def process_iter(cfg, cpus=os.cpu_count() or 1):
result = []
for k, v in cfg.items():
if 'count' in v or 'cpus' in v:
c = v.get_int('count', 0) + int(cpus * v.get_float('cpus', 0))
for i in range(c):
result.append(
{
'name': '{}-{}'.format(k, i),
'groups': v.get('groups', ()),
}
)
else:
result.append(
{
'name': k,
'groups': v.get('groups', ()),
}
)
return result
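# (Hedged sketch, assuming aioworkers-style Config values that expose
# get_int/get_float/get) a processes section such as
#   {'web': {'count': 2, 'groups': ['http']}, 'worker': {'groups': []}}
# yields
#   [{'name': 'web-0', 'groups': ['http']},
#    {'name': 'web-1', 'groups': ['http']},
#    {'name': 'worker', 'groups': []}]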
def create_process(cfg):
p = multiprocessing.Process(
target=loop_run,
kwargs=dict(
group_resolver=GroupResolver(
include=set(cfg['groups']),
exclude=set(),
all_groups=False,
default=True,
),
process_name=cfg['name'],
),
name=cfg['name'],
daemon=True,
)
p.start()
return p
def loop_run(
conf=None,
future=None,
group_resolver=None,
ns=None,
cmds=None,
argv=None,
loop=None,
prompt=None,
process_name=None,
):
if process_name:
utils.setproctitle(process_name)
utils.random_seed()
if loop is None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if conf:
context.set_config(conf)
if loop is not None:
context.set_loop(loop)
if group_resolver is not None:
context.set_group_resolver(group_resolver)
if not cmds:
cmds = ['run_forever']
prompt and print(prompt)
argv = argv or []
ns = ns or argparse.Namespace()
async def shutdown():
await context.__aexit__(None, None, None)
if hasattr(loop, 'shutdown_asyncgens'):
await loop.shutdown_asyncgens()
loop.stop()
with utils.monkey_close(loop), context:
context.loop.add_signal_handler(
signal.SIGTERM, lambda *args: loop.create_task(shutdown())
)
if future is not None:
future.set_result(context)
for cmd in cmds:
try:
result = command.run(cmd, context, argv=argv, ns=ns)
except command.CommandNotFound:
print('Command {} not found'.format(cmd))
continue
if result is not None:
print('{} => {}'.format(cmd, result))
if not loop.is_closed() and hasattr(loop, 'shutdown_asyncgens'):
loop.run_until_complete(loop.shutdown_asyncgens())
class UriType(argparse.FileType):
def __call__(self, string):
t, path = splittype(string)
if not t:
return super().__call__(string)
return urlopen(string)
class ExtendAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
getattr(namespace, self.dest).extend(values)
class plugin(Plugin):
def add_arguments(self, parser):
default = []
parser.set_defaults(config=default)
parser.add_argument(
'-c',
'--config',
nargs='+',
action=ExtendAction,
type=UriType('r', encoding='utf-8'),
)
parser.add_argument('--config-stdin', action='store_true')
def main_with_conf(*args, **kwargs):
import warnings
warnings.warn(
'Deprecated main_with_conf, use main',
DeprecationWarning,
stacklevel=2,
)
main(*args, **kwargs)
if __name__ == '__main__':
main()
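# Hedged usage sketch, based only on the arguments defined above (the exact
# entry point name depends on how the package is installed):
#
#   python -m aioworkers -c config.yaml -l info
#   python -m aioworkers -c config.yaml +g web +g worker    # run only these groups
#   python -m aioworkers -c config.yaml --multiprocessing   # processes from config
#   cat config.yaml | python -m aioworkers --config-stdin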
|
|
"""Tests for modularity code.
"""
#-----------------------------------------------------------------------------
# Library imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import copy
import math
import time
# Third party
import networkx as nx
import numpy as np
import nose.tools as nt
import numpy.testing as npt
# Our own
from brainx import modularity as mod
from brainx import util
# While debugging the library, reload everything
#map(reload,[mod,util])
#-----------------------------------------------------------------------------
# Local utility functions
#-----------------------------------------------------------------------------
def betweenness_to_modularity(g,ppart):
"""Function to convert between betweenness fractions and modularity
Parameters
----------
g : graph object
ppart : perfect partition
Returns
-------
mod : best modularity associated with this graph object
"""
graph_partition = mod.GraphPartition(g,ppart)
return graph_partition.modularity()
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_graphpartition():
""" test GraphPartition correctly handles graph whose
nodes are strings"""
graph = nx.Graph()
graph.add_edge('a','b')
graph.add_edge('c','d')
index = {0:set([0,1]), 1:set([2,3])}
gpart = mod.GraphPartition(graph, index)
assert gpart._node_set == set([0,1,2,3])
# test raise error if matrix unweighted
jnk = np.random.random((10,10))
jnk = np.triu(jnk,1)
graph = nx.from_numpy_matrix(jnk, nx.Graph(weighted=False))
npt.assert_raises(ValueError, mod.GraphPartition, graph, index)
def test_find_unconnected_nodes():
jnk = np.zeros((10,10))
jnk[:6,:6] = 1
jnk = np.triu(jnk,1)
graph = nx.from_numpy_matrix(jnk>0, nx.Graph(weighted=False))
index = {0:set([0,1,2,3,4,5,6,7,8,9])}
graph_partition = mod.GraphPartition(graph, index)
solitary_nodes = graph_partition.find_unconnected_nodes()
npt.assert_equal(solitary_nodes, [6,7,8,9])
def test_index_as_node_names():
graph = nx.Graph()
graph.add_edge('a','b')
graph.add_edge('c','d')
## NOTE networkx does not store node names in the order they were added;
## for this graph ['a', 'c', 'b', 'd']
index = {0:set([0,2]), 1:set([1,3])}
gpart = mod.GraphPartition(graph, index)
named_index = gpart.index_as_node_names()
assert ['a','b'] in named_index
def test_random_modular_graph_between_fraction():
"""Test for graphs with non-zero between_fraction"""
# We need to measure the degree within/between modules
nnods = 120, 240, 360
nmods = 2, 3, 4
av_degrees = 8, 10, 16
btwn_fractions = 0, 0.1, 0.3, 0.5
for nnod in nnods:
for nmod in nmods:
for av_degree in av_degrees:
for btwn_fraction in btwn_fractions:
g = mod.random_modular_graph(nnod, nmod, av_degree,
btwn_fraction)
# First, check the average degree.
av_degree_actual = np.mean(g.degree().values())
# Since we are generating random graphs, the actual average
# degree we get may be off from the requested one by a bit.
# We allow it to be off by up to 1.
#print 'av deg:',av_degree, av_degree_actual # dbg
nt.assert_true (abs(av_degree-av_degree_actual)<1.25,
"""av deg: %.2f av deg actual: %.2f -
This is a stochastic test - repeat to confirm.""" %
(av_degree, av_degree_actual))
# Now, check the between fraction
mat = nx.adj_matrix(g)
#compute the total number of edges in the real graph
nedg = nx.number_of_edges(g)
# sanity checks:
if nnod%nmod:
raise ValueError("nmod must divide nnod evenly")
#Compute the # of nodes per module
nnod_mod = nnod/nmod
#compute what the values are in the real graph
blocks = [np.ones((nnod_mod, nnod_mod))] * nmod
mask = util.diag_stack(blocks)
mask[mask==0] = 2
mask = np.triu(mask,1)
btwn_real = np.sum(mat[mask == 2].flatten())
btwn_real_frac = btwn_real / nedg
#compare to what the actual values are
nt.assert_almost_equal(btwn_fraction,
btwn_real_frac, 1,
"This is a stochastic test, repeat to confirm failure")
def test_modularity():
"""Test the values that go into the modularity calculation after randomly
creating a graph"""
# Given a partition with the correct labels of the input graph, verify that
# the modularity returns 1
# We need to measure the degree within/between modules
nnods = 120, 240, 360
nmods = 2, 3, 4
av_degrees = 8, 10, 16
for nnod in nnods:
for nmod in nmods:
for av_degree in av_degrees:
g = mod.random_modular_graph(nnod, nmod, av_degree)
#Compute the # of nodes per module
nnod_mod = nnod/nmod
#Make a "correct" partition for the graph
part = mod.perfect_partition(nmod,nnod_mod)
#Make a graphpartition object
graph_partition = mod.GraphPartition(g,part)
#call modularity
mod_meas = graph_partition.modularity()
mod_true = 1.0 - 1.0/nmod
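# (Hedged note) With no between-module edges, each of the nmod modules
# holds ~1/nmod of the edges and ~1/nmod of the edge ends, so
# Q = sum_i (e_ii - a_i**2) ~= nmod*(1/nmod - 1/nmod**2) = 1 - 1/nmod,
# which is what mod_true encodes.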
npt.assert_almost_equal(mod_meas, mod_true, 2)
def test_apply_module_merge():
"""Test the GraphPartition operation that merges modules so that it returns
a change in modularity that reflects the difference between the modularity
of the new and old partitions"""
# nnod_mod, av_degrees, nmods
networks = [ [3, [2], [3, 4]],
[4, [2, 3], [2, 4, 6]],
[8, [4, 6], [4, 6, 8]] ]
for nnod_mod, av_degrees, nmods in networks:
for nmod in nmods:
nnod = nnod_mod*nmod
for av_degree in av_degrees:
g = mod.random_modular_graph(nnod, nmod, av_degree)
#Make a "correct" partition for the graph
part = mod.perfect_partition(nmod,nnod/nmod)
#Make a random partition for the graph
part_rand = dict()
while len(part_rand) <= 1: #check if there is only one module
part_rand = mod.rand_partition(g)
#List of modules in the partition
r_mod=range(len(part))
#Loop through pairs of modules
for i in range(1): # DB: why is this necessary?
#select two modules to merge
mod_per = np.random.permutation(r_mod)
m1 = mod_per[0]; m2 = mod_per[1]
#make a graph partition object
graph_partition = mod.GraphPartition(g,part)
#index of nodes within the original module (before merge)
n1_init = list(graph_partition.index[m1])
n2_init = list(graph_partition.index[m2])
n_all_init = n1_init+n2_init
#calculate modularity before merging
mod_init = graph_partition.modularity()
#merge modules
merge_module,e1,a1,delta_energy_meas,type,m1,m2,m2 = \
graph_partition.compute_module_merge(m1,m2)
graph_part2 = copy.deepcopy(graph_partition)
graph_part2.apply_module_merge(m1,m2,merge_module,e1,a1)
#index of nodes within the modules after merging
n_all = list(graph_part2.index[min(m1,m2)])
# recalculate modularity after splitting
mod_new = graph_part2.modularity()
# difference between new and old modularity
delta_energy_true = -(mod_new - mod_init)
# Test the measured difference in energy against the
# function that calculates the difference in energy
npt.assert_almost_equal(delta_energy_meas,
delta_energy_true)
# Check that the list of nodes in the two original modules
# is equal to the list of nodes in the merged module
n_all_init.sort()
n_all.sort()
npt.assert_equal(n_all_init, n_all)
# Test that the keys are equivalent after merging modules
npt.assert_equal(r_mod[:-1],
sorted(graph_part2.index.keys()))
# Test that the values in the mod_e and mod_a matrices for
# the merged module are correct.
npt.assert_equal(graph_part2.mod_e[min(m1,m2)],e1)
npt.assert_equal(graph_part2.mod_a[min(m1,m2)],a1)
def test_rename_keys():
a = {0:0,1:1,2:2,4:4,5:5}
mod.rename_keys(a, 3)
npt.assert_equal(a, {0:0,1:1,2:2,3:4,4:5})
a = {0:0,1:1,3:3,}
mod.rename_keys(a, 2)
npt.assert_equal(a, {0:0,1:1,2:3})
# If called with the last key in dict, it should leave the input alone
a = {0:0,1:1,2:2,3:3}
mod.rename_keys(a, 3)
npt.assert_equal(a, {0:0,1:1,2:2,3:3})
def danon_benchmark():
"""This test comes from Danon et al 2005. It will create the line plot of
Mutual Information vs. betweenness fraction to assess the performance of
the simulated annealing algorithm."""
networks = [[32, [16], [6]]]
btwn_fracs = [float(i)/100 for i in range(0,80,3)]
temperature = 0.1
temp_scaling = 0.9995
tmin=1e-4
num_reps = range(1)
mi_arr=np.empty((len(btwn_fracs),len(num_reps)))
#keep time
for rep in num_reps:
t1 = time.clock()
for nnod_mod, av_degrees, nmods in networks:
for nmod in nmods:
nnod = nnod_mod*nmod
for av_degree in av_degrees:
x_mod = []
for ix,btwn_frac in enumerate(btwn_fracs):
print 'btwn_frac: ',btwn_frac
g = mod.random_modular_graph(nnod, nmod, av_degree,btwn_frac)
#Compute the # of nodes per module
nnod_mod = nnod/nmod
#Make a "correct" partition for the graph
ppart = mod.perfect_partition(nmod,nnod_mod)
graph_out, graph_dict =mod.simulated_annealing(g,
temperature = temperature,temp_scaling = temp_scaling,
tmin=tmin, extra_info = True)
#print "SA partition",graph_out.index
mi = mod.mutual_information(ppart,graph_out.index)
t2 = time.clock()
print 'Elapsed time: ', (float(t2-t1)/60), ' minutes'
print 'partition similarity: ',mi
mi_arr[ix,rep] = mi
## plot_partition(g,graph_out.index,'mi: '+ str(mi),'danon_test_6mod'+str(btwn_frac)+'_graph.png')
x_mod.append(betweenness_to_modularity(g,ppart))
## mi_arr_avg = np.mean(mi_arr,1)
## plt.figure()
## plt.plot(btwn_fracs,mi_arr_avg)
## plt.xlabel('Betweenness fraction')
## plt.ylabel('Mutual information')
## plt.savefig('danon_test_6mod/danontest_btwn.png')
## plt.figure()
## plt.plot(x_mod,mi_arr_avg)
## plt.xlabel('Modularity')
## plt.ylabel('Mutual information')
## plt.savefig('danon_test_6mod/danontest_mod.png')
#plt.figure()
#plt.plot(graph_dict['energy'], label = 'energy')
#plt.plot(graph_dict['temperature'], label = 'temperature')
#plt.xlabel('Iteration')
return mi_arr
def SA():
""" Test the simulated annealing script"""
#nnod_mod, av_degrees, nmods
#networks = [ [4, [2, 3], [2, 4, 6]]]#,
#networks = [ [8, [4, 6], [4, 6, 8]]]
#networks = [[40, [20], [2]]]
networks = [[32, [16], [4]]]
#networks = [[64, [12], [6]]]
btwn_fracs = [0]
temperature = 10
temp_scaling = 0.9995
tmin=1e-4
nochange_ratio_min=0.01
#keep time
for nnod_mod, av_degrees, nmods in networks:
for nmod in nmods:
nnod = nnod_mod*nmod
for av_degree in av_degrees:
for btwn_frac in btwn_fracs:
t1=time.clock()
g = mod.random_modular_graph(nnod, nmod, av_degree,btwn_frac)
#Compute the # of nodes per module
nnod_mod = nnod/nmod
#Make a "correct" partition for the graph
ppart = mod.perfect_partition(nmod,nnod_mod)
graph_out, energy_array, rej_array, temp_array =mod.simulated_annealing(g,
temperature = temperature,temp_scaling = temp_scaling,
tmin=tmin, nochange_ratio_min = nochange_ratio_min)
print "perfect partition", ppart
print "SA partition",graph_out.index
t2 = time.clock()
print 'Elapsed time: ', float(t2-t1)/60, 'minutes'
print 'partition similarity: ',mod.mutual_information(ppart,graph_out.index)
return graph_out, g, energy_array, rej_array, ppart, temp_array
def test_mutual_information_simple():
"""MI computations with hand-validated values.
"""
from math import log
# Define two simple partitions that are off by one assignment
a = {0:[0, 1], 1:[2, 3], 2:[4, 5]}
b = {0:[0, 1], 1:[2, 3, 4], 2:[5]}
N_true = np.array([ [2,0,0], [0,2,0], [0,1,1] ], dtype=float)
N = mod.confusion_matrix(a, b)
# test confusion matrix
npt.assert_equal(N, N_true)
# Now compute mi by hand
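# (Hedged note) num/den below instantiate the normalized mutual information
# of Danon et al. (2005):
#   I(A,B) = -2*sum_ij N_ij*ln(N_ij*n/(N_i.*N_.j))
#            / (sum_i N_i.*ln(N_i./n) + sum_j N_.j*ln(N_.j/n))
# with n = 6, row sums N_i. = [2,2,2] and column sums N_.j = [2,3,1],
# giving num = -6*ln(3) - 4*ln(2) and den = -(3*ln(2) + 8*ln(3) + ln(6)).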
num = -6*log(3)-4*log(2)
den = -(3*log(2)+8*log(3)+log(6))
mi_true = num/den
mi = mod.mutual_information(a, b)
npt.assert_almost_equal(mi, mi_true)
# Let's now flip the labels and confirm that the computation is impervious
# to module labels
b2 = {2:[0, 1], 0:[2, 3, 4], 1:[5]}
npt.assert_almost_equal(mod.mutual_information(b, b2), 1)
npt.assert_almost_equal(mod.mutual_information(a, b2), mi)
def test_mutual_information_empty():
"""Validate that empty modules don't affect MI.
"""
# Define two simple partitions that are off by one assignment
a = {0:[0, 1], 1:[2, 3], 2:[4, 5]}
b = {0:[0, 1], 1:[2, 3], 2:[4, 5], 3:[]}
try:
mod.mutual_information(a, b)
except ValueError, e:
nt.assert_equals(e.args[0], "Empty module in second partition.")
try:
mod.mutual_information(b, a)
except ValueError, e:
nt.assert_equals(e.args[0], "Empty module in first partition.")
def test_mutual_information():
""" Test the function which returns the mutual information in two
partitions
XXX - This test is currently incomplete - it only checks the most basic
case of MI(x, x)==1, but doesn't do any non-trivial checks.
"""
# nnod_mod, av_degrees, nmods
networks = [ [4, [2, 3], [2, 4, 6]],
[8, [4, 6], [4, 6, 8]],
[40, [20], [2]] ]
for nnod_mod, av_degrees, nmods in networks:
for nmod in nmods:
nnod = nnod_mod*nmod
for av_degree in av_degrees:
#make a graph object
g = mod.random_modular_graph(nnod, nmod, av_degree)
#Compute the # of nodes per module
nnod_mod = nnod/nmod
#Make a "correct" partition for the graph
ppart = mod.perfect_partition(nmod,nnod_mod)
#graph_out, mod_array =mod.simulated_annealing(g, temperature =
#temperature,temp_scaling = temp_scaling, tmin=tmin)
#test the perfect case for now: two of the same partition
#returns 1
mi_orig = mod.mutual_information(ppart,ppart)
npt.assert_equal(mi_orig,1)
#move one node and test that mutual_information comes out
#correctly
graph_partition = mod.GraphPartition(g,ppart)
graph_partition.node_update(0,0,1)
mi = mod.mutual_information(ppart,graph_partition.index)
npt.assert_array_less(mi, mi_orig)
## NOTE: CORRECTNESS NOT TESTED YET
#merge modules and check that mutual information comes out
#correctly/lower
graph_partition2 = mod.GraphPartition(g,ppart)
merged_module, e_new, a_new, d,t,m1,m2,x = graph_partition2.compute_module_merge(0,1)
graph_partition2.apply_module_merge(m1,m2,merged_module,e_new,a_new)
mi2 = mod.mutual_information(ppart,graph_partition2.index)
npt.assert_array_less(mi2,mi_orig)
## NOTE: CORRECTNESS NOT TESTED YET
#split modules and check that mutual information comes out
#correctly/lower
graph_partition3 = mod.GraphPartition(g,ppart)
n1 = set(list(graph_partition3.index[0])[::2])
n2 = set(list(graph_partition3.index[0])[1::2])
(split_modules, e_new,
a_new, d, t, m,
n1,n2) = graph_partition3.compute_module_split(0,n1,n2)
graph_partition3.apply_module_split(m, n1, n2,
split_modules,
e_new, a_new)
mi3 = mod.mutual_information(ppart, graph_partition3.index)
npt.assert_array_less(mi3,mi_orig)
## NOTE: CORRECTNESS NOT TESTED YET
def test_random_mod():
""" Test the GraphPartition operation that selects random modules
to merge and split
XXX not working yet"""
#nnod_mod, av_degrees, nmods
networks = [ [4, [2, 3], [2, 4, 6]],
[8, [4, 6], [4, 6, 8]] ]
n_iter = 100
for nnod_mod, av_degrees, nmods in networks:
for nmod in nmods:
nnod = nnod_mod*nmod
for av_degree in av_degrees:
g = mod.random_modular_graph(nnod, nmod, av_degree)
part = dict()
while (len(part) <= 1) or (len(part) == nnod):
part = mod.rand_partition(g)
graph_partition = mod.GraphPartition(g,part)
for i in range(n_iter):
graph_partition.random_mod()
#check that the partition has > 1 modules
true = len(graph_partition)>1
npt.assert_equal(true,1)
#check that the partition has < nnod modules
true = len(graph_partition)<nnod
npt.assert_equal(true,1)
def test_random_nod():
""" Test the GraphPartition operation that selects random nodes to move
between modules """
def test_decide_if_keeping():
""" Test the function which decides whether or not to keep the new
partition"""
dEs = [-.5,-.4,-.3,-.2,-.1,0,.1,.2,.3,.4,.5]
temperatures = [.01,1,10,100]
iter = 1000
tolerance = 1
for temp in temperatures:
for dE in dEs:
keep_list = np.empty(iter)
for i in range(iter):
keep_list[i] = float(mod.decide_if_keeping(dE,temp))
if dE <= 0:
keep_correct = np.ones(iter)
npt.assert_equal(keep_list,keep_correct)
else:
mean_keep = np.mean(keep_list)
mean_correct = math.exp(-dE/temp)
npt.assert_almost_equal(mean_keep,mean_correct, tolerance)
def test_sim_anneal_simple():
"""Very simple simulated_annealing test with a small network"""
#
nnod, nmod, av_degree, btwn_frac = 24, 3, 4, 0
g = mod.random_modular_graph(nnod, nmod, av_degree, btwn_frac)
#Compute the # of nodes per module
## nnod_mod = nnod/nmod
#Make a "correct" partition for the graph
## ppart = mod.perfect_partition(nmod,nnod_mod)
temperature = 10
temp_scaling = 0.95
tmin=1
graph_out, graph_dict = mod.simulated_annealing(g,
temperature = temperature, temp_scaling = temp_scaling,
tmin=tmin, extra_info = True, debug=True)
# Ensure that there are no empty modules
util.assert_no_empty_modules(graph_out.index)
## mi = mod.mutual_information(ppart, graph_out.index)
#nt.assert_equal(mi, 1)
def test_apply_module_split():
"""Test the GraphPartition operation that splits modules so that it returns
a change in modularity that reflects the difference between the modularity
of the new and old partitions.
Also test that the module that was split now contains the correct nodes,
the correct modularity update, the correct energy,and that no empty modules
result from it."""
# nnod_mod, av_degrees, nmods
networks = [ [3, [2], [2, 3, 4]],
[4, [2, 3], [2, 4, 6]],
[8, [4, 6], [4, 6, 8]] ]
for nnod_mod, av_degrees, nmods in networks:
for nmod in nmods:
nnod = nnod_mod*nmod
for av_degree in av_degrees:
g = mod.random_modular_graph(nnod, nmod, av_degree)
# Make a "correct" partition for the graph
## part = mod.perfect_partition(nmod,nnod/nmod)
# Make a random partition for the graph
part_rand = mod.rand_partition(g, nnod/2)
#List of modules in the partition that have two or more nodes
r_mod = []
for m, nodes in part_rand.iteritems():
if len(nodes)>2:
r_mod.append(m)
# Note: The above can be written in this more compact, if
# slightly less intuitively clear, list comprehension:
# r_mod = [ m for m, nodes in part_rand.iteritems() if
# len(nodes)>2 ]
#Module that we are splitting
for m in r_mod:
graph_partition = mod.GraphPartition(g,part_rand)
#index of nodes within the original module (before split)
n_init = list(graph_partition.index[m])
#calculate modularity before splitting
mod_init = graph_partition.modularity()
# assign nodes to two groups
n1_orig,n2_orig = graph_partition.determine_node_split(m)
# make sure neither of these is empty
nt.assert_true(len(n1_orig)>= 1)
nt.assert_true(len(n2_orig)>= 1)
#make sure that there are no common nodes between the two
node_intersection = set.intersection(n1_orig,n2_orig)
nt.assert_equal(node_intersection,set([]))
#make sure that sum of the two node sets equals the
#original set
node_union = set.union(n1_orig,n2_orig)
npt.assert_equal(np.sort(list(node_union)),np.sort(n_init))
# split modules
split_modules,e1,a1,delta_energy_meas,type,m,n1,n2 = \
graph_partition.compute_module_split(m,n1_orig,n2_orig)
#note: n1 and n2 are output from this function (as well as
#inputs) because the function is called from within another
#(rand_mod) def but then output to the simulated script, so
#the node split needs to be passed along.
#as a simple confirmation, can make sure they match
npt.assert_equal(n1_orig,n1)
npt.assert_equal(n2_orig,n2)
#split_modules should be a dictionary with two modules
#(0,1) that contain the node sets n1 and n2 respectively.
#test this.
npt.assert_equal(split_modules[0],n1)
npt.assert_equal(split_modules[1],n2)
#make a new graph partition equal to the old one and apply
#the module split to it (graph_part2)
graph_part2 = copy.deepcopy(graph_partition)
graph_part2.apply_module_split(m,n1,n2,split_modules,e1,a1)
#make a third graph partition using only the original graph
#and the partition from graph_part2
graph_part3 = mod.GraphPartition(g,graph_part2.index)
#index of nodes within the modules after splitting
n1_new = list(graph_part2.index[m])
n2_new = list(graph_part2.index[len(graph_part2)-1])
n_all = n1_new + n2_new
# recalculate modularity after splitting
mod_new = graph_part2.modularity()
mod_new_3 = graph_part3.modularity()
# difference between new and old modularity
delta_energy_true = -(mod_new - mod_init)
# Test that the measured change in energy by splitting a
# module is equal to the function output from module_split
npt.assert_almost_equal(delta_energy_meas,
delta_energy_true)
# Test that the nodes in the split modules are equal to the
# original nodes of the module
nt.assert_equal(sorted(list(n1)), sorted(n1_new))
nt.assert_equal(sorted(list(n2)), sorted(n2_new))
n_init.sort()
n_all.sort()
# Test that the initial list of nodes in the module are
# equal to the nodes in m1 and m2 (split modules)
npt.assert_equal(n_init,n_all)
# Test that the computed modularity found when
# apply_module_split is used is equal to the modularity you
# would find if using that partition and that graph
npt.assert_almost_equal(mod_new,mod_new_3)
# Check that there are no empty modules in the final
# partition
for m in graph_part2.index:
nt.assert_true(len(graph_part2.index[m]) > 0)
def test_apply_node_move():
"""Test the GraphPartition operation that moves a single node so that it
returns a change in modularity that reflects the difference between the
modularity of the new and old partitions"""
# nnod_mod, av_degrees, nmods
#networks = [ [3, [2], [2, 3, 4]],
# [4, [2, 3], [2, 4, 6]],
# [8, [4, 6], [4, 6, 8]] ]
networks = [ [4, [2, 3], [2, 4, 6]],
[8, [4, 6], [4, 6, 8]] ]
for nnod_mod, av_degrees, nmods in networks:
for nmod in nmods:
nnod = nnod_mod*nmod
for av_degree in av_degrees:
print nnod_mod,nmod,av_degree
g = mod.random_modular_graph(nnod, nmod, av_degree)
#Make a "correct" partition for the graph
#part = mod.perfect_partition(nmod,nnod/nmod)
#Make a random partition for the graph
part_rand = dict()
while len(part_rand) <= 1: #check if there is only one module
part_rand = mod.rand_partition(g)
#List of modules in the partition
r_mod=range(len(part_rand))
#Make a graph_partition object
graph_partition = mod.GraphPartition(g,part_rand)
#select two modules to change node assignments
mod_per = np.random.permutation(r_mod)
m1 = mod_per[0]; m2 = mod_per[1]
while len(graph_partition.index[m1]) <= 1:
mod_per = np.random.permutation(r_mod)
m1 = mod_per[0]
m2 = mod_per[1]
#pick a random node to move between modules m1 and m2
node_list=list(graph_partition.index[m1])
nod_per = np.random.permutation(node_list)
n = nod_per[0]
#list of nodes within the original modules (before node move)
## n1_init = list(nod_per) #list(graph_partition.index[m1])
## n2_init = list(graph_partition.index[m2])
## n1_new = copy.deepcopy(n1_init)
## n2_new = copy.deepcopy(n2_init)
# calculate modularity before node move
mod_init = graph_partition.modularity()
# move node from m1 to m2
node_moved_mods,e1,a1,delta_energy_meas,n,m1,m2 = \
graph_partition.compute_node_update(n,m1,m2)
graph_part2 = copy.deepcopy(graph_partition)
m2_new = graph_part2.apply_node_update(n,m1,m2,node_moved_mods,e1,a1)
#if the keys get renamed, the m1,m2 numbers are no longer the same
#test that m2 now contains n
nt.assert_true(n in graph_part2.index[m2_new])
#if n not in graph_part2.index[m2_new]:
# 1/0
# recalculate modularity after splitting
mod_new = graph_part2.modularity()
# difference between new and old modularity
delta_energy_true = -(mod_new - mod_init)
#print delta_energy_meas,delta_energy_true
# Test that the measured change in energy is equal to the true
# change in energy calculated in the node_update function
npt.assert_almost_equal(delta_energy_meas,delta_energy_true)
def test_adjust_partition():
e = np.loadtxt(os.path.join(os.path.dirname(__file__), 'jazz.net'),
skiprows=3, dtype=int)[:, :2] - 1
g = nx.Graph()
g.add_edges_from(e)
p0 = mod.newman_partition(g)
#p1 = mod.adjust_partition(g, p0, max_iter=6)
    ## This doesn't test what we want to test FIXME
#npt.assert_(p0 > 0.38)
#npt.assert_(p1 > 0.42)
def test_empty_graphpartition():
g = nx.Graph()
g.add_node(1)
npt.assert_raises(ValueError, mod.GraphPartition, g, {1: set(g.nodes())})
def test_badindex_graphpartition():
""" when making a GraphPArtition, check index is valid"""
## index should be dict of sets
e = np.loadtxt(os.path.join(os.path.dirname(__file__), 'jazz.net'),
skiprows=3, dtype=int)[:, :2] - 1
g = nx.Graph()
g.add_edges_from(e)
index = {0: set(g.nodes()[:100]), 1: set(g.nodes()[100:])}
gp = mod.GraphPartition(g, index)
nt.assert_true(gp.index == index)
npt.assert_raises(TypeError, mod.GraphPartition, g, {0: g.nodes()})
npt.assert_raises(ValueError, mod.GraphPartition, g,
{0:set(g.nodes()[:-1])})
npt.assert_raises(TypeError, mod.GraphPartition, g, g.nodes())
def test_newman_partition():
""" Test Newman Partition function """
tmpmat = np.random.random((10,10))
tmpmat[tmpmat < .5] = 0
graph = nx.from_numpy_matrix(tmpmat, nx.Graph(weighted = False))
npt.assert_raises(ValueError, mod.newman_partition, graph)
tmpmat[:] = 0
# test that no edges raises error (from GraphPartition)
graph = nx.from_numpy_matrix(tmpmat, nx.Graph(weighted = False))
npt.assert_raises(ValueError, mod.newman_partition, graph)
tmpmat[:] = 1
util.fill_diagonal(tmpmat, 0)
graph = nx.from_numpy_matrix(tmpmat, nx.Graph(weighted=False))
part = mod.newman_partition(graph)
## if all edges are connected expect only one partition
expected_part = {0: set([0,1,2,3,4,5,6,7,8,9])}
nt.assert_equal(part.index, expected_part)
if __name__ == "__main__":
npt.run_module_suite()
|
|
# Copyright (C) 2013-2015 The python-bitcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import os
from bitcoinlib.core import b2x,x
from bitcoinlib.core.script import *
class Test_CScriptOp(unittest.TestCase):
def test_pushdata(self):
def T(data, expected):
data = x(data)
expected = x(expected)
serialized_data = CScriptOp.encode_op_pushdata(data)
self.assertEqual(serialized_data, expected)
T('', '00')
T('00', '0100')
T('0011223344556677', '080011223344556677')
T('ff'*0x4b, '4b' + 'ff'*0x4b)
T('ff'*0x4c, '4c4c' + 'ff'*0x4c)
T('ff'*0xff, '4cff' + 'ff'*0xff)
T('ff'*0x100, '4d0001' + 'ff'*0x100)
T('ff'*0xffff, '4dffff' + 'ff'*0xffff)
T('ff'*0x10000, '4e00000100' + 'ff'*0x10000)
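        # The cases above exercise the pushdata size boundaries: payloads of up
        # to 0x4b (75) bytes get a single length byte, 0x4c-0xff bytes use
        # OP_PUSHDATA1 (0x4c) plus a 1-byte length, 0x100-0xffff bytes use
        # OP_PUSHDATA2 (0x4d) plus a 2-byte little-endian length, and larger
        # payloads use OP_PUSHDATA4 (0x4e) plus a 4-byte little-endian length.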
def test_is_singleton(self):
self.assertTrue(OP_0 is CScriptOp(0x00))
self.assertTrue(OP_1 is CScriptOp(0x51))
self.assertTrue(OP_16 is CScriptOp(0x60))
self.assertTrue(OP_CHECKSIG is CScriptOp(0xac))
for i in range(0x0, 0x100):
self.assertTrue(CScriptOp(i) is CScriptOp(i))
def test_encode_decode_op_n(self):
def t(n, op):
actual = CScriptOp.encode_op_n(n)
self.assertEqual(actual, op)
self.assertTrue(isinstance(actual, CScriptOp))
actual = op.decode_op_n()
self.assertEqual(actual, n)
self.assertTrue(isinstance(actual, int))
t(0, OP_0)
t(1, OP_1)
t(2, OP_2)
t(3, OP_3)
t(4, OP_4)
t(5, OP_5)
t(6, OP_6)
t(7, OP_7)
t(8, OP_8)
t(9, OP_9)
t(10, OP_10)
t(11, OP_11)
t(12, OP_12)
t(13, OP_13)
t(14, OP_14)
t(15, OP_15)
t(16, OP_16)
with self.assertRaises(ValueError):
OP_CHECKSIG.decode_op_n()
with self.assertRaises(ValueError):
CScriptOp(1).decode_op_n()
class Test_CScript(unittest.TestCase):
def test_tokenize_roundtrip(self):
def T(serialized_script, expected_tokens, test_roundtrip=True):
serialized_script = x(serialized_script)
script_obj = CScript(serialized_script)
actual_tokens = list(script_obj)
self.assertEqual(actual_tokens, expected_tokens)
if test_roundtrip:
recreated_script = CScript(actual_tokens)
self.assertEqual(recreated_script, serialized_script)
T('', [])
# standard pushdata
T('00', [b''])
T('0100', [b'\x00'])
T('4b' + 'ff'*0x4b, [b'\xff'*0x4b])
# non-optimal pushdata
T('4c00', [b''], False)
T('4c04deadbeef', [x('deadbeef')], False)
T('4d0000', [b''], False)
T('4d0400deadbeef', [x('deadbeef')], False)
T('4e00000000', [b''], False)
T('4e04000000deadbeef', [x('deadbeef')], False)
# numbers
T('4f', [OP_1NEGATE])
T('51', [0x1])
T('52', [0x2])
T('53', [0x3])
T('54', [0x4])
T('55', [0x5])
T('56', [0x6])
T('57', [0x7])
T('58', [0x8])
T('59', [0x9])
T('5a', [0xa])
T('5b', [0xb])
T('5c', [0xc])
T('5d', [0xd])
T('5e', [0xe])
T('5f', [0xf])
# some opcodes
T('9b', [OP_BOOLOR])
T('9a9b', [OP_BOOLAND, OP_BOOLOR])
T('ff', [OP_INVALIDOPCODE])
T('fafbfcfd', [CScriptOp(0xfa), CScriptOp(0xfb), CScriptOp(0xfc), CScriptOp(0xfd)])
# all three types
T('512103e2a0e6a91fa985ce4dda7f048fca5ec8264292aed9290594321aa53d37fdea32410478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc345552ae',
[1,
x('03e2a0e6a91fa985ce4dda7f048fca5ec8264292aed9290594321aa53d37fdea32'),
x('0478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc3455'),
2,
OP_CHECKMULTISIG])
def test_invalid_scripts(self):
def T(serialized):
with self.assertRaises(CScriptInvalidError):
list(CScript(x(serialized)))
T('01')
T('02')
T('0201')
T('4b')
T('4b' + 'ff'*0x4a)
T('4c')
T('4cff' + 'ff'*0xfe)
T('4d')
T('4dff')
T('4dffff' + 'ff'*0xfffe)
T('4e')
T('4effffff')
T('4effffffff' + 'ff'*0xfffe) # not going to test with 4GiB-1...
def test_equality(self):
# Equality is on the serialized script, not the logical meaning.
# This is important for P2SH.
def T(serialized1, serialized2, are_equal):
script1 = CScript(x(serialized1))
script2 = CScript(x(serialized2))
if are_equal:
self.assertEqual(script1, script2)
else:
self.assertNotEqual(script1, script2)
T('', '', True)
T('', '00', False)
T('00', '00', True)
T('00', '01', False)
T('01ff', '01ff', True)
T('fc01ff', '01ff', False)
# testing equality on an invalid script is legal, and evaluates based
# on the serialization
T('4e', '4e', True)
T('4e', '4e00', False)
def test_add(self):
script = CScript()
script2 = script + 1
# + operator must create a new instance
self.assertIsNot(script, script2)
script = script2
self.assertEqual(script, b'\x51')
script += 2
# += should not be done in place
self.assertIsNot(script, script2)
self.assertEqual(script, b'\x51\x52')
script += OP_CHECKSIG
self.assertEqual(script, b'\x51\x52\xac')
script += b'deadbeef'
self.assertEqual(script, b'\x51\x52\xac\x08deadbeef')
script = CScript() + 1 + 2 + OP_CHECKSIG + b'deadbeef'
self.assertEqual(script, b'\x51\x52\xac\x08deadbeef')
# big number
script = CScript() + 2**64
self.assertEqual(script, b'\x09\x00\x00\x00\x00\x00\x00\x00\x00\x01')
# some stuff we can't add
with self.assertRaises(TypeError):
script += None
self.assertEqual(script, b'\x09\x00\x00\x00\x00\x00\x00\x00\x00\x01')
with self.assertRaises(TypeError):
script += [1, 2, 3]
self.assertEqual(script, b'\x09\x00\x00\x00\x00\x00\x00\x00\x00\x01')
with self.assertRaises(TypeError):
script = script + None
self.assertEqual(script, b'\x09\x00\x00\x00\x00\x00\x00\x00\x00\x01')
def test_repr(self):
def T(script, expected_repr):
actual_repr = repr(script)
self.assertEqual(actual_repr, expected_repr)
T( CScript([]),
'CScript([])')
T( CScript([1]),
'CScript([1])')
T( CScript([1, 2, 3]),
'CScript([1, 2, 3])')
T( CScript([1, x('7ac977d8373df875eceda362298e5d09d4b72b53'), OP_DROP]),
"CScript([1, x('7ac977d8373df875eceda362298e5d09d4b72b53'), OP_DROP])")
T(CScript(x('0001ff515261ff')),
"CScript([x(''), x('ff'), 1, 2, OP_NOP, OP_INVALIDOPCODE])")
# truncated scripts
T(CScript(x('6101')),
"CScript([OP_NOP, x('')...<ERROR: PUSHDATA(1): truncated data>])")
T(CScript(x('614bff')),
"CScript([OP_NOP, x('ff')...<ERROR: PUSHDATA(75): truncated data>])")
T(CScript(x('614c')),
"CScript([OP_NOP, <ERROR: PUSHDATA1: missing data length>])")
T(CScript(x('614c0200')),
"CScript([OP_NOP, x('00')...<ERROR: PUSHDATA1: truncated data>])")
def test_is_p2sh(self):
def T(serialized, b):
script = CScript(x(serialized))
self.assertEqual(script.is_p2sh(), b)
# standard P2SH
T('a9146567e91196c49e1dffd09d5759f6bbc0c6d4c2e587', True)
# NOT a P2SH txout due to the non-optimal PUSHDATA encoding
T('a94c146567e91196c49e1dffd09d5759f6bbc0c6d4c2e587', False)
def test_is_push_only(self):
def T(serialized, b):
script = CScript(x(serialized))
self.assertEqual(script.is_push_only(), b)
T('', True)
T('00', True)
T('0101', True)
T('4c00', True)
T('4d0000', True)
T('4e00000000', True)
T('4f', True)
# OP_RESERVED *is* considered to be a pushdata op by is_push_only!
# Or specifically, the IsPushOnly() used in P2SH validation.
T('50', True)
T('51', True)
T('52', True)
T('53', True)
T('54', True)
T('55', True)
T('56', True)
T('57', True)
T('58', True)
T('59', True)
T('5a', True)
T('5b', True)
T('5c', True)
T('5d', True)
T('5e', True)
T('5f', True)
T('60', True)
T('61', False)
def test_is_push_only_on_invalid_pushdata(self):
def T(hex_script):
invalid_script = CScript(x(hex_script))
self.assertFalse(invalid_script.is_push_only())
T('01')
T('02ff')
T('4b')
T('4c01')
T('4c02ff')
T('4d')
T('4d0100')
T('4d0200ff')
T('4e')
T('4e01000000')
T('4e02000000ff')
def test_has_canonical_pushes(self):
def T(hex_script, expected_result):
script = CScript(x(hex_script))
self.assertEqual(script.has_canonical_pushes(), expected_result)
T('', True)
T('00', True)
T('FF', True)
# could have used an OP_n code, rather than a 1-byte push
T('0100', False)
T('0101', False)
T('0102', False)
T('0103', False)
T('0104', False)
T('0105', False)
T('0106', False)
T('0107', False)
T('0108', False)
T('0109', False)
T('010A', False)
T('010B', False)
T('010C', False)
T('010D', False)
T('010E', False)
T('010F', False)
T('0110', False)
T('0111', True)
# Could have used a normal n-byte push, rather than OP_PUSHDATA1
T('4c00', False)
T('4c0100', False)
T('4c01FF', False)
T('4b' + '00'*75, True)
T('4c4b' + '00'*75, False)
T('4c4c' + '00'*76, True)
# Could have used a OP_PUSHDATA1.
T('4d0000', False)
T('4d0100FF', False)
T('4dFF00' + 'FF'*0xFF, False)
T('4d0001' + 'FF'*0x100, True)
# Could have used a OP_PUSHDATA2.
T('4e00000000', False)
T('4e01000000FF', False)
T('4eFFFF0000' + 'FF'*0xFFFF, False)
T('4e00000100' + 'FF'*0x10000, True)
def test_has_canonical_pushes_with_invalid_truncated_script(self):
def T(hex_script):
script = CScript(x(hex_script))
self.assertEqual(script.has_canonical_pushes(), False)
T('01')
T('02ff')
T('4b')
T('4c01')
T('4c02ff')
T('4d')
T('4d0100')
T('4d0200ff')
T('4e')
T('4e01000000')
T('4e02000000ff')
def test_is_unspendable(self):
def T(serialized, b):
script = CScript(x(serialized))
self.assertEqual(script.is_unspendable(), b)
T('', False)
T('00', False)
T('006a', False)
T('6a', True)
T('6a6a', True)
T('6a51', True)
def test_is_valid(self):
def T(serialized, b):
script = CScript(x(serialized))
self.assertEqual(script.is_valid(), b)
T('', True)
T('00', True)
T('01', False)
# invalid opcodes do not by themselves make a script invalid
T('ff', True)
def test_to_p2sh_scriptPubKey(self):
def T(redeemScript, expected_hex_bytes):
redeemScript = CScript(redeemScript)
actual_script = redeemScript.to_p2sh_scriptPubKey()
self.assertEqual(b2x(actual_script), expected_hex_bytes)
T([],
'a914b472a266d0bd89c13706a4132ccfb16f7c3b9fcb87')
T([1,x('029b6d2c97b8b7c718c325d7be3ac30f7c9d67651bce0c929f55ee77ce58efcf84'),1,OP_CHECKMULTISIG],
'a91419a7d869032368fd1f1e26e5e73a4ad0e474960e87')
T([b'\xff'*517],
'a9140da7fa40ebf248dfbca363c79921bdd665fed5ba87')
with self.assertRaises(ValueError):
CScript([b'a' * 518]).to_p2sh_scriptPubKey()
class Test_IsLowDERSignature(unittest.TestCase):
def test_high_s_value(self):
sig = x('3046022100820121109528efda8bb20ca28788639e5ba5b365e0a84f8bd85744321e7312c6022100a7c86a21446daa405306fe10d0a9906e37d1a2c6b6fdfaaf6700053058029bbe')
self.assertFalse(IsLowDERSignature(sig))
def test_low_s_value(self):
sig = x('3045022100b135074e08cc93904a1712b2600d3cb01899a5b1cc7498caa4b8585bcf5f27e7022074ab544045285baef0a63f0fb4c95e577dcbf5c969c0bf47c7da8e478909d669')
self.assertTrue(IsLowDERSignature(sig))
|
|
#!/usr/bin/env python3
import codecs
import configparser
import difflib
import json
import logging
import praw
import prawcore
import pdb
import re
import os
import peewee
import sys
import time
import traceback
import urllib.parse
from database import *
from datetime import datetime
from random import randint
# Set up logging
logging.basicConfig(filename = os.path.join(sys.path[0], "copykun.log"), format = "%(asctime)s %(message)s")
logger = logging.getLogger("copykun")
logger.setLevel(logging.INFO)
# Read the config file
if not os.path.isfile(os.path.join(sys.path[0], "copykun.cfg")):
logger.critical("Configuration file not found: copykun.cfg")
exit(1)
config = configparser.ConfigParser(interpolation = configparser.ExtendedInterpolation())
with codecs.open(os.path.join(sys.path[0], "copykun.cfg"), "r", "utf8") as f:
config.read_file(f)
COMMENT_TYPE_PREFIX = "t1_"
# The number of /-separated segments a url with a comment id
# specified will split into (not counting empties)
URL_LENGTH_WITH_COMMENT = 8
# Message prefix for PRAW APIException when text is too long
TEXT_TOO_LONG_PREFIX = "(TOO_LONG)"
MAX_COMMENT_LENGTH = 10000
TEXT_DIVIDER = "\n\n----\n"
user_agent = (config.get("Reddit", "user_agent"))
username = (config.get("Reddit", "username"))
reddit = praw.Reddit(
user_agent=user_agent,
client_id=config.get("OAuth", "client_id"),
client_secret=config.get("OAuth", "client_secret"),
password=config.get("Reddit", "password"),
username=config.get("Reddit", "username")
)
subreddit = reddit.subreddit(config.get("Reddit", "subreddit"))
post_limit = int(config.get("Reddit", "post_limit"))
taglines = json.loads(config.get("Reddit", "taglines")) if config.has_option("Reddit", "taglines") else None
forwarding_address = config.get("Reddit", "forwarding_address") if config.has_option("Reddit", "forwarding_address") else ""
auto_copy = config.getboolean("Reddit", "auto_copy") if config.has_option("Reddit", "auto_copy") else False
comment_limit = int(config.get("Reddit", "comment_limit")) if config.has_option("Reddit", "comment_limit") else 128
summon_phrase = config.get("Reddit", "summon_phrase") if config.has_option("Reddit", "summon_phrase") else ""
footer = config.get("Reddit", "footer") if config.has_option("Reddit", "footer") else ""
error_msg = config.get("Reddit", "error_msg") if config.has_option("Reddit", "error_msg") else ""
ignore_users = set(json.loads(config.get("Reddit", "ignore_users")) if config.has_option("Reddit", "ignore_users") else [])
ignore_users.add(username)
link_regex = r"((?:https?://)(?:.+\.)?reddit\.com)?/r/(?P<subreddit>\w+)/comments/(?P<id>\w+)(/(?P<title>[^?\s()/]*)(/(?P<comment_id>\w+)?)?(/(?P<query>\?[\w-]+(?:=[\w-]*)?(?:&[\w-]+(?:=[\w-]*)?)*)?)?)?"
short_link_regex = r"(?:https?://)redd\.it/(?P<post_id>\w*)"
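# Illustrative shapes of the links the two patterns above are meant to capture,
# written in terms of their named groups (a sketch, not an exhaustive list):
#   link_regex:       https://www.reddit.com/r/<subreddit>/comments/<id>/<title>/<comment_id>
#   short_link_regex: https://redd.it/<post_id>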
def copykun_exception_hook(excType, excValue, traceback, logger = logger):
logger.error("**** EXCEPTION: ", exc_info = (excType, excValue, traceback))
sys.excepthook = copykun_exception_hook
class CannotCopyError(Exception):
pass
class CopyKun(object):
def __init__(self):
self.database = Database()
'''
Get the correct comment or submission from a permalink since PRAW
only lets you get submissions
'''
def get_correct_reddit_object(self, link):
match = re.search(link_regex, link, re.IGNORECASE)
if not match:
raise CannotCopyError("Failure parsing link for: \"" + link + "\"")
try:
# Link is to comment
if match.group("comment_id"):
return reddit.comment(match.group("comment_id"))
# Link is to post
elif match.group("id"):
return reddit.submission(match.group("id"))
# Link is to neither
else:
raise CannotCopyError("Link was not post or comment: \"" + link + "\"")
# PRAW can throw a KeyError if it can't parse response JSON or a RedirectException for certain non-post reddit links
except (KeyError, prawcore.exceptions.Redirect) as e:
raise CannotCopyError("Failure parsing JSON for: \"" + orig_url + "\" (safe to ignore if URL is a non-submission reddit link)")
except (TypeError, praw.exceptions.APIException) as e:
logger.exception("Failure fetching url: \"" + orig_url + "\"")
return None
'''
Get the post to copy if one is linked in the original post
'''
def get_post_to_copy(self, original_post):
link = None
post_id = None
if original_post.author and original_post.author.name in ignore_users:
return None
# Check self text for link to other sub
if type(original_post) is praw.models.Comment or original_post.is_self:
text = original_post.body if type(original_post) is praw.models.Comment else original_post.selftext
# Regular link
match = re.search(link_regex, text, re.IGNORECASE)
if match:
link = match.group(0)
# Short link
match = re.search(short_link_regex, text, re.IGNORECASE)
if match:
post_id = match.group("post_id")
# Check url for reddit link
elif original_post.domain.endswith("reddit.com"):
link = original_post.url
# Check url for shortened link
elif original_post.domain == "redd.it":
match = re.search(short_link_regex, original_post.url, re.IGNORECASE)
if match:
                post_id = match.group("post_id")
# Found reddit link
if link:
link = urllib.parse.unquote(str(link))
return self.get_correct_reddit_object(link)
# Found short link
elif post_id:
try:
# Short links can only be to posts so no comment test
return reddit.submission(id = post_id)
except (TypeError, praw.exceptions.APIException) as e:
logger.exception("Failure fetching short url: \"" + original_post.url + "\"")
return None
# Found nothing
else:
return None
'''
Get the text to be copied from a post or comment
'''
def get_post_text(self, post):
submission = post.submission if type(post) is praw.models.Comment else post
if submission:
title = submission.title
else:
title = "[removed] \n" + error_msg + "\n"
content = ""
# Copy post content
if submission and submission.is_self and len(submission.selftext) > 0:
for para in submission.selftext.split("\n"):
content += "> " + para + "\n"
# No content, copy link
elif submission:
content += submission.url + "\n"
# Copy entire comment chain
if type(post) is praw.models.Comment:
try:
content += self.get_comment_chain(post)
# Could not find a comment in the chain
except Exception as e:
content += "\n\n[Error building full comment tree] \n" + error_msg + "\n\n"
for para in post.body.split("\n"):
content += "> " + para + "\n"
logger.exception("Error building comment tree for \"" + post.id + "\"")
return title, content
'''
Build a comment chain
'''
def get_comment_chain(self, post):
submission = post.submission
op_name = submission.author.name if submission.author else "[deleted]"
# Build comment chain (in reverse)
# NOTE: this can potentially make a lot of network requests since it has to fetch each parent
comment = post
comment_list = [comment]
while not comment.is_root:
comment = comment.parent()
comment.refresh()
comment_list.append(comment)
# Form chain text
content = ""
level = 2
for comment in comment_list[::-1]:
# Author account exists
if comment.author:
author = "/u/" + comment.author.name
if comment.author.name == op_name:
author += " (OP)"
# Author account deleted
else:
author = "[deleted]"
content += ("> " * level) + author + ":\n\n"
# Comment body exists
if comment.body:
for para in comment.body.split("\n"):
content += (">" * level) + para + "\n"
# Comment body deleted
else:
content += ("> " * level) + "[deleted]\n"
level += 1
return content
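    # Illustrative shape of the chain built above (a sketch): each level of the
    # chain gains one '>' of quoting, e.g.
    #   > > /u/alice (OP):
    #   >> first comment text
    #   > > > /u/bob:
    #   >>> reply text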
'''
Copy the content of a reddit post
'''
def copy_post(self, parent, link):
title, content = self.get_post_text(link)
if len(content) + len(title) > 0:
text = ""
if taglines and len(taglines) > 0:
text += taglines[randint(0, len(taglines) - 1)]
text += TEXT_DIVIDER
if title:
text += title + "\n\n"
if content:
# Check length <= max length - (text + 2 x divider + \n\n + footer)
if len(content) <= MAX_COMMENT_LENGTH - (len(text) + (2 * len(TEXT_DIVIDER)) + 2 + len(footer)):
text += content
else:
text += "> " + error_msg
text += TEXT_DIVIDER
text += footer
# ID is either post ID or post id + comment ID depending on type
parent_id = parent.id if type(parent) is praw.models.Submission else parent.submission.id + "+" + parent.id
try:
#if type(parent) is praw.models.Submission:
# comment = parent.add_comment(text)
#else:
comment = parent.reply(text)
db_post = Post.create(id = parent_id)
db_content = Content()
db_content.permalink = link.permalink if link else ""
db_content.created = link.created_utc if link else datetime.utcnow()
db_content.edited = None if not link.edited else link.edited
db_content.last_checked = datetime.utcnow().timestamp()
db_content.update_interval = 60
db_content.post = db_post
db_content.save()
db_reply = Reply()
db_reply.permalink = comment.permalink
db_reply.latest_content = content
db_reply.post = db_post
db_reply.save()
logger.info("Successfully copied \"" + link.id + "\" to \"" + parent_id + "\"")
except praw.exceptions.APIException as e:
logger.exception("Failed to copy \"" + link.id + "\" to \"" + parent_id + "\"")
'''
Check for new posts to copy
'''
def check_new_posts(self):
for post in subreddit.new(limit = post_limit):
if not self.database.is_post_in_db(post.id):
try:
link = self.get_post_to_copy(post)
except CannotCopyError:
ignore = Post.create(id = post.id)
continue
# Found post to copy
if link:
self.copy_post(post, link)
'''
Forward a message from the bot
'''
def forward_message(self, message):
if not forwarding_address:
return
subject = "[/u/" + message.author.name + "] " + message.subject
body = ("https://www.reddit.com" + message.context) if hasattr(message, "context") else ""
body += "\n\n"
for para in message.body.split("\n"):
body += "> " + para + "\n"
try:
reddit.redditor(forwarding_address).message(subject, body)
logger.info("Successfully forwarded message from /u/" + message.author.name)
except praw.exceptions.APIException:
logger.exception("Failed to forward message from /u/" + message.author.name)
'''
Check any messages sent to the bot
'''
def check_messages(self):
for unread in reddit.inbox.unread(mark_read=True):
# Respond to summon
if summon_phrase and (unread.subject.lower().startswith("username mention") or unread.subject.lower().startswith("comment reply")):
lines = [line for line in unread.body.split("\n") if line]
if len(lines) >= 2 and lines[0].lower().startswith(summon_phrase):
parent = self.get_correct_reddit_object("https://www.reddit.com" + unread.context)
if parent.subreddit == subreddit and not self.database.is_post_in_db(parent.submission.id + "+" + parent.id):
link = self.get_post_to_copy(parent)
self.copy_post(parent, link)
else:
self.forward_message(unread)
# Forward message
else:
self.forward_message(unread)
unread.mark_read()
'''
Check for new links to copy in comments
'''
def check_new_comments(self):
for comment in subreddit.comments(limit = comment_limit):
id = comment.submission.id + "+" + comment.id
if not self.database.is_post_in_db(id):
try:
link = self.get_post_to_copy(comment)
except CannotCopyError:
ignore = Post.create(id = id)
continue
# Found comment with link to copy
if link:
self.copy_post(comment, link)
else:
ignore = Post.create(id = id)
'''
Check for posts that have been edited
'''
def check_edits(self):
i = 0
for db_post in self.database.get_posts_to_check_edits():
if i > 8:
return
db_content = db_post.content.get()
rd_content = self.get_correct_reddit_object(db_content.permalink)
if not rd_content:
continue
# Post was edited more recently than last check
if rd_content.edited and rd_content.edited > db_content.last_checked:
db_reply = self.database.get_reply_to_post(db_post.id)
rd_reply = self.get_correct_reddit_object(db_reply.permalink)
body_start = rd_reply.body.index("----\n") + 5
body_end = rd_reply.body.rindex("----")
footer = rd_reply.body[body_end:]
old_body = rd_reply.body[body_start:body_end]
# Jump back one more horizontal rule for each previous edit in the post
                for _ in range(db_post.edits.count()):
try:
body_end = old_body.rindex("\n\n----")
old_body = old_body[0:body_end]
# This can happen if an edit was saved but the reply body was not updated
except ValueError:
break
title, content = self.get_post_text(rd_content)
new_body = content
# Diff the previous version with the edited version to get the latest changes
diff = list(difflib.unified_diff(db_reply.latest_content.split("\n"), new_body.split("\n")))
edit_content = "Edited @ " + datetime.fromtimestamp(rd_content.edited).strftime("%d/%m/%Y %H:%M:%S") + "\n\n"
# No actual difference, so don't bother editing
if len(diff) == 0:
db_content.last_checked = time.time()
db_content.edited = rd_content.edited
db_content.update_interval = min(db_content.update_interval * 2, 16384)
self.database.save_objects([db_content])
continue
for line in diff[3:]:
# Swap + or - with last > (or ' ' with last > which is harmless)
match = re.search(r"([\+|\-| ]>*).*", line, re.IGNORECASE)
if match:
idx = len(match.group(1))
if line[idx:].strip() == "":
continue
# Escape + and - to avoid reddit turning it into a bullet point
line = line[1:idx] + ("\\" if line[0] == "-" or line[0] == "+" else "") + line[0] + " " + line[idx:]
edit_content += line + "\n\n"
text = rd_reply.body[0:body_start] + old_body
for edit in db_post.edits:
text += "\n\n----\n" + edit.content
text += "\n\n----\n" + edit_content
text += footer
try:
rd_reply.edit(text)
# Update Content object
db_content.last_checked = time.time()
db_content.edited = rd_content.edited
db_content.update_interval = 60
# Update Reply object
db_reply.latest_content = new_body
# Create new Edit object
edit = Edit()
edit.content = edit_content
edit.edit_time = rd_content.edited
edit.post = db_post
self.database.save_objects([db_content, db_reply, edit])
logger.info("Successfully edited \"" + rd_reply.id + "\" in \"" + db_post.id.strip() + "\"")
except praw.exceptions.APIException as e:
db_content.last_checked = time.time()
db_content.edited = rd_content.edited
db_content.update_interval = min(db_content.update_interval * 2, 16384 * 2)
self.database.save_objects([db_content])
logger.exception("Failed to edit \"" + rd_reply.id + "\" in \"" + db_post.id.strip() + "\"")
except peewee.OperationalError as e:
logger.exception("Failed to save \"" + rd_reply.id + "\" in \"" + db_post.id.strip() + "\"")
#pass
# Not edited since last check
else:
db_content.last_checked = time.time()
db_content.edited = rd_content.edited
db_content.update_interval = min(db_content.update_interval * 2, 16384 * 2)
try:
self.database.save_objects([db_content])
except peewee.OperationalError as e:
logger.exception("Failed to save \"" + db_post.id.strip() + "\"")
#pass
i = i + 1
def main():
copykun = CopyKun()
try:
start = time.time()
copykun.check_new_posts()
logger.debug("check_new_posts: {:.2f}s".format(time.time() - start))
start = time.time()
copykun.check_messages()
logger.debug("check_messages: {:.2f}s".format(time.time() - start))
if auto_copy:
start = time.time()
copykun.check_new_comments()
logger.debug("check_new_comments: {:.2f}s".format(time.time() - start))
start = time.time()
copykun.check_edits()
logger.debug("check_edits: {:.2f}s".format(time.time() - start))
except KeyboardInterrupt:
exit(0)
except Exception as e:
logger.exception(e)
exit(1)
if __name__ == "__main__":
main()
|
|
from __future__ import absolute_import
from collections import namedtuple
from copy import deepcopy
import logging
import random
import sys
import time
import six
from kafka.client import KafkaClient
from kafka.common import (
OffsetFetchRequest, OffsetCommitRequest, OffsetRequest, FetchRequest,
check_error, NotLeaderForPartitionError, UnknownTopicOrPartitionError,
OffsetOutOfRangeError, RequestTimedOutError, KafkaMessage, ConsumerTimeout,
FailedPayloadsError, KafkaUnavailableError, KafkaConfigurationError
)
from kafka.util import kafka_bytestring
logger = logging.getLogger(__name__)
OffsetsStruct = namedtuple("OffsetsStruct", ["fetch", "highwater", "commit", "task_done"])
DEFAULT_CONSUMER_CONFIG = {
'client_id': __name__,
'group_id': None,
'bootstrap_servers': [],
'socket_timeout_ms': 30 * 1000,
'fetch_message_max_bytes': 1024 * 1024,
'auto_offset_reset': 'largest',
'fetch_min_bytes': 1,
'fetch_wait_max_ms': 100,
'refresh_leader_backoff_ms': 200,
'deserializer_class': lambda msg: msg,
'auto_commit_enable': False,
'auto_commit_interval_ms': 60 * 1000,
'auto_commit_interval_messages': None,
'consumer_timeout_ms': -1,
# Currently unused
'socket_receive_buffer_bytes': 64 * 1024,
'num_consumer_fetchers': 1,
'default_fetcher_backoff_ms': 1000,
'queued_max_message_chunks': 10,
'rebalance_max_retries': 4,
'rebalance_backoff_ms': 2000,
}
DEPRECATED_CONFIG_KEYS = {
'metadata_broker_list': 'bootstrap_servers',
}
class KafkaConsumer(object):
"""A simpler kafka consumer"""
def __init__(self, *topics, **configs):
self.configure(**configs)
self.set_topic_partitions(*topics)
def configure(self, **configs):
"""Configure the consumer instance
Configuration settings can be passed to constructor,
otherwise defaults will be used:
Keyword Arguments:
bootstrap_servers (list): List of initial broker nodes the consumer
should contact to bootstrap initial cluster metadata. This does
not have to be the full node list. It just needs to have at
least one broker that will respond to a Metadata API Request.
client_id (str): a unique name for this client. Defaults to
'kafka.consumer.kafka'.
group_id (str): the name of the consumer group to join,
Offsets are fetched / committed to this group name.
fetch_message_max_bytes (int, optional): Maximum bytes for each
topic/partition fetch request. Defaults to 1024*1024.
fetch_min_bytes (int, optional): Minimum amount of data the server
should return for a fetch request, otherwise wait up to
fetch_wait_max_ms for more data to accumulate. Defaults to 1.
fetch_wait_max_ms (int, optional): Maximum time for the server to
block waiting for fetch_min_bytes messages to accumulate.
Defaults to 100.
refresh_leader_backoff_ms (int, optional): Milliseconds to backoff
when refreshing metadata on errors (subject to random jitter).
Defaults to 200.
socket_timeout_ms (int, optional): TCP socket timeout in
milliseconds. Defaults to 30*1000.
auto_offset_reset (str, optional): A policy for resetting offsets on
OffsetOutOfRange errors. 'smallest' will move to the oldest
available message, 'largest' will move to the most recent. Any
                other value will raise an exception. Defaults to 'largest'.
deserializer_class (callable, optional): Any callable that takes a
raw message value and returns a deserialized value. Defaults to
lambda msg: msg.
auto_commit_enable (bool, optional): Enabling auto-commit will cause
the KafkaConsumer to periodically commit offsets without an
explicit call to commit(). Defaults to False.
auto_commit_interval_ms (int, optional): If auto_commit_enabled,
the milliseconds between automatic offset commits. Defaults to
60 * 1000.
auto_commit_interval_messages (int, optional): If
auto_commit_enabled, a number of messages consumed between
automatic offset commits. Defaults to None (disabled).
consumer_timeout_ms (int, optional): number of millisecond to throw
a timeout exception to the consumer if no message is available
                for consumption. Defaults to -1 (don't throw an exception).
Configuration parameters are described in more detail at
http://kafka.apache.org/documentation.html#highlevelconsumerapi
"""
configs = self._deprecate_configs(**configs)
self._config = {}
for key in DEFAULT_CONSUMER_CONFIG:
self._config[key] = configs.pop(key, DEFAULT_CONSUMER_CONFIG[key])
if configs:
raise KafkaConfigurationError('Unknown configuration key(s): ' +
str(list(configs.keys())))
if self._config['auto_commit_enable']:
if not self._config['group_id']:
raise KafkaConfigurationError('KafkaConsumer configured to auto-commit without required consumer group (group_id)')
# Check auto-commit configuration
if self._config['auto_commit_enable']:
logger.info("Configuring consumer to auto-commit offsets")
self._reset_auto_commit()
if not self._config['bootstrap_servers']:
raise KafkaConfigurationError('bootstrap_servers required to '
'configure KafkaConsumer')
self._client = KafkaClient(self._config['bootstrap_servers'],
client_id=self._config['client_id'],
timeout=(self._config['socket_timeout_ms'] / 1000.0))
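    # A minimal construction sketch (not from the original source), mirroring the
    # keyword arguments documented above; topic, group and broker values are
    # placeholders:
    #
    #     consumer = KafkaConsumer('my-topic',
    #                              group_id='my-group',
    #                              bootstrap_servers=['localhost:9092'],
    #                              auto_commit_enable=True,
    #                              auto_commit_interval_ms=30 * 1000)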
def set_topic_partitions(self, *topics):
"""
Set the topic/partitions to consume
Optionally specify offsets to start from
Accepts types:
* str (utf-8): topic name (will consume all available partitions)
* tuple: (topic, partition)
* dict:
- { topic: partition }
- { topic: [partition list] }
- { topic: (partition tuple,) }
Optionally, offsets can be specified directly:
* tuple: (topic, partition, offset)
* dict: { (topic, partition): offset, ... }
Example:
.. code:: python
kafka = KafkaConsumer()
# Consume topic1-all; topic2-partition2; topic3-partition0
kafka.set_topic_partitions("topic1", ("topic2", 2), {"topic3": 0})
# Consume topic1-0 starting at offset 123, and topic2-1 at offset 456
# using tuples --
kafka.set_topic_partitions(("topic1", 0, 123), ("topic2", 1, 456))
# using dict --
kafka.set_topic_partitions({ ("topic1", 0): 123, ("topic2", 1): 456 })
"""
self._topics = []
self._client.load_metadata_for_topics()
# Setup offsets
self._offsets = OffsetsStruct(fetch=dict(),
commit=dict(),
highwater=dict(),
task_done=dict())
# Handle different topic types
for arg in topics:
# Topic name str -- all partitions
if isinstance(arg, (six.string_types, six.binary_type)):
topic = kafka_bytestring(arg)
for partition in self._client.get_partition_ids_for_topic(topic):
self._consume_topic_partition(topic, partition)
# (topic, partition [, offset]) tuple
elif isinstance(arg, tuple):
topic = kafka_bytestring(arg[0])
partition = arg[1]
self._consume_topic_partition(topic, partition)
if len(arg) == 3:
offset = arg[2]
self._offsets.fetch[(topic, partition)] = offset
# { topic: partitions, ... } dict
elif isinstance(arg, dict):
for key, value in six.iteritems(arg):
# key can be string (a topic)
if isinstance(key, (six.string_types, six.binary_type)):
topic = kafka_bytestring(key)
# topic: partition
if isinstance(value, int):
self._consume_topic_partition(topic, value)
# topic: [ partition1, partition2, ... ]
elif isinstance(value, (list, tuple)):
for partition in value:
self._consume_topic_partition(topic, partition)
else:
raise KafkaConfigurationError('Unknown topic type (dict key must be '
'int or list/tuple of ints)')
# (topic, partition): offset
elif isinstance(key, tuple):
topic = kafka_bytestring(key[0])
partition = key[1]
self._consume_topic_partition(topic, partition)
self._offsets.fetch[(topic, partition)] = value
else:
raise KafkaConfigurationError('Unknown topic type (%s)' % type(arg))
# If we have a consumer group, try to fetch stored offsets
if self._config['group_id']:
self._get_commit_offsets()
# Update missing fetch/commit offsets
for topic_partition in self._topics:
# Commit offsets default is None
if topic_partition not in self._offsets.commit:
self._offsets.commit[topic_partition] = None
# Skip if we already have a fetch offset from user args
if topic_partition not in self._offsets.fetch:
# Fetch offsets default is (1) commit
if self._offsets.commit[topic_partition] is not None:
self._offsets.fetch[topic_partition] = self._offsets.commit[topic_partition]
# or (2) auto reset
else:
self._offsets.fetch[topic_partition] = self._reset_partition_offset(topic_partition)
# highwater marks (received from server on fetch response)
# and task_done (set locally by user)
# should always get initialized to None
self._reset_highwater_offsets()
self._reset_task_done_offsets()
# Reset message iterator in case we were in the middle of one
self._reset_message_iterator()
def next(self):
"""Return the next available message
Blocks indefinitely unless consumer_timeout_ms > 0
Returns:
a single KafkaMessage from the message iterator
Raises:
ConsumerTimeout after consumer_timeout_ms and no message
Note:
This is also the method called internally during iteration
"""
self._set_consumer_timeout_start()
while True:
try:
return six.next(self._get_message_iterator())
# Handle batch completion
except StopIteration:
self._reset_message_iterator()
self._check_consumer_timeout()
def fetch_messages(self):
"""Sends FetchRequests for all topic/partitions set for consumption
Returns:
Generator that yields KafkaMessage structs
after deserializing with the configured `deserializer_class`
Note:
Refreshes metadata on errors, and resets fetch offset on
OffsetOutOfRange, per the configured `auto_offset_reset` policy
See Also:
Key KafkaConsumer configuration parameters:
* `fetch_message_max_bytes`
* `fetch_max_wait_ms`
* `fetch_min_bytes`
* `deserializer_class`
* `auto_offset_reset`
"""
max_bytes = self._config['fetch_message_max_bytes']
max_wait_time = self._config['fetch_wait_max_ms']
min_bytes = self._config['fetch_min_bytes']
if not self._topics:
raise KafkaConfigurationError('No topics or partitions configured')
if not self._offsets.fetch:
raise KafkaConfigurationError('No fetch offsets found when calling fetch_messages')
fetches = [FetchRequest(topic, partition,
self._offsets.fetch[(topic, partition)],
max_bytes)
for (topic, partition) in self._topics]
# client.send_fetch_request will collect topic/partition requests by leader
# and send each group as a single FetchRequest to the correct broker
try:
responses = self._client.send_fetch_request(fetches,
max_wait_time=max_wait_time,
min_bytes=min_bytes,
fail_on_error=False)
except FailedPayloadsError:
logger.warning('FailedPayloadsError attempting to fetch data from kafka')
self._refresh_metadata_on_error()
return
for resp in responses:
topic = kafka_bytestring(resp.topic)
partition = resp.partition
try:
check_error(resp)
except OffsetOutOfRangeError:
logger.warning('OffsetOutOfRange: topic %s, partition %d, '
'offset %d (Highwatermark: %d)',
topic, partition,
self._offsets.fetch[(topic, partition)],
resp.highwaterMark)
# Reset offset
self._offsets.fetch[(topic, partition)] = (
self._reset_partition_offset((topic, partition))
)
continue
except NotLeaderForPartitionError:
logger.warning("NotLeaderForPartitionError for %s - %d. "
"Metadata may be out of date",
topic, partition)
self._refresh_metadata_on_error()
continue
except RequestTimedOutError:
logger.warning("RequestTimedOutError for %s - %d",
topic, partition)
continue
# Track server highwater mark
self._offsets.highwater[(topic, partition)] = resp.highwaterMark
# Yield each message
# Kafka-python could raise an exception during iteration
# we are not catching -- user will need to address
for (offset, message) in resp.messages:
# deserializer_class could raise an exception here
val = self._config['deserializer_class'](message.value)
msg = KafkaMessage(topic, partition, offset, message.key, val)
# in some cases the server will return earlier messages
# than we requested. skip them per kafka spec
if offset < self._offsets.fetch[(topic, partition)]:
logger.debug('message offset less than fetched offset '
'skipping: %s', msg)
continue
# Only increment fetch offset if we safely got the message and deserialized
self._offsets.fetch[(topic, partition)] = offset + 1
# Then yield to user
yield msg
def get_partition_offsets(self, topic, partition, request_time_ms, max_num_offsets):
"""Request available fetch offsets for a single topic/partition
Keyword Arguments:
topic (str): topic for offset request
partition (int): partition for offset request
request_time_ms (int): Used to ask for all messages before a
certain time (ms). There are two special values. Specify -1 to receive the latest
offset (i.e. the offset of the next coming message) and -2 to receive the earliest
available offset. Note that because offsets are pulled in descending order, asking for
the earliest offset will always return you a single element.
max_num_offsets (int): Maximum offsets to include in the OffsetResponse
Returns:
a list of offsets in the OffsetResponse submitted for the provided
topic / partition. See:
https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetAPI
"""
reqs = [OffsetRequest(topic, partition, request_time_ms, max_num_offsets)]
(resp,) = self._client.send_offset_request(reqs)
check_error(resp)
# Just for sanity..
# probably unnecessary
assert resp.topic == topic
assert resp.partition == partition
return resp.offsets
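    # Example (a sketch, using the special request times documented above) of
    # reading the earliest and latest available offsets for a partition:
    #
    #     (earliest,) = consumer.get_partition_offsets('my-topic', 0, -2, 1)
    #     (latest,)   = consumer.get_partition_offsets('my-topic', 0, -1, 1)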
def offsets(self, group=None):
"""Get internal consumer offset values
Keyword Arguments:
group: Either "fetch", "commit", "task_done", or "highwater".
If no group specified, returns all groups.
Returns:
A copy of internal offsets struct
"""
if not group:
return {
'fetch': self.offsets('fetch'),
'commit': self.offsets('commit'),
'task_done': self.offsets('task_done'),
'highwater': self.offsets('highwater')
}
else:
return dict(deepcopy(getattr(self._offsets, group)))
def task_done(self, message):
"""Mark a fetched message as consumed.
Offsets for messages marked as "task_done" will be stored back
to the kafka cluster for this consumer group on commit()
Arguments:
message (KafkaMessage): the message to mark as complete
Returns:
Nothing
"""
topic_partition = (message.topic, message.partition)
offset = message.offset
# Warn on non-contiguous offsets
prev_done = self._offsets.task_done[topic_partition]
if prev_done is not None and offset != (prev_done + 1):
            logger.warning('Marking task_done on a non-contiguous offset: %d != %d + 1',
offset, prev_done)
# Warn on smaller offsets than previous commit
# "commit" offsets are actually the offset of the next message to fetch.
prev_commit = self._offsets.commit[topic_partition]
if prev_commit is not None and ((offset + 1) <= prev_commit):
logger.warning('Marking task_done on a previously committed offset?: %d (+1) <= %d',
offset, prev_commit)
self._offsets.task_done[topic_partition] = offset
# Check for auto-commit
if self._does_auto_commit_messages():
self._incr_auto_commit_message_count()
if self._should_auto_commit():
self.commit()
def commit(self):
"""Store consumed message offsets (marked via task_done())
to kafka cluster for this consumer_group.
Returns:
True on success, or False if no offsets were found for commit
Note:
this functionality requires server version >=0.8.1.1
https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetCommit/FetchAPI
"""
if not self._config['group_id']:
logger.warning('Cannot commit without a group_id!')
raise KafkaConfigurationError('Attempted to commit offsets without a configured consumer group (group_id)')
# API supports storing metadata with each commit
# but for now it is unused
metadata = b''
offsets = self._offsets.task_done
commits = []
for topic_partition, task_done_offset in six.iteritems(offsets):
# Skip if None
if task_done_offset is None:
continue
# Commit offsets as the next offset to fetch
# which is consistent with the Java Client
# task_done is marked by messages consumed,
# so add one to mark the next message for fetching
commit_offset = (task_done_offset + 1)
# Skip if no change from previous committed
if commit_offset == self._offsets.commit[topic_partition]:
continue
commits.append(OffsetCommitRequest(topic_partition[0], topic_partition[1], commit_offset, metadata))
if commits:
logger.info('committing consumer offsets to group %s', self._config['group_id'])
resps = self._client.send_offset_commit_request(kafka_bytestring(self._config['group_id']),
commits,
fail_on_error=False)
for r in resps:
check_error(r)
topic_partition = (r.topic, r.partition)
task_done = self._offsets.task_done[topic_partition]
self._offsets.commit[topic_partition] = (task_done + 1)
if self._config['auto_commit_enable']:
self._reset_auto_commit()
return True
else:
logger.info('No new offsets found to commit in group %s', self._config['group_id'])
return False
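    # A typical consume loop sketch (not from the original source) tying
    # iteration, task_done() and commit() together; process() is a hypothetical
    # user-supplied handler:
    #
    #     for msg in consumer:
    #         process(msg)
    #         consumer.task_done(msg)
    #     consumer.commit()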
#
# Topic/partition management private methods
#
def _consume_topic_partition(self, topic, partition):
topic = kafka_bytestring(topic)
if not isinstance(partition, int):
raise KafkaConfigurationError('Unknown partition type (%s) '
'-- expected int' % type(partition))
if topic not in self._client.topic_partitions:
raise UnknownTopicOrPartitionError("Topic %s not found in broker metadata" % topic)
if partition not in self._client.get_partition_ids_for_topic(topic):
raise UnknownTopicOrPartitionError("Partition %d not found in Topic %s "
"in broker metadata" % (partition, topic))
logger.info("Configuring consumer to fetch topic '%s', partition %d", topic, partition)
self._topics.append((topic, partition))
def _refresh_metadata_on_error(self):
refresh_ms = self._config['refresh_leader_backoff_ms']
jitter_pct = 0.20
sleep_ms = random.randint(
int((1.0 - 0.5 * jitter_pct) * refresh_ms),
int((1.0 + 0.5 * jitter_pct) * refresh_ms)
)
while True:
logger.info("Sleeping for refresh_leader_backoff_ms: %d", sleep_ms)
time.sleep(sleep_ms / 1000.0)
try:
self._client.load_metadata_for_topics()
except KafkaUnavailableError:
logger.warning("Unable to refresh topic metadata... cluster unavailable")
self._check_consumer_timeout()
else:
logger.info("Topic metadata refreshed")
return
#
    # Offset-management private methods
#
def _get_commit_offsets(self):
logger.info("Consumer fetching stored offsets")
for topic_partition in self._topics:
(resp,) = self._client.send_offset_fetch_request(
kafka_bytestring(self._config['group_id']),
[OffsetFetchRequest(topic_partition[0], topic_partition[1])],
fail_on_error=False)
try:
check_error(resp)
                # API spec says server won't set an error here
# but 0.8.1.1 does actually...
except UnknownTopicOrPartitionError:
pass
# -1 offset signals no commit is currently stored
if resp.offset == -1:
self._offsets.commit[topic_partition] = None
# Otherwise we committed the stored offset
# and need to fetch the next one
else:
self._offsets.commit[topic_partition] = resp.offset
def _reset_highwater_offsets(self):
for topic_partition in self._topics:
self._offsets.highwater[topic_partition] = None
def _reset_task_done_offsets(self):
for topic_partition in self._topics:
self._offsets.task_done[topic_partition] = None
def _reset_partition_offset(self, topic_partition):
(topic, partition) = topic_partition
LATEST = -1
EARLIEST = -2
request_time_ms = None
if self._config['auto_offset_reset'] == 'largest':
request_time_ms = LATEST
elif self._config['auto_offset_reset'] == 'smallest':
request_time_ms = EARLIEST
else:
            # Let's raise a reasonable exception type if the user calls
            # this outside of an exception context
if sys.exc_info() == (None, None, None):
raise OffsetOutOfRangeError('Cannot reset partition offsets without a '
'valid auto_offset_reset setting '
'(largest|smallest)')
# Otherwise we should re-raise the upstream exception
# b/c it typically includes additional data about
# the request that triggered it, and we do not want to drop that
raise
(offset, ) = self.get_partition_offsets(topic, partition,
request_time_ms, max_num_offsets=1)
return offset
#
# Consumer Timeout private methods
#
def _set_consumer_timeout_start(self):
self._consumer_timeout = False
if self._config['consumer_timeout_ms'] >= 0:
self._consumer_timeout = time.time() + (self._config['consumer_timeout_ms'] / 1000.0)
def _check_consumer_timeout(self):
if self._consumer_timeout and time.time() > self._consumer_timeout:
            raise ConsumerTimeout('Consumer timed out after %d ms' % self._config['consumer_timeout_ms'])
#
# Autocommit private methods
#
def _should_auto_commit(self):
if self._does_auto_commit_ms():
if time.time() >= self._next_commit_time:
return True
if self._does_auto_commit_messages():
if self._uncommitted_message_count >= self._config['auto_commit_interval_messages']:
return True
return False
def _reset_auto_commit(self):
self._uncommitted_message_count = 0
self._next_commit_time = None
if self._does_auto_commit_ms():
self._next_commit_time = time.time() + (self._config['auto_commit_interval_ms'] / 1000.0)
def _incr_auto_commit_message_count(self, n=1):
self._uncommitted_message_count += n
def _does_auto_commit_ms(self):
if not self._config['auto_commit_enable']:
return False
conf = self._config['auto_commit_interval_ms']
if conf is not None and conf > 0:
return True
return False
def _does_auto_commit_messages(self):
if not self._config['auto_commit_enable']:
return False
conf = self._config['auto_commit_interval_messages']
if conf is not None and conf > 0:
return True
return False
#
# Message iterator private methods
#
def __iter__(self):
return self
def __next__(self):
return self.next()
def _get_message_iterator(self):
# Fetch a new batch if needed
if self._msg_iter is None:
self._msg_iter = self.fetch_messages()
return self._msg_iter
def _reset_message_iterator(self):
self._msg_iter = None
#
# python private methods
#
def __repr__(self):
return '<KafkaConsumer topics=(%s)>' % ', '.join(["%s-%d" % topic_partition
for topic_partition in
self._topics])
#
# other private methods
#
def _deprecate_configs(self, **configs):
for old, new in six.iteritems(DEPRECATED_CONFIG_KEYS):
if old in configs:
logger.warning('Deprecated Kafka Consumer configuration: %s. '
'Please use %s instead.', old, new)
old_value = configs.pop(old)
if new not in configs:
configs[new] = old_value
return configs
|
|
"""Finds out the ellipse that best fits to a set of data points and calculates
its keplerian elements.
"""
import math
import argparse
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize
from functools import partial
def __read_args():
"""Reads command line arguments.
Returns:
object: Parsed arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, help='path to .csv file', default='../example_data/orbit.csv')
parser.add_argument('-u', '--units', type=str, help='units of distance (m or km)', default='km')
return parser.parse_args()
def __cross_sum(data):
"""Returns the normalized sum of the cross products between consecutive vectors.
Args:
data(nx3 numpy array): A matrix where each column represents the x,y,z coordinates of each position vector.
Returns:
float: The normalized sum of the cross products between consecutive vectors.
"""
cross_sum = 0
for i in range(len(data)-1):
v1 = data[i]
v2 = data[i+1]
cross_sum = cross_sum + np.cross(v1,v2)
return cross_sum/np.linalg.norm(cross_sum)
def __plane_err(data,coeffs):
"""Calculates the total squared error of the data wrt a plane.
The data should be a list of points. coeffs is an array of
3 elements - the coefficients a,b,c in the plane equation
ax+by+c = 0.
Args:
data(nx3 numpy array): A numpy array of points.
coeffs(1x3 array): The coefficients of the plane ax+by+c=0.
Returns:
float: The total squared error wrt the plane defined by ax+by+cz = 0.
"""
a,b,c = coeffs
return np.sum((a*data[:,0]+b*data[:,1]+c*data[:,2])**2)/(a**2+b**2+c**2)
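# Worked example (illustrative only): for coeffs (0, 0, 1) the plane is z = 0, so
# each point contributes z**2 to the sum; the two points (1, 2, 3) and (4, 5, -6)
# would give a total error of 3**2 + (-6)**2 = 45.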
def __project_to_plane(points,coeffs):
"""Projects points onto a plane.
Projects a list of points onto the plane ax+by+c=0,
where a,b,c are elements of coeffs.
Args:
points(nx3 numpy array): A numpy array of points.
coeffs(1x3 array): The coefficients of the plane ax+by+c=0.
Returns:
nx3 numpy array: A list of projected points.
"""
a,b,c = coeffs
proj_mat = [[b**2+c**2, -a*b , -a*c ],
[ -a*b ,a**2+c**2, -b*c ],
[ -a*c , -b*c ,a**2+b**2]]
return np.matmul(points,proj_mat)/(a**2+b**2+c**2)
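# Worked example (illustrative only): with coeffs (0, 0, 1) the matrix above
# reduces to diag(1, 1, 0), so the point (2, 3, 5) projects onto the x-y plane
# as (2, 3, 0).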
def __conv_to_2D(points,x,y):
"""Finds coordinates of points in a plane wrt a basis.
Given a list of points in a plane, and a basis of the plane,
this function returns the coordinates of those points
wrt this basis.
Args:
points(numpy array): A numpy array of points.
x(3x1 numpy array): One vector of the basis.
y(3x1 numpy array): Another vector of the basis.
Returns:
nx2 numpy array: Coordinates of the points wrt the basis [x,y].
"""
mat = [x[0:2],y[0:2]]
mat_inv = np.linalg.inv(mat)
coords = np.matmul(points[:,0:2],mat_inv)
return coords
def __cart_to_pol(points):
"""Converts a list of cartesian coordinates into polar ones.
Args:
points(nx2 numpy array): The list of points in the format [x,y].
Returns:
nx2 numpy array: A list of polar coordinates in the format [radius,angle].
"""
pol = np.empty(points.shape)
pol[:,0] = np.sqrt(points[:,0]**2+points[:,1]**2)
pol[:,1] = np.arctan2(points[:,1],points[:,0])
return pol
def __ellipse_err(polar_coords,params):
"""Calculates the total squared error of the data wrt an ellipse.
params is a 3 element array used to define an ellipse.
It contains 3 elements a,e, and t0.
a is the semi-major axis
e is the eccentricity
t0 is the angle of the major axis wrt the x-axis.
These 3 elements define an ellipse with one focus at origin.
Equation of the ellipse is r = a(1-e^2)/(1+ecos(t-t0))
The function calculates r for every theta in the data.
It then takes the square of the difference and sums it.
Args:
polar_coords(nx2 numpy array): A list of polar coordinates in the format [radius,angle].
params(1x3 numpy array): The array [a,e,t0].
Returns:
float: The total squared error of the data wrt the ellipse.
"""
a,e,t0 = params
dem = 1+e*np.cos(polar_coords[:,1]-t0)
num = a*(1-e**2)
r = np.divide(num,dem)
err = np.sum((r - polar_coords[:,0])**2)
return err
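# Worked example (illustrative only): for params (a, e, t0) = (2, 0.5, 0) the
# model radius at theta = 0 is a*(1-e**2)/(1+e) = 2*0.75/1.5 = 1.0 and at
# theta = pi it is 2*0.75/0.5 = 3.0; the function sums the squared differences
# between these model radii and the measured radii.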
def __residuals(data,params,polar_coords,basis):
"""Calculates the residuals after fitting the ellipse.
Residuals are the difference between the fitted points and
the actual points.
res_x = fitted_x - initial_x
res_y = fitted_y - initial_y
res_z = fitted_z - initial_z
where fitted_x,y,z is the closest point on the ellipse to initial_x,y,z.
However, it is computationally expensive to find the true nearest point.
So we take an approximation. We consider the point on the ellipse with
the same true anomaly as the initial point to be the nearest point to it.
Since the eccentricities of the orbits involved are small, this approximation
holds.
Args:
data(nx3 numpy array): The list of original points.
params(1x3 numpy array): The array [semi-major axis, eccentricity, argument of periapsis]
of the fitted ellipse.
polar_coords(nx2 numpy array): The list of 2D polar coordinates of the original points after
projecting them onto the best-fit plane.
basis(3x2 numpy array): The basis of the best-fit plane.
Returns:
nx3 numpy array: Returns the residuals
"""
a,e,t0 = params
dem = 1+e*np.cos(polar_coords[:,1]-t0)
num = a*(1-e**2)
r = np.divide(num,dem)
# convert to cartesian
x_s = np.multiply(r,np.cos(polar_coords[:,1]))
y_s = np.multiply(r,np.sin(polar_coords[:,1]))
# convert to 3D
filtered_coords = np.transpose(np.matmul(basis,[x_s,y_s]))
residuals = filtered_coords - data
return residuals
def __read_file(file_name):
"""Reads a space separated csv file with 4 columns in the format t x y z.
Args:
file_name(string): the path to the file
Returns:
        nx3 numpy array: A numpy array with the columns [x y z]. Note that the t column is discarded.
"""
data = np.loadtxt(file_name,skiprows=1,usecols=(1,2,3))
return data
def determine_kep(data):
"""Determines keplerian elements that fit a set of points.
Args:
data(nx3 numpy array): A numpy array of points in the format [x y z].
Returns:
(kep,res) - The keplerian elements and the residuals as a tuple.
kep: 1x6 numpy array
res: nx3 numpy array
For the keplerian elements:
kep[0] - semi-major axis (in whatever units the data was provided in)
kep[1] - eccentricity
kep[2] - inclination (in degrees)
kep[3] - argument of periapsis (in degrees)
kep[4] - right ascension of ascending node (in degrees)
kep[5] - true anomaly of the first row in the data (in degrees)
For the residuals: (in whatever units the data was provided in)
res[0] - residuals in x axis
res[1] - residuals in y axis
res[2] - residuals in z axis
"""
# try to fit a plane to the data first.
# make a partial function of plane_err by supplying the data
plane_err_data = partial(__plane_err,data)
# plane is defined by ax+by+cz=0.
p0 = __cross_sum(data) # make an initial guess
# minimize the error
p = minimize(plane_err_data,p0,method='nelder-mead',options={'maxiter':1000}).x
p = p/np.linalg.norm(p) # normalize p
# now p is the normal vector of the best-fit plane.
# lan_vec is a vector along the line of intersection of the plane
# and the x-y plane.
lan_vec = np.cross([0,0,1],p)
# if lan_vec is [0,0,0] it means that it is undefined and can take on
# any value. So we set it to [1,0,0] so that the rest of the
# calculation can proceed.
if (np.array_equal(lan_vec,[0,0,0])):
lan_vec = [1,0,0]
# inclination is the angle between p and the z axis.
inc = math.acos(np.clip(p[2]/np.linalg.norm(p),-1,1))
# lan is the angle between the lan_vec and the x axis.
lan = math.atan2(lan_vec[1],lan_vec[0])%(2*math.pi)
# now we try to convert the problem into a 2D problem.
# project all the points onto the plane.
proj_data = __project_to_plane(data,p)
# p_x and p_y are 2 orthogonal unit vectors on the plane.
p_x,p_y = lan_vec, np.cross(p,lan_vec)
p_x,p_y = p_x/np.linalg.norm(p_x), p_y/np.linalg.norm(p_y)
# find coordinates of the points wrt the basis [p_x,p_y].
coords_2D = __conv_to_2D(proj_data,p_x,p_y)
# now try to fit an ellipse to these points.
# convert them into polar coordinates
polar_coords = __cart_to_pol(coords_2D)
# make an initial guess for the parametres
r_m = np.min(polar_coords[:,0])
r_M = np.max(polar_coords[:,0])
a0 = (r_m+r_M)/2
e0 = (r_M-r_m)/(r_M+r_m)
t00 = polar_coords[np.argmin(polar_coords[:,0]),1]
params0 = [a0,e0,t00] # initial guess
# make a partial function of ellipse_err with the data
ellipse_err_data = partial(__ellipse_err,polar_coords)
# minimize the error
params = minimize(ellipse_err_data,params0,method='nelder-mead',options={'maxiter':1000}).x
params[2] = params[2]%(2*math.pi) # wrap argp into the range [0, 2*pi) radians
# calculate the true anomaly of the first entry in the dataset
true_anom = (polar_coords[0][1]-params[2])%(2*math.pi)
# calculation of residuals
res = __residuals(data,params,polar_coords,np.column_stack((p_x,p_y)))
kep = np.empty((6,1))
kep[0] = params[0]
kep[1] = params[1]
kep[2] = math.degrees(inc)
kep[3] = math.degrees(params[2])
kep[4] = math.degrees(lan)
kep[5] = math.degrees(true_anom)
return kep,res
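# Illustrative usage sketch added for exposition (not part of the original script);
# it assumes numpy is imported as ``np`` at module level. ``_demo_determine_kep`` is
# a hypothetical helper name: it builds a synthetic equatorial orbit from the same
# polar ellipse equation used above and feeds it to determine_kep, so the fitted
# kep[0] and kep[1] should come back close to the chosen a and e.
def _demo_determine_kep(a=7000.0, e=0.01, n=60):
    theta = np.linspace(0, 2 * np.pi, n, endpoint=False)
    r = a * (1 - e ** 2) / (1 + e * np.cos(theta))
    points = np.column_stack((r * np.cos(theta), r * np.sin(theta), np.zeros_like(theta)))
    kep, res = determine_kep(points)
    return kep, res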
def __print_kep(kep,res,unit):
"""Prints the keplerian elements and some information on residuals.
Args:
kep(1x6 numpy array): keplerian elements
res(nx3 numpy array): residuals
unit(string): units of distance used
Returns:
None. The elements and residual statistics are printed to stdout.
"""
# output the parameters
print("Semi-major axis: ",kep[0][0],unit)
print("Eccentricity: ",kep[1][0])
print("Inclination: ",kep[2][0],"deg")
print("Argument of periapsis: ",kep[3][0],"deg")
print("Longitude of Ascending Node:",kep[4][0],"deg")
print("True Anomaly ",kep[5][0],"deg")
# print data about residuals
print()
max_res = np.max(res,axis=0)
min_res = np.min(res,axis=0)
sum_res = np.sum(res,axis=0)
avg_res = np.average(res,axis=0)
std_res = np.std(res,axis=0)
print("Printing data about residuals in each axis:")
print("Max: ",max_res)
print("Min: ",min_res)
print("Sum: ",sum_res)
print("Average: ",avg_res)
print("Standard Deviation:",std_res)
def plot_kep(kep,data):
"""Plots the original data and the orbit defined by the keplerian elements.
Args:
kep(1x6 numpy array): keplerian elements
data(nx3 numpy array): original data
Returns:
nothing
"""
a = kep[0]
e = kep[1]
inc = math.radians(kep[2])
t0 = math.radians(kep[3])
lan = math.radians(kep[4])
p_x = np.array([math.cos(lan), math.sin(lan), 0])
p_y = np.array([-math.sin(lan)*math.cos(inc), math.cos(lan)*math.cos(inc), math.sin(inc)])
# generate 1000 points on the ellipse
theta = np.linspace(0,2*math.pi,1000)
radii = a*(1-e**2)/(1+e*np.cos(theta-t0))
# convert to cartesian
x_s = np.multiply(radii,np.cos(theta))
y_s = np.multiply(radii,np.sin(theta))
# convert to 3D
mat = np.column_stack((p_x,p_y))
coords_3D = np.matmul(mat,[x_s,y_s])
fig = plt.figure()
ax = Axes3D(fig)
ax.axis('equal')
# plot
ax.plot3D(coords_3D[0],coords_3D[1],coords_3D[2],c = 'red',label='Fitted Ellipse')
ax.scatter3D(data[:,0],data[:,1],data[:,2],c='black',label='Initial Data')
# The Pale Blue Dot
ax.scatter3D(0,0,0,c='blue',depthshade=False,label='Earth')
ax.can_zoom()
ax.legend()
plt.show()
if __name__ == "__main__":
args = __read_args()
data = __read_file(args.file)
kep, res = determine_kep(data)
__print_kep(kep,res,args.units)
plot_kep(kep,data)
|
|
# -*- coding: utf-8 -*-
import datetime
from django.conf import settings
from django.test import TestCase
from haystack import connections, connection_router, reset_search_queries
from haystack.backends import SQ, BaseSearchQuery
from haystack.exceptions import FacetingError
from haystack import indexes
from haystack.models import SearchResult
from haystack.query import (SearchQuerySet, EmptySearchQuerySet,
ValuesSearchQuerySet, ValuesListSearchQuerySet)
from haystack.utils.loading import UnifiedIndex
from core.models import MockModel, AnotherMockModel, CharPKMockModel, AFifthMockModel
from core.tests.indexes import ReadQuerySetTestSearchIndex, GhettoAFifthMockModelSearchIndex, TextReadQuerySetTestSearchIndex
from core.tests.mocks import MockSearchQuery, MockSearchBackend, CharPKMockSearchBackend, MixedMockSearchBackend, ReadQuerySetMockSearchBackend, MOCK_SEARCH_RESULTS
from core.tests.views import BasicMockModelSearchIndex, BasicAnotherMockModelSearchIndex
test_pickling = True
try:
import pickle
except ImportError:
test_pickling = False
class SQTestCase(TestCase):
def test_split_expression(self):
sq = SQ(foo='bar')
self.assertEqual(sq.split_expression('foo'), ('foo', 'contains'))
self.assertEqual(sq.split_expression('foo__exact'), ('foo', 'exact'))
self.assertEqual(sq.split_expression('foo__contains'), ('foo', 'contains'))
self.assertEqual(sq.split_expression('foo__lt'), ('foo', 'lt'))
self.assertEqual(sq.split_expression('foo__lte'), ('foo', 'lte'))
self.assertEqual(sq.split_expression('foo__gt'), ('foo', 'gt'))
self.assertEqual(sq.split_expression('foo__gte'), ('foo', 'gte'))
self.assertEqual(sq.split_expression('foo__in'), ('foo', 'in'))
self.assertEqual(sq.split_expression('foo__startswith'), ('foo', 'startswith'))
self.assertEqual(sq.split_expression('foo__range'), ('foo', 'range'))
# Unrecognized filter. Fall back to the default filter (contains).
self.assertEqual(sq.split_expression('foo__moof'), ('foo', 'contains'))
def test_repr(self):
self.assertEqual(repr(SQ(foo='bar')), '<SQ: AND foo__contains=bar>')
self.assertEqual(repr(SQ(foo=1)), '<SQ: AND foo__contains=1>')
self.assertEqual(repr(SQ(foo=datetime.datetime(2009, 5, 12, 23, 17))), '<SQ: AND foo__contains=2009-05-12 23:17:00>')
def test_simple_nesting(self):
sq1 = SQ(foo='bar')
sq2 = SQ(foo='bar')
bigger_sq = SQ(sq1 & sq2)
self.assertEqual(repr(bigger_sq), '<SQ: AND (foo__contains=bar AND foo__contains=bar)>')
another_bigger_sq = SQ(sq1 | sq2)
self.assertEqual(repr(another_bigger_sq), '<SQ: AND (foo__contains=bar OR foo__contains=bar)>')
one_more_bigger_sq = SQ(sq1 & ~sq2)
self.assertEqual(repr(one_more_bigger_sq), '<SQ: AND (foo__contains=bar AND NOT (foo__contains=bar))>')
mega_sq = SQ(bigger_sq & SQ(another_bigger_sq | ~one_more_bigger_sq))
self.assertEqual(repr(mega_sq), '<SQ: AND ((foo__contains=bar AND foo__contains=bar) AND ((foo__contains=bar OR foo__contains=bar) OR NOT ((foo__contains=bar AND NOT (foo__contains=bar)))))>')
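# Illustrative sketch added for this document (not part of the original test suite):
# SQ objects compose with the same &, | and ~ operators the assertions above
# exercise, so an ad-hoc combination outside the TestCase machinery looks like this.
# The field names ``author`` and ``title`` are arbitrary examples.
def _example_sq_composition():
    combined = SQ(author='daniel') & ~SQ(title='zebra')
    # repr(combined) follows the '<SQ: AND (... AND NOT (...))>' shape asserted in
    # SQTestCase.test_simple_nesting above.
    return combined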
class BaseSearchQueryTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(BaseSearchQueryTestCase, self).setUp()
self.bsq = BaseSearchQuery()
def test_get_count(self):
self.bsq.add_filter(SQ(foo='bar'))
self.assertRaises(NotImplementedError, self.bsq.get_count)
def test_build_query(self):
self.bsq.add_filter(SQ(foo='bar'))
self.assertRaises(NotImplementedError, self.bsq.build_query)
def test_add_filter(self):
self.assertEqual(len(self.bsq.query_filter), 0)
self.bsq.add_filter(SQ(foo='bar'))
self.assertEqual(len(self.bsq.query_filter), 1)
self.bsq.add_filter(SQ(foo__lt='10'))
self.bsq.add_filter(~SQ(claris='moof'))
self.bsq.add_filter(SQ(claris='moof'), use_or=True)
self.assertEqual(repr(self.bsq.query_filter), '<SQ: OR ((foo__contains=bar AND foo__lt=10 AND NOT (claris__contains=moof)) OR claris__contains=moof)>')
self.bsq.add_filter(SQ(claris='moof'))
self.assertEqual(repr(self.bsq.query_filter), '<SQ: AND (((foo__contains=bar AND foo__lt=10 AND NOT (claris__contains=moof)) OR claris__contains=moof) AND claris__contains=moof)>')
self.bsq.add_filter(SQ(claris='wtf mate'))
self.assertEqual(repr(self.bsq.query_filter), '<SQ: AND (((foo__contains=bar AND foo__lt=10 AND NOT (claris__contains=moof)) OR claris__contains=moof) AND claris__contains=moof AND claris__contains=wtf mate)>')
def test_add_order_by(self):
self.assertEqual(len(self.bsq.order_by), 0)
self.bsq.add_order_by('foo')
self.assertEqual(len(self.bsq.order_by), 1)
def test_clear_order_by(self):
self.bsq.add_order_by('foo')
self.assertEqual(len(self.bsq.order_by), 1)
self.bsq.clear_order_by()
self.assertEqual(len(self.bsq.order_by), 0)
def test_add_model(self):
self.assertEqual(len(self.bsq.models), 0)
self.assertRaises(AttributeError, self.bsq.add_model, object)
self.assertEqual(len(self.bsq.models), 0)
self.bsq.add_model(MockModel)
self.assertEqual(len(self.bsq.models), 1)
self.bsq.add_model(AnotherMockModel)
self.assertEqual(len(self.bsq.models), 2)
def test_set_limits(self):
self.assertEqual(self.bsq.start_offset, 0)
self.assertEqual(self.bsq.end_offset, None)
self.bsq.set_limits(10, 50)
self.assertEqual(self.bsq.start_offset, 10)
self.assertEqual(self.bsq.end_offset, 50)
def test_clear_limits(self):
self.bsq.set_limits(10, 50)
self.assertEqual(self.bsq.start_offset, 10)
self.assertEqual(self.bsq.end_offset, 50)
self.bsq.clear_limits()
self.assertEqual(self.bsq.start_offset, 0)
self.assertEqual(self.bsq.end_offset, None)
def test_add_boost(self):
self.assertEqual(self.bsq.boost, {})
self.bsq.add_boost('foo', 10)
self.assertEqual(self.bsq.boost, {'foo': 10})
def test_add_highlight(self):
self.assertEqual(self.bsq.highlight, False)
self.bsq.add_highlight()
self.assertEqual(self.bsq.highlight, True)
def test_more_like_this(self):
mock = MockModel()
mock.id = 1
msq = MockSearchQuery()
msq.backend = MockSearchBackend('mlt')
ui = connections['default'].get_unified_index()
bmmsi = BasicMockModelSearchIndex()
ui.build(indexes=[bmmsi])
bmmsi.update()
msq.more_like_this(mock)
self.assertEqual(msq.get_count(), 23)
self.assertEqual(int(msq.get_results()[0].pk), MOCK_SEARCH_RESULTS[0].pk)
def test_add_field_facet(self):
self.bsq.add_field_facet('foo')
self.assertEqual(self.bsq.facets, {'foo': {}})
self.bsq.add_field_facet('bar')
self.assertEqual(self.bsq.facets, {'foo': {}, 'bar': {}})
def test_add_date_facet(self):
self.bsq.add_date_facet('foo', start_date=datetime.date(2009, 2, 25), end_date=datetime.date(2009, 3, 25), gap_by='day')
self.assertEqual(self.bsq.date_facets, {'foo': {'gap_by': 'day', 'start_date': datetime.date(2009, 2, 25), 'end_date': datetime.date(2009, 3, 25), 'gap_amount': 1}})
self.bsq.add_date_facet('bar', start_date=datetime.date(2008, 1, 1), end_date=datetime.date(2009, 12, 1), gap_by='month')
self.assertEqual(self.bsq.date_facets, {'foo': {'gap_by': 'day', 'start_date': datetime.date(2009, 2, 25), 'end_date': datetime.date(2009, 3, 25), 'gap_amount': 1}, 'bar': {'gap_by': 'month', 'start_date': datetime.date(2008, 1, 1), 'end_date': datetime.date(2009, 12, 1), 'gap_amount': 1}})
def test_add_query_facet(self):
self.bsq.add_query_facet('foo', 'bar')
self.assertEqual(self.bsq.query_facets, [('foo', 'bar')])
self.bsq.add_query_facet('moof', 'baz')
self.assertEqual(self.bsq.query_facets, [('foo', 'bar'), ('moof', 'baz')])
self.bsq.add_query_facet('foo', 'baz')
self.assertEqual(self.bsq.query_facets, [('foo', 'bar'), ('moof', 'baz'), ('foo', 'baz')])
def test_add_stats(self):
self.bsq.add_stats_query('foo',['bar'])
self.assertEqual(self.bsq.stats,{'foo':['bar']})
self.bsq.add_stats_query('moof',['bar','baz'])
self.assertEqual(self.bsq.stats,{'foo':['bar'],'moof':['bar','baz']})
def test_add_narrow_query(self):
self.bsq.add_narrow_query('foo:bar')
self.assertEqual(self.bsq.narrow_queries, set(['foo:bar']))
self.bsq.add_narrow_query('moof:baz')
self.assertEqual(self.bsq.narrow_queries, set(['foo:bar', 'moof:baz']))
def test_set_result_class(self):
# Assert that we're defaulting to ``SearchResult``.
self.assertTrue(issubclass(self.bsq.result_class, SearchResult))
# Custom class.
class IttyBittyResult(object):
pass
self.bsq.set_result_class(IttyBittyResult)
self.assertTrue(issubclass(self.bsq.result_class, IttyBittyResult))
# Reset to default.
self.bsq.set_result_class(None)
self.assertTrue(issubclass(self.bsq.result_class, SearchResult))
def test_run(self):
# Stow.
self.old_unified_index = connections['default']._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.bammsi = BasicAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.bammsi])
connections['default']._index = self.ui
# Update the "index".
backend = connections['default'].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
msq = connections['default'].get_query()
self.assertEqual(len(msq.get_results()), 23)
self.assertEqual(int(msq.get_results()[0].pk), MOCK_SEARCH_RESULTS[0].pk)
# Restore.
connections['default']._index = self.old_unified_index
def test_clone(self):
self.bsq.add_filter(SQ(foo='bar'))
self.bsq.add_filter(SQ(foo__lt='10'))
self.bsq.add_filter(~SQ(claris='moof'))
self.bsq.add_filter(SQ(claris='moof'), use_or=True)
self.bsq.add_order_by('foo')
self.bsq.add_model(MockModel)
self.bsq.add_boost('foo', 2)
self.bsq.add_highlight()
self.bsq.add_field_facet('foo')
self.bsq.add_date_facet('foo', start_date=datetime.date(2009, 1, 1), end_date=datetime.date(2009, 1, 31), gap_by='day')
self.bsq.add_query_facet('foo', 'bar')
self.bsq.add_stats_query('foo', 'bar')
self.bsq.add_narrow_query('foo:bar')
clone = self.bsq._clone()
self.assertTrue(isinstance(clone, BaseSearchQuery))
self.assertEqual(len(clone.query_filter), 2)
self.assertEqual(len(clone.order_by), 1)
self.assertEqual(len(clone.models), 1)
self.assertEqual(len(clone.boost), 1)
self.assertEqual(clone.highlight, True)
self.assertEqual(len(clone.facets), 1)
self.assertEqual(len(clone.date_facets), 1)
self.assertEqual(len(clone.query_facets), 1)
self.assertEqual(len(clone.narrow_queries), 1)
self.assertEqual(clone.start_offset, self.bsq.start_offset)
self.assertEqual(clone.end_offset, self.bsq.end_offset)
self.assertEqual(clone.backend.__class__, self.bsq.backend.__class__)
def test_log_query(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
# Stow.
self.old_unified_index = connections['default']._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi])
connections['default']._index = self.ui
# Update the "index".
backend = connections['default'].get_backend()
backend.clear()
self.bmmsi.update()
old_debug = settings.DEBUG
settings.DEBUG = False
msq = connections['default'].get_query()
self.assertEqual(len(msq.get_results()), 23)
self.assertEqual(len(connections['default'].queries), 0)
settings.DEBUG = True
# Redefine it to clear out the cached results.
msq2 = connections['default'].get_query()
self.assertEqual(len(msq2.get_results()), 23)
self.assertEqual(len(connections['default'].queries), 1)
self.assertEqual(connections['default'].queries[0]['query_string'], '')
msq3 = connections['default'].get_query()
msq3.add_filter(SQ(foo='bar'))
len(msq3.get_results())
self.assertEqual(len(connections['default'].queries), 2)
self.assertEqual(connections['default'].queries[0]['query_string'], '')
self.assertEqual(connections['default'].queries[1]['query_string'], '')
# Restore.
connections['default']._index = self.old_unified_index
settings.DEBUG = old_debug
class CharPKMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr='key')
def get_model(self):
return CharPKMockModel
class SearchQuerySetTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(SearchQuerySetTestCase, self).setUp()
# Stow.
self.old_unified_index = connections['default']._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.cpkmmsi = CharPKMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])
connections['default']._index = self.ui
# Update the "index".
backend = connections['default'].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
self.msqs = SearchQuerySet()
# Stow.
self.old_debug = settings.DEBUG
settings.DEBUG = True
reset_search_queries()
def tearDown(self):
# Restore.
connections['default']._index = self.old_unified_index
settings.DEBUG = self.old_debug
super(SearchQuerySetTestCase, self).tearDown()
def test_len(self):
self.assertEqual(len(self.msqs), 23)
def test_repr(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
self.assertEqual(repr(self.msqs), "[<SearchResult: core.mockmodel (pk=u'1')>, <SearchResult: core.mockmodel (pk=u'2')>, <SearchResult: core.mockmodel (pk=u'3')>, <SearchResult: core.mockmodel (pk=u'4')>, <SearchResult: core.mockmodel (pk=u'5')>, <SearchResult: core.mockmodel (pk=u'6')>, <SearchResult: core.mockmodel (pk=u'7')>, <SearchResult: core.mockmodel (pk=u'8')>, <SearchResult: core.mockmodel (pk=u'9')>, <SearchResult: core.mockmodel (pk=u'10')>, <SearchResult: core.mockmodel (pk=u'11')>, <SearchResult: core.mockmodel (pk=u'12')>, <SearchResult: core.mockmodel (pk=u'13')>, <SearchResult: core.mockmodel (pk=u'14')>, <SearchResult: core.mockmodel (pk=u'15')>, <SearchResult: core.mockmodel (pk=u'16')>, <SearchResult: core.mockmodel (pk=u'17')>, <SearchResult: core.mockmodel (pk=u'18')>, <SearchResult: core.mockmodel (pk=u'19')>, '...(remaining elements truncated)...']")
self.assertEqual(len(connections['default'].queries), 1)
def test_iter(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
msqs = self.msqs.all()
results = [int(res.pk) for res in msqs]
self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]])
self.assertEqual(len(connections['default'].queries), 3)
def test_slice(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
results = self.msqs.all()
self.assertEqual([int(res.pk) for res in results[1:11]], [res.pk for res in MOCK_SEARCH_RESULTS[1:11]])
self.assertEqual(len(connections['default'].queries), 1)
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
results = self.msqs.all()
self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk)
self.assertEqual(len(connections['default'].queries), 1)
def test_manual_iter(self):
results = self.msqs.all()
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
check = [result.pk for result in results._manual_iter()]
self.assertEqual(check, [u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8', u'9', u'10', u'11', u'12', u'13', u'14', u'15', u'16', u'17', u'18', u'19', u'20', u'21', u'22', u'23'])
self.assertEqual(len(connections['default'].queries), 3)
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
# Test to ensure we properly fill the cache, even if we get fewer
# results back (not a handled model) than the hit count indicates.
# This will hang indefinitely if broken.
old_ui = self.ui
self.ui.build(indexes=[self.cpkmmsi])
connections['default']._index = self.ui
self.cpkmmsi.update()
results = self.msqs.all()
loaded = [result.pk for result in results._manual_iter()]
self.assertEqual(loaded, [u'sometext', u'1234'])
self.assertEqual(len(connections['default'].queries), 1)
connections['default']._index = old_ui
def test_fill_cache(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
results = self.msqs.all()
self.assertEqual(len(results._result_cache), 0)
self.assertEqual(len(connections['default'].queries), 0)
results._fill_cache(0, 10)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 10)
self.assertEqual(len(connections['default'].queries), 1)
results._fill_cache(10, 20)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
self.assertEqual(len(connections['default'].queries), 2)
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
# Test to ensure we properly fill the cache, even if we get fewer
# results back (not a handled model) than the hit count indicates.
sqs = SearchQuerySet().all()
sqs.query.backend = MixedMockSearchBackend('default')
results = sqs
self.assertEqual(len([result for result in results._result_cache if result is not None]), 0)
self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [])
self.assertEqual(len(connections['default'].queries), 0)
results._fill_cache(0, 10)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 9)
self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [1, 2, 3, 4, 5, 6, 7, 8, 10])
self.assertEqual(len(connections['default'].queries), 2)
results._fill_cache(10, 20)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 17)
self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20])
self.assertEqual(len(connections['default'].queries), 4)
results._fill_cache(20, 30)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertEqual(len(connections['default'].queries), 6)
def test_cache_is_full(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
self.assertEqual(self.msqs._cache_is_full(), False)
results = self.msqs.all()
fire_the_iterator_and_fill_cache = [result for result in results]
self.assertEqual(results._cache_is_full(), True)
self.assertEqual(len(connections['default'].queries), 3)
def test_all(self):
sqs = self.msqs.all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
def test_filter(self):
sqs = self.msqs.filter(content='foo')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_exclude(self):
sqs = self.msqs.exclude(content='foo')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_order_by(self):
sqs = self.msqs.order_by('foo')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertTrue('foo' in sqs.query.order_by)
def test_models(self):
# Stow.
old_unified_index = connections['default']._index
ui = UnifiedIndex()
bmmsi = BasicMockModelSearchIndex()
bammsi = BasicAnotherMockModelSearchIndex()
ui.build(indexes=[bmmsi, bammsi])
connections['default']._index = ui
msqs = SearchQuerySet()
sqs = msqs.all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 0)
sqs = msqs.models(MockModel)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 1)
sqs = msqs.models(MockModel, AnotherMockModel)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 2)
# This will produce a warning.
ui.build(indexes=[bmmsi])
sqs = msqs.models(AnotherMockModel)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 1)
def test_result_class(self):
sqs = self.msqs.all()
self.assertTrue(issubclass(sqs.query.result_class, SearchResult))
# Custom class.
class IttyBittyResult(object):
pass
sqs = self.msqs.result_class(IttyBittyResult)
self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult))
# Reset to default.
sqs = self.msqs.result_class(None)
self.assertTrue(issubclass(sqs.query.result_class, SearchResult))
def test_boost(self):
sqs = self.msqs.boost('foo', 10)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.boost.keys()), 1)
def test_highlight(self):
sqs = self.msqs.highlight()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(sqs.query.highlight, True)
def test_spelling(self):
# Test the case where spelling support is disabled.
sqs = self.msqs.filter(content='Indx')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(sqs.spelling_suggestion(), None)
self.assertEqual(sqs.spelling_suggestion('indexy'), None)
def test_raw_search(self):
self.assertEqual(len(self.msqs.raw_search('foo')), 23)
self.assertEqual(len(self.msqs.raw_search('(content__exact:hello AND content__exact:world)')), 23)
def test_load_all(self):
# Models with character primary keys.
sqs = SearchQuerySet()
sqs.query.backend = CharPKMockSearchBackend('charpk')
results = sqs.load_all().all()
self.assertEqual(len(results._result_cache), 0)
results._fill_cache(0, 2)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)
# If nothing is handled, you get nothing.
old_ui = connections['default']._index
ui = UnifiedIndex()
ui.build(indexes=[])
connections['default']._index = ui
sqs = self.msqs.load_all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs), 0)
connections['default']._index = old_ui
# For full tests, see the solr_backend.
def test_load_all_read_queryset(self):
# Stow.
old_ui = connections['default']._index
ui = UnifiedIndex()
gafmmsi = GhettoAFifthMockModelSearchIndex()
ui.build(indexes=[gafmmsi])
connections['default']._index = ui
gafmmsi.update()
sqs = SearchQuerySet()
results = sqs.load_all().all()
results.query.backend = ReadQuerySetMockSearchBackend('default')
results._fill_cache(0, 2)
# The deleted result isn't returned
self.assertEqual(len([result for result in results._result_cache if result is not None]), 1)
# Register a SearchIndex with a read_queryset that returns deleted items
rqstsi = TextReadQuerySetTestSearchIndex()
ui.build(indexes=[rqstsi])
rqstsi.update()
sqs = SearchQuerySet()
results = sqs.load_all().all()
results.query.backend = ReadQuerySetMockSearchBackend('default')
results._fill_cache(0, 2)
# Both the deleted and not deleted items are returned
self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)
# Restore.
connections['default']._index = old_ui
def test_auto_query(self):
sqs = self.msqs.auto_query('test search -stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test search -stuff>')
sqs = self.msqs.auto_query('test "my thing" search -stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search -stuff>')
sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' -stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search \'moar quotes\' -stuff>')
sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search \'moar quotes\' "foo -stuff>')
sqs = self.msqs.auto_query('test - stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND content__contains=test - stuff>")
# Ensure bits in exact matches get escaped properly as well.
sqs = self.msqs.auto_query('"pants:rule"')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains="pants:rule">')
# Now with a different fieldname
sqs = self.msqs.auto_query('test search -stuff', fieldname='title')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND title__contains=test search -stuff>")
sqs = self.msqs.auto_query('test "my thing" search -stuff', fieldname='title')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND title__contains=test "my thing" search -stuff>')
def test_count(self):
self.assertEqual(self.msqs.count(), 23)
def test_facet_counts(self):
self.assertEqual(self.msqs.facet_counts(), {})
def test_best_match(self):
self.assertTrue(isinstance(self.msqs.best_match(), SearchResult))
def test_latest(self):
self.assertTrue(isinstance(self.msqs.latest('pub_date'), SearchResult))
def test_more_like_this(self):
mock = MockModel()
mock.id = 1
self.assertEqual(len(self.msqs.more_like_this(mock)), 23)
def test_facets(self):
sqs = self.msqs.facet('foo')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.facets), 1)
sqs2 = self.msqs.facet('foo').facet('bar')
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.facets), 2)
def test_date_facets(self):
try:
sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='smarblaph')
self.fail()
except FacetingError as e:
self.assertEqual(str(e), "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.")
sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.date_facets), 1)
sqs2 = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month').date_facet('bar', start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='year')
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.date_facets), 2)
def test_query_facets(self):
sqs = self.msqs.query_facet('foo', '[bar TO *]')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_facets), 1)
sqs2 = self.msqs.query_facet('foo', '[bar TO *]').query_facet('bar', '[100 TO 499]')
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.query_facets), 2)
# Test multiple query facets on a single field
sqs3 = self.msqs.query_facet('foo', '[bar TO *]').query_facet('bar', '[100 TO 499]').query_facet('foo', '[1000 TO 1499]')
self.assertTrue(isinstance(sqs3, SearchQuerySet))
self.assertEqual(len(sqs3.query.query_facets), 3)
def test_stats(self):
sqs = self.msqs.stats_facet('foo','bar')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.stats),1)
sqs2 = self.msqs.stats_facet('foo','bar').stats_facet('foo','baz')
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.stats),1)
sqs3 = self.msqs.stats_facet('foo','bar').stats_facet('moof','baz')
self.assertTrue(isinstance(sqs3, SearchQuerySet))
self.assertEqual(len(sqs3.query.stats),2)
def test_narrow(self):
sqs = self.msqs.narrow('foo:moof')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.narrow_queries), 1)
def test_clone(self):
results = self.msqs.filter(foo='bar', foo__lt='10')
clone = results._clone()
self.assertTrue(isinstance(clone, SearchQuerySet))
self.assertEqual(str(clone.query), str(results.query))
self.assertEqual(clone._result_cache, [])
self.assertEqual(clone._result_count, None)
self.assertEqual(clone._cache_full, False)
self.assertEqual(clone._using, results._using)
def test_using(self):
sqs = SearchQuerySet(using='default')
self.assertNotEqual(sqs.query, None)
self.assertEqual(sqs.query._using, 'default')
def test_chaining(self):
sqs = self.msqs.filter(content='foo')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
# A second instance should inherit none of the changes from above.
sqs = self.msqs.filter(content='bar')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_none(self):
sqs = self.msqs.none()
self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
self.assertEqual(len(sqs), 0)
def test___and__(self):
sqs1 = self.msqs.filter(content='foo')
sqs2 = self.msqs.filter(content='bar')
sqs = sqs1 & sqs2
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 2)
def test___or__(self):
sqs1 = self.msqs.filter(content='foo')
sqs2 = self.msqs.filter(content='bar')
sqs = sqs1 | sqs2
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 2)
def test_and_or(self):
"""
Combining AND queries with OR should give
AND(OR(a, b), OR(c, d))
"""
sqs1 = self.msqs.filter(content='foo').filter(content='oof')
sqs2 = self.msqs.filter(content='bar').filter(content='rab')
sqs = sqs1 | sqs2
self.assertEqual(sqs.query.query_filter.connector, 'OR')
self.assertEqual(repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter))
self.assertEqual(repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter))
def test_or_and(self):
"""
Combining OR queries with AND should give
OR(AND(a, b), AND(c, d))
"""
sqs1 = self.msqs.filter(content='foo').filter_or(content='oof')
sqs2 = self.msqs.filter(content='bar').filter_or(content='rab')
sqs = sqs1 & sqs2
self.assertEqual(sqs.query.query_filter.connector, 'AND')
self.assertEqual(repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter))
self.assertEqual(repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter))
class ValuesQuerySetTestCase(SearchQuerySetTestCase):
def test_values_sqs(self):
sqs = self.msqs.auto_query("test").values("id")
self.assert_(isinstance(sqs, ValuesSearchQuerySet))
# We'll do a basic test to confirm that slicing works as expected:
self.assert_(isinstance(sqs[0], dict))
self.assert_(isinstance(sqs[0:5][0], dict))
def test_valueslist_sqs(self):
sqs = self.msqs.auto_query("test").values_list("id")
self.assert_(isinstance(sqs, ValuesListSearchQuerySet))
self.assert_(isinstance(sqs[0], (list, tuple)))
self.assert_(isinstance(sqs[0:1][0], (list, tuple)))
self.assertRaises(TypeError, self.msqs.auto_query("test").values_list, "id", "score", flat=True)
flat_sqs = self.msqs.auto_query("test").values_list("id", flat=True)
self.assert_(isinstance(sqs, ValuesListSearchQuerySet))
# Note that this will actually be None because a mocked sqs lacks
# anything else:
self.assert_(flat_sqs[0] is None)
self.assert_(flat_sqs[0:1][0] is None)
class EmptySearchQuerySetTestCase(TestCase):
def setUp(self):
super(EmptySearchQuerySetTestCase, self).setUp()
self.esqs = EmptySearchQuerySet()
def test_get_count(self):
self.assertEqual(self.esqs.count(), 0)
self.assertEqual(len(self.esqs.all()), 0)
def test_filter(self):
sqs = self.esqs.filter(content='foo')
self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
self.assertEqual(len(sqs), 0)
def test_exclude(self):
sqs = self.esqs.exclude(content='foo')
self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
self.assertEqual(len(sqs), 0)
def test_slice(self):
sqs = self.esqs.filter(content='foo')
self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
self.assertEqual(len(sqs), 0)
self.assertEqual(sqs[:10], [])
try:
sqs[4]
self.fail()
except IndexError:
pass
def test_dictionary_lookup(self):
"""
Ensure doing a dictionary lookup raises a TypeError so
EmptySearchQuerySets can be used in templates.
"""
self.assertRaises(TypeError, lambda: self.esqs['count'])
if test_pickling:
class PickleSearchQuerySetTestCase(TestCase):
def setUp(self):
super(PickleSearchQuerySetTestCase, self).setUp()
# Stow.
self.old_unified_index = connections['default']._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.cpkmmsi = CharPKMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])
connections['default']._index = self.ui
# Update the "index".
backend = connections['default'].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
self.msqs = SearchQuerySet()
# Stow.
self.old_debug = settings.DEBUG
settings.DEBUG = True
reset_search_queries()
def tearDown(self):
# Restore.
connections['default']._index = self.old_unified_index
settings.DEBUG = self.old_debug
super(PickleSearchQuerySetTestCase, self).tearDown()
def test_pickling(self):
results = self.msqs.all()
for res in results:
# Make sure the cache is full.
pass
in_a_pickle = pickle.dumps(results)
like_a_cuke = pickle.loads(in_a_pickle)
self.assertEqual(len(like_a_cuke), len(results))
self.assertEqual(like_a_cuke[0].id, results[0].id)
|
|
import unittest
import sys
from mock import patch
from honeybadger import honeybadger
from honeybadger.config import Configuration
from honeybadger.contrib.flask import FlaskPlugin, FlaskHoneybadger
PYTHON_VERSION = sys.version_info[0:2]
class FlaskPluginTestCase(unittest.TestCase):
def setUp(self):
import flask
if flask.__version__.startswith('0.12') and PYTHON_VERSION < (3, 3):
self.skipTest('Flask 0.12 requires Python > 3.2')
if flask.__version__.startswith('1.0') and PYTHON_VERSION < (3, 4):
self.skipTest('Flask 1.0 requires Python > 3.3')
if flask.__version__.startswith('1.1') and PYTHON_VERSION < (3, 5):
self.skipTest('Flask 1.1 requires Python > 3.4')
self.config = Configuration()
self.default_payload = {'request':{}}
self.app = flask.Flask(__name__)
self.app.secret_key = 'safe'
self.app.config.update({
'HONEYBADGER_ENVIRONMENT': 'production_flask'
})
self.plugin = FlaskPlugin()
@self.app.route('/test', methods=['GET', 'POST', 'PUT'])
def foo():
return 'bar'
def test_supports_in_request_context(self):
with self.app.test_request_context():
self.assertTrue(self.plugin.supports(self.config, {}))
def test_supports_no_request_context(self):
self.assertFalse(self.plugin.supports(self.config, {}))
def test_get_request_with_headers(self):
with self.app.test_request_context(path='/test',
base_url='http://server:1234/path',
query_string='a=1&a=2&foo=bar',
headers={
'X-Wizard-Color': 'grey'
}):
payload = self.plugin.generate_payload(self.default_payload, self.config, {'k': 'value'})
self.assertEqual(payload['request']['url'], 'http://server:1234/path/test')
self.assertEqual(payload['request']['component'], 'honeybadger.tests.contrib.test_flask')
self.assertEqual(payload['request']['action'], 'foo')
self.assertDictEqual(payload['request']['params'], {'a': ['1', '2'], 'foo': ['bar']})
self.assertDictEqual(payload['request']['session'], {})
self.assertDictContainsSubset({
'Host': 'server:1234',
'X-Wizard-Color': 'grey',
'REQUEST_METHOD': 'GET'
}, payload['request']['cgi_data'])
self.assertDictEqual(payload['request']['context'], {'k': 'value'})
def test_get_request_with_session(self):
with self.app.test_request_context(path='/test', base_url='http://server:1234/path') as ctx:
ctx.session['answer'] = 42
ctx.session['password'] = 'this is fine'
payload = self.plugin.generate_payload(self.default_payload, self.config, {'k': 'value'})
self.assertEqual(payload['request']['url'], 'http://server:1234/path/test')
self.assertEqual(payload['request']['component'], 'honeybadger.tests.contrib.test_flask')
self.assertEqual(payload['request']['action'], 'foo')
self.assertDictEqual(payload['request']['params'], {})
self.assertDictEqual(payload['request']['session'], {'answer': 42, 'password': '[FILTERED]'})
self.assertDictContainsSubset({
'Host': 'server:1234',
'REQUEST_METHOD': 'GET'
}, payload['request']['cgi_data'])
self.assertDictEqual(payload['request']['context'], {'k': 'value'})
def test_post_request(self):
with self.app.test_request_context(path='/test', base_url='http://server:1234/path', method='POST',
data={'foo': 'bar', 'password': 'this is fine'}):
payload = self.plugin.generate_payload(self.default_payload, self.config, {'k': 'value'})
self.assertEqual(payload['request']['url'], 'http://server:1234/path/test')
self.assertEqual(payload['request']['component'], 'honeybadger.tests.contrib.test_flask')
self.assertEqual(payload['request']['action'], 'foo')
self.assertDictEqual(payload['request']['params'], {'foo': ['bar'], 'password': '[FILTERED]'})
self.assertDictEqual(payload['request']['session'], {})
self.assertDictEqual(payload['request']['cgi_data'], {
'Host': 'server:1234',
'Content-Type': 'application/x-www-form-urlencoded',
'Content-Length': '29',
'REQUEST_METHOD': 'POST',
'HTTP_COOKIE': {}
})
self.assertDictEqual(payload['request']['context'], {'k': 'value'})
def test_put_request(self):
with self.app.test_request_context(path='/test', base_url='http://server:1234/path', method='PUT',
data={'foo': 'bar', 'password': 'this is fine'}):
payload = self.plugin.generate_payload(self.default_payload, self.config, {'k': 'value'})
self.assertEqual(payload['request']['url'], 'http://server:1234/path/test')
self.assertEqual(payload['request']['component'], 'honeybadger.tests.contrib.test_flask')
self.assertEqual(payload['request']['action'], 'foo')
self.assertDictEqual(payload['request']['params'], {'foo': ['bar'], 'password': '[FILTERED]'})
self.assertDictEqual(payload['request']['session'], {})
self.assertDictEqual(payload['request']['cgi_data'], {
'Host': 'server:1234',
'Content-Type': 'application/x-www-form-urlencoded',
'Content-Length': '29',
'REQUEST_METHOD': 'PUT',
'HTTP_COOKIE': {}
})
self.assertDictEqual(payload['request']['context'], {'k': 'value'})
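# Illustrative sketch added for this document (not from the original tests): the
# wiring exercised by FlaskHoneybadgerTestCase below boils down to constructing the
# extension against a configured app. The config keys mirror the setUp of that
# test case; ``_example_wire_flask_honeybadger`` is a hypothetical helper name.
def _example_wire_flask_honeybadger():
    import flask
    app = flask.Flask(__name__)
    app.config.update({'environment': 'production_flask', 'api_key': 'abcd'})
    return FlaskHoneybadger(app, report_exceptions=True)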
class FlaskHoneybadgerTestCase(unittest.TestCase):
def setUp(self):
import flask
if flask.__version__.startswith('0.12') and PYTHON_VERSION < (3, 3):
self.skipTest('Flask 0.12 requires Python > 3.2')
if flask.__version__.startswith('1.0') and PYTHON_VERSION < (3, 4):
self.skipTest('Flask 1.0 requires Python >= 3.4')
if flask.__version__.startswith('1.1') and PYTHON_VERSION < (3, 5):
self.skipTest('Flask 1.1 requires Python >= 3.5')
import werkzeug
self.default_headers = {
'Content-Length': '0',
'Host': 'localhost',
'User-Agent': 'werkzeug/%s' % werkzeug.__version__
}
self.app = flask.Flask(__name__)
self.app.config.update({
'environment': 'production_flask',
'api_key': 'abcd'
})
def assert_called_with_exception_type(self, mock_hb, exc_type):
self.assertEqual(1, mock_hb.notify.call_count)
self.assertEqual(exc_type, type(mock_hb.notify.call_args[0][0]))
@patch('honeybadger.contrib.flask.honeybadger')
def test_with_auto_report_exceptions(self, mock_hb):
FlaskHoneybadger(self.app, report_exceptions=True)
@self.app.route('/error')
def error():
return 1 / 0
self.app.test_client().get('/error?a=1&b=2&b=3')
self.assert_called_with_exception_type(mock_hb, ZeroDivisionError)
@patch('honeybadger.contrib.flask.honeybadger')
def test_without_auto_report_exceptions(self, mock_hb):
FlaskHoneybadger(self.app, report_exceptions=False)
@self.app.route('/error')
def error():
return 1 / 0
self.app.test_client().get('/error?a=1&b=2&b=3')
mock_hb.notify.assert_not_called()
@patch('honeybadger.contrib.flask.honeybadger')
def test_auto_report_exceptions_with_blueprint(self, mock_hb):
from flask import Blueprint
FlaskHoneybadger(self.app, report_exceptions=True)
bp = Blueprint('blueprint', __name__)
@bp.route('/error')
def error():
return 1 / 0
self.app.register_blueprint(bp)
self.app.test_client().get('/error?a=1&b=2&b=3')
self.assert_called_with_exception_type(mock_hb, ZeroDivisionError)
@patch('honeybadger.contrib.flask.honeybadger')
def test_auto_report_exceptions_with_view_class(self, mock_hb):
from flask.views import MethodView
FlaskHoneybadger(self.app, report_exceptions=True)
class ErrorView(MethodView):
def get(self):
return 1 / 0
self.app.add_url_rule('/error', view_func=ErrorView.as_view('error'))
self.app.test_client().get('/error?a=1&b=2&b=3')
self.assert_called_with_exception_type(mock_hb, ZeroDivisionError)
@patch('honeybadger.contrib.flask.honeybadger')
def test_dont_reset_context_with_exception(self, mock_hb):
from flask.views import MethodView
FlaskHoneybadger(self.app, report_exceptions=True, reset_context_after_request=False)
honeybadger.set_context(foo='bar')
class ErrorView(MethodView):
def get(self):
return 1 / 0
self.app.add_url_rule('/error', view_func=ErrorView.as_view('error'))
self.app.test_client().get('/error?a=1&b=2&b=3')
self.assert_called_with_exception_type(mock_hb, ZeroDivisionError)
mock_hb.reset_context.assert_not_called()
@patch('honeybadger.contrib.flask.honeybadger')
def test_dont_reset_context_when_not_reporting(self, mock_hb):
from flask.views import MethodView
FlaskHoneybadger(self.app, report_exceptions=False, reset_context_after_request=False)
honeybadger.set_context(foo='bar')
class ErrorView(MethodView):
def get(self):
return 1 / 0
self.app.add_url_rule('/error', view_func=ErrorView.as_view('error'))
self.app.test_client().get('/error?a=1&b=2&b=3')
mock_hb.notify.assert_not_called()
mock_hb.reset_context.assert_not_called()
@patch('honeybadger.contrib.flask.honeybadger')
def test_reset_context_when_not_reporting(self, mock_hb):
from flask.views import MethodView
FlaskHoneybadger(self.app, report_exceptions=False, reset_context_after_request=True)
honeybadger.set_context(foo='bar')
class ErrorView(MethodView):
def get(self):
return 1 / 0
self.app.add_url_rule('/error', view_func=ErrorView.as_view('error'))
self.app.test_client().get('/error?a=1&b=2&b=3')
mock_hb.notify.assert_not_called()
self.assertEqual(1, mock_hb.reset_context.call_count)
@patch('honeybadger.contrib.flask.honeybadger')
def test_reset_context_when_reporting(self, mock_hb):
from flask.views import MethodView
FlaskHoneybadger(self.app, report_exceptions=True, reset_context_after_request=True)
honeybadger.set_context(foo='bar')
class ErrorView(MethodView):
def get(self):
return 1 / 0
self.app.add_url_rule('/error', view_func=ErrorView.as_view('error'))
self.app.test_client().get('/error?a=1&b=2&b=3')
self.assert_called_with_exception_type(mock_hb, ZeroDivisionError)
self.assertEqual(2, mock_hb.reset_context.call_count)
|
|
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import random
import numpy as np
from assignment1.cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
from assignment1.cs231n import data_utils
from assignment1.cs231n.classifiers.softmax import softmax_loss_naive
from assignment1.cs231n.gradient_check import grad_check_sparse
from assignment1.cs231n.classifiers.softmax import softmax_loss_vectorized
import time
from assignment1.cs231n.classifiers.linear_classifier import Softmax
class Softmaxmodel(object):
def __init__(self):
return
def get_CIFAR10_data(self,num_training=49000, num_validation=1000, num_test=1000, num_dev=500):
"""
Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
it for the linear classifier. These are the same steps as we used for the
SVM, but condensed to a single function.
"""
# Load the raw CIFAR-10 data
cifar10_dir = '../cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# Normalize the data: subtract the mean image
mean_image = np.mean(X_train, axis = 0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# add bias dimension and transform into columns
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print 'Train data shape: ', X_train.shape
print 'Train labels shape: ', y_train.shape
print 'Validation data shape: ', X_val.shape
print 'Validation labels shape: ', y_val.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
print 'dev data shape: ', X_dev.shape
print 'dev labels shape: ', y_dev.shape
self.X_train = X_train
self.y_train = y_train
self.X_val = X_val
self.y_val = y_val
self.X_test = X_test
self.y_test = y_test
self.X_dev = X_dev
self.y_dev = y_dev
return X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev
def compute_loss(self):
# First implement the naive softmax loss function with nested loops.
# Open the file cs231n/classifiers/softmax.py and implement the
# softmax_loss_naive function.
# Generate a random softmax weight matrix and use it to compute the loss.
W = np.random.randn(3073, 10) * 0.0001
loss, grad = softmax_loss_naive(W, self.X_dev, self.y_dev, 0.0)
# As a rough sanity check, our loss should be something close to -log(0.1):
# with small random weights the scores are near-uniform, so each of the 10
# classes gets probability ~0.1 and the cross-entropy is about -log(0.1) ~= 2.3.
print 'loss: %f' % loss
print 'sanity check: %f' % (-np.log(0.1))
return
def compute_gradient(self):
# Complete the implementation of softmax_loss_naive and implement a (naive)
# version of the gradient that uses nested loops.
W = np.random.randn(3073, 10) * 0.0001
loss, grad = softmax_loss_naive(W, self.X_dev, self.y_dev, 0.0)
# As we did for the SVM, use numeric gradient checking as a debugging tool.
# The numeric gradient should be close to the analytic gradient.
f = lambda w: softmax_loss_naive(w, self.X_dev, self.y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad, 10)
# similar to SVM case, do another gradient check with regularization
loss, grad = softmax_loss_naive(W, self.X_dev, self.y_dev, 1e2)
f = lambda w: softmax_loss_naive(w, self.X_dev, self.y_dev, 1e2)[0]
grad_numerical = grad_check_sparse(f, W, grad, 10)
return
def compute_vectorized_loss_grad(self):
# Now that we have a naive implementation of the softmax loss function and its gradient,
# implement a vectorized version in softmax_loss_vectorized.
# The two versions should compute the same results, but the vectorized version should be
# much faster.
W = np.random.randn(3073, 10) * 0.0001
tic = time.time()
loss_naive, grad_naive = softmax_loss_naive(W, self.X_dev, self.y_dev, 0.00001)
toc = time.time()
print 'naive loss: %e computed in %fs' % (loss_naive, toc - tic)
tic = time.time()
loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, self.X_dev, self.y_dev, 0.00001)
toc = time.time()
print 'vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic)
# As we did for the SVM, we use the Frobenius norm to compare the two versions
# of the gradient.
grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print 'Loss difference: %f' % np.abs(loss_naive - loss_vectorized)
print 'Gradient difference: %f' % grad_difference
return
def parameter_tuning(self):
learning_rates = [1e-7, 5e-8]
regularization_strengths = [5e4, 1e3]
# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_model = None # The Softmax object that achieved the highest validation rate.
num_iters = 1500
for learning_rate in learning_rates:
for regularization_strength in regularization_strengths:
print "learning_rage {}, regularization_strength {}".format(learning_rate, regularization_strength)
#train it
model = Softmax()
model.train(self.X_train, self.y_train, learning_rate=learning_rate, reg=regularization_strength,
num_iters=num_iters, verbose=True)
#predict
y_train_pred = model.predict(self.X_train)
training_accuracy = np.mean(self.y_train == y_train_pred)
y_val_pred = model.predict(self.X_val)
validation_accuracy = np.mean(self.y_val == y_val_pred)
results[(learning_rate,regularization_strength)] = training_accuracy, validation_accuracy
if validation_accuracy > best_val:
best_val = validation_accuracy
best_model = model
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy)
print 'best validation accuracy achieved during cross-validation: %f' % best_val
self.evaluate_bestmodel(best_model)
self.visualize_bestmodel_weights(best_model)
return
def evaluate_bestmodel(self, best_model):
# Evaluate the best softmax model on the test set
y_test_pred = best_model.predict(self.X_test)
test_accuracy = np.mean(self.y_test == y_test_pred)
print 'softmax on raw pixels final test set accuracy: %f' % test_accuracy
return
def visualize_bestmodel_weights(self,best_model):
# Visualize the learned weights for each class.
# Depending on your choice of learning rate and regularization strength, these may
# or may not be nice to look at.
w = best_model.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in xrange(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
plt.show()
return
def run(self):
self.get_CIFAR10_data()
self.parameter_tuning()
# self.compute_loss()
# self.compute_gradient()
# self.compute_vectorized_loss_grad()
return
if __name__ == "__main__":
obj= Softmaxmodel()
obj.run()
|
|
import warnings
from enum import IntFlag
from typing import Optional, Union
from .. import xdr as stellar_xdr
from ..keypair import Keypair
from ..muxed_account import MuxedAccount
from ..signer import Signer
from ..strkey import StrKey
from ..type_checked import type_checked
from ..utils import raise_if_not_valid_ed25519_public_key
from .operation import Operation
__all__ = ["AuthorizationFlag", "SetOptions"]
class AuthorizationFlag(IntFlag):
"""Indicates which flags to set. For details about the flags,
please refer to the `Control Access to an Asset - Flag <https://developers.stellar.org/docs/issuing-assets/control-asset-access/#flags>`__.
"""
AUTHORIZATION_REQUIRED = 1
AUTHORIZATION_REVOCABLE = 2
AUTHORIZATION_IMMUTABLE = 4
AUTHORIZATION_CLAWBACK_ENABLED = 8
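# Illustrative sketch added for exposition (not part of the upstream SDK module):
# because AuthorizationFlag is an IntFlag, individual flags combine with bitwise OR
# and the combined value still round-trips through a plain int.
def _example_combined_flags() -> "AuthorizationFlag":
    combined = (
        AuthorizationFlag.AUTHORIZATION_REQUIRED
        | AuthorizationFlag.AUTHORIZATION_REVOCABLE
    )
    assert combined.value == 3  # 1 | 2
    assert AuthorizationFlag(3) == combined
    return combined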
@type_checked
class SetOptions(Operation):
"""The :class:`SetOptions` object, which represents a SetOptions operation
on Stellar's network.
This operation sets the options for an account.
For more information on the signing options, please refer to the `multi-sig
doc <https://developers.stellar.org/docs/glossary/multisig/>`_.
When updating signers or other thresholds, the threshold of this operation
is high.
Threshold: Medium or High
See `Set Options <https://developers.stellar.org/docs/start/list-of-operations/#set-options>`_ for more information.
:param inflation_dest: Account of the inflation destination.
:param clear_flags: Indicates which flags to clear. For details about the flags,
please refer to the `Control Access to an Asset - Flag <https://developers.stellar.org/docs/issuing-assets/control-asset-access/#flags>`__.
The `bit mask <https://en.wikipedia.org/wiki/Bit_field>`_ integer subtracts from the existing flags of the account.
This allows for clearing specific bits without knowledge of the existing flags; you can also use
:class:`stellar_sdk.operation.set_options.AuthorizationFlag`
* AUTHORIZATION_REQUIRED = 1
* AUTHORIZATION_REVOCABLE = 2
* AUTHORIZATION_IMMUTABLE = 4
* AUTHORIZATION_CLAWBACK_ENABLED = 8
:param set_flags: Indicates which flags to set. For details about the flags,
please refer to the `Control Access to an Asset - Flag <https://developers.stellar.org/docs/issuing-assets/control-asset-access/#flags>`__.
The bit mask integer adds onto the existing flags of the account.
This allows for setting specific bits without knowledge of the existing flags; you can also use
:class:`stellar_sdk.operation.set_options.AuthorizationFlag`
* AUTHORIZATION_REQUIRED = 1
* AUTHORIZATION_REVOCABLE = 2
* AUTHORIZATION_IMMUTABLE = 4
* AUTHORIZATION_CLAWBACK_ENABLED = 8
:param master_weight: A number from 0-255 (inclusive) representing the weight of the master key.
If the weight of the master key is updated to 0, it is effectively disabled.
:param low_threshold: A number from 0-255 (inclusive) representing the threshold this account sets on all
operations it performs that have `a low threshold <https://developers.stellar.org/docs/glossary/multisig/>`_.
:param med_threshold: A number from 0-255 (inclusive) representing the threshold this account sets on all
operations it performs that have `a medium threshold <https://developers.stellar.org/docs/glossary/multisig/>`_.
:param high_threshold: A number from 0-255 (inclusive) representing the threshold this account sets on all
operations it performs that have `a high threshold <https://developers.stellar.org/docs/glossary/multisig/>`_.
:param home_domain: sets the home domain used for
reverse `federation <https://developers.stellar.org/docs/glossary/federation/>`_ lookup.
:param signer: Add, update, or remove a signer from the account.
:param source: The source account for the operation. Defaults to the transaction's source account.
"""
_XDR_OPERATION_TYPE: stellar_xdr.OperationType = (
stellar_xdr.OperationType.SET_OPTIONS
)
def __init__(
self,
inflation_dest: Optional[str] = None,
clear_flags: Optional[Union[int, AuthorizationFlag]] = None,
set_flags: Optional[Union[int, AuthorizationFlag]] = None,
master_weight: Optional[int] = None,
low_threshold: Optional[int] = None,
med_threshold: Optional[int] = None,
high_threshold: Optional[int] = None,
signer: Optional[Signer] = None,
home_domain: Optional[str] = None,
source: Optional[Union[MuxedAccount, str]] = None,
) -> None:
super().__init__(source)
if set_flags is not None and not isinstance(set_flags, AuthorizationFlag):
warnings.warn(
"`set_flags` is a int, we recommend using AuthorizationFlag.",
DeprecationWarning,
)
set_flags = AuthorizationFlag(set_flags)
if clear_flags is not None and not isinstance(clear_flags, AuthorizationFlag):
warnings.warn(
"`clear_flags` is a int, we recommend using AuthorizationFlag.",
DeprecationWarning,
)
clear_flags = AuthorizationFlag(clear_flags)
self.inflation_dest = inflation_dest
self.clear_flags: Optional[AuthorizationFlag] = clear_flags
self.set_flags: Optional[AuthorizationFlag] = set_flags
self.master_weight: Optional[int] = master_weight
self.low_threshold: Optional[int] = low_threshold
self.med_threshold: Optional[int] = med_threshold
self.high_threshold: Optional[int] = high_threshold
self.home_domain: Optional[str] = home_domain
self.signer: Optional[Signer] = signer
if self.inflation_dest is not None:
raise_if_not_valid_ed25519_public_key(self.inflation_dest, "inflation_dest")
def _to_operation_body(self) -> stellar_xdr.OperationBody:
inflation_dest = (
Keypair.from_public_key(self.inflation_dest).xdr_account_id()
if self.inflation_dest is not None
else None
)
home_domain = (
stellar_xdr.String32(bytes(self.home_domain, encoding="utf-8"))
if self.home_domain is not None
else None
)
clear_flags = (
None
if self.clear_flags is None
else stellar_xdr.Uint32(self.clear_flags.value)
)
set_flags = (
None if self.set_flags is None else stellar_xdr.Uint32(self.set_flags.value)
)
master_weight = (
None
if self.master_weight is None
else stellar_xdr.Uint32(self.master_weight)
)
low_threshold = (
None
if self.low_threshold is None
else stellar_xdr.Uint32(self.low_threshold)
)
med_threshold = (
None
if self.med_threshold is None
else stellar_xdr.Uint32(self.med_threshold)
)
high_threshold = (
None
if self.high_threshold is None
else stellar_xdr.Uint32(self.high_threshold)
)
signer = None if self.signer is None else self.signer.to_xdr_object()
set_options_op = stellar_xdr.SetOptionsOp(
inflation_dest,
clear_flags,
set_flags,
master_weight,
low_threshold,
med_threshold,
high_threshold,
home_domain,
signer,
)
body = stellar_xdr.OperationBody(
type=self._XDR_OPERATION_TYPE, set_options_op=set_options_op
)
return body
@classmethod
def from_xdr_object(cls, xdr_object) -> "SetOptions":
"""Creates a :class:`SetOptions` object from an XDR Operation
object.
"""
source = Operation.get_source_from_xdr_obj(xdr_object)
inflation_dest = None
if xdr_object.body.set_options_op.inflation_dest:
inflation_dest = StrKey.encode_ed25519_public_key(
xdr_object.body.set_options_op.inflation_dest.account_id.ed25519.uint256
)
clear_flags_xdr = xdr_object.body.set_options_op.clear_flags
set_flags_xdr = xdr_object.body.set_options_op.set_flags
master_weight_xdr = xdr_object.body.set_options_op.master_weight
low_threshold_xdr = xdr_object.body.set_options_op.low_threshold
med_threshold_xdr = xdr_object.body.set_options_op.med_threshold
high_threshold_xdr = xdr_object.body.set_options_op.high_threshold
home_domain_xdr = xdr_object.body.set_options_op.home_domain
signer_xdr_object = xdr_object.body.set_options_op.signer
clear_flags = None if clear_flags_xdr is None else clear_flags_xdr.uint32
set_flags = None if set_flags_xdr is None else set_flags_xdr.uint32
master_weight = None if master_weight_xdr is None else master_weight_xdr.uint32
low_threshold = None if low_threshold_xdr is None else low_threshold_xdr.uint32
med_threshold = None if med_threshold_xdr is None else med_threshold_xdr.uint32
high_threshold = (
None if high_threshold_xdr is None else high_threshold_xdr.uint32
)
home_domain = None if home_domain_xdr is None else home_domain_xdr.string32
signer = (
None
if signer_xdr_object is None
else Signer.from_xdr_object(signer_xdr_object)
)
if home_domain is not None:
home_domain = home_domain.decode("utf-8")
op = cls(
inflation_dest=inflation_dest,
clear_flags=clear_flags,
set_flags=set_flags,
master_weight=master_weight,
low_threshold=low_threshold,
med_threshold=med_threshold,
high_threshold=high_threshold,
home_domain=home_domain,
signer=signer,
source=source,
)
return op
def __str__(self):
return (
f"<SetOptions [inflation_dest={self.inflation_dest}, "
f"clear_flags={self.clear_flags}, "
f"set_flags={self.set_flags}, "
f"master_weight={self.master_weight}, "
f"low_threshold={self.low_threshold}, "
f"med_threshold={self.med_threshold}, "
f"high_threshold={self.high_threshold}, "
f"signer={self.signer}, "
f"home_domain={self.home_domain}, "
f"source={self.source}]>"
)
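# A minimal usage sketch of the SetOptions operation defined above; the flag
# choices, weight and home domain are hypothetical example values, and in real
# code the operation would typically be added to a transaction before submission.
def _example_set_options() -> SetOptions:
    return SetOptions(
        set_flags=AuthorizationFlag.AUTHORIZATION_REQUIRED,
        clear_flags=AuthorizationFlag.AUTHORIZATION_REVOCABLE,
        master_weight=255,
        home_domain="example.com",
    )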
|
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for registering already known functions for tagging patterns."""
import functools
from typing import Sequence, Tuple, TypeVar
import jax
from jax import core as jax_core
from jax import lax
from jax import lib as jax_lib
from jax.interpreters import batching as jax_batching
import jax.numpy as jnp
_T = TypeVar("_T")
class LossTag(jax_core.Primitive):
"""A tagging primitive specifically for losses."""
multiple_results = True
def __init__(self, cls, num_inputs: int, num_targets: int = 1):
super().__init__(cls.__name__ + "_tag")
self._cls = cls
self._num_inputs = num_inputs
self._num_targets = num_targets
jax.xla.translations[self] = self.xla_translation
jax.ad.primitive_jvps[self] = self.jvp
    # This line defines how the tag behaves under vmap. It is required for any
    # primitive that can be used inside a vmap. We want to allow this for two
    # reasons: first, so that user code does not break when the tags are not
    # used at all, and second, so that a network can be written for a single
    # example and then vmap-ed over a batch.
jax_batching.primitive_batchers[self] = self.batching
@property
def num_inputs(self) -> int:
return self._num_inputs
@property
def num_targets(self) -> int:
return self._num_targets
def loss(self, *args, weight: float = 1.0, **kwargs):
return self._cls(*args, weight=weight, **kwargs)
def loss_evaluate(self, *args, weight: float = 1.0, **kwargs):
return self.loss(*args, weight=weight, **kwargs).evaluate()
def get_outputs(self, *args, weight: float, return_loss: bool, **kwargs):
if len(args) < self.num_inputs:
raise ValueError("Inputs to the tag are not enough.")
if len(args) < self.num_inputs + self.num_targets:
if len(args) != self.num_inputs:
raise ValueError("Inputs to the tag are not quite enough.")
if return_loss:
raise ValueError("Can not have return_loss=True when there are no "
"targets.")
return args
if len(args) > self.num_inputs + self.num_targets:
raise ValueError("Inputs to the tag are too many.")
if return_loss:
return self.loss(*args, weight=weight, **kwargs).evaluate()
else:
return args
def impl(self, *args, weight: float, return_loss: bool, **kwargs):
return self.get_outputs(*args, weight=weight, return_loss=return_loss)
def abstract_eval(self, *args, weight: float, return_loss: bool, **kwargs):
return self.get_outputs(*args, weight=weight, return_loss=return_loss)
def xla_translation(
self,
c,
*args,
weight: float = 1.0,
return_loss: bool = False,
**kwargs,
):
outputs = self.get_outputs(
*args, weight=weight, return_loss=return_loss, **kwargs)
if isinstance(outputs, tuple):
return jax_lib.xla_client.ops.Tuple(c, outputs)
return outputs
def jvp(
self,
arg_values,
arg_tangents,
weight: float,
return_loss: bool,
**kwargs,
):
if len(arg_values) != len(arg_tangents):
raise ValueError("Values and tangents are not the same length.")
primal_output = self.bind(
*arg_values, weight=weight, return_loss=return_loss, **kwargs)
if len(arg_values) == self.num_inputs:
tangents_out = self.get_outputs(
*arg_tangents, weight=weight, return_loss=return_loss, **kwargs)
elif return_loss:
tangents_out = jax.jvp(
functools.partial(self.loss_evaluate, weight=weight, **kwargs),
arg_tangents, arg_tangents)[1]
else:
tangents_out = arg_tangents
return primal_output, tangents_out
def batching(self, batched_args, batched_dims, **kwargs):
return self.bind(*batched_args, **kwargs), batched_dims[0]
class LayerTag(jax_core.Primitive):
"""A tagging primitive that is used to mark/tag computation."""
def __init__(self, name: str, num_inputs: int, num_outputs: int):
super().__init__(name)
if num_outputs > 1:
raise NotImplementedError(
f"Only single outputs are supported, got: num_outputs={num_outputs}")
self._num_outputs = num_outputs
self._num_inputs = num_inputs
jax.xla.translations[self] = self.xla_translation
jax.ad.deflinear(self, self.transpose)
jax.ad.primitive_transposes[self] = self.transpose
    # This line defines how the tag behaves under vmap. It is required for any
    # primitive that can be used inside a vmap. We want to allow this for two
    # reasons: first, so that user code does not break when the tags are not
    # used at all, and second, so that a network can be written for a single
    # example and then vmap-ed over a batch.
jax_batching.primitive_batchers[self] = self.batching
@property
def num_outputs(self) -> int:
return self._num_outputs
@property
def num_inputs(self) -> int:
return self._num_inputs
def split_all_inputs(
self,
all_inputs: Sequence[_T],
) -> Tuple[Sequence[_T], Sequence[_T], Sequence[_T]]:
outputs = tuple(all_inputs[:self.num_outputs])
inputs = tuple(all_inputs[self.num_outputs:self.num_outputs +
self.num_inputs])
params = tuple(all_inputs[self.num_outputs + self.num_inputs:])
return outputs, inputs, params
def get_outputs(self, *operands: _T, **kwargs) -> _T:
assert self.num_outputs == 1
return operands[0]
def xla_translation(self, c, *operands: _T, **kwargs) -> _T:
return self.get_outputs(*operands, **kwargs)
@staticmethod
def transpose(cotangent, *operands, **kwargs):
return (cotangent,) + (None,) * (len(operands) - 1)
def impl(self, *operands, **kwargs):
return self.get_outputs(*operands, **kwargs)
def abstract_eval(self, *abstract_operands, **kwargs):
return self.get_outputs(*abstract_operands, **kwargs)
def batching(self, batched_operands, batched_dims, **kwargs):
return self.bind(*batched_operands, **kwargs), batched_dims[0]
# _____ _
# / ____| (_)
# | | __ ___ _ __ ___ _ __ _ ___
# | | |_ |/ _ \ '_ \ / _ \ '__| |/ __|
# | |__| | __/ | | | __/ | | | (__
# \_____|\___|_| |_|\___|_| |_|\___|
#
#
generic_tag = LayerTag(name="generic_tag", num_inputs=0, num_outputs=1)
def register_generic(parameter: _T) -> _T:
return generic_tag.bind(parameter)
# _____
# | __ \
# | | | | ___ _ __ ___ ___
# | | | |/ _ \ '_ \/ __|/ _ \
# | |__| | __/ | | \__ \ __/
# |_____/ \___|_| |_|___/\___|
#
dense_tag = LayerTag(name="dense_tag", num_inputs=1, num_outputs=1)
def register_dense(y, x, w, b=None):
if b is None:
return dense_tag.bind(y, x, w)
return dense_tag.bind(y, x, w, b)
def dense_func(x, params):
"""Example of a dense layer function."""
w = params[0]
y = jnp.matmul(x, w)
if len(params) == 1:
# No bias
return y
# Add bias
return y + params[1]
def dense_tagging(jaxpr, inverse_map, values_map):
"""Correctly registers a dense layer pattern."""
del inverse_map
in_values = [values_map[v] for v in jaxpr.invars]
out_values = [values_map[v] for v in jaxpr.outvars]
return register_dense(out_values[0], *in_values)
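# A small illustrative sketch (a hypothetical helper, with assumed shapes) of
# how the dense tag is meant to be used from model code: compute the layer
# output as usual, then pass (y, x, w, b) through `register_dense` so the
# pattern can be recognised later. The operand order (outputs, then inputs,
# then parameters) matches `LayerTag.split_all_inputs`.
def example_tagged_dense(x, w, b):
  y = jnp.matmul(x, w) + b
  return register_dense(y, x, w, b)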
# ___ _____ _____ _ _ _
# |__ \| __ \ / ____| | | | | (_)
# ) | | | | | | ___ _ ____ _____ | |_ _| |_ _ ___ _ __
# / /| | | | | | / _ \| '_ \ \ / / _ \| | | | | __| |/ _ \| "_ \
# / /_| |__| | | |___| (_) | | | \ V / (_) | | |_| | |_| | (_) | | | |
# |____|_____/ \_____\___/|_| |_|\_/ \___/|_|\__,_|\__|_|\___/|_| |_|
#
conv2d_tag = LayerTag(name="conv2d_tag", num_inputs=1, num_outputs=1)
def register_conv2d(y, x, w, b=None, **kwargs):
if b is None:
return conv2d_tag.bind(y, x, w, **kwargs)
return conv2d_tag.bind(y, x, w, b, **kwargs)
def conv2d_func(x, params):
"""Example of a conv2d layer function."""
w = params[0]
y = lax.conv_general_dilated(
x,
w,
window_strides=(2, 2),
padding="SAME",
dimension_numbers=("NHWC", "HWIO", "NHWC"))
if len(params) == 1:
# No bias
return y
# Add bias
return y + params[1][None, None, None]
def conv2d_tagging(jaxpr, inverse_map, values_map):
"""Correctly registers a conv2d layer pattern."""
in_values = [values_map[v] for v in jaxpr.invars]
out_values = [values_map[v] for v in jaxpr.outvars]
keys = [k for k in inverse_map.keys() if isinstance(k, str)]
keys = [k for k in keys if k.startswith("conv_general_dilated")]
if len(keys) != 1:
raise ValueError("Did not find any conv_general_dilated!")
kwargs = inverse_map[keys[0]].params
return register_conv2d(out_values[0], *in_values, **kwargs)
# _____ _ _ _____ _ _ __ _
# / ____| | | | | / ____| | (_)/ _| |
# | (___ ___ __ _| | ___ __ _ _ __ __| | | (___ | |__ _| |_| |_
# \___ \ / __/ _` | |/ _ \ / _` | '_ \ / _` | \___ \| '_ \| | _| __|
# ____) | (_| (_| | | __/ | (_| | | | | (_| | ____) | | | | | | | |_
# |_____/ \___\__,_|_|\___| \__,_|_| |_|\__,_| |_____/|_| |_|_|_| \__|
#
scale_and_shift_tag = LayerTag(
name="scale_and_shift_tag", num_inputs=1, num_outputs=1)
def register_scale_and_shift(y, args, has_scale: bool, has_shift: bool):
assert has_scale or has_shift
x, args = args[0], args[1:]
return scale_and_shift_tag.bind(
y, x, *args, has_scale=has_scale, has_shift=has_shift)
def scale_and_shift_func(x, params, has_scale: bool, has_shift: bool):
"""Example of a scale and shift function."""
if has_scale and has_shift:
scale, shift = params
return x * scale + shift
elif has_scale:
return x * params[0]
elif has_shift:
return x + params[0]
else:
raise ValueError()
def scale_and_shift_tagging(
jaxpr,
inverse_map,
values_map,
has_scale: bool,
has_shift: bool,
):
"""Correctly registers a scale and shift layer pattern."""
del inverse_map
in_values = [values_map[v] for v in jaxpr.invars]
out_values = [values_map[v] for v in jaxpr.outvars]
return register_scale_and_shift(out_values[0], in_values, has_scale,
has_shift)
def batch_norm_func(
inputs: Tuple[jnp.ndarray, jnp.ndarray],
params: Tuple[jnp.ndarray, jnp.ndarray],
) -> jnp.ndarray:
"""Example of batch norm as is defined in Haiku."""
x, y = inputs
scale, shift = params
inv = scale * y
return x * inv + shift
def batch_norm_tagging_func(
jaxpr,
inverse_map,
values_map,
has_scale: bool,
has_shift: bool,
):
"""Correctly registers a batch norm layer pattern as is defined in Haiku."""
del inverse_map
in_values = [values_map[v] for v in jaxpr.invars]
out_values = [values_map[v] for v in jaxpr.outvars]
  # The first two inputs are both multiplied by the scale, so we merge them.
in_values = [in_values[0] * in_values[1]] + in_values[2:]
return register_scale_and_shift(out_values[0], in_values, has_scale,
has_shift)
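# A hedged consistency note: `batch_norm_func` above computes
# x * (scale * y) + shift, which is exactly the scale-and-shift pattern, so
# merging the first two inputs (as `batch_norm_tagging_func` does) lets the
# computation be registered through `register_scale_and_shift`. A hypothetical
# helper spelling this out:
def example_batch_norm_as_scale_and_shift(x, y, scale, shift):
  merged_scale = scale * y
  return scale_and_shift_func(x, (merged_scale, shift), has_scale=True,
                              has_shift=True)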
|
|
#!/usr/bin/env python3
from __future__ import print_function
import argparse
import importlib
import os
import matplotlib.pyplot as plt
import compare
from util import msg, profile, runparams, io
valid_solvers = ["advection",
"advection_nonuniform",
"advection_rk",
"advection_fv4",
"advection_weno",
"compressible",
"compressible_rk",
"compressible_fv4",
"compressible_sdc",
"compressible_react",
"diffusion",
"incompressible",
"lm_atm",
"swe"]
class Pyro(object):
"""
The main driver to run pyro.
"""
def __init__(self, solver_name):
"""
Constructor
Parameters
----------
solver_name : str
Name of solver to use
"""
msg.bold('pyro ...')
if solver_name not in valid_solvers:
msg.fail("ERROR: %s is not a valid solver" % solver_name)
self.pyro_home = os.path.dirname(os.path.realpath(__file__)) + '/'
# import desired solver under "solver" namespace
self.solver = importlib.import_module(solver_name)
self.solver_name = solver_name
# -------------------------------------------------------------------------
# runtime parameters
# -------------------------------------------------------------------------
# parameter defaults
self.rp = runparams.RuntimeParameters()
self.rp.load_params(self.pyro_home + "_defaults")
self.rp.load_params(self.pyro_home + solver_name + "/_defaults")
self.tc = profile.TimerCollection()
self.is_initialized = False
def initialize_problem(self, problem_name, inputs_file=None, inputs_dict=None,
other_commands=None):
"""
Initialize the specific problem
Parameters
----------
problem_name : str
Name of the problem
inputs_file : str
Filename containing problem's runtime parameters
inputs_dict : dict
Dictionary containing extra runtime parameters
other_commands : str
Other command line parameter options
"""
problem_defaults_file = self.pyro_home + self.solver_name + \
"/problems/_" + problem_name + ".defaults"
# problem-specific runtime parameters
if os.path.isfile(problem_defaults_file):
self.rp.load_params(problem_defaults_file)
# now read in the inputs file
if inputs_file is not None:
if not os.path.isfile(inputs_file):
# check if the param file lives in the solver's problems directory
inputs_file = self.pyro_home + self.solver_name + "/problems/" + inputs_file
if not os.path.isfile(inputs_file):
msg.fail("ERROR: inputs file does not exist")
self.rp.load_params(inputs_file, no_new=1)
if inputs_dict is not None:
for k, v in inputs_dict.items():
self.rp.params[k] = v
# and any commandline overrides
if other_commands is not None:
self.rp.command_line_params(other_commands)
# write out the inputs.auto
self.rp.print_paramfile()
self.verbose = self.rp.get_param("driver.verbose")
self.dovis = self.rp.get_param("vis.dovis")
# -------------------------------------------------------------------------
# initialization
# -------------------------------------------------------------------------
# initialize the Simulation object -- this will hold the grid and
# data and know about the runtime parameters and which problem we
# are running
self.sim = self.solver.Simulation(
self.solver_name, problem_name, self.rp, timers=self.tc)
self.sim.initialize()
self.sim.preevolve()
plt.ion()
self.sim.cc_data.t = 0.0
self.is_initialized = True
def run_sim(self):
"""
Evolve entire simulation
"""
if not self.is_initialized:
msg.fail("ERROR: problem has not been initialized")
tm_main = self.tc.timer("main")
tm_main.begin()
# output the 0th data
basename = self.rp.get_param("io.basename")
self.sim.write("{}{:04d}".format(basename, self.sim.n))
if self.dovis:
plt.figure(num=1, figsize=(8, 6), dpi=100, facecolor='w')
self.sim.dovis()
while not self.sim.finished():
self.single_step()
# final output
if self.verbose > 0:
msg.warning("outputting...")
basename = self.rp.get_param("io.basename")
self.sim.write("{}{:04d}".format(basename, self.sim.n))
tm_main.end()
# -------------------------------------------------------------------------
# final reports
# -------------------------------------------------------------------------
if self.verbose > 0:
self.rp.print_unused_params()
self.tc.report()
self.sim.finalize()
return self.sim
def single_step(self):
"""
Do a single step
"""
if not self.is_initialized:
msg.fail("ERROR: problem has not been initialized")
# fill boundary conditions
self.sim.cc_data.fill_BC_all()
# get the timestep
self.sim.compute_timestep()
# evolve for a single timestep
self.sim.evolve()
if self.verbose > 0:
print("%5d %10.5f %10.5f" %
(self.sim.n, self.sim.cc_data.t, self.sim.dt))
# output
if self.sim.do_output():
if self.verbose > 0:
msg.warning("outputting...")
basename = self.rp.get_param("io.basename")
self.sim.write("{}{:04d}".format(basename, self.sim.n))
# visualization
if self.dovis:
tm_vis = self.tc.timer("vis")
tm_vis.begin()
self.sim.dovis()
store = self.rp.get_param("vis.store_images")
if store == 1:
basename = self.rp.get_param("io.basename")
plt.savefig("{}{:04d}.png".format(basename, self.sim.n))
tm_vis.end()
def __repr__(self):
""" Return a representation of the Pyro object """
s = "Solver = {}\n".format(self.solver_name)
if self.is_initialized:
s += "Problem = {}\n".format(self.sim.problem_name)
s += "Simulation time = {}\n".format(self.sim.cc_data.t)
s += "Simulation step number = {}\n".format(self.sim.n)
s += "\nRuntime Parameters"
s += "\n------------------\n"
s += str(self.rp)
return s
def get_var(self, v):
"""
Alias for cc_data's get_var routine, returns the cell-centered data
given the variable name v.
"""
if not self.is_initialized:
msg.fail("ERROR: problem has not been initialized")
return self.sim.cc_data.get_var(v)
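# A minimal driver sketch showing how the Pyro class above can be used
# programmatically (the problem and inputs-file names here are hypothetical
# examples and are not guaranteed to exist for every solver).
def example_programmatic_run():
    p = Pyro("advection")
    p.initialize_problem(problem_name="smooth", inputs_file="inputs.smooth")
    return p.run_sim()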
class PyroBenchmark(Pyro):
"""
    A subclass of Pyro for benchmarking. Inherits everything from Pyro, but adds benchmarking routines.
"""
def __init__(self, solver_name, comp_bench=False,
reset_bench_on_fail=False, make_bench=False):
"""
Constructor
Parameters
----------
solver_name : str
Name of solver to use
comp_bench : bool
Are we comparing to a benchmark?
reset_bench_on_fail : bool
Do we reset the benchmark on fail?
make_bench : bool
Are we storing a benchmark?
"""
super().__init__(solver_name)
self.comp_bench = comp_bench
self.reset_bench_on_fail = reset_bench_on_fail
self.make_bench = make_bench
def run_sim(self, rtol):
"""
Evolve entire simulation and compare to benchmark at the end.
"""
super().run_sim()
result = 0
if self.comp_bench:
result = self.compare_to_benchmark(rtol)
if self.make_bench or (result != 0 and self.reset_bench_on_fail):
self.store_as_benchmark()
if self.comp_bench:
return result
else:
return self.sim
def compare_to_benchmark(self, rtol):
""" Are we comparing to a benchmark? """
basename = self.rp.get_param("io.basename")
compare_file = "{}/tests/{}{:04d}".format(
self.solver_name, basename, self.sim.n)
msg.warning("comparing to: {} ".format(compare_file))
try:
sim_bench = io.read(compare_file)
except IOError:
msg.warning("ERROR opening compare file")
return "ERROR opening compare file"
result = compare.compare(self.sim.cc_data, sim_bench.cc_data, rtol)
if result == 0:
msg.success("results match benchmark to within relative tolerance of {}\n".format(rtol))
else:
msg.warning("ERROR: " + compare.errors[result] + "\n")
return result
def store_as_benchmark(self):
""" Are we storing a benchmark? """
if not os.path.isdir(self.solver_name + "/tests/"):
try:
os.mkdir(self.solver_name + "/tests/")
except (FileNotFoundError, PermissionError):
msg.fail(
"ERROR: unable to create the solver's tests/ directory")
basename = self.rp.get_param("io.basename")
bench_file = self.pyro_home + self.solver_name + "/tests/" + \
basename + "%4.4d" % (self.sim.n)
msg.warning("storing new benchmark: {}\n".format(bench_file))
self.sim.write(bench_file)
def parse_args():
"""Parse the runtime parameters"""
p = argparse.ArgumentParser()
p.add_argument("--make_benchmark",
help="create a new benchmark file for regression testing",
action="store_true")
p.add_argument("--compare_benchmark",
help="compare the end result to the stored benchmark",
action="store_true")
p.add_argument("solver", metavar="solver-name", type=str, nargs=1,
help="name of the solver to use", choices=valid_solvers)
p.add_argument("problem", metavar="problem-name", type=str, nargs=1,
help="name of the problem to run")
p.add_argument("param", metavar="inputs-file", type=str, nargs=1,
help="name of the inputs file")
p.add_argument("other", metavar="runtime-parameters", type=str, nargs="*",
help="additional runtime parameters that override the inputs file "
"in the format section.option=value")
return p.parse_args()
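# Example invocation (illustrative values; assumes this driver is saved as
# pyro.py):
#   ./pyro.py compressible sod inputs.sod vis.dovis=0
# Trailing arguments override the inputs file using the section.option=value
# format accepted by parse_args above.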
if __name__ == "__main__":
args = parse_args()
if args.compare_benchmark or args.make_benchmark:
pyro = PyroBenchmark(args.solver[0],
comp_bench=args.compare_benchmark,
make_bench=args.make_benchmark)
else:
pyro = Pyro(args.solver[0])
pyro.initialize_problem(problem_name=args.problem[0],
inputs_file=args.param[0],
other_commands=args.other)
pyro.run_sim()
|
|
import os.path as osp
import time
import joblib
import numpy as np
import tensorflow as tf
from baselines import logger
from baselines.common import set_global_seeds, explained_variance
from baselines.a2c.utils import discount_with_dones
from baselines.a2c.utils import Scheduler, find_trainable_variables
from baselines.a2c.utils import cat_entropy
# from a2c import kfac
from pysc2.env import environment
from pysc2.lib import actions as sc2_actions
from common import common
import nsml
_CONTROL_GROUP_RECALL = 0
_NOT_QUEUED = 0
# np.set_printoptions(threshold=np.inf)
def mse(pred, target):
return tf.square(pred-target)/2.
class Model(object):
def __init__(self,
policy,
ob_space,
ac_space,
nenvs,
total_timesteps,
nprocs=32,
nscripts=16,
nsteps=20,
nstack=4,
ent_coef=0.1,
vf_coef=0.5,
vf_fisher_coef=1.0,
lr=0.25,
max_grad_norm=0.001,
kfac_clip=0.001,
lrschedule='linear',
alpha=0.99,
epsilon=1e-5):
config = tf.ConfigProto(
allow_soft_placement=True,
intra_op_parallelism_threads=nprocs,
inter_op_parallelism_threads=nprocs)
config.gpu_options.allow_growth = True
self.sess = sess = tf.Session(config=config)
nsml.bind(sess=sess)
#nact = ac_space.n
nbatch = nenvs * nsteps
A = tf.placeholder(tf.int32, [nbatch])
XY0 = tf.placeholder(tf.int32, [nbatch])
XY1 = tf.placeholder(tf.int32, [nbatch])
# ADV == TD_TARGET - values
ADV = tf.placeholder(tf.float32, [nbatch])
TD_TARGET = tf.placeholder(tf.float32, [nbatch])
PG_LR = tf.placeholder(tf.float32, [])
VF_LR = tf.placeholder(tf.float32, [])
self.model = step_model = policy(
sess, ob_space, ac_space, nenvs, 1, nstack, reuse=False)
self.model2 = train_model = policy(
sess, ob_space, ac_space, nenvs, nsteps, nstack, reuse=True)
# Policy 1 : Base Action : train_model.pi label = A
script_mask = tf.concat(
[
tf.zeros([nscripts * nsteps, 1]),
tf.ones([(nprocs - nscripts) * nsteps, 1])
],
axis=0)
pi = train_model.pi
pac_weight = script_mask * (tf.nn.softmax(pi) - 1.0) + 1.0
pac_weight = tf.reduce_sum(pac_weight * tf.one_hot(A, depth=3), axis=1)
neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=pi, labels=A)
neglogpac *= tf.stop_gradient(pac_weight)
inv_A = 1.0 - tf.cast(A, tf.float32)
xy0_mask = tf.cast(A, tf.float32)
xy1_mask = tf.cast(A, tf.float32)
condition0 = tf.equal(xy0_mask, 2)
xy0_mask = tf.where(condition0, tf.ones(tf.shape(xy0_mask)), xy0_mask)
xy0_mask = 1.0 - xy0_mask
condition1 = tf.equal(xy1_mask, 2)
xy1_mask = tf.where(condition1, tf.zeros(tf.shape(xy1_mask)), xy1_mask)
# One hot representation of chosen marine.
# [batch_size, 2]
pi_xy0 = train_model.pi_xy0
pac_weight = script_mask * (tf.nn.softmax(pi_xy0) - 1.0) + 1.0
pac_weight = tf.reduce_sum(
pac_weight * tf.one_hot(XY0, depth=1024), axis=1)
logpac_xy0 = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=pi_xy0, labels=XY0)
logpac_xy0 *= tf.stop_gradient(pac_weight)
logpac_xy0 *= tf.cast(xy0_mask, tf.float32)
pi_xy1 = train_model.pi_xy1
pac_weight = script_mask * (tf.nn.softmax(pi_xy1) - 1.0) + 1.0
pac_weight = tf.reduce_sum(
pac_weight * tf.one_hot(XY0, depth=1024), axis=1)
# 1D? 2D?
logpac_xy1 = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=pi_xy1, labels=XY1)
logpac_xy1 *= tf.stop_gradient(pac_weight)
logpac_xy1 *= tf.cast(xy1_mask, tf.float32)
pg_loss = tf.reduce_mean(ADV * neglogpac)
pg_loss_xy0 = tf.reduce_mean(ADV * logpac_xy0)
pg_loss_xy1 = tf.reduce_mean(ADV * logpac_xy1)
vf_ = tf.squeeze(train_model.vf)
vf_r = tf.concat(
[
tf.ones([nscripts * nsteps, 1]),
tf.zeros([(nprocs - nscripts) * nsteps, 1])
],
axis=0) * TD_TARGET
vf_masked = vf_ * script_mask + vf_r
#vf_mask[0:nscripts * nsteps] = R[0:nscripts * nsteps]
vf_loss = tf.reduce_mean(mse(vf_masked, TD_TARGET))
entropy_a = tf.reduce_mean(cat_entropy(train_model.pi))
entropy_xy0 = tf.reduce_mean(cat_entropy(train_model.pi_xy0))
entropy_xy1 = tf.reduce_mean(cat_entropy(train_model.pi_xy1))
entropy = entropy_a + entropy_xy0 + entropy_xy1
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef
params = find_trainable_variables("model")
grads = tf.gradients(loss, params)
if max_grad_norm is not None:
grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)
grads = list(zip(grads, params))
trainer = tf.train.RMSPropOptimizer(
learning_rate=lr, decay=alpha, epsilon=epsilon)
_train = trainer.apply_gradients(grads)
self.logits = logits = train_model.pi
# xy0
self.params_common = params_common = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='model/common')
self.params_xy0 = params_xy0 = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope='model/xy0') + params_common
train_loss_xy0 = pg_loss_xy0 - entropy * ent_coef + vf_coef * vf_loss
self.grads_check_xy0 = grads_xy0 = tf.gradients(
train_loss_xy0, params_xy0)
if max_grad_norm is not None:
grads_xy0, _ = tf.clip_by_global_norm(grads_xy0, max_grad_norm)
grads_xy0 = list(zip(grads_xy0, params_xy0))
trainer_xy0 = tf.train.RMSPropOptimizer(
learning_rate=lr, decay=alpha, epsilon=epsilon)
_train_xy0 = trainer_xy0.apply_gradients(grads_xy0)
# xy1
self.params_xy1 = params_xy1 = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope='model/xy1') + params_common
train_loss_xy1 = pg_loss_xy1 - entropy * ent_coef + vf_coef * vf_loss
self.grads_check_xy1 = grads_xy1 = tf.gradients(
train_loss_xy1, params_xy1)
if max_grad_norm is not None:
grads_xy1, _ = tf.clip_by_global_norm(grads_xy1, max_grad_norm)
grads_xy1 = list(zip(grads_xy1, params_xy1))
trainer_xy1 = tf.train.RMSPropOptimizer(
learning_rate=lr, decay=alpha, epsilon=epsilon)
_train_xy1 = trainer_xy1.apply_gradients(grads_xy1)
self.lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
def train(obs, states, td_targets, masks, actions, xy0, xy1, values):
advs = td_targets - values
for step in range(len(obs)):
cur_lr = self.lr.value()
td_map = {
train_model.X: obs,
A: actions,
XY0: xy0,
XY1: xy1,
ADV: advs,
TD_TARGET: td_targets,
PG_LR: cur_lr
}
if states != []:
td_map[train_model.S] = states
td_map[train_model.M] = masks
policy_loss, value_loss, policy_entropy, _, \
policy_loss_xy0, policy_entropy_xy0, _, \
policy_loss_xy1, policy_entropy_xy1, _ = sess.run(
[pg_loss, vf_loss, entropy, _train,
pg_loss_xy0, entropy_xy0, _train_xy0,
pg_loss_xy1, entropy_xy1, _train_xy1],
td_map)
return policy_loss, value_loss, policy_entropy, \
policy_loss_xy0, policy_entropy_xy0, \
policy_loss_xy1, policy_entropy_xy1
def save(save_path):
ps = sess.run(params)
joblib.dump(ps, save_path)
def load(load_path):
loaded_params = joblib.load(load_path)
restores = []
for p, loaded_p in zip(params, loaded_params):
restores.append(p.assign(loaded_p))
sess.run(restores)
self.train = train
self.save = save
self.load = load
self.train_model = train_model
self.step_model = step_model
self.step = step_model.step
self.value = step_model.value
self.initial_state = step_model.initial_state
print("global_variables_initializer start")
tf.global_variables_initializer().run(session=sess)
print("global_variables_initializer complete")
class Runner(object):
def __init__(self,
env,
model,
nsteps,
nscripts,
nstack,
gamma,
callback=None):
self.env = env
self.model = model
nh, nw, nc = (32, 32, 3)
self.nsteps = nsteps
self.nscripts = nscripts
self.nenv = nenv = env.num_envs
self.batch_ob_shape = (nenv * nsteps, nh, nw, nc * nstack)
self.batch_coord_shape = (nenv * nsteps, 32)
self.obs = np.zeros((nenv, nh, nw, nc * nstack), dtype=np.uint8)
self.available_actions = None
self.base_act_mask = np.full((self.nenv, 2), 0, dtype=np.uint8)
obs, rewards, dones, available_actions, army_counts, control_groups, selected, xy_per_marine = env.reset(
)
self.xy_per_marine = [{"0":[0,0], "1":[0,0]} for _ in range(nenv)]
for env_num, data in enumerate(xy_per_marine):
self.xy_per_marine[env_num] = data
self.army_counts = army_counts
self.control_groups = control_groups
self.selected = selected
self.update_obs(obs) # (2,13,32,32)
self.update_available(available_actions)
self.gamma = gamma
self.states = model.initial_state
self.dones = [False for _ in range(nenv)]
self.total_reward = [0.0 for _ in range(nenv)]
self.episode_rewards = []
self.episode_rewards_script = []
self.episode_rewards_a2c = []
self.episodes = 0
self.steps = 0
self.callback = callback
self.action_queue = [[] for _ in range(nenv)]
self.group_list = [[] for _ in range(nenv)]
self.agent_state = ["IDLE" for _ in range(nenv)]
self.dest_per_marine = [{} for _ in range(nenv)]
self.group_id = [0 for _ in range(nenv)]
def update_obs(self, obs): # (self.nenv, 32, 32, 2)
#obs = np.asarray(obs, dtype=np.int32).swapaxes(1, 2).swapaxes(2, 3)
self.obs = np.roll(self.obs, shift=-3, axis=3)
new_map = np.zeros((self.nenv, 32, 32, 3))
new_map[:, :, :, -1] = obs[:, 0, :, :]
for env_num in range(self.nenv):
# print("xy_per_marine: ", self.xy_per_marine)
if "0" not in self.xy_per_marine[env_num]:
self.xy_per_marine[env_num]["0"] = [0, 0]
if "1" not in self.xy_per_marine[env_num]:
self.xy_per_marine[env_num]["1"] = [0, 0]
marine0 = self.xy_per_marine[env_num]["0"]
marine1 = self.xy_per_marine[env_num]["1"]
new_map[env_num, marine0[0], marine0[1], -3] = 1
new_map[env_num, marine1[0], marine1[1], -2] = 1
self.obs[:, :, :, -3:] = new_map
def update_available(self, _available_actions):
#print("update_available : ", _available_actions)
self.available_actions = _available_actions
# avail = np.array([[0,1,2,3,4,7], [0,1,2,3,4,7]])
self.base_act_mask = np.full((self.nenv, 3), 0, dtype=np.uint8)
for env_num, list in enumerate(_available_actions):
# print("env_num :", env_num, " list :", list)
for action_num in list:
# print("action_num :", action_num)
if (action_num == 4):
self.base_act_mask[env_num][0] = 1
self.base_act_mask[env_num][1] = 1
elif action_num == 0:
self.base_act_mask[env_num][2] = 1
# elif(action_num == 331):
# self.base_act_mask[env_num][2] = 1
def valid_base_action(self, base_actions):
for env_num, list in enumerate(self.available_actions):
avail = []
for action_num in list:
if (action_num == 4):
avail.append(0)
avail.append(1)
elif action_num == 0:
avail.append(2)
# elif(action_num == 331):
# avail.append(2)
if base_actions[env_num] not in avail:
print("env_num", env_num, " argmax is not valid. random pick ",
avail)
base_actions[env_num] = np.random.choice(avail)
return base_actions
def trans_base_actions(self, base_actions):
new_base_actions = np.copy(base_actions)
for env_num, ba in enumerate(new_base_actions):
if (ba == 0):
new_base_actions[env_num] = 4 # move marine control group 0
elif (ba == 1):
new_base_actions[env_num] = 4 # move marine control group 1
elif (ba == 2):
new_base_actions[env_num] = 0 # move marine control group 1
# elif(ba==2):
# new_base_actions[env_num] = 331 # move marine xy0
return new_base_actions
def construct_action(self, base_actions, base_action_spec, x0, y0, x1, y1):
actions = []
for env_num, spec in enumerate(base_action_spec):
# print("spec", spec.args)
args = []
# for arg_idx, arg in enumerate(spec.args):
# #print("arg", arg)
# #print("arg.id", arg.id)
# if(arg.id==0): # screen (32,32) x0, y0
# args.append([int(x0[env_num]), int(y0[env_num])])
# # elif(arg.id==1): # minimap (32,32) x1, y1
# # args.append([int(x1[env_num]), int(y1[env_num])])
# # elif(arg.id==2): # screen2 (32,32) x2, y2
# # args.append([int(x2[env_num]), y2[env_num]])
# elif(arg.id==3): # pi3 queued (2)
# args.append([int(0)])
# elif(arg.id==4): # pi4 control_group_act (5)
# args.append([_CONTROL_GROUP_RECALL])
# elif(arg.id==5): # pi5 control_group_id 10
# args.append([int(base_actions[env_num])]) # 0 => cg 0 / 1 => cg 1
# # elif(arg.id==6): # pi6 select_point_act 4
# # args.append([int(sub6[env_num])])
# # elif(arg.id==7): # pi7 select_add 2
# # args.append([int(sub7[env_num])])
# # elif(arg.id==8): # pi8 select_unit_act 4
# # args.append([int(sub8[env_num])])
# # elif(arg.id==9): # pi9 select_unit_id 500
# # args.append([int(sub9[env_num])])
# # elif(arg.id==10): # pi10 select_worker 4
# # args.append([int(sub10[env_num])])
# # elif(arg.id==11): # pi11 build_queue_id 10
# # args.append([int(sub11[env_num])])
# # elif(arg.id==12): # pi12 unload_id 500
# # args.append([int(sub12[env_num])])
# else:
# raise NotImplementedError("cannot construct this arg", spec.args)
two_action = []
if base_actions[env_num] == 0:
two_action.append(
sc2_actions.FunctionCall(
4,
[[_CONTROL_GROUP_RECALL], [0]]
))
two_action.append(
sc2_actions.FunctionCall(
331,
[[_NOT_QUEUED], [int(x0[env_num]), y0[env_num]]]))
elif base_actions[env_num] == 1:
two_action.append(
sc2_actions.FunctionCall(4, [[_CONTROL_GROUP_RECALL], [1]]))
two_action.append(
sc2_actions.FunctionCall(
331, [[_NOT_QUEUED], [int(x1[env_num]), y1[env_num]]]))
elif base_actions[env_num] == 2:
two_action.append(
sc2_actions.FunctionCall(0, []))
two_action.append(
sc2_actions.FunctionCall(0, []))
#action = sc2_actions.FunctionCall(a, args)
actions.append(two_action)
return actions
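  # Hedged summary of the mapping implemented by construct_action above:
  #   base action 0 -> recall control group 0 (function 4), then Move_screen
  #                    (function 331) to (x0, y0)
  #   base action 1 -> recall control group 1 (function 4), then Move_screen
  #                    (function 331) to (x1, y1)
  #   base action 2 -> two no_op (function 0) calls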
def run(self):
mb_obs, mb_td_targets, mb_base_actions, \
mb_xy0, mb_xy1, \
mb_values, mb_dones \
= [], [], [], [], [], [], []
mb_states = self.states
for n in range(self.nsteps):
# pi, pi2, x1, y1, x2, y2, v0
pi1, pi_xy0, pi_xy1, values, states = self.model.step(
self.obs, self.states, self.dones)
pi1_noise = np.random.random_sample((self.nenv, 3)) * 0.3
base_actions = np.argmax(
pi1 * self.base_act_mask + pi1_noise, axis=1)
xy0 = np.argmax(pi_xy0, axis=1)
x0 = (xy0 % 32).astype(int)
y0 = (xy0 / 32).astype(int)
xy1 = np.argmax(pi_xy1, axis=1)
x1 = (xy1 % 32).astype(int)
y1 = (xy1 / 32).astype(int)
# Scripted Agent Hacking
for env_num in range(self.nenv):
if env_num >= self.nscripts: # only for scripted agents
continue
ob = self.obs[env_num, :, :, :]
player_relative = ob[:, :, -1]
self.group_list[env_num] = common.update_group_list2(
self.control_groups[env_num])
if len(self.action_queue[env_num]) == 0:
self.action_queue[env_num], self.group_id[env_num], self.dest_per_marine[env_num], self.xy_per_marine[env_num] = \
common.solve_tsp(player_relative,
self.selected[env_num][0],
self.group_list[env_num],
self.group_id[env_num],
self.dest_per_marine[env_num],
self.xy_per_marine[env_num])
base_actions[env_num] = 0
x0[env_num] = 0
y0[env_num] = 0
x1[env_num] = 0
y1[env_num] = 0
if len(self.action_queue[env_num]) > 0:
action = self.action_queue[env_num].pop(0)
base_actions[env_num] = action.get("base_action", 0)
x0[env_num] = action.get("x0", 0)
y0[env_num] = action.get("y0", 0)
xy0[env_num] = y0[env_num] * 32 + x0[env_num]
x1[env_num] = action.get("x1", 0)
y1[env_num] = action.get("y1", 0)
xy1[env_num] = y1[env_num] * 32 + x1[env_num]
base_actions = self.valid_base_action(base_actions)
new_base_actions = self.trans_base_actions(base_actions)
base_action_spec = self.env.action_spec(new_base_actions)
# print("base_actions:", base_actions)
actions = self.construct_action(
base_actions,
base_action_spec,
x0,
y0,
x1,
y1
)
mb_obs.append(np.copy(self.obs))
mb_base_actions.append(base_actions)
mb_xy0.append(xy0)
mb_xy1.append(xy1)
mb_values.append(values)
mb_dones.append(self.dones)
#print("final acitons : ", actions)
obs, rewards, dones,\
available_actions, army_counts,\
control_groups, selected, xy_per_marine\
= self.env.step(
actions=actions)
self.army_counts = army_counts
self.control_groups = control_groups
self.selected = selected
for env_num, data in enumerate(xy_per_marine):
self.xy_per_marine[env_num] = data
self.update_available(available_actions)
self.states = states
self.dones = dones
mean_100ep_reward_a2c = 0
for n, done in enumerate(dones):
self.total_reward[n] += float(rewards[n])
if done:
self.obs[n] = self.obs[n] * 0
self.episodes += 1
num_episodes = self.episodes
self.episode_rewards.append(self.total_reward[n])
model = self.model
mean_100ep_reward = round(
np.mean(self.episode_rewards[-101:]), 1)
if (n < self.nscripts): # scripted agents
self.episode_rewards_script.append(
self.total_reward[n])
mean_100ep_reward_script = round(
np.mean(self.episode_rewards_script[-101:]), 1)
nsml.report(
reward_script=self.total_reward[n],
mean_reward_script=mean_100ep_reward_script,
reward=self.total_reward[n],
mean_100ep_reward=mean_100ep_reward,
episodes=self.episodes,
step=self.episodes,
scope=locals()
)
else:
self.episode_rewards_a2c.append(self.total_reward[n])
mean_100ep_reward_a2c = round(
np.mean(self.episode_rewards_a2c[-101:]), 1)
nsml.report(
reward_a2c=self.total_reward[n],
mean_reward_a2c=mean_100ep_reward_a2c,
reward=self.total_reward[n],
mean_100ep_reward=mean_100ep_reward,
episodes=self.episodes,
step=self.episodes,
scope=locals()
)
print("mean_100ep_reward_a2c", mean_100ep_reward_a2c)
if self.callback is not None:
self.callback(locals(), globals())
self.total_reward[n] = 0
self.group_list[n] = []
self.update_obs(obs)
mb_td_targets.append(rewards)
mb_dones.append(self.dones)
#batch of steps to batch of rollouts
mb_obs = np.asarray(
mb_obs, dtype=np.uint8).swapaxes(1, 0).reshape(
self.batch_ob_shape)
mb_td_targets = np.asarray(mb_td_targets, dtype=np.float32).swapaxes(1, 0)
mb_base_actions = np.asarray(
mb_base_actions, dtype=np.int32).swapaxes(1, 0)
mb_xy0 = np.asarray(mb_xy0, dtype=np.int32).swapaxes(1, 0)
mb_xy1 = np.asarray(mb_xy1, dtype=np.int32).swapaxes(1, 0)
mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)
mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
mb_masks = mb_dones[:, :-1]
mb_dones = mb_dones[:, 1:]
last_values = self.model.value(self.obs, self.states,
self.dones).tolist()
#discount/bootstrap off value fn
for n, (rewards, dones, value) in enumerate(
zip(mb_td_targets, mb_dones, last_values)):
rewards = rewards.tolist()
dones = dones.tolist()
if dones[-1] == 0:
rewards = discount_with_dones(rewards + [value], dones + [0],
self.gamma)[:-1]
else:
rewards = discount_with_dones(rewards, dones, self.gamma)
mb_td_targets[n] = rewards
mb_td_targets = mb_td_targets.flatten()
mb_base_actions = mb_base_actions.flatten()
mb_xy0 = mb_xy0.flatten()
mb_xy1 = mb_xy1.flatten()
mb_values = mb_values.flatten()
mb_masks = mb_masks.flatten()
return mb_obs, mb_states, mb_td_targets, mb_masks, \
mb_base_actions, mb_xy0, mb_xy1, mb_values
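# A hedged worked example of the bootstrapping step in Runner.run() above:
# with gamma = 0.99, rollout rewards [1.0, 0.0], no terminal step and a final
# value estimate of 2.0, the value is appended before discounting, giving
# td targets [1.0 + 0.99 * (0.0 + 0.99 * 2.0), 0.0 + 0.99 * 2.0]
# = [2.9602, 1.98]; the appended bootstrap entry itself is then dropped.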
def learn(policy,
env,
seed,
total_timesteps=int(40e6),
gamma=0.99,
log_interval=1,
nprocs=24,
nscripts=12,
nsteps=20,
nstack=4,
ent_coef=0.01,
vf_coef=0.5,
vf_fisher_coef=1.0,
lr=0.25,
max_grad_norm=0.01,
kfac_clip=0.001,
save_interval=None,
lrschedule='linear',
callback=None):
tf.reset_default_graph()
set_global_seeds(seed)
nenvs = nprocs
ob_space = (32, 32, 3) # env.observation_space
ac_space = (32, 32)
make_model = lambda: Model(policy, ob_space, ac_space, nenvs,
total_timesteps,
nprocs=nprocs,
nscripts=nscripts,
nsteps=nsteps,
nstack=nstack,
ent_coef=ent_coef,
vf_coef=vf_coef,
vf_fisher_coef=vf_fisher_coef,
lr=lr,
max_grad_norm=max_grad_norm,
kfac_clip=kfac_clip,
lrschedule=lrschedule)
if save_interval and logger.get_dir():
import cloudpickle
with open(osp.join(logger.get_dir(), 'make_model.pkl'), 'wb') as fh:
fh.write(cloudpickle.dumps(make_model))
model = make_model()
print("make_model complete!")
runner = Runner(
env,
model,
nsteps=nsteps,
nscripts=nscripts,
nstack=nstack,
gamma=gamma,
callback=callback)
nbatch = nenvs * nsteps
tstart = time.time()
# enqueue_threads = model.q_runner.create_threads(model.sess, coord=tf.train.Coordinator(), start=True)
for update in range(1, total_timesteps // nbatch + 1):
obs, states, td_targets, masks, actions, xy0, xy1, values = runner.run()
policy_loss, value_loss, policy_entropy, \
policy_loss_xy0, policy_entropy_xy0, \
policy_loss_xy1, policy_entropy_xy1, \
= model.train(obs, states, td_targets,
masks, actions,
xy0, xy1, values)
model.old_obs = obs
nseconds = time.time() - tstart
fps = int((update * nbatch) / nseconds)
if update % log_interval == 0 or update == 1:
ev = explained_variance(values, td_targets)
# nsml.report(
# nupdates=update,
# total_timesteps=update * nbatch,
# fps=fps,
# policy_entropy=float(policy_entropy),
# policy_loss=float(policy_loss),
# policy_loss_xy0=float(policy_loss_xy0),
# policy_entropy_xy0=float(policy_entropy_xy0),
# policy_loss_xy1=float(policy_loss_xy1),
# policy_entropy_xy1=float(policy_entropy_xy1),
# value_loss=float(value_loss),
# explained_variance=float(ev),
# batch_size=nbatch,
# step=update,
# scope=locals()
# )
# logger.record_tabular("nupdates", update)
# logger.record_tabular("total_timesteps", update * nbatch)
# logger.record_tabular("fps", fps)
# logger.record_tabular("policy_entropy", float(policy_entropy))
# logger.record_tabular("policy_loss", float(policy_loss))
# logger.record_tabular("policy_loss_xy0", float(policy_loss_xy0))
# logger.record_tabular("policy_entropy_xy0",
# float(policy_entropy_xy0))
# logger.record_tabular("policy_loss_xy1", float(policy_loss_xy1))
# logger.record_tabular("policy_entropy_xy1",
# float(policy_entropy_xy1))
# # logger.record_tabular("policy_loss_y0", float(policy_loss_y0))
# # logger.record_tabular("policy_entropy_y0", float(policy_entropy_y0))
# logger.record_tabular("value_loss", float(value_loss))
# logger.record_tabular("explained_variance", float(ev))
# logger.dump_tabular()
if save_interval and (update % save_interval == 0
or update == 1) and logger.get_dir():
savepath = osp.join(logger.get_dir(), 'checkpoint%.5i' % update)
print('Saving to', savepath)
model.save(savepath)
env.close()
|
|
#!/usr/bin/env python
'''
The MIT License (MIT)
Copyright (c) <2016> <Mathias Lesche>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
contact: mat.lesche(at)gmail.com
'''
''' python modules '''
import logging
from argparse import ArgumentParser as ArgumentParser
from argparse import RawDescriptionHelpFormatter
from collections import defaultdict
from collections import OrderedDict
from functools import partial
from operator import itemgetter
from os import chmod
from os import listdir
from os.path import join as pathjoin, basename, isfile
from types import NoneType
from types import TupleType
from types import IntType
''' own modules '''
from database.database import Database
from report.tex import Tex
from support.io_module import check_Directorylist
from support.io_module import check_Fileslist
from support.io_module import get_fileobject
from support.io_module import write_list
from support.information import Information
from support.main_logger import MainLogger
class Parser(object):
def __init__(self):
self.__parser = ArgumentParser(description="""
Script builds a report tex file which includes the main plots
and tables. pdfs have to be in the directory 'pdf' and
tex files in the report folder and a file with the labels must
exist. One can give the script tex prefix which is added to the input
path in the tex files. Comes in handy when the report is copied
and the structure is different.
""", formatter_class=RawDescriptionHelpFormatter)
self.initialiseParser()
self.__report = ''
self.__bfxid = ''
self.__pdf = ''
self.__tex = ''
self.__species = []
self.__ensembl = ''
self.__config = ''
self.__label = ''
self.__programfile = ''
self.__deresults = []
self.__logger = logging.getLogger('dsp.report')
self.parse()
def initialiseParser(self):
self.__parser.add_argument('-r', '--report', type=str, metavar='DIRECTORY', dest='report', required = True, help="report directory (contains tex files)")
self.__parser.add_argument('-b', '--bfx', type=str, metavar='STRING', dest='bfx', required = True , help="bfx id of the project")
self.__parser.add_argument('-d', '--deresults', type=str, metavar='DIRECTORY', dest='deresults', required = True, nargs='+', help="directories with de analysis files")
self.__parser.add_argument('-l', '--labels', type=str, metavar='FILE', dest='label', required = True , help="label file (having labels for the tex figures and tables)")
self.__parser.add_argument('-v', '--versionfile', type=str, metavar='FILE', dest='programfile', default='' , help="contains the used module versions")
self.__parser.add_argument("-t", "--tex", dest = 'tex', metavar='STRING', default = '', help="tex prefix (added to the path of the files within the tex document)")
self.__parser.add_argument('-s', '--species', type=str, metavar='STRING', dest='species', default = 'mouse', help="species [human, mouse, zebrafish, rat, drosophila] (default: mouse)")
self.__parser.add_argument('-e', '--ensembl', type=str, metavar='STRING', dest='ensembl', default = '81', help="Ensembl version (default: 81)")
def parse(self, inputstring = None):
if isinstance(inputstring, NoneType):
self.__options = self.__parser.parse_args()
else:
self.__options = self.__parser.parse_args(inputstring)
def show_log(self, level, message):
if level == 'debug':
self.__logger.debug(message)
elif level == 'info':
self.__logger.info(message)
elif level == 'warning':
self.__logger.warning(message)
elif level == 'error':
self.__logger.error(message)
elif level == 'critical':
self.__logger.critical(message)
'''
    method checks if the given directories exist
@param inputdir: list
@param inputparam: string representing the option
@return: list
'''
def check_folder(self, inputdir, inputparam):
good, bad = check_Directorylist(inputdir)
if inputparam == '-r' and len(good) != 1:
self.show_log('error', "check directory from {1} parameter: {0}".format(inputdir, inputparam))
exit(2)
elif inputparam == '-d' and len(bad) != 0:
self.show_log('error', "check directory from {1} parameter: {0}".format(inputdir, inputparam))
exit(2)
return good
'''
    method checks the input list of files and returns the valid ones
@param filelist: list of string
@param choose: string with the option prefix
@return: list or string
'''
def check_files(self, filelist, choose):
if isinstance(filelist, NoneType):
return []
elif filelist[0] == '':
return ''
good = check_Fileslist(filelist)[0]
if choose in ('-v', '-l', '-c'):
if len(good) != 1:
self.show_log('error', "check input {0} for option {1}".format(filelist, choose))
exit(2)
return good[0]
elif choose == '-d':
if len(good) != len(filelist):
self.show_log('error', "check input {0} for option {1}".format(filelist, choose))
exit(2)
return good
def main(self):
self.__report = self.check_folder(self.__options.report, '-r')[0]
self.__deresults = self.check_folder(self.__options.deresults, '-d')
self.__label = self.check_files((self.__options.label, ), '-l') # label file
self.__programfile = self.check_files((self.__options.programfile, ), '-v') # program versions
self.__tex = self.__options.tex
self.__bfxid = self.__options.bfx
if self.__options.species in ('human', 'homo_sapiens'):
self.__species = ('homo sapiens', 'hg38')
elif self.__options.species in ('mouse', 'mus_musculus'):
self.__species = ('mus musculus', 'mm10')
elif self.__options.species in ('zebrafish', 'danio_rerio'):
self.__species = ('danio rerio', 'GRCz10')
elif self.__options.species in ('rat', 'rattus_norvegicus'):
self.__species = ('rattus norvegicus', 'rn6')
elif self.__options.species in ('drosophila', 'drosophila_melanogaster'):
self.__species = ('drosophila_melanogaster', 'dm6')
self.__ensembl = self.__options.ensembl
def get_bfx(self):
return self.__bfxid
def get_species(self):
return self.__species
def get_ensembl(self):
return self.__ensembl
def get_tex(self):
return self.__tex
def get_report(self):
return self.__report
def get_label(self):
return self.__label
def get_deresults(self):
return self.__deresults
def get_versionfile(self):
return self.__programfile
def get_condition(self):
return self.__config
bfx = property(get_bfx, None, None, None)
label = property(get_label, None, None, None)
condition = property(get_condition)
deresults = property(get_deresults, None, None, None)
versionfile = property(get_versionfile, None, None, None)
report = property(get_report, None, None, None)
tex = property(get_tex, None, None, None)
species = property(get_species, None, None, None)
ensembl = property(get_ensembl, None, None, None)
class Report(object):
def __init__(self, bfx, report, tex, label, versionfile, deresults, ensembl, species):
self.__bfxid = bfx
self.__bfx = bfx[3:]
self.__report = report
self.__reportfile = pathjoin(self.__report, '{0}.tex'.format(self.__bfxid))
self.__tex = tex
self.__labelfile = label
self.__versionfile = versionfile
self.__species = species
self.__ensembl = ensembl
self.__defolders = deresults
self.__deresultsdict = defaultdict(list)
self.__deorder = []
self.__conditiondict = defaultdict(list)
self.__pathdict = defaultdict(partial(defaultdict, list))
self.__labeldict = defaultdict(partial(defaultdict, set))
self.__atypedict = defaultdict(set)
self.__toollist = []
self.__maindocument = []
self.__destringtuple = ('none', 'expl', 'exploratory', 'de-expl') # when you change it here, change it in figures_report too
self.__newlabels = []
self.__conditionlabel = 'condition_table'
self.__conditionfilename = '{0}_{1}_COND_1.tex'.format(self.__bfxid, self.__conditionlabel)
self.__resultlabel = 'result_table'
self.__resultfilename = '{0}_{1}_COND_1.tex'.format(self.__bfxid, self.__resultlabel)
self.__samplelabel = 'sample_table'
self.__sampletag = 'sample'
self.__samplefilename = '{0}_{1}_1.tex'.format(self.__bfxid, self.__samplelabel)
self.__logger = logging.getLogger('dsp.report')
def show_log(self, level, message):
if level == 'debug':
self.__logger.debug(message)
elif level == 'info':
self.__logger.info(message)
elif level == 'warning':
self.__logger.warning(message)
elif level == 'error':
self.__logger.error(message)
elif level == 'critical':
self.__logger.critical(message)
'''
    extract a substring of a string by position. if a tuple is supplied, the sublist is merged
    by the sep and returned
    @param inputstring: string
    @param index: int position or tuple with (start, end) positions
    @param sep: string used for splitting
    @param aslist: boolean, return the sublist instead of a joined string
    @return: string or list
'''
def get_substring_fromindex(self, inputstring, index, sep = '.', aslist = False):
inputlist = inputstring.split(sep)
if isinstance(index, IntType):
return inputlist[index]
elif isinstance(index, TupleType):
if aslist: return inputlist[index[0]:index[1]]
return sep.join(inputlist[index[0]:index[1]])
return inputstring
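    # Illustrative examples (hypothetical inputs) for get_substring_fromindex:
    #   self.get_substring_fromindex('a.b.c.d', 1)                   -> 'b'
    #   self.get_substring_fromindex('a.b.c.d', (1, 3))              -> 'b.c'
    #   self.get_substring_fromindex('a.b.c.d', (1, 3), aslist=True) -> ['b', 'c']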
def query_database(self):
temp = []
dbinst = Database(Information.DB_HOST, Information.DB_USER, Information.DB_PW, Information.DB)
dbinst.setConnection()
libs = dbinst.query_BFXLib_with_bfxid(self.__bfx)
for lib in libs:
libid = 'L{0}'.format(lib['LIBRARY_ID'])
libentry = dbinst.query_Library_with_Libid(lib['LIBRARY_ID'])[0]
libname = libentry['NAME']
temp.append((libid, libname))
dbinst.closeConnection()
return temp
def get_texreplace(self, string):
return string.replace(' ', '_').replace("'", '\\textquotesingle').replace('_', '\_')
def get_underlinereplace(self, string):
return string.replace('_', '\_')
'''
    builds the lib/sample name table from a list of tuples. if more than 10 entries are given,
    it creates a 4-column table, otherwise a 2-column table
@param lol: list of tuple
@return: list ... tex table
'''
def build_libtable(self, lol):
temp = []
lol = sorted(lol, key = itemgetter(1))
lol = [(self.get_texreplace(i[0]), self.get_texreplace(i[1])) for i in lol]
temp.extend((Tex.TABLESTART, Tex.get_replaced_caption(self.__bfxid, 'Sample Table', 'Sample Table'), Tex.BEGINCENTER))
llol = len(lol)
if llol > 10:
ldiv = llol/2
lol = [lol[:ldiv], lol[ldiv:]] if llol % 2 == 0 else [lol[:ldiv+1], lol[ldiv+1:]]
temp.extend((Tex.get_replaced_tabularstart('c|c||c|c'), Tex.HLINE, Tex.SAMPLEOHEADFOUR))
for i in xrange(len(lol[0])):
try:
temp.append(Tex.get_tablerowstring((lol[0][i][0], lol[0][i][1], lol[1][i][0], lol[1][i][1])))
except IndexError:
temp.append(Tex.get_tablerowstring((lol[0][i][0], lol[0][i][1], '', '')))
else:
temp.extend((Tex.get_replaced_tabularstart('c|c'), Tex.HLINE, Tex.SAMPLEHEADTWO))
temp.extend([Tex.get_tablerowstring(i[:7]) for i in lol])
label = Tex.get_replaced_label(self.__bfxid, '_'.join((self.__samplelabel, self.__sampletag, '1')), 'tab')
temp.extend([Tex.TABULAREND, Tex.ENDCENTER, label, Tex.TABLEEND])
self.__newlabels.append('\t'.join((self.__samplelabel, 'sample', label[7:].rstrip('}\n'), '1', '', self.__sampletag))+'\n')
self.__labeldict[self.__sampletag][self.__samplelabel] = set([(label[7:].rstrip('}\n'), 1)])
return temp
'''
read the label file and store it in dictionary
dict[sectiontype/detype][componenttype] = [(label, labelcount, additional information)]
store the sectiontype/detype in a set
'''
def read_labelfile(self):
with get_fileobject(self.__labelfile, 'r') as filein:
for line in filein:
if line[0] == '#' or line in ('\n', '\r\n'): continue
linus = line.rstrip('\n').split('\t')
comptype, sectype, label, count, addinfo, atype = linus[0], linus[1], linus[2], int(linus[3]), linus[4], linus[5]
self.__labeldict[sectype][comptype].add((label, int(count), addinfo))
self.__atypedict[atype].add(sectype) # also add the sub type to the analysis type dict
'''
method extracts the results from the de summary file and stores them in a dictionary
where the key is the detype. returns the order of the de analyses
@param filename: name of the summary file
@return: list ... unique de analysis identifiers in the order they appear in the file
'''
def get_deresults_from_summaryfile(self, filename):
temp = []
with get_fileobject(filename, 'r') as filein:
filein.readline()
for line in filein:
linus = line.rstrip('\n').split('\t')
temp.append(linus[7])
self.__deresultsdict[linus[7]].append(linus)
return list(OrderedDict.fromkeys(temp))
'''
method extracts the conditions from the condition file
@param filename: name of the condition file
@return: list
'''
def get_condition_from_conditionfile(self, filename):
temp = []
with get_fileobject(filename, 'r') as filein:
for line in filein:
linus = line.rstrip('\n').split('\t')
linus = [self.get_texreplace(i) for i in linus]
temp.append(linus)
return temp
def read_defolders(self):
for folder in self.__defolders:
files = [i for i in listdir(folder)]
condlist = [i for i in files if i.endswith('condition.csv')]
condfile = pathjoin(folder, condlist[0]) if len(condlist) == 1 else ''
sumlist = [i for i in files if 'deseq-results.summary' in i]
summaryfile = pathjoin(folder, sumlist[0]) if len(sumlist) == 1 else ''
if isfile(summaryfile):
deorder = self.get_deresults_from_summaryfile(summaryfile)
if len(deorder) != 0:
self.__deorder.append(deorder)
if isfile(condfile): self.__conditiondict[self.__deorder[-1][0]] = self.get_condition_from_conditionfile(condfile)
def read_build_toollist(self):
self.__toollist = ['Reference: {0} $\ldots$ {1}'.format(self.__species[0].replace('_', '\_'), self.__species[1].replace('_', '\_')), 'Ensembl: {0}'.format(self.__ensembl)]
with get_fileobject(self.__versionfile, 'r') as filein:
for line in filein:
if line[0] == '#' or line in ('\n', '\r\n'): continue
linus = line.rstrip('\n').split('\t')
if 'bedtools' == linus[0]:
self.__toollist.append('BEDTools: {0} \citep{{quinlan2010bedtools}}'.format(linus[1]))
elif 'bismark' == linus[0]:
self.__toollist.append('Bismark: {0} \citep{{krueger2011bismark}}'.format(linus[1]))
elif 'bowtie2' == linus[0]:
self.__toollist.append('Bowtie 2: {0} \citep{{langmead2012fast}}'.format(linus[1]))
elif 'bowtie1' == linus[0]:
self.__toollist.append('Bowtie: {0} \citep{{langmead2009ultrafast}}'.format(linus[1]))
elif 'bwa' == linus[0]:
self.__toollist.append('BWA: {0} \citep{{bwamem}}'.format(linus[1]))
elif 'cutadapt' == linus[0]:
self.__toollist.append('cutadapt: {0} \citep{{martin2011cutadapt}}'.format(linus[1]))
elif 'deseq2' == linus[0]:
self.__toollist.append('DESeq2: {0} \citep{{love2014moderated}}'.format(linus[1]))
elif 'fastqc' == linus[0]:
self.__toollist.append('FastQC: {0} \href{{http://www.bioinformatics.bbsrc.ac.uk/projects/fastqc/}}{{Homepage}}'.format(linus[1]))
elif 'fastq_screen' == linus[0]:
self.__toollist.append('Fastq Screen: {0} \href{{http://www.bioinformatics.babraham.ac.uk/projects/fastq_screen/}}{{Homepage}}'.format(linus[1]))
elif 'gmap' == linus[0]:
self.__toollist.append('GSNAP: {0} \citep{{wu2010fast}}'.format(linus[1]))
elif 'hisat2' == linus[0]:
self.__toollist.append('HISAT2: {0} \citep{{kim2015hisat}}'.format(linus[1]))
elif 'kallisto' == linus[0]:
self.__toollist.append('kallisto: {0} \citep{{bray2016near}}'.format(linus[1]))
elif 'mirdeep' == linus[0]:
self.__toollist.append('miRDeep2: {0} \citep{{friedlander2012mirdeep2}}'.format(linus[1]))
elif 'python' == linus[0]:
self.__toollist.append('Python 2: {0}'.format(linus[1]))
elif 'python3' == linus[0]:
self.__toollist.append('Python 3: {0}'.format(linus[1]))
elif 'R' == linus[0]:
self.__toollist.append('R: {0} \citep{{R}}'.format(linus[1]))
elif 'rna-seqc' == linus[0]:
self.__toollist.append('RNA-SeQC: {0} \citep{{deluca2012rna}}'.format(linus[1]))
elif 'samtools' == linus[0]:
self.__toollist.append('Samtools: {0} \citep{{li2009sequence}}'.format(linus[1]))
elif 'STAR' == linus[0]:
self.__toollist.append('STAR: {0} \citep{{dobin2013star}}'.format(linus[1]))
elif 'subread' == linus[0]:
self.__toollist.append('featureCounts: {0} \citep{{liao2014featurecounts}}'.format(linus[1]))
elif 'tophat' == linus[0]:
self.__toollist.append('TopHat 2: {0} \citep{{kim2013tophat2}}'.format(linus[1]))
elif 'trim_galore' == linus[0]:
self.__toollist.append('Trim Galore: {0} \href{{http://www.bioinformatics.babraham.ac.uk/projects/trim_galore/}}{{Homepage}}'.format(linus[1]))
elif 'trimmomatic' == linus[0]:
self.__toollist.append('Trimmomatic: {0} \citep{{bolger2014trimmomatic}}'.format(linus[1]))
elif 'snakemake' == linus[0]:
self.__toollist.append('Snakemake: {0} \citep{{koster2012snakemake}}'.format(linus[1]))
'''
method scans the report directory for tex file which have a specific
string in the filename and adds them to a dictionary
'''
def register_texfiles(self):
filelist = [i for i in listdir(self.__report) if i.endswith('.tex')]
for filename in filelist:
if filename == '{0}.tex'.format(self.__bfxid): continue
substring = self.get_substring_fromindex(self.get_substring_fromindex(basename(filename), -2), -2, '_')
if 'heatmap_plot' in filename:
self.__pathdict[substring]['heatmap_plot'].append(filename)
elif 'heatmappoisson_plot' in filename:
self.__pathdict[substring]['heatmappoisson_plot'].append(filename)
elif 'highExpressedGenes_plot' in filename:
self.__pathdict[substring]['highExpressedGenes_plot'].append(filename)
elif 'varGenes_plot' in filename:
self.__pathdict[substring]['varGenes_plot'].append(filename)
elif 'maplot' in filename:
self.__pathdict[substring]['maplot'].append(filename)
elif 'mds_plot' in filename:
self.__pathdict[substring]['mds_plot'].append(filename)
elif 'poisson_plot' in filename:
self.__pathdict[substring]['poisson_plot'].append(filename)
elif 'pca_1-2' in filename:
self.__pathdict[substring]['pca_1-2'].append(filename)
elif 'pca_2-3' in filename:
self.__pathdict[substring]['pca_2-3'].append(filename)
elif 'batch_1-2' in filename:
self.__pathdict[substring]['batch_1-2'].append(filename)
elif 'pearson_plot' in filename:
self.__pathdict[substring]['pearson_plot'].append(filename)
elif 'spearman_plot' in filename:
self.__pathdict[substring]['spearman_plot'].append(filename)
elif 'plotcounts' in filename:
self.__pathdict[substring]['plotcounts'].append(filename)
elif 'align_complete_number' in filename:
self.__pathdict[substring]['align_complete_number'].append(filename)
elif 'align_complete_perc' in filename:
self.__pathdict[substring]['align_complete_perc'].append(filename)
elif 'align_subsample_number' in filename:
self.__pathdict[substring]['align_subsample_number'].append(filename)
elif 'align_subsample_perc' in filename:
self.__pathdict[substring]['align_subsample_perc'].append(filename)
elif 'alignstats' in filename:
self.__pathdict[substring]['alignstats'].append(filename)
elif 'isoformcomplexity_added' in filename:
self.__pathdict[substring]['isoformcomplexity_added'].append(filename)
elif 'isoformcomplexity_complete' in filename:
self.__pathdict[substring]['isoformcomplexity_complete'].append(filename)
elif 'featureCounts_stats' in filename:
self.__pathdict[substring]['featureCounts_stats'].append(filename)
elif 'rnaseqc_alignment' in filename:
self.__pathdict[substring]['rnaseqc_alignment'].append(filename)
elif 'rnaseqc_libprofile' in filename:
self.__pathdict[substring]['rnaseqc_libprofile'].append(filename)
def get_labelinformation(self, inputlist):
return [i[-1] for i in sorted(inputlist, key = itemgetter(1))]
'''
method builds the cross-reference (\vref) string for the tex document. the wording is slightly
changed if there is more than one figure or table to reference
@param inputlist: labels of the figures or tables
@param what: string (figure or table)
@return: string
'''
def get_labelstring(self, inputlist, what):
inputlist = sorted(list(inputlist), key = itemgetter(1))
inputlist = ['~\\vref{{{0}}}'.format(i[0]) for i in inputlist]
if len(inputlist) == 1 and what == 'figure':
return 'figure{0} shows'.format(''.join(inputlist))
elif len(inputlist) > 1 and what == 'figure':
return 'figures{0} show'.format(','.join(inputlist))
if len(inputlist) == 1 and what == 'table':
return 'table{0} provides'.format(''.join(inputlist))
elif len(inputlist) > 1 and what == 'table':
return 'tables{0} provide'.format(','.join(inputlist))
return ''
'''
method builds the lists for the alignment section. the first list is the part of the tex document and
the second list contains the tex input strings to link the sub tex files
@return: list, list ... list of string, list of string
'''
def build_tex_input_alignment(self):
temp, inputtemp, align, rnaseqc = [], [], 'align', 'rnaseqc'
alabkeys, apathkeys = self.__labeldict[align].keys(), self.__pathdict[align].keys()
rlabkeys, rpathkeys = self.__labeldict[rnaseqc].keys(), self.__pathdict[rnaseqc].keys()
if 'alignstats' in alabkeys and 'alignstats' in apathkeys:
temp.append('{0} the alignment statistic'.format(self.get_labelstring(self.__labeldict[align]['alignstats'], 'figure')))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[align]['alignstats']])
if 'rnaseqc_alignment' in rlabkeys and 'rnaseqc_alignment' in rpathkeys:
temp.append('{0} general alignment statistics and information about rRNA content in each sample'.format(self.get_labelstring(self.__labeldict[rnaseqc]['rnaseqc_alignment'], 'table')))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[rnaseqc]['rnaseqc_alignment']])
if 'align_complete_number' in alabkeys and 'align_complete_number' in apathkeys:
temp.append('{0} the complexity of the samples as total number of non-redundant fragments over all fragments'.format(self.get_labelstring(self.__labeldict[align]['align_complete_number'], 'figure')))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[align]['align_complete_number']])
if 'align_complete_perc' in alabkeys and 'align_complete_perc' in apathkeys:
temp.append('{0} the complexity of the samples as \% of non-redundant fragments over all fragments'.format(self.get_labelstring(self.__labeldict[align]['align_complete_perc'], 'figure')))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[align]['align_complete_perc']])
if 'align_subsample_number' in alabkeys and 'align_subsample_number' in apathkeys:
temp.append('{0} the complexity of the libraries as the total number of new non-redundant fragments for each subsample set'.format(self.get_labelstring(self.__labeldict[align]['align_subsample_number'], 'figure')))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[align]['align_subsample_number']])
if 'align_subsample_perc' in alabkeys and 'align_subsample_perc' in apathkeys:
temp.append('{0} the complexity of the libraries as \% of new non-redundant fragments for each subsample set'.format(self.get_labelstring(self.__labeldict[align]['align_subsample_perc'], 'figure')))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[align]['align_subsample_perc']])
if 'rnaseqc_libprofile' in rlabkeys and 'rnaseqc_libprofile' in rpathkeys:
temp.append('{0} information about which regions of the reference the reads align to (exonic, intronic, intergenic)'.format(self.get_labelstring(self.__labeldict[rnaseqc]['rnaseqc_libprofile'], 'table')))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[rnaseqc]['rnaseqc_libprofile']])
return temp, inputtemp
'''
method builds the lists for the count section. the first list is the part of the tex document and
the second list contains the tex input strings to link the sub tex files
@return: list, list ... list of string, list of string
'''
def build_countlist(self):
temp, inputtemp, fc = [], [], 'featurecounts'
if fc not in self.__pathdict or fc not in self.__labeldict: return [], []
flabkeys, fpathkeys = self.__labeldict[fc].keys(), self.__pathdict[fc].keys()
if 'featureCounts_stats' in flabkeys and 'featureCounts_stats' in fpathkeys:
temp.append('{0} information about how many fragments align to features'.format(self.get_labelstring(self.__labeldict[fc]['featureCounts_stats'], 'table')))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[fc]['featureCounts_stats']])
if 'isoformcomplexity_complete' in flabkeys and 'isoformcomplexity_complete' in fpathkeys:
temp.append('{0} how many isoforms are seen by increasing the sequencing depth'.format(self.get_labelstring(self.__labeldict[fc]['isoformcomplexity_complete'], 'figure')))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[fc]['isoformcomplexity_complete']])
if 'isoformcomplexity_added' in flabkeys and 'isoformcomplexity_added' in fpathkeys:
temp.append('{0} how many new features are seen per subset'.format(self.get_labelstring(self.__labeldict[fc]['isoformcomplexity_added'], 'figure')))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[fc]['isoformcomplexity_added']])
return temp, inputtemp
'''
method takes the short caption string and appends a detype-specific string
@param inputstring: string
@param detype: de analysis identifier
@return: string
'''
def prep_shortcap(self, inputstring, detype):
if detype.startswith('remove'):
temp = [i for i in detype[6:].split('-') if len(i) != 0]
temp = ', '.join(temp)
return '{1} ({0} removed)'.format(temp.replace('_','\_'), inputstring)
elif detype.startswith('add'):
temp = [i for i in detype[3:].split('-') if len(i) != 0]
temp = ', '.join(temp)
return '{1} ({0} added)'.format(temp.replace('_','\_'), inputstring)
elif detype.startswith('only'):
temp = [i for i in detype[4:].split('-') if len(i) != 0]
temp = ', '.join(temp)
return '{1} (only {0})'.format(temp.replace('_','\_'), inputstring)
elif detype.startswith('comb'):
temp = [i for i in detype[4:].split('-') if len(i) != 0]
temp = ', '.join(temp)
return '{1} ({0} combined)'.format(temp.replace('_','\_'), inputstring)
elif detype in (self.__destringtuple):
return inputstring.replace('_', '\_')
temp = ' '.join([i for i in detype.split('-') if len(i) != 0])
return '{1} ({0})'.format(temp, inputstring.replace('_', '\_'))
def build_deexplorelist(self, detype):
temp, inputtemp = [], []
dlabkeys, dpathkeys = self.__labeldict[detype].keys(), self.__pathdict[detype].keys()
if 'plotcounts' in dlabkeys and 'plotcounts' in dpathkeys:
temp.append('{0} the normalised Counts for {1}'.format(self.get_labelstring(self.__labeldict[detype]['plotcounts'], 'figure'), ', '.join(self.get_labelinformation(self.__labeldict[detype]['plotcounts']))))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[detype]['plotcounts']])
if 'pearson_plot' in dlabkeys and 'pearson_plot' in dpathkeys:
temp.append('{0} the heatmap of the Pearson correlation'.format(self.get_labelstring(self.__labeldict[detype]['pearson_plot'], 'figure')))
temp.append(("it's a pairwise comparison between samples and uses the normalised fragment counts as distance between genes", ))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[detype]['pearson_plot']])
if 'spearman_plot' in dlabkeys and 'spearman_plot' in dpathkeys:
temp.append('{0} the heatmap of the Spearman correlation'.format(self.get_labelstring(self.__labeldict[detype]['spearman_plot'], 'figure')))
temp.append(("it's a pairwise comparison that uses the normalised fragment counts to create a ranking for the genes which is then compared between samples", ))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[detype]['spearman_plot']])
if 'heatmap_plot' in dlabkeys and 'heatmap_plot' in dpathkeys:
temp.append('{0} the heatmap of the sample to sample distance calculated by using Euclidean distance'.format(self.get_labelstring(self.__labeldict[detype]['heatmap_plot'], 'figure')))
temp.append(("the calculation ignores the conditions and treats all samples as if they were replicates of the same condition", "it's calculated based on the normalised, transformed counts"))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[detype]['heatmap_plot']])
if 'heatmappoisson_plot' in dlabkeys and 'heatmappoisson_plot' in dpathkeys:
temp.append('{0} the heatmap of the sample to sample distance calculated by using Poisson distance'.format(self.get_labelstring(self.__labeldict[detype]['heatmappoisson_plot'], 'figure')))
temp.append(("it's a measure of dissimilarity between counts and takes the inherent variance structure of counts into consideration when calculating the distances between samples", "all samples are treated as if they were replicates of the same condition", "calculation is based on the raw counts"))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[detype]['heatmappoisson_plot']])
if 'pca_1-2' in dlabkeys and 'pca_1-2' in dpathkeys:
temp.append('{0} the principal component analysis (PCA) of the samples'.format(self.get_labelstring(self.__labeldict[detype]['pca_1-2'], 'figure')))
temp.append(("it visualises sample to sample distances onto a 2D plane", "Principal Component 1 (PC1) is the direction that separates the samples the most; PC2 is the second"))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[detype]['pca_1-2']])
if 'batch_1-2' in dlabkeys and 'batch_1-2' in dpathkeys:
temp.append('{0} the principal component analysis (PCA) of the samples after batch correction'.format(self.get_labelstring(self.__labeldict[detype]['batch_1-2'], 'figure')))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[detype]['batch_1-2']])
if 'pca_2-3' in dlabkeys and 'pca_2-3' in dpathkeys:
temp.append('{0} the principal component analysis (PCA) of the samples for PC2 and PC3'.format(self.get_labelstring(self.__labeldict[detype]['pca_2-3'], 'figure')))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[detype]['pca_2-3']])
if 'mds_plot' in dlabkeys and 'mds_plot' in dpathkeys:
temp.append('{0} the multidimensional scaling (MDS) plot using Euclidean distance'.format(self.get_labelstring(self.__labeldict[detype]['mds_plot'], 'figure')))
temp.append(("it visualises sample to sample distances on a 2D plane", "the x-axis is the direction that separates the samples the most; the y-axis the second most"))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[detype]['mds_plot']])
if 'poisson_plot' in dlabkeys and 'poisson_plot' in dpathkeys:
temp.append('{0} the multidimensional scaling (MDS) plot using the Poisson distance'.format(self.get_labelstring(self.__labeldict[detype]['poisson_plot'], 'figure')))
temp.append(("it visualises sample to sample distances on a 2D plane", "the x-axis is the direction that separates the samples the most; the y-axis the second most"))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[detype]['poisson_plot']])
if 'varGenes_plot' in dlabkeys and 'varGenes_plot' in dpathkeys:
temp.append('{0} the heatmap of genes with the highest variance across samples'.format(self.get_labelstring(self.__labeldict[detype]['varGenes_plot'], 'figure')))
temp.append(("the plot shows the amount by which each gene deviates in a specific sample from the gene's average across all samples", ))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[detype]['varGenes_plot']])
if 'highExpressedGenes_plot' in dlabkeys and 'highExpressedGenes_plot' in dpathkeys:
temp.append('{0} the heatmap of genes with the highest mean expression strength across samples'.format(self.get_labelstring(self.__labeldict[detype]['highExpressedGenes_plot'], 'figure')))
inputtemp.extend([Tex.get_inputstring(self.get_substring_fromindex(basename(i), 0), self.__tex) for i in self.__pathdict[detype]['highExpressedGenes_plot']])
return temp, inputtemp
'''
builds the condition tex table for a de analysis id. adds the label of the table to
the labeldict variable and appends it to the newlabels list
@param detype: de analysis id
@return: list ... list of strings
'''
def build_conditiontable(self, detype):
condlist = self.__conditiondict[detype]
if len(condlist) == 0: return []
temp, collength = [], len(condlist[0])
# prep table
shortcap = self.prep_shortcap('Condition table', detype)
tabular = '|'.join(['l']*collength)
label = Tex.get_replaced_label(self.__bfxid, '_'.join((self.__conditionlabel, detype, '1')), 'tab')
temp.extend((Tex.TABLESTART, Tex.get_replaced_caption(self.__bfxid, shortcap, '{0}. {1}'.format(shortcap, Information.CAPTION_CONDTABLE)), Tex.BEGINCENTER, Tex.get_replaced_tabularstart(tabular), Tex.HLINE))
temp.append(Tex.get_tableheader(condlist[0]))
temp.extend([Tex.get_tablerowstring(i) for i in sorted(condlist[1:], key = itemgetter(0))])
temp.extend([Tex.TABULAREND, Tex.ENDCENTER, label, Tex.TABLEEND])
# add new label to a list and to label dict
self.__newlabels.append('\t'.join((self.__conditionlabel, detype, label[7:].rstrip('}\n'), '1', '', 'de'))+'\n')
self.__labeldict[detype][self.__conditionlabel] = set([(label[7:].rstrip('}\n'), 1)])
return temp
def build_detable(self, detype):
delist = self.__deresultsdict[detype]
for i, entry in enumerate(delist):
delist[i][2] = entry[2].replace('~', '$\sim$')
delist[i][0] = self.get_underlinereplace(entry[0])
delist[i][1] = self.get_underlinereplace(entry[1])
delist[i][2] = self.get_underlinereplace(entry[2])
if len(delist) == 0: return []
temp = []
# prep table
shortcap = self.prep_shortcap('Overview DE genes', detype)
tabular = 'c|c|c|r|r|r|r'
label = Tex.get_replaced_label(self.__bfxid, '_'.join((self.__resultlabel, detype, '1')), 'tab')
temp.extend((Tex.TABLESTART, Tex.get_replaced_caption(self.__bfxid, shortcap, '{0}. {1}'.format(shortcap, Information.CAPTION_DETABLE)), Tex.BEGINCENTER, Tex.get_replaced_tabularstart(tabular), Tex.HLINE))
temp.append(Tex.RESULTHEAD)
temp.extend([Tex.get_tablerowstring(i[:7]) for i in delist])
temp.extend([Tex.TABULAREND, Tex.ENDCENTER, label, Tex.TABLEEND])
# add new label to a list and to label dict
self.__newlabels.append('\t'.join((self.__resultlabel, detype, label[7:].rstrip('}\n'), '1', '', 'de'))+'\n')
self.__labeldict[detype][self.__resultlabel] = set([(label[7:].rstrip('}\n'), 1)])
return temp
def build_deresultlist(self, detype):
temp, inputtemp, dehelp = [], [], ''
deordlist = [i for i in self.__deorder if detype in i]
if len(deordlist) == 1: dehelp = deordlist[0][0]
labkeys = self.__labeldict[detype]
if self.__resultlabel in labkeys:
temp.append('{0} an overview of the number of DE genes for each comparison'.format(self.get_labelstring(self.__labeldict[detype][self.__resultlabel], 'table')))
inputtemp.append(Tex.get_inputstring(self.__pathdict[detype][self.__resultlabel][0][:-4], self.__tex))
comments = set([(i[0], i[1], i[2], i[7], i[8]) for i in self.__deresultsdict[detype]])
for i in comments:
if i[4] != '': temp.append('comment for comparison \\textquoteleft{{}}{0}\\textquoteright~of factor~\\textquoteleft{{}}{1}\\textquoteright~and formula~\\textquoteleft{{}}{2}\\textquoteright: {3}'.format(i[0], i[1], i[2], self.get_underlinereplace(i[4])))
if 'maplot' in labkeys:
temp.append('{0} the MA plot for the DE comparisons'.format(self.get_labelstring(self.__labeldict[detype]['maplot'], 'figure')))
inputtemp.extend([Tex.get_inputstring(i[:-4], self.__tex) for i in self.__pathdict[detype]['maplot']])
if 'condition_table' in labkeys:
temp.append('{0} an overview of the assignment of samples to factors, their corresponding levels and if samples were used within a factor (none means this sample was excluded from factor and the comparison)'.format(self.get_labelstring(self.__labeldict[detype]['condition_table'], 'table')))
inputtemp.extend([Tex.get_inputstring(i[:-4], self.__tex) for i in self.__pathdict[detype]['condition_table']])
elif dehelp != '':
temp.append('{0} an overview of the assignment of samples to factors, their corresponding levels and if samples were used within a factor (none means this sample was excluded from factor and the comparison)'.format(self.get_labelstring(self.__labeldict[dehelp]['condition_table'], 'table')))
return temp, inputtemp
def main(self):
lol = self.query_database()
labsampletab = self.build_libtable(lol)
write_list(labsampletab, pathjoin(self.__report, self.__samplefilename))
self.__pathdict[self.__sampletag][self.__samplelabel] = [self.__samplefilename]
self.read_labelfile()
self.read_defolders()
self.read_build_toollist()
self.register_texfiles()
self.__maindocument.extend((Tex.get_replaced_reporthead(self.__bfxid), Tex.get_replaced_label(self.__bfxid, '', 'ch'), '\n'))
self.__maindocument.append(Tex.get_replaced_section(Tex.DESCRIPTION))
if len(labsampletab) > 1:
self.__maindocument.extend(Tex.build_itemize(('{0} the sample overview'.format(self.get_labelstring(self.__labeldict[self.__sampletag][self.__samplelabel], 'table')),)))
self.__maindocument.extend(('\n', Tex.get_inputstring(self.__samplefilename[:-4], self.__tex), '\n'))
else:
self.__maindocument.extend(Tex.build_itemize())
self.__maindocument.append(Tex.get_replaced_section(Tex.QUESTION)) # add section question
self.__maindocument.extend(Tex.build_itemize())
# references and tools section
self.__maindocument.extend(('\n', Tex.get_replaced_section(Tex.TOOLS))) # add section tools
self.__maindocument.extend(Tex.build_itemize(self.__toollist))
# alignment statistics section
texlist, inputlist = self.build_tex_input_alignment() # build tex and inputlist for alignment section
if len(texlist) != 0: # add section alignment and quality
self.__maindocument.extend(('\n', Tex.get_replaced_section(Tex.ALIGNMENT)))
self.__maindocument.extend(Tex.build_itemize(texlist))
self.__maindocument.extend(('\n', Tex.get_replaced_subsection(Tex.COMMENT))) # add subsection comment
self.__maindocument.extend(Tex.build_itemize())
self.__maindocument.append('\n')
self.__maindocument.extend(inputlist)
# fragment count section
texlist, inputlist = self.build_countlist() # build tex and inputlist for count section
if len(texlist) != 0: # add fragment/gene counts
self.__maindocument.extend(('\n', Tex.get_replaced_section(Tex.COUNT)))
self.__maindocument.extend(Tex.build_itemize(texlist))
self.__maindocument.extend(('\n', Tex.get_replaced_subsection(Tex.COMMENT))) # add subsection comment
self.__maindocument.extend(Tex.build_itemize())
self.__maindocument.append('\n')
self.__maindocument.extend(inputlist)
# check if exploratory analysis exists
if 'de-expl' in self.__atypedict['de'] and 'de-expl' in self.__pathdict:
texlist, inputlist = self.build_deexplorelist('de-expl')
if len(texlist) != 0:
self.__maindocument.extend(('\n', Tex.get_replaced_section(Tex.DEEXPL)))
self.__maindocument.extend(Tex.build_itemize(texlist))
self.__maindocument.extend(('\n', Tex.get_replaced_subsection(Tex.COMMENT)))
self.__maindocument.extend(Tex.build_itemize())
self.__maindocument.append('\n')
self.__maindocument.extend(inputlist)
# de analyses section
dekeys = [item for sublist in self.__deorder for item in sublist]
for detype in dekeys:
self.__maindocument.extend(('\n', Tex.get_replaced_section(self.prep_shortcap(Tex.DEHEAD, detype))))
texlist, inputlist = self.build_deexplorelist(detype)
if len(texlist) != 0:
self.__maindocument.append(Tex.get_replaced_subsection(Tex.DEEXPL))
self.__maindocument.extend(Tex.build_itemize(texlist))
self.__maindocument.extend(('\n', Tex.get_replaced_subsubsection(Tex.COMMENT)))
self.__maindocument.extend(Tex.build_itemize())
self.__maindocument.append('\n')
self.__maindocument.extend(inputlist)
self.__maindocument.append('\n')
condtextab = self.build_conditiontable(detype)
if len(condtextab) != 0:
write_list(condtextab, pathjoin(self.__report, self.__conditionfilename.replace('COND', detype)))
self.__pathdict[detype][self.__conditionlabel] = [self.__conditionfilename.replace('COND', detype)]
resulttab = self.build_detable(detype)
if len(resulttab) != 0:
write_list(resulttab, pathjoin(self.__report, self.__resultfilename.replace('COND', detype)))
self.__pathdict[detype][self.__resultlabel] = [self.__resultfilename.replace('COND', detype)]
texlist, inputlist = self.build_deresultlist(detype)
self.__maindocument.append(Tex.get_replaced_subsection(Tex.DERESULTS))
self.__maindocument.extend(Tex.build_itemize(texlist))
self.__maindocument.extend(('\n', Tex.get_replaced_subsubsection(Tex.COMMENT)))
self.__maindocument.extend(Tex.build_itemize())
self.__maindocument.append('\n')
self.__maindocument.extend(inputlist)
if len(self.__newlabels) != 0:
self.show_log('info', 'New labels are added to {0}'.format(self.__labelfile))
write_list(self.__newlabels, self.__labelfile, 'a')
self.show_log('info', 'Main tex file is {0}'.format(self.__reportfile))
write_list(self.__maindocument, self.__reportfile)
chmod(self.__reportfile, 0664)
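# Illustrative sketch (not part of the original script): the cross-reference wording
# produced by Report.get_labelstring, reduced to a standalone helper. It assumes the
# labels argument is an iterable of (labelname, count, ...) tuples as stored in the
# label dictionary; the label name in the usage comment below is hypothetical.
def _demo_labelstring(labels, what):
    refs = ['~\\vref{{{0}}}'.format(i[0]) for i in sorted(labels, key=lambda t: t[1])]
    if what == 'figure':
        return ('figure{0} shows' if len(refs) == 1 else 'figures{0} show').format(','.join(refs))
    if what == 'table':
        return ('table{0} provides' if len(refs) == 1 else 'tables{0} provide').format(','.join(refs))
    return ''
# e.g. _demo_labelstring([('some_sample_table_label', 1)], 'table')
# -> 'table~\vref{some_sample_table_label} provides'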
if __name__ == '__main__':
mainlog = MainLogger()
parser = Parser()
parser.main()
report = Report(parser.bfx, parser.report, parser.tex, parser.label, parser.versionfile, parser.deresults, parser.ensembl, parser.species)
report.main()
mainlog.close()
logging.shutdown()
|
|
#!/usr/bin/env python
#Copyright (C) 2006-2011 by Benedict Paten ([email protected])
#
#Released under the MIT license, see LICENSE.txt
import sys
import os
import re
import bp.common.maths.Maths as Maths
SUBTRACT_MAX = 10000000
class StateMachine:
GAP = 0
MATCH = 1
def __init__(self):
self.__stateNo = 0
self.__names = {}
self.__types = [[], []]
self.__des = []
self.__outT = []
def stateNo(self):
return self.__stateNo
def addState(self, name, de, type):
self.__names[name] = self.__stateNo
self.__types[type].append(self.__stateNo)
self.__outT.append([])
self.__des.append(de)
self.__stateNo += 1
def addTransition(self, nameFrom, nameTo, t):
i = self.__names[nameFrom]
j = self.__names[nameTo]
self.__outT[i].append((j, t))
def fromToFn(self, fromType, toType, action):
actions = {}
for j in self.__types[fromType]:
for t in self.__outT[j]:
k = t[0]
if k in self.__types[toType]:
if actions.has_key(k):
actions[k].append((j, t[1]))
else:
actions[k] = [(j, t[1])]
actions = [ (i, actions[i]) for i in actions.keys() ]
def fn(s, sI, s2, sI2, *args):
for i in actions:
j = i[0]
d = self.__des[j](*args)
for k in i[1]:
l = k[0]
t = k[1]
action(s, sI, s2, sI2, l, j, t(*args), d, *args)
return fn
def getMatchStates(self):
return self.__types[self.MATCH]
def getStateTypes(self):
i = [self.GAP]*self.stateNo()
for j in self.getMatchStates():
i[j] = self.MATCH
return i
def getFns(self, action):
fn = {}
for i in self.GAP, self.MATCH:
for j in self.GAP, self.MATCH:
fn[(i, j)] = self.fromToFn(i, j, action)
return fn
def mapEmissions(self, fn):
self.__des = [ fn(i) for i in self.__des ]
def fTransition(stateNo):
def fTransition(s, sI, s2, sI2, i, j, t, de, *args):
s2[sI2 + j + stateNo] = Maths.logAdd(s2[sI2 + j + stateNo], s[sI + i + stateNo] + t + de)
return fTransition
def bTransition(s, sI, s2, sI2, i, j, t, de, *args):
s[sI + i] = Maths.logAdd(s[sI + i], s2[sI2 + j] + t + de)
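# Illustrative sketch (not part of the original module): the transition updates above
# accumulate probabilities in log space via Maths.logAdd, which is assumed to compute
# log(exp(a) + exp(b)) stably. A plain-Python equivalent of that accumulation step:
def _demo_log_add(a, b):
    import math
    # keep the larger value outside the exp to avoid underflow
    if a < b:
        a, b = b, a
    if b == float('-inf'):
        return a
    return a + math.log1p(math.exp(b - a))
# _demo_log_add(math.log(0.2), math.log(0.3)) is approximately math.log(0.5)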
class Rescale:
def __init__(self, startDiagonal, stateNo):
self.__list = [0]*2
self.__sD = startDiagonal-2
self.__stateNo = stateNo
def addNewScale(self, states, adP, ad):
scale = Maths.NEG_INFINITY
for i in xrange(adP.yS(ad), adP.yS(ad+1)):
s = adP.e[i]
scale = Maths.logAdd(scale, \
logSum(states[s+self.__stateNo:s+2*self.__stateNo]))
i = (adP.yS(ad+1) - adP.yS(ad))*self.__stateNo
if i == 0:
i = 1
i = Maths.log(i)
if scale == Maths.NEG_INFINITY:
scale = i
#print "________________________________________scale ", i, scale, int(scale - i)
#scale = i
self.__list.append(self.__list[-1] + int(scale - i))
def rescale(self, x1, y1, x2, y2):
return self.__list[x2 + y2 - self.__sD] - self.__list[x1 + y1 - self.__sD]
def rescaleFn(self, fn):
def fn2(*args):
return fn(*args) - self.rescale(*args)
return fn2
class BTransitionAndTotalReCalculator:
def __init__(self, totalFn, startDiagonal, endDiagonal, interval, stateNo):
self.__totalFn = totalFn
self.__stateNo = stateNo
self.__list = []
i = startDiagonal + interval
while(i < endDiagonal):
self.__list.append([i, Maths.NEG_INFINITY])
i += interval
def bTransition(self, s, sI, s2, sI2, i, j, t, de, x1, y1, x2, y2):
sD = x1 + y1
k = s2[sI2 + j] + t + de
for m in xrange(len(self.__list)-1, -1, -1):
l = self.__list[m]
if sD < l[0]:
if x2 + y2 < l[0]:
self.__list.pop()
#print "new total ", l[1]
self.__totalFn(l[1])
else:
l[1] = Maths.logAdd(l[1], s[sI + i + self.__stateNo] + k)
else:
break
s[sI + i] = Maths.logAdd(s[sI + i], k)
class PosteriorProbs:
def __init__(self, pairs, stateNo):
self.__pairs = pairs
self.__current = 100
self.__pa = []
self.__x = 0
self.__y = 0
self.__total = Maths.NEG_INFINITY
self.__stateNo = stateNo
def diagBuilder(self, x1, y1):
self.__x = x1
self.__y = y1
if self.__current != 100:
self.__pa.append(self.__current)
self.__current = Maths.NEG_INFINITY
def diagStart(self, x1, y1):
self.__current = 100
self.__pa = []
self.__x = x1
self.__y = y1
self.__current = Maths.NEG_INFINITY
def diagEnd(self):
if self.__current != 100:
self.__pa.append(self.__current)
j = 0.0
self.__pa.reverse()
for i in self.__pa:
j += Maths.exp(i)
self.__pairs[(self.__x, \
self.__y)] += j
self.__x += 1
self.__y += 1
def total(self, total):
self.__total = total
def bTransition(self, s, sI, s2, sI2, i, j, t, de, *args):
self.__current = \
Maths.logAdd(self.__current,
s2[sI2 + j] + t + de + s[sI + self.__stateNo + i] - self.__total)
class SparseArray:
def __init__(self, start, end, points):
"""
inclusive coordinates
"""
if end < start:
raise IndexError("%s, %s" % (start, end))
self.__yS = []
self.y = []
self.e = []
points.sort()
self.y = [i[1] for i in points]
self.e = [i[2] for i in points]
self.__fI = p = start
for i in range(0, len(points)):
j = points[i][0]
if j < start:
raise IndexError("%s %s %s" % (start, end, j))
while(p <= j):
self.__yS.append(i)
p += 1
if p > end + 1:
raise IndexError("%s %s %s" % (start, end, j))
while(p <= end+1):
self.__yS.append(len(points))
p += 1
def firstXIndex(self):
return self.__fI
def xLength(self):
return len(self.__yS)-1
def yS(self, x):
try:
return self.__yS[x - self.__fI]
except IndexError:
raise IndexError("%s %s %s" % (x, self.firstXIndex(), self.xLength()))
#diagBuilder add gap
#diagReset
#gap to match - diagGap
#match to match - diagMatch
#match to gap - gap
#gap to gap - gapGap
#position calculator
def sparseAlign(startStates, points, gapPointsBL,
gapPointsTR, endStates, stateNo,
gap, gapGap,
diagStart,
diagBuilder,
diagEnd,
diagGap, diagMatch,
gapR, gapGapR,
diagStartR,
diagBuilderR,
diagEndR,
diagGapR, diagMatchR,
totalReporter,
rescale,
diagTraceBackLimit):
"""
only the match states must have positive start and finish values
"""
points.sort()
points = [(points[j][0], points[j][1], j*stateNo*2) \
for j in xrange(0, len(points))]
startX, startY = points[0][0:2]
endX, endY = points[len(points)-1][0:2]
pH = {}
for i in points:
pH[(i[0]-1, i[1]-1)] = i[2]
i = [len(points)*stateNo*2]
def fn(p):
l = []
for j in p:
if pH.has_key((j[0], j[1])):
l.append((j[0], j[1], pH[(j[0], j[1])]))
else:
l.append((j[0], j[1], i[0]))
pH[(j[0], j[1])] = i[0]
i[0] += stateNo*2
return l
gapPointsBL = fn(gapPointsBL)
gapPointsTR = fn(gapPointsTR)
states = [Maths.NEG_INFINITY]*i[0]
pointsH = {}
for i in points:
pointsH[(i[0], i[1])] = i[2]
def flip(p):
return [(i[1], i[0], i[2]) for i in p]
def dc(p):
return [((i[0] - i[1]), (i[0] + i[1]), i[2]) for i in p]
xP = SparseArray(startX, endX, points)
yP = SparseArray(startY, endY, flip(points))
p = dc(points)
dP = SparseArray(startX - endY, endX - startY, p)
adP = SparseArray(startX + startY, endX + endY, flip(p))
fn = lambda j : [ i for i in j if not pointsH.has_key((i[0]+1, i[1]+1)) ]
yBLP = SparseArray(startY, endY, flip(gapPointsBL))
adBLP = SparseArray(startX + startY, endX + endY,\
flip(dc(fn(gapPointsBL))))
xTRP = SparseArray(startX, endX, gapPointsTR)
adTRP = SparseArray(startX + startY, endX + endY,\
flip(dc(fn(gapPointsTR))))
i = stateNo*2*(len(points)-1)
states[stateNo:stateNo*2] = startStates
states[i:i+stateNo] = endStates
def fIterator():
for i in xrange(adP.firstXIndex()+1, adP.firstXIndex() \
+ adP.xLength()):
rescale.addNewScale(states, adP, i-1)
yield i
forwardRecursion(xP, yP, dP, adP,
yBLP, adBLP, xTRP, adTRP,
states,
gap, gapGap,
diagStart,
diagBuilder,
diagEnd,
diagGap, diagMatch,
fIterator(),
diagTraceBackLimit)
i += stateNo
totalReporter(logSum([ states[i + j] + endStates[j] for j in range(0, stateNo)]))
forwardRecursion(xP, yP, dP, adP,
yBLP, adBLP, xTRP, adTRP,
states,
gapR, gapGapR,
diagStartR,
diagBuilderR,
diagEndR,
diagGapR, diagMatchR,
xrange(adP.firstXIndex() \
+ adP.xLength()-1, adP.firstXIndex(), -1),
diagTraceBackLimit)
def doPoint(p, x, y, s, states, gapFn):
#for i in xrange(p.yS(x+1)-1, p.yS(x)-1, -1):
for i in xrange(p.yS(x), p.yS(x+1)):
y2 = p.y[i]
if y2 >= y:
break
s2 = p.e[i]
gapFn(states, s2, states, s, x, y2, x, y)
def doPointR(p, x, y, s, states, gapFn):
#for i in xrange(p.yS(x+1)-1, p.yS(x)-1, -1):
for i in xrange(p.yS(x), p.yS(x+1)):
y2 = p.y[i]
if y2 >= y:
break
s2 = p.e[i]
gapFn(states, s2, states, s, y2, x, y, x)
def logSum(s):
f = s[0]
for i in s[1:]:
f = Maths.logAdd(f, i)
return f
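# Illustrative note (not part of the original module): forwardRecursion below iterates
# over anti-diagonals. With d = x - y and ad = x + y (the dc() transform used inside
# sparseAlign), the original cell is recovered exactly as in the loops that follow:
# x = (ad + d) / 2 and y = ad - x.
def _demo_diagonal_coords(x, y):
    d, ad = x - y, x + y
    assert (ad + d) // 2 == x and ad - (ad + d) // 2 == y
    return d, ad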
def forwardRecursion(xP, yP, dP, adP,
yBLP, adBLP, xTRP, adTRP,
states,
gap, gapGap,
diagStart, diagBuilder,
diagEnd,
diagGap, diagMatch,
iterator,
diagTraceBackLimit):
for ad in iterator:
for i in xrange(adBLP.yS(ad), adBLP.yS(ad+1)):
d = adBLP.y[i]
s = adBLP.e[i]
x = (ad+d)/2
y = ad-x
doPoint(xP, x, y, s, states, gap)
for i in xrange(adTRP.yS(ad), adTRP.yS(ad+1)):
d = adTRP.y[i]
s = adTRP.e[i]
x = (ad+d)/2
y = ad-x
doPointR(yP, y, x, s, states, gap)
if ad + 2 < adP.firstXIndex() \
+ adP.xLength():
for i in xrange(adP.yS(ad+2), adP.yS(ad+3)):
d = adP.y[i]
s = adP.e[i]
x = (ad+2+d)/2
y = ad+2-x
doPoint(xP, x-1, y-1, s, states, gap)
doPointR(yP, y-1, x-1, s, states, gap)
doPoint(xTRP, x-1, y-1, s, states, gapGap)
doPointR(yBLP, y-1, x-1, s, states, gapGap)
for i in xrange(adP.yS(ad), adP.yS(ad+1)):
d = adP.y[i]
s = adP.e[i]
x = (ad+d)/2
y = ad-x
diagStart(x, y)
j = dP.yS(d) + dP.y[dP.yS(d):dP.yS(d+1)].index(ad)
k = 1
s2 = s
lim = dP.yS(d)-1
if j-1-lim > diagTraceBackLimit:
lim = j-1-diagTraceBackLimit
for j in xrange(j-1, lim, -1):
ad2 = dP.y[j]
if ad2 < ad - 2*k:
break
s3 = dP.e[j]
diagMatch(states, s3, states, s)
diagGap(states, s2, states, s)
diagBuilder(x-k, y-k)
s2 = s3
k += 1
diagGap(states, s2, states, s)
diagEnd()
def main():
pass
def _test():
import doctest
return doctest.testmod()
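# Illustrative sketch (not part of the original module): minimal use of the StateMachine
# API defined above. Emission ("de") and transition entries are callables of the alignment
# arguments, so the constants below are wrapped in lambdas; the probabilities are made up
# purely for the example.
def _demo_state_machine():
    sm = StateMachine()
    sm.addState('M', lambda *args: 0.0, StateMachine.MATCH)
    sm.addState('G', lambda *args: 0.0, StateMachine.GAP)
    sm.addTransition('M', 'M', lambda *args: Maths.log(0.9))
    sm.addTransition('M', 'G', lambda *args: Maths.log(0.1))
    sm.addTransition('G', 'M', lambda *args: Maths.log(0.5))
    assert sm.stateNo() == 2
    assert sm.getStateTypes() == [StateMachine.MATCH, StateMachine.GAP]
    return sm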
if __name__ == '__main__':
_test()
main()
|
|
import unittest
from meerkat_abacus import model
from meerkat_abacus.codes.to_codes import to_code
from meerkat_abacus.codes.variable import Variable
from geoalchemy2.shape import from_shape
from shapely.geometry import Polygon
# Data for the tests
class ToCodeTest(unittest.TestCase):
"""
Test the to_code functionality
"""
def setUp(self):
locations = {1: model.Locations(name="Demo", id=1),
2: model.Locations(
name="Region 1", parent_location=1, id=2),
3: model.Locations(
name="Region 2", parent_location=1, id=3),
4: model.Locations(
name="District 1", parent_location=2,
level="district", id=4,
area=from_shape(Polygon([(0, 0), (0, 0.4), (0.2, 0.4),
(0.2, 0), (0, 0)]))
),
5: model.Locations(
name="District 2", parent_location=3,
level="district", id=5,
area=from_shape(Polygon([(0.2, 0.4), (0.4, 0.4), (0.4, 0),
(0.2, 0), (0.2, 0.4)]))),
6: model.Locations(
name="Clinic 1", parent_location=4, id=6),
7: model.Locations(
name="Clinic 2", parent_location=5, id=7),
8: model.Locations(
name="Clinic with no district", parent_location=2, id=8)}
locations_by_deviceid = {"1": 6, "2": 7, "3": 8}
zones = []
regions = [2, 3]
districts = [4, 5]
agg_variables = [
model.AggregationVariables(
id=1, method="not_null", db_column="index", condition="",
category=[],
form="form1"),
model.AggregationVariables(
id=2,
method="match",
db_column="column1",
alert=1,
category=[],
alert_type="individual",
condition="A",
form="form1"),
model.AggregationVariables(
id=3,
category=[],
method="sub_match",
db_column="column2",
condition="B",
form="form1"),
model.AggregationVariables(
id=4,
category=[],
method="between",
calculation="column3",
db_column="column3",
condition="5,10",
disregard=1,
form="form1")
]
self.alert_data = {"form1": {"column1": "column1"}}
devices = {"1": [], "2": [], "3": [], "4": [], "5": [],
"6": [], "7": [], "8": []}
self.all_locations = (locations, locations_by_deviceid, zones, regions, districts, devices)
self.variables = {"case": {1: {}, 2: {}, 3: {}, 4: {}}}
self.variables_forms = {}
self.variables_test = {}
self.variables_groups = {}
self.mul_forms = []
for av in agg_variables:
self.variables["case"][av.id][av.id] = Variable(av)
self.variables_forms[av.id] = "form1"
self.variables_test[av.id] = self.variables["case"][av.id][av.id].test
self.variables_groups[av.id] = [av.id]
def tearDown(self):
pass
def test_location_information(self):
"""
Testing that all the location information is translated correctly
"""
row = {"form1":
{"index": 1,
"column1": "A",
"column2": "B34",
"column3": "7",
"date": "2015-10-25",
"deviceid": "1",
"meta/instanceID": "a"},
"original_form": "form1"}
var, category, ret_location, disregarded = to_code(
row,
(self.variables, self.variables_forms, self.variables_test, self.variables_groups, {}),
self.all_locations, "case", self.alert_data, self.mul_forms, "deviceid")
self.assertEqual(ret_location["country"], 1)
self.assertEqual(ret_location["region"], 2)
self.assertEqual(ret_location["district"], 4)
self.assertEqual(ret_location["clinic"], 6)
self.assertEqual(ret_location["device_id"], '1')
row["form1"]["deviceid"] = "2"
var, category, ret_location, disregard = to_code(
row,
(self.variables, self.variables_forms, self.variables_test, self.variables_groups, {}),
self.all_locations, "case", self.alert_data, self.mul_forms, "deviceid")
self.assertEqual(ret_location["country"], 1)
self.assertEqual(ret_location["region"], 3)
self.assertEqual(ret_location["district"], 5)
self.assertEqual(ret_location["device_id"], '2')
row["form1"]["deviceid"] = "3"
var, category, ret_location, disregard = to_code(
row,
(self.variables, self.variables_forms, self.variables_test, self.variables_groups, {}),
self.all_locations, "case", self.alert_data, self.mul_forms, "deviceid")
self.assertEqual(ret_location["country"], 1)
self.assertEqual(ret_location["region"], 2)
self.assertEqual(ret_location["district"], None)
self.assertEqual(ret_location["device_id"], '3')
row["form1"]["deviceid"] = "99"
var, category, ret_location, disregard = to_code(
row,
(self.variables, self.variables_forms, self.variables_test, self.variables_groups, {}),
self.all_locations, "case", self.alert_data, self.mul_forms, "deviceid")
self.assertEqual(ret_location, None)
# Test gps in district
row = {"form1":
{"index": 1,
"column1": "A",
"column2": "B34",
"column3": "7",
"lat": "0.1",
"lng": "0.1",
"date": "2015-10-25",
"deviceid": "1",
"meta/instanceID": "a"},
"original_form": "form1"}
var, category, ret_location, disregard = to_code(
row,
(self.variables, self.variables_forms, self.variables_test, self.variables_groups, {}),
self.all_locations, "case", self.alert_data, self.mul_forms,
"in_geometry$lat,lng")
self.assertEqual(ret_location["district"], 4)
self.assertEqual(ret_location["region"], 2)
self.assertEqual(ret_location["clinic"], None)
row = {"form1":
{"index": 1,
"column1": "A",
"column2": "B34",
"column3": "7",
"lat": "0.3",
"lng": "0.1",
"date": "2015-10-25",
"deviceid": "1",
"meta/instanceID": "a"},
"original_form": "form1"}
var, category, ret_location, disregard = to_code(
row,
(self.variables, self.variables_forms, self.variables_test, self.variables_groups, {}),
self.all_locations, "case", self.alert_data, self.mul_forms,
"in_geometry$lat,lng")
self.assertEqual(ret_location["district"], 5)
self.assertEqual(ret_location["region"], 3)
self.assertEqual(ret_location["clinic"], None)
row = {"form1":
{"index": 1,
"column1": "A",
"column2": "B34",
"column3": "7",
"lat": "0.5",
"lng": "0.1",
"date": "2015-10-25",
"deviceid": "1",
"meta/instanceID": "a"},
"original_form": "form1"}
var, category, ret_location, disregard = to_code(
row,
(self.variables, self.variables_forms, self.variables_test, self.variables_groups, {}),
self.all_locations, "case", self.alert_data, self.mul_forms,
"in_geometry$lat,lng")
self.assertEqual(ret_location, None)
def test_variables(self):
"""
Checking that variables returned and alerts are working
"""
row1 = {"form1": {"index": 1,
"column1": "A",
"column2": "B34",
"column3": "7",
"date": "2015-10-25",
"deviceid": "1",
"meta/instanceID": "a"},
"original_form": "form1"}
row2 = {"form1": {"index": 2,
"column1": "B",
"column2": "A",
"column3": "4",
"date": "2015-10-25",
"deviceid": "2",
"meta/instanceID": "b"},
"original_form": "form1"}
row3 = {"form1": {"index": 1,
"column1": "A",
"column2": "C",
"column3": "7",
"date": "2015-10-25",
"deviceid": "2",
"meta/instanceID": "c"},
"original_form": "form1"}
var, category, ret_loc, disregard = to_code(
row1,
(self.variables, self.variables_forms, self.variables_test, self.variables_groups, {}),
self.all_locations, "case", self.alert_data, self.mul_forms, "deviceid")
self.assertEqual(var, {1: 1,
2: 1,
3: 1,
4: 1,
'alert_reason': 2,
'alert': 1,
'alert_type': "individual",
'alert_column1': 'A'})
self.assertEqual(disregard, True)
var, category, ret_loc, disregard = to_code(
row2,
(self.variables, self.variables_forms, self.variables_test, self.variables_groups, {}),
self.all_locations, "case", self.alert_data, self.mul_forms, "deviceid")
self.assertEqual(var, {1: 1})
self.assertEqual(disregard, False)
var, category, ret_loc, disregard = to_code(
row3,
(self.variables, self.variables_forms, self.variables_test, self.variables_groups, {}),
self.all_locations, "case", self.alert_data, self.mul_forms, "deviceid")
self.assertEqual(var, {1: 1,
2: 1,
4: 1,
'alert': 1,
'alert_column1': 'A',
"alert_type": "individual",
'alert_reason': 2})
self.assertEqual(disregard, True)
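# Illustrative sketch (not part of the test module): the "in_geometry$lat,lng" cases above
# presumably come down to a point-in-polygon test against each district's area. With the
# polygons from setUp, (lat, lng) = (0.1, 0.1) falls in District 1, (0.3, 0.1) in
# District 2 and (0.5, 0.1) in neither, matching the assertions in the tests.
def _demo_point_in_district():
    from shapely.geometry import Point, Polygon
    district1 = Polygon([(0, 0), (0, 0.4), (0.2, 0.4), (0.2, 0), (0, 0)])
    district2 = Polygon([(0.2, 0.4), (0.4, 0.4), (0.4, 0), (0.2, 0), (0.2, 0.4)])
    assert district1.contains(Point(0.1, 0.1))
    assert district2.contains(Point(0.3, 0.1))
    assert not district1.contains(Point(0.5, 0.1))
    assert not district2.contains(Point(0.5, 0.1))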
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Cronvar Plugin: The goal of this plugin is to provide an idempotent
# method for setting cron variable values. It should play well with the
# existing cron module as well as allow for manually added variables.
# Each variable entered will be preceded with a comment describing the
# variable so that it can be found later. This is required to be
# present in order for this plugin to find/modify the variable
# This module is based on the crontab module.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: cronvar
short_description: Manage variables in crontabs
description:
- Use this module to manage crontab variables.
- This module allows you to create, update, or delete cron variable definitions.
version_added: "2.0"
options:
name:
description:
- Name of the crontab variable.
type: str
required: yes
value:
description:
- The value to set this variable to.
- Required if C(state=present).
type: str
insertafter:
description:
- If specified, the variable will be inserted after the variable specified.
- Used with C(state=present).
type: str
insertbefore:
description:
- Used with C(state=present). If specified, the variable will be inserted
just before the variable specified.
type: str
state:
description:
- Whether to ensure that the variable is present or absent.
type: str
choices: [ absent, present ]
default: present
user:
description:
- The specific user whose crontab should be modified.
- This parameter defaults to C(root) when unset.
type: str
cron_file:
description:
- If specified, uses this file instead of an individual user's crontab.
- Without a leading C(/), this is assumed to be in I(/etc/cron.d).
- With a leading C(/), this is taken as absolute.
type: str
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the C(backup) variable by this module.
type: bool
default: no
requirements:
- cron
author:
- Doug Luce (@dougluce)
'''
EXAMPLES = r'''
- name: Ensure entry like "[email protected]" exists
cronvar:
name: EMAIL
value: [email protected]
- name: Ensure a variable does not exist. This may remove any variable named "LEGACY"
cronvar:
name: LEGACY
state: absent
- name: Add a variable to a file under /etc/cron.d
cronvar:
name: LOGFILE
value: /var/log/yum-autoupdate.log
user: root
cron_file: ansible_yum-autoupdate
'''
import os
import platform
import pwd
import re
import shlex
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import shlex_quote
class CronVarError(Exception):
pass
class CronVar(object):
"""
CronVar object to write variables to crontabs.
user - the user of the crontab (defaults to root)
cron_file - a cron file under /etc/cron.d
"""
def __init__(self, module, user=None, cron_file=None):
self.module = module
self.user = user
self.lines = None
self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',))
self.cron_cmd = self.module.get_bin_path('crontab', required=True)
if cron_file:
self.cron_file = ""
if os.path.isabs(cron_file):
self.cron_file = cron_file
else:
self.cron_file = os.path.join('/etc/cron.d', cron_file)
else:
self.cron_file = None
self.read()
def read(self):
# Read in the crontab from the system
self.lines = []
if self.cron_file:
# read the cronfile
try:
f = open(self.cron_file, 'r')
self.lines = f.read().splitlines()
f.close()
except IOError:
# cron file does not exist
return
except Exception:
raise CronVarError("Unexpected error:", sys.exc_info()[0])
else:
# using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
(rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
raise CronVarError("Unable to read crontab")
lines = out.splitlines()
count = 0
for l in lines:
if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l
) and not re.match(r'# \(/tmp/.*installed on.*\)', l) and not re.match(r'# \(.*version.*\)', l)):
self.lines.append(l)
count += 1
def log_message(self, message):
self.module.debug('ansible: "%s"' % message)
def write(self, backup_file=None):
"""
Write the crontab to the system. Saves all information.
"""
if backup_file:
fileh = open(backup_file, 'w')
elif self.cron_file:
fileh = open(self.cron_file, 'w')
else:
filed, path = tempfile.mkstemp(prefix='crontab')
fileh = os.fdopen(filed, 'w')
fileh.write(self.render())
fileh.close()
# return if making a backup
if backup_file:
return
# Add the entire crontab back to the user crontab
if not self.cron_file:
# quoting shell args for now but really this should be two non-shell calls. FIXME
(rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
os.unlink(path)
if rc != 0:
self.module.fail_json(msg=err)
def remove_variable_file(self):
try:
os.unlink(self.cron_file)
return True
except OSError:
# cron file does not exist
return False
except Exception:
raise CronVarError("Unexpected error:", sys.exc_info()[0])
def parse_for_var(self, line):
lexer = shlex.shlex(line)
lexer.wordchars = self.wordchars
varname = lexer.get_token()
is_env_var = lexer.get_token() == '='
value = ''.join(lexer)
if is_env_var:
return (varname, value)
raise CronVarError("Not a variable.")
def find_variable(self, name):
for l in self.lines:
try:
(varname, value) = self.parse_for_var(l)
if varname == name:
return value
except CronVarError:
pass
return None
def get_var_names(self):
var_names = []
for l in self.lines:
try:
(var_name, _) = self.parse_for_var(l)
var_names.append(var_name)
except CronVarError:
pass
return var_names
def add_variable(self, name, value, insertbefore, insertafter):
if insertbefore is None and insertafter is None:
# Add the variable to the top of the file.
self.lines.insert(0, "%s=%s" % (name, value))
else:
newlines = []
for l in self.lines:
try:
(varname, _) = self.parse_for_var(l) # Throws if not a var line
if varname == insertbefore:
newlines.append("%s=%s" % (name, value))
newlines.append(l)
elif varname == insertafter:
newlines.append(l)
newlines.append("%s=%s" % (name, value))
else:
raise CronVarError # Append.
except CronVarError:
newlines.append(l)
self.lines = newlines
def remove_variable(self, name):
self.update_variable(name, None, remove=True)
def update_variable(self, name, value, remove=False):
newlines = []
for l in self.lines:
try:
(varname, _) = self.parse_for_var(l) # Throws if not a var line
if varname != name:
raise CronVarError # Append.
if not remove:
newlines.append("%s=%s" % (name, value))
except CronVarError:
newlines.append(l)
self.lines = newlines
def render(self):
"""
Render a proper crontab
"""
result = '\n'.join(self.lines)
if result and result[-1] not in ['\n', '\r']:
result += '\n'
return result
def _read_user_execute(self):
"""
Returns the command line for reading a crontab
"""
user = ''
if self.user:
if platform.system() == 'SunOS':
return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
elif platform.system() == 'AIX':
return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
elif platform.system() == 'HP-UX':
return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % shlex_quote(self.user)
return "%s %s %s" % (self.cron_cmd, user, '-l')
def _write_execute(self, path):
"""
Return the command line for writing a crontab
"""
user = ''
if self.user:
if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
return "chown %s %s ; su '%s' -c '%s %s'" % (
shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % shlex_quote(self.user)
return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
# ==================================================
def main():
# The following example playbooks:
#
# - cronvar: name="SHELL" value="/bin/bash"
#
# - name: Set the email
# cronvar: name="EMAILTO" value="[email protected]"
#
# - name: Get rid of the old new host variable
# cronvar: name="NEW_HOST" state=absent
#
# Would produce:
# SHELL = /bin/bash
# EMAILTO = [email protected]
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
value=dict(type='str'),
user=dict(type='str'),
cron_file=dict(type='str'),
insertafter=dict(type='str'),
insertbefore=dict(type='str'),
state=dict(type='str', default='present', choices=['absent', 'present']),
backup=dict(type='bool', default=False),
),
mutually_exclusive=[['insertbefore', 'insertafter']],
supports_check_mode=False,
)
name = module.params['name']
value = module.params['value']
user = module.params['user']
cron_file = module.params['cron_file']
insertafter = module.params['insertafter']
insertbefore = module.params['insertbefore']
state = module.params['state']
backup = module.params['backup']
ensure_present = state == 'present'
changed = False
res_args = dict()
# Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
os.umask(int('022', 8))
cronvar = CronVar(module, user, cron_file)
module.debug('cronvar instantiated - name: "%s"' % name)
# --- user input validation ---
if name is None and ensure_present:
module.fail_json(msg="You must specify 'name' to insert a new cron variable")
if value is None and ensure_present:
module.fail_json(msg="You must specify 'value' to insert a new cron variable")
if name is None and not ensure_present:
module.fail_json(msg="You must specify 'name' to remove a cron variable")
# if requested make a backup before making a change
if backup:
(_, backup_file) = tempfile.mkstemp(prefix='cronvar')
cronvar.write(backup_file)
if cronvar.cron_file and not name and not ensure_present:
changed = cronvar.remove_job_file()
module.exit_json(changed=changed, cron_file=cron_file, state=state)
old_value = cronvar.find_variable(name)
if ensure_present:
if old_value is None:
cronvar.add_variable(name, value, insertbefore, insertafter)
changed = True
elif old_value != value:
cronvar.update_variable(name, value)
changed = True
else:
if old_value is not None:
cronvar.remove_variable(name)
changed = True
res_args = {
"vars": cronvar.get_var_names(),
"changed": changed
}
if changed:
cronvar.write()
# retain the backup only if crontab or cron file have changed
if backup:
if changed:
res_args['backup_file'] = backup_file
else:
os.unlink(backup_file)
if cron_file:
res_args['cron_file'] = cron_file
module.exit_json(**res_args)
if __name__ == '__main__':
main()
|
|
import numpy as np
from scipy.linalg import norm
import amnet
from collections import deque
import copy
from math import isinf
"""
Contains routines for manipulating and simplifying Amn trees
"""
def compose_rewire(phi1, phi2):
"""
Given two AMNs and pointers to their input variables,
rewires the first AMN's variable to point to the output of the second AMN.
Given:
phi1(x1)
phi2(x2)
Side effects:
phi1 is now rewired to
phi(x) = phi1(phi2(x)),
where x = x2 (the variable of phi2)
Note: x1 is thrown away!
Note: this routine modifies phi1!
"""
# cannot compose when dimensions are wrong
assert phi1.indim == phi2.outdim
# it does not make sense to compose with phi1 a variable
assert not (isinstance(phi1, amnet.Variable))
# compute the list of descendants of phi1 and phi2
desc1 = descendants(phi1)
desc2 = descendants(phi2)
# the trees should have no overlaps
nodeids1 = set([id(d) for d in desc1])
nodeids2 = set([id(d) for d in desc2])
assert len(nodeids1) == len(desc1)
assert len(nodeids2) == len(desc2)
assert len(nodeids1 & nodeids2) == 0
# determine the variables x1, x2 associated with phi1, phi2
vars1 = [d for d in desc1 if isinstance(d, amnet.Variable)]
vars2 = [d for d in desc2 if isinstance(d, amnet.Variable)]
assert len(vars1) == 1
assert len(vars2) == 1
x1 = vars1[0]
x2 = vars2[0]
    # rewire: redirect every reference to x1 inside phi1's tree to phi2, and
    # update indims to reflect the new input variable x2
    # (assumes children are stored in the x/y/z attributes, as in children())
    for d in desc1:
        for attr in ('x', 'y', 'z'):
            if hasattr(d, attr) and (getattr(d, attr) is x1):
                setattr(d, attr, phi2)
        d.indim = phi2.indim
def children(phi):
"""
returns a (possibly empty) list of all direct children
of the node phi
"""
ret = []
if hasattr(phi, 'x'):
ret.append(phi.x)
if hasattr(phi, 'y'):
assert isinstance(phi, amnet.Mu) or \
isinstance(phi, amnet.Stack)
ret.append(phi.y)
if hasattr(phi, 'z'):
assert isinstance(phi, amnet.Mu)
ret.append(phi.z)
assert len(ret) <= 3
return ret
def is_element_of(node, lst):
"""
    equivalent to `node in lst`,
    except it compares with `is` only, whereas `in` matches on `is` or `==`
    """
for k in lst:
if node is k:
return True
return False
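def _demo_is_element_of():
    # Hedged sketch: identity-based membership matters when the list holds
    # numpy arrays, because `==` on arrays is elementwise and ambiguous in a
    # boolean context, so a plain `in` test can raise for non-identical arrays.
    a = np.zeros(3)
    b = np.ones(3)
    assert is_element_of(a, [a, b])
    assert not is_element_of(np.zeros(3), [a, b])  # equal values, different object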
def descendants(phi):
"""
returns a list of all descendants of phi,
including phi itself
"""
q = deque([phi]) # queue of nodes to check
d = list() # list of descendants
while len(q) > 0:
node = q.popleft()
# cannot use not(node in d) because Python's `in`
# checks for `==` or `is` equality;
# we want only `is` equality
#if not(any(node is e for e in d)):
if not is_element_of(node, d):
# node is new
d.append(node)
# add its children to check for reachability
#q.extend([c for c in children(node) if c not in d])
q.extend([c for c in children(node) if not is_element_of(c, d)])
# done finding reachable nodes
return d
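def _demo_descendants():
    # Hedged sketch using bare stand-in nodes (objects exposing only an `x`
    # child) to illustrate the reachability walk; real callers pass amnet nodes.
    class _Node(object):
        def __init__(self, x=None):
            if x is not None:
                self.x = x
    leaf = _Node()
    root = _Node(_Node(leaf))
    assert len(descendants(root)) == 3  # root, middle node, leaf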
def leaves_of(phi):
"""
returns a list of all variables in the graph of phi
"""
return [d for d in descendants(phi)
if isinstance(d, amnet.Variable)]
def unique_leaf_of(phi):
"""
returns the variable associated with phi (assuming there is only one)
"""
vs = leaves_of(phi)
assert len(vs) == 1
assert isinstance(vs[0], amnet.Variable)
return vs[0]
def valid_tree(phi):
"""
goes through the tree of phi and ensures that
1. the dimensions work out
2. there is only one variable
3. there are no directed cycles
"""
q = deque([phi]) # queue of nodes to check
visited = list() # already checked
# save the indim of the root node, and make sure all the indims
# of the children are the same
indim = phi.indim
retval = True
varsfound = 0
while len(q) > 0:
# node to check
node = q.popleft()
# check outdim
if isinstance(node, amnet.Variable):
retval &= (node.outdim == node.indim)
varsfound += 1
elif isinstance(node, amnet.Linear):
m, n = node.w.shape
retval &= (node.outdim == m)
retval &= (node.x.outdim == n)
retval &= (all([bi == 0 for bi in node.b])) # check value
elif isinstance(node, amnet.Constant):
retval &= (node.outdim == len(node.b))
retval &= (all([wij == 0 for wij in np.nditer(node.w)])) # check value
elif isinstance(node, amnet.Affine):
m, n = node.w.shape
retval &= (node.outdim == m)
retval &= (node.x.outdim == n)
retval &= (m == len(node.b))
elif isinstance(node, amnet.Mu):
retval &= (node.outdim == node.x.outdim)
retval &= (node.outdim == node.y.outdim)
retval &= (node.z.outdim == 1)
elif isinstance(node, amnet.Stack):
retval &= (node.outdim == node.x.outdim + node.y.outdim)
else:
retval = False # unknown node type
# check indim
retval &= (node.indim == indim)
# short-circuit if an inconsistency has been found
if not retval:
return False
# add children to queue
if not(any(node is e for e in visited)):
visited.append(node)
#q.extend(children(node))
q.extend([c for c in children(node) if c not in visited])
# finished iterating
# TODO: also check if graph is cyclic
return (varsfound == 1)
def is_cyclic(phi):
    """
    returns True if the graph of phi contains a directed cycle
    (DFS with gray/black coloring: gray = exploring set, i.e. nodes on the
    current path; black = explored set)
    """
    gray = list()   # exploring set
    black = list()  # explored set
    def visit(node):
        gray.append(node)
        for c in children(node):
            if is_element_of(c, gray):
                return True  # back edge => directed cycle
            if (not is_element_of(c, black)) and visit(c):
                return True
        gray.pop()
        black.append(node)
        return False
    # every node in the graph is reachable from phi (see descendants)
    return visit(phi)
################################################################################
# Generic graph algorithms
################################################################################
def sample_graph(num):
"""
returns sample graphs
"""
G = dict()
if num == 1:
# graph on Fig 22.4 of CLRS 3rd ed.
G['u'] = ['x', 'v']
G['v'] = ['y']
G['w'] = ['y', 'z']
G['x'] = ['v']
G['y'] = ['x']
G['z'] = ['z']
elif num == 2:
# graph on Fig 22.6 of CLRS 3rd ed.
G['q'] = ['s', 'w', 't']
G['r'] = ['u', 'y']
G['s'] = ['v']
G['t'] = ['x', 'y']
G['u'] = ['y']
G['v'] = ['w']
G['w'] = ['s']
G['x'] = ['z']
G['y'] = ['q']
G['z'] = ['x']
return G
def dfs(G):
"""
G is a dict :: node -> [node]
with g[n] = all the nodes one-step reachable from n
"""
# data structure for labeling nodes
# only one instance of this object should exist
class DfsData(object):
def __init__(self):
self.color = dict() # node -> 'WHITE', 'GRAY', or 'BLACK'
self.pred = dict() # node -> node (predecessor)
self.dtime = dict() # node -> int (discover time)
self.ftime = dict() # node -> int (finish time)
self.time = 0 # global time
data = DfsData()
# 1. initialize dfs data
for u in G:
data.color[u] = 'WHITE'
data.pred[u] = None
data.dtime[u] = float('inf')
data.ftime[u] = float('inf')
data.time = 0
assert all([data.color[u] == 'WHITE' for u in G])
assert all([data.pred[u] is None for u in G])
# 2. visit every connected component
for u in G:
if data.color[u] == 'WHITE':
dfs_visit(G, u, data)
#dfs_visit_iterative(G, u, data)
# 3. classify edges
assert all([not(isinf(data.dtime[u])) for u in G])
assert all([not(isinf(data.ftime[u])) for u in G])
def dfs_visit(G, u, data):
# white vertex u has just been discovered
data.time += 1
data.dtime[u] = data.time
data.color[u] = 'GRAY'
    print('Colored %s WHITE->GRAY' % u)
# explore edge (u, v)
# tree edge or forward edge <=> u.d < v.d < v.f < u.f
# back edge <=> v.d <= u.d < u.f <= v.f
# cross edge <=> v.d < v.f < u.d < u.f
for v in G[u]:
        print('Exploring edge %s->%s' % (u, v))
if data.color[v] == 'WHITE':
            print('Found tree edge: %s->%s' % (u, v))
data.pred[v] = u
dfs_visit(G, v, data)
elif data.color[v] == 'GRAY':
            print('Found back edge: %s->%s' % (u, v))
assert data.dtime[v] <= data.dtime[u] < data.ftime[u] <= data.ftime[v]
else:
            print('Found forward/cross edge: %s->%s (ignoring)' % (u, v))
# blacken u, it's finished
data.time += 1
data.ftime[u] = data.time
data.color[u] = 'BLACK'
    print('Colored %s BLACK' % u)
if __name__ == '__main__':
G = sample_graph(2)
    print(G)
dfs(G)
################################################################################
# ABANDONED METHODS
################################################################################
FPTOL=1e-8
def simplify(phi):
"""
Returns a new Amn that is equivalent to phi from the
perspective of phi.eval(..), but potentially has
* fewer nodes (e.g., fewer Mu's)
* affine simplifications
The affine simplifications are greedy, and may not be performed
    if the result is a higher-dimensional (i.e., larger) affine map.
"""
# 1. only manipulate the copy
#phic = copy.deepcopy(phi)
#return phic
pass
def eval_ones(phi):
"""
evaluates phi on the all ones vector
and returns the floating point answer
"""
return phi.eval(np.ones(phi.indim))
def _simp_aff_aff(aff, force=False):
"""
TODO: this does not work if the child of aff
is the child of someone else
"""
assert isinstance(aff, amnet.Affine)
# whether an operation was performed
simp = False
# ensure we can do a simplification
if not(isinstance(aff.x, amnet.Affine)):
return simp
assert isinstance(aff.x, amnet.Affine)
# simplify if dimensions are reduced, or if forced to
m1, n1 = aff.w.shape
m2, n2 = aff.x.w.shape
assert n1 == m2
if force:
simp = True
elif m1*n2 <= ((m1 + n1) + (m2 + n2)):
simp = True
# before manipulation
val_a = eval_ones(aff)
if simp:
w1 = aff.w
b1 = aff.b
w2 = aff.x.w
        b2 = aff.x.b
# compute new affine
w3 = np.dot(w1, w2)
b3 = np.dot(w1, b2) + b1
# save grandchild
assert isinstance(aff.x, amnet.Affine)
x2 = aff.x.x
# rewrite node
aff.w = w3
aff.b = b3
aff.x = x2
# after manipulation
val_b = eval_ones(aff)
assert norm(val_a - val_b) <= FPTOL
return simp
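# Identity behind the rewrite in _simp_aff_aff above: for stacked affine maps,
#   aff(x) = w1 * (w2 * x + b2) + b1 = (w1 @ w2) * x + (w1 @ b2 + b1)
# so the parent/child pair collapses into a single Affine node with
# w3 = w1 @ w2 and b3 = w1 @ b2 + b1, which is exactly what is computed above.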
|
|
from SimPEG import Survey, Utils, Problem, Maps, np, sp, mkvc
from simpegEM.FDEM.SurveyFDEM import SrcFDEM
from simpegEM.Utils.EMUtils import omega
from scipy.constants import mu_0
import sys
from numpy.lib import recfunctions as recFunc
from DataMT import DataMT
from simpegMT.Sources import homo1DModelSource, homo3DModelSource
#################
### Receivers ###
#################
class RxMT(Survey.BaseRx):
knownRxTypes = {
# 3D impedance
'zxxr':['Z3D', 'real'],
'zxyr':['Z3D', 'real'],
'zyxr':['Z3D', 'real'],
'zyyr':['Z3D', 'real'],
'zxxi':['Z3D', 'imag'],
'zxyi':['Z3D', 'imag'],
'zyxi':['Z3D', 'imag'],
'zyyi':['Z3D', 'imag'],
# 2D impedance
# TODO:
# 1D impedance
'z1dr':['Z1D', 'real'],
'z1di':['Z1D', 'imag'],
# Tipper
'tzxr':['T3D','real'],
'tzxi':['T3D','imag'],
'tzyr':['T3D','real'],
'tzyi':['T3D','imag']
}
    # TODO: Have locs as a single set of coordinates (used for both) or as two sets, for the numerator and denominator separately.
def __init__(self, locs, rxType):
Survey.BaseRx.__init__(self, locs, rxType)
    def projField(self, fracPos):
        """
        Field Type projection (e.g. e b ...)
        :param str fracPos: Position of the field in the data ratio
        """
        if 'numerator' in fracPos:
            return self.knownRxTypes[self.rxType][0][0]
        elif 'denominator' in fracPos:
            return self.knownRxTypes[self.rxType][1][0]
        else:
            raise Exception('{:s} is an unknown option. Use numerator or denominator.'.format(fracPos))
    def projGLoc(self, fracPos):
        """
        Grid Location projection (e.g. Ex Fy ...)
        :param str fracPos: Position of the field in the data ratio
        """
        if 'numerator' in fracPos:
            return self.knownRxTypes[self.rxType][0][1]
        elif 'denominator' in fracPos:
            return self.knownRxTypes[self.rxType][0][1]
        else:
            raise Exception('{:s} is an unknown option. Use numerator or denominator.'.format(fracPos))
@property
def projType(self):
"""
Receiver type for projection.
"""
return self.knownRxTypes[self.rxType][0]
@property
def projComp(self):
"""Component projection (real/imag)"""
return self.knownRxTypes[self.rxType][1]
def projectFields(self, src, mesh, f):
'''
Project the fields and return the correct data.
'''
        if self.projType == 'Z1D':
Pex = mesh.getInterpolationMat(self.locs[:,-1],'Fx')
Pbx = mesh.getInterpolationMat(self.locs[:,-1],'Ex')
ex = Pex*mkvc(f[src,'e_1d'],2)
bx = Pbx*mkvc(f[src,'b_1d'],2)/mu_0
# Note: Has a minus sign in front, to comply with quadrant calculations.
# Can be derived from zyx case for the 3D case.
f_part_complex = -ex/bx
# elif self.projType is 'Z2D':
        elif self.projType == 'Z3D':
if self.locs.ndim == 3:
eFLocs = self.locs[:,:,0]
bFLocs = self.locs[:,:,1]
else:
eFLocs = self.locs
bFLocs = self.locs
# Get the projection
Pex = mesh.getInterpolationMat(eFLocs,'Ex')
Pey = mesh.getInterpolationMat(eFLocs,'Ey')
Pbx = mesh.getInterpolationMat(bFLocs,'Fx')
Pby = mesh.getInterpolationMat(bFLocs,'Fy')
# Get the fields at location
            # px: x-polarization and py: y-polarization.
ex_px = Pex*f[src,'e_px']
ey_px = Pey*f[src,'e_px']
ex_py = Pex*f[src,'e_py']
ey_py = Pey*f[src,'e_py']
hx_px = Pbx*f[src,'b_px']/mu_0
hy_px = Pby*f[src,'b_px']/mu_0
hx_py = Pbx*f[src,'b_py']/mu_0
hy_py = Pby*f[src,'b_py']/mu_0
# Make the complex data
if 'zxx' in self.rxType:
f_part_complex = ( ex_px*hy_py - ex_py*hy_px)/(hx_px*hy_py - hx_py*hy_px)
elif 'zxy' in self.rxType:
f_part_complex = (-ex_px*hx_py + ex_py*hx_px)/(hx_px*hy_py - hx_py*hy_px)
elif 'zyx' in self.rxType:
f_part_complex = ( ey_px*hy_py - ey_py*hy_px)/(hx_px*hy_py - hx_py*hy_px)
elif 'zyy' in self.rxType:
f_part_complex = (-ey_px*hx_py + ey_py*hx_px)/(hx_px*hy_py - hx_py*hy_px)
        elif self.projType == 'T3D':
if self.locs.ndim == 3:
horLoc = self.locs[:,:,0]
vertLoc = self.locs[:,:,1]
else:
horLoc = self.locs
vertLoc = self.locs
Pbx = mesh.getInterpolationMat(horLoc,'Fx')
Pby = mesh.getInterpolationMat(horLoc,'Fy')
Pbz = mesh.getInterpolationMat(vertLoc,'Fz')
bx_px = Pbx*f[src,'b_px']
by_px = Pby*f[src,'b_px']
bz_px = Pbz*f[src,'b_px']
bx_py = Pbx*f[src,'b_py']
by_py = Pby*f[src,'b_py']
bz_py = Pbz*f[src,'b_py']
            if 'tzx' in self.rxType:
                f_part_complex = (- by_px*bz_py + by_py*bz_px)/(bx_px*by_py - bx_py*by_px)
            elif 'tzy' in self.rxType:
                f_part_complex = ( bx_px*bz_py - bx_py*bz_px)/(bx_px*by_py - bx_py*by_px)
        else:
            raise NotImplementedError('Projection of {:s} receiver type is not implemented.'.format(self.rxType))
# Get the real or imag component
real_or_imag = self.projComp
f_part = getattr(f_part_complex, real_or_imag)
# print f_part
return f_part
def projectFieldsDeriv(self, src, mesh, f, v, adjoint=False):
"""
The derivative of the projection wrt u
:param MTsrc src: MT source
:param TensorMesh mesh: Mesh defining the topology of the problem
:param MTfields f: MT fields object of the source
:param numpy.ndarray v: Random vector of size
"""
real_or_imag = self.projComp
if not adjoint:
            if self.projType == 'Z1D':
Pex = mesh.getInterpolationMat(self.locs[:,-1],'Fx')
Pbx = mesh.getInterpolationMat(self.locs[:,-1],'Ex')
# ex = Pex*mkvc(f[src,'e_1d'],2)
# bx = Pbx*mkvc(f[src,'b_1d'],2)/mu_0
dP_de = -mkvc(Utils.sdiag(1./(Pbx*mkvc(f[src,'b_1d'],2)/mu_0))*(Pex*v),2)
dP_db = mkvc( Utils.sdiag(Pex*mkvc(f[src,'e_1d'],2))*(Utils.sdiag(1./(Pbx*mkvc(f[src,'b_1d'],2)/mu_0)).T*Utils.sdiag(1./(Pbx*mkvc(f[src,'b_1d'],2)/mu_0)))*(Pbx*f._bDeriv_u(src,v)/mu_0),2)
PDeriv_complex = np.sum(np.hstack((dP_de,dP_db)),1)
            elif self.projType == 'Z2D':
raise NotImplementedError('Has not been implement for 2D impedance tensor')
            elif self.projType == 'Z3D':
if self.locs.ndim == 3:
eFLocs = self.locs[:,:,0]
bFLocs = self.locs[:,:,1]
else:
eFLocs = self.locs
bFLocs = self.locs
# Get the projection
Pex = mesh.getInterpolationMat(eFLocs,'Ex')
Pey = mesh.getInterpolationMat(eFLocs,'Ey')
Pbx = mesh.getInterpolationMat(bFLocs,'Fx')
Pby = mesh.getInterpolationMat(bFLocs,'Fy')
# Get the fields at location
                # px: x-polarization and py: y-polarization.
ex_px = Pex*f[src,'e_px']
ey_px = Pey*f[src,'e_px']
ex_py = Pex*f[src,'e_py']
ey_py = Pey*f[src,'e_py']
hx_px = Pbx*f[src,'b_px']/mu_0
hy_px = Pby*f[src,'b_px']/mu_0
hx_py = Pbx*f[src,'b_py']/mu_0
hy_py = Pby*f[src,'b_py']/mu_0
# Derivatives as lambda functions
                # The size of the derivatives should be nD,nU
ex_px_u = lambda vec: Pex*f._e_pxDeriv_u(src,vec)
ey_px_u = lambda vec: Pey*f._e_pxDeriv_u(src,vec)
ex_py_u = lambda vec: Pex*f._e_pyDeriv_u(src,vec)
ey_py_u = lambda vec: Pey*f._e_pyDeriv_u(src,vec)
# NOTE: Think b_p?Deriv_u should return a 2*nF size matrix
hx_px_u = lambda vec: Pbx*f._b_pxDeriv_u(src,vec)/mu_0
hy_px_u = lambda vec: Pby*f._b_pxDeriv_u(src,vec)/mu_0
hx_py_u = lambda vec: Pbx*f._b_pyDeriv_u(src,vec)/mu_0
hy_py_u = lambda vec: Pby*f._b_pyDeriv_u(src,vec)/mu_0
# Update the input vector
sDiag = lambda t: Utils.sdiag(mkvc(t,2))
# Define the components of the derivative
Hd = sDiag(1./(sDiag(hx_px)*hy_py - sDiag(hx_py)*hy_px))
Hd_uV = sDiag(hy_py)*hx_px_u(v) + sDiag(hx_px)*hy_py_u(v) - sDiag(hx_py)*hy_px_u(v) - sDiag(hy_px)*hx_py_u(v)
# Calculate components
if 'zxx' in self.rxType:
Zij = sDiag(Hd*( sDiag(ex_px)*hy_py - sDiag(ex_py)*hy_px ))
ZijN_uV = sDiag(hy_py)*ex_px_u(v) + sDiag(ex_px)*hy_py_u(v) - sDiag(ex_py)*hy_px_u(v) - sDiag(hy_px)*ex_py_u(v)
elif 'zxy' in self.rxType:
Zij = sDiag(Hd*(-sDiag(ex_px)*hx_py + sDiag(ex_py)*hx_px ))
ZijN_uV = -sDiag(hx_py)*ex_px_u(v) - sDiag(ex_px)*hx_py_u(v) + sDiag(ex_py)*hx_px_u(v) + sDiag(hx_px)*ex_py_u(v)
elif 'zyx' in self.rxType:
Zij = sDiag(Hd*( sDiag(ey_px)*hy_py - sDiag(ey_py)*hy_px ))
ZijN_uV = sDiag(hy_py)*ey_px_u(v) + sDiag(ey_px)*hy_py_u(v) - sDiag(ey_py)*hy_px_u(v) - sDiag(hy_px)*ey_py_u(v)
elif 'zyy' in self.rxType:
Zij = sDiag(Hd*(-sDiag(ey_px)*hx_py + sDiag(ey_py)*hx_px ))
ZijN_uV = -sDiag(hx_py)*ey_px_u(v) - sDiag(ey_px)*hx_py_u(v) + sDiag(ey_py)*hx_px_u(v) + sDiag(hx_px)*ey_py_u(v)
# Calculate the complex derivative
PDeriv_complex = Hd * (ZijN_uV - Zij * Hd_uV )
# Extract the real number for the real/imag components.
Pv = np.array(getattr(PDeriv_complex, real_or_imag))
elif adjoint:
# Note: The v vector is real and the return should be complex
            if self.projType == 'Z1D':
Pex = mesh.getInterpolationMat(self.locs[:,-1],'Fx')
Pbx = mesh.getInterpolationMat(self.locs[:,-1],'Ex')
# ex = Pex*mkvc(f[src,'e_1d'],2)
# bx = Pbx*mkvc(f[src,'b_1d'],2)/mu_0
dP_deTv = -mkvc(Pex.T*Utils.sdiag(1./(Pbx*mkvc(f[src,'b_1d'],2)/mu_0)).T*v,2)
db_duv = Pbx.T/mu_0*Utils.sdiag(1./(Pbx*mkvc(f[src,'b_1d'],2)/mu_0))*(Utils.sdiag(1./(Pbx*mkvc(f[src,'b_1d'],2)/mu_0))).T*Utils.sdiag(Pex*mkvc(f[src,'e_1d'],2)).T*v
dP_dbTv = mkvc(f._bDeriv_u(src,db_duv,adjoint=True),2)
PDeriv_real = np.sum(np.hstack((dP_deTv,dP_dbTv)),1)
            elif self.projType == 'Z2D':
raise NotImplementedError('Has not be implement for 2D impedance tensor')
            elif self.projType == 'Z3D':
if self.locs.ndim == 3:
eFLocs = self.locs[:,:,0]
bFLocs = self.locs[:,:,1]
else:
eFLocs = self.locs
bFLocs = self.locs
# Get the projection
Pex = mesh.getInterpolationMat(eFLocs,'Ex')
Pey = mesh.getInterpolationMat(eFLocs,'Ey')
Pbx = mesh.getInterpolationMat(bFLocs,'Fx')
Pby = mesh.getInterpolationMat(bFLocs,'Fy')
# Get the fields at location
                # px: x-polarization and py: y-polarization.
aex_px = mkvc(mkvc(f[src,'e_px'],2).T*Pex.T)
aey_px = mkvc(mkvc(f[src,'e_px'],2).T*Pey.T)
aex_py = mkvc(mkvc(f[src,'e_py'],2).T*Pex.T)
aey_py = mkvc(mkvc(f[src,'e_py'],2).T*Pey.T)
ahx_px = mkvc(mkvc(f[src,'b_px'],2).T/mu_0*Pbx.T)
ahy_px = mkvc(mkvc(f[src,'b_px'],2).T/mu_0*Pby.T)
ahx_py = mkvc(mkvc(f[src,'b_py'],2).T/mu_0*Pbx.T)
ahy_py = mkvc(mkvc(f[src,'b_py'],2).T/mu_0*Pby.T)
# Derivatives as lambda functions
aex_px_u = lambda vec: f._e_pxDeriv_u(src,Pex.T*vec,adjoint=True)
aey_px_u = lambda vec: f._e_pxDeriv_u(src,Pey.T*vec,adjoint=True)
aex_py_u = lambda vec: f._e_pyDeriv_u(src,Pex.T*vec,adjoint=True)
aey_py_u = lambda vec: f._e_pyDeriv_u(src,Pey.T*vec,adjoint=True)
ahx_px_u = lambda vec: f._b_pxDeriv_u(src,Pbx.T*vec,adjoint=True)/mu_0
ahy_px_u = lambda vec: f._b_pxDeriv_u(src,Pby.T*vec,adjoint=True)/mu_0
ahx_py_u = lambda vec: f._b_pyDeriv_u(src,Pbx.T*vec,adjoint=True)/mu_0
ahy_py_u = lambda vec: f._b_pyDeriv_u(src,Pby.T*vec,adjoint=True)/mu_0
# Update the input vector
# Define shortcuts
sDiag = lambda t: Utils.sdiag(mkvc(t,2))
sVec = lambda t: Utils.sp.csr_matrix(mkvc(t,2))
# Define the components of the derivative
aHd = sDiag(1./(sDiag(ahx_px)*ahy_py - sDiag(ahx_py)*ahy_px))
                aHd_uV = lambda x: ahx_px_u(sDiag(ahy_py)*x) + ahy_py_u(sDiag(ahx_px)*x) - ahy_px_u(sDiag(ahx_py)*x) - ahx_py_u(sDiag(ahy_px)*x)
# Need to fix this to reflect the adjoint
if 'zxx' in self.rxType:
Zij = sDiag(aHd*( sDiag(ahy_py)*aex_px - sDiag(ahy_px)*aex_py))
ZijN_uV = lambda x: aex_px_u(sDiag(ahy_py)*x) + ahy_py_u(sDiag(aex_px)*x) - ahy_px_u(sDiag(aex_py)*x) - aex_py_u(sDiag(ahy_px)*x)
elif 'zxy' in self.rxType:
Zij = sDiag(aHd*(-sDiag(ahx_py)*aex_px + sDiag(ahx_px)*aex_py))
ZijN_uV = lambda x:-aex_px_u(sDiag(ahx_py)*x) - ahx_py_u(sDiag(aex_px)*x) + ahx_px_u(sDiag(aex_py)*x) + aex_py_u(sDiag(ahx_px)*x)
elif 'zyx' in self.rxType:
Zij = sDiag(aHd*( sDiag(ahy_py)*aey_px - sDiag(ahy_px)*aey_py))
ZijN_uV = lambda x: aey_px_u(sDiag(ahy_py)*x) + ahy_py_u(sDiag(aey_px)*x) - ahy_px_u(sDiag(aey_py)*x) - aey_py_u(sDiag(ahy_px)*x)
elif 'zyy' in self.rxType:
Zij = sDiag(aHd*(-sDiag(ahx_py)*aey_px + sDiag(ahx_px)*aey_py))
ZijN_uV = lambda x:-aey_px_u(sDiag(ahx_py)*x) - ahx_py_u(sDiag(aey_px)*x) + ahx_px_u(sDiag(aey_py)*x) + aey_py_u(sDiag(ahx_px)*x)
# Calculate the complex derivative
PDeriv_real = ZijN_uV(aHd*v) - aHd_uV(Zij.T*aHd*v)#
# NOTE: Need to reshape the output to go from 2*nU array to a (nU,2) matrix for each polarization
# PDeriv_real = np.hstack((mkvc(PDeriv_real[:len(PDeriv_real)/2],2),mkvc(PDeriv_real[len(PDeriv_real)/2::],2)))
PDeriv_real = PDeriv_real.reshape((2,mesh.nE)).T
# Extract the data
if real_or_imag == 'imag':
Pv = 1j*PDeriv_real
elif real_or_imag == 'real':
Pv = PDeriv_real.astype(complex)
return Pv
###############
### Sources ###
###############
class srcMT(SrcFDEM): # Survey.BaseSrc):
'''
Sources for the MT problem.
Use the SimPEG BaseSrc, since the source fields share properties with the transmitters.
:param float freq: The frequency of the source
:param list rxList: A list of receivers associated with the source
'''
freq = None #: Frequency (float)
rxPair = RxMT
def __init__(self, rxList, freq):
self.freq = float(freq)
Survey.BaseSrc.__init__(self, rxList)
# 1D sources
class srcMT_polxy_1DhomotD(srcMT):
"""
    MT source for both polarizations (x and y) for the total domain. It calculates the fields based on conditions on the boundary of the domain.
"""
def __init__(self, rxList, freq):
srcMT.__init__(self, rxList, freq)
# TODO: need to add the primary fields calc and source terms into the problem.
# Need to implement such that it works for all dims.
class srcMT_polxy_1Dprimary(srcMT):
"""
    MT source for both polarizations (x and y) given a 1D primary model. It assigns fields calculated from the 1D model
as fields in the full space of the problem.
"""
def __init__(self, rxList, freq):
# assert mkvc(self.mesh.hz.shape,1) == mkvc(sigma1d.shape,1),'The number of values in the 1D background model does not match the number of vertical cells (hz).'
self.sigma1d = None
srcMT.__init__(self, rxList, freq)
# Hidden property of the ePrimary
self._ePrimary = None
def ePrimary(self,problem):
# Get primary fields for both polarizations
if self.sigma1d is None:
# Set the sigma1d as the 1st column in the background model
if len(problem._sigmaPrimary) == problem.mesh.nC:
if problem.mesh.dim == 1:
self.sigma1d = problem.mesh.r(problem._sigmaPrimary,'CC','CC','M')[:]
elif problem.mesh.dim == 3:
self.sigma1d = problem.mesh.r(problem._sigmaPrimary,'CC','CC','M')[0,0,:]
# Or as the 1D model that matches the vertical cell number
elif len(problem._sigmaPrimary) == problem.mesh.nCz:
self.sigma1d = problem._sigmaPrimary
if self._ePrimary is None:
self._ePrimary = homo1DModelSource(problem.mesh,self.freq,self.sigma1d)
return self._ePrimary
def bPrimary(self,problem):
# Project ePrimary to bPrimary
# Satisfies the primary(background) field conditions
if problem.mesh.dim == 1:
C = problem.mesh.nodalGrad
elif problem.mesh.dim == 3:
C = problem.mesh.edgeCurl
bBG_bp = (- C * self.ePrimary(problem) )*(1/( 1j*omega(self.freq) ))
return bBG_bp
def S_e(self,problem):
"""
Get the electrical field source
"""
e_p = self.ePrimary(problem)
Map_sigma_p = Maps.Vertical1DMap(problem.mesh)
sigma_p = Map_sigma_p._transform(self.sigma1d)
# Make mass matrix
# Note: M(sig) - M(sig_p) = M(sig - sig_p)
        # Need to deal with the edge/face discrepancies between 1d/2d/3d
if problem.mesh.dim == 1:
Mesigma = problem.mesh.getFaceInnerProduct(problem.curModel.sigma)
Mesigma_p = problem.mesh.getFaceInnerProduct(sigma_p)
if problem.mesh.dim == 2:
pass
if problem.mesh.dim == 3:
Mesigma = problem.MeSigma
Mesigma_p = problem.mesh.getEdgeInnerProduct(sigma_p)
return (Mesigma - Mesigma_p) * e_p
def S_eDeriv_m(self, problem, v, adjoint = False):
'''
Get the derivative of S_e wrt to sigma (m)
'''
# Need to deal with
if problem.mesh.dim == 1:
# Need to use the faceInnerProduct
MsigmaDeriv = problem.mesh.getFaceInnerProductDeriv(problem.curModel.sigma)(self.ePrimary(problem)[:,1]) * problem.curModel.sigmaDeriv
# MsigmaDeriv = ( MsigmaDeriv * MsigmaDeriv.T)**2
if problem.mesh.dim == 2:
pass
if problem.mesh.dim == 3:
# Need to take the derivative of both u_px and u_py
ePri = self.ePrimary(problem)
# MsigmaDeriv = problem.MeSigmaDeriv(ePri[:,0]) + problem.MeSigmaDeriv(ePri[:,1])
# MsigmaDeriv = problem.MeSigmaDeriv(np.sum(ePri,axis=1))
if adjoint:
return sp.hstack(( problem.MeSigmaDeriv(ePri[:,0]).T, problem.MeSigmaDeriv(ePri[:,1]).T ))*v
else:
return np.hstack(( mkvc(problem.MeSigmaDeriv(ePri[:,0]) * v,2), mkvc(problem.MeSigmaDeriv(ePri[:,1])*v,2) ))
if adjoint:
#
return MsigmaDeriv.T * v
else:
# v should be nC size
return MsigmaDeriv * v
class srcMT_polxy_3Dprimary(srcMT):
"""
    MT source for both polarizations (x and y) given a 3D primary model. It assigns fields calculated from the 3D model
as fields in the full space of the problem.
"""
def __init__(self, rxList, freq):
# assert mkvc(self.mesh.hz.shape,1) == mkvc(sigma1d.shape,1),'The number of values in the 1D background model does not match the number of vertical cells (hz).'
self.sigmaPrimary = None
srcMT.__init__(self, rxList, freq)
# Hidden property of the ePrimary
self._ePrimary = None
def ePrimary(self,problem):
# Get primary fields for both polarizations
self.sigmaPrimary = problem._sigmaPrimary
if self._ePrimary is None:
self._ePrimary = homo3DModelSource(problem.mesh,self.sigmaPrimary,self.freq)
return self._ePrimary
def bPrimary(self,problem):
# Project ePrimary to bPrimary
# Satisfies the primary(background) field conditions
if problem.mesh.dim == 1:
C = problem.mesh.nodalGrad
elif problem.mesh.dim == 3:
C = problem.mesh.edgeCurl
bBG_bp = (- C * self.ePrimary(problem) )*(1/( 1j*omega(self.freq) ))
return bBG_bp
def S_e(self,problem):
"""
Get the electrical field source
"""
e_p = self.ePrimary(problem)
Map_sigma_p = Maps.Vertical1DMap(problem.mesh)
sigma_p = Map_sigma_p._transform(self.sigma1d)
# Make mass matrix
# Note: M(sig) - M(sig_p) = M(sig - sig_p)
        # Need to deal with the edge/face discrepancies between 1d/2d/3d
if problem.mesh.dim == 1:
Mesigma = problem.mesh.getFaceInnerProduct(problem.curModel.sigma)
Mesigma_p = problem.mesh.getFaceInnerProduct(sigma_p)
if problem.mesh.dim == 2:
pass
if problem.mesh.dim == 3:
Mesigma = problem.MeSigma
Mesigma_p = problem.mesh.getEdgeInnerProduct(sigma_p)
return (Mesigma - Mesigma_p) * e_p
def S_eDeriv_m(self, problem, v, adjoint = False):
'''
Get the derivative of S_e wrt to sigma (m)
'''
# Need to deal with
if problem.mesh.dim == 1:
# Need to use the faceInnerProduct
MsigmaDeriv = problem.mesh.getFaceInnerProductDeriv(problem.curModel.sigma)(self.ePrimary(problem)[:,1]) * problem.curModel.sigmaDeriv
# MsigmaDeriv = ( MsigmaDeriv * MsigmaDeriv.T)**2
if problem.mesh.dim == 2:
pass
if problem.mesh.dim == 3:
# Need to take the derivative of both u_px and u_py
ePri = self.ePrimary(problem)
# MsigmaDeriv = problem.MeSigmaDeriv(ePri[:,0]) + problem.MeSigmaDeriv(ePri[:,1])
# MsigmaDeriv = problem.MeSigmaDeriv(np.sum(ePri,axis=1))
if adjoint:
return sp.hstack(( problem.MeSigmaDeriv(ePri[:,0]).T, problem.MeSigmaDeriv(ePri[:,1]).T ))*v
else:
return np.hstack(( mkvc(problem.MeSigmaDeriv(ePri[:,0]) * v,2), mkvc(problem.MeSigmaDeriv(ePri[:,1])*v,2) ))
if adjoint:
#
return MsigmaDeriv.T * v
else:
# v should be nC size
return MsigmaDeriv * v
##############
### Survey ###
##############
class SurveyMT(Survey.BaseSurvey):
"""
Survey class for MT. Contains all the sources associated with the survey.
:param list srcList: List of sources associated with the survey
"""
srcPair = srcMT
def __init__(self, srcList, **kwargs):
# Sort these by frequency
self.srcList = srcList
Survey.BaseSurvey.__init__(self, **kwargs)
_freqDict = {}
for src in srcList:
if src.freq not in _freqDict:
_freqDict[src.freq] = []
_freqDict[src.freq] += [src]
self._freqDict = _freqDict
self._freqs = sorted([f for f in self._freqDict])
@property
def freqs(self):
"""Frequencies"""
return self._freqs
@property
def nFreq(self):
"""Number of frequencies"""
return len(self._freqDict)
# TODO: Rename to getSources
def getSrcByFreq(self, freq):
"""Returns the sources associated with a specific frequency."""
assert freq in self._freqDict, "The requested frequency is not in this survey."
return self._freqDict[freq]
def projectFields(self, u):
data = DataMT(self)
for src in self.srcList:
sys.stdout.flush()
for rx in src.rxList:
data[src, rx] = rx.projectFields(src, self.mesh, u)
return data
def projectFieldsDeriv(self, u):
raise Exception('Use Transmitters to project fields deriv.')
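def _demo_build_survey():
    # Hedged usage sketch (not part of the original module; assumes the old
    # SimPEG Survey API used above): one impedance receiver at a hypothetical
    # location, a 1D-primary source at 0.1 Hz, wrapped in a SurveyMT.
    rx_locs = np.array([[0., 0., 0.]])
    rx = RxMT(rx_locs, 'zxyr')
    src = srcMT_polxy_1Dprimary([rx], 0.1)
    return SurveyMT([src])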
|
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import abc
from ..dist_attribute import OperatorDistributedAttribute
_g_distributed_operator_impl_containers = {}
_g_elementwise_ops = ["elementwise_add", "gelu", "dropout", "cast"]
BACKWARD_ONLY_DIST_OPS = {'check_finite_and_unscale', 'update_loss_scaling'}
def is_elementwise_op(op_type):
if op_type in _g_elementwise_ops:
return True
else:
return False
class DistributedOperatorImplContainer:
def __init__(self, op_type):
self._type = op_type
self._impls = []
@property
def type(self):
return self._type
@type.setter
def type(self, op_type):
self._type = op_type
@property
def impls(self):
return self._impls
def register_impl(self, dist_impl):
assert self.type == dist_impl.type, \
"Op type of container must be same as that of the implementation."
impl_idx = len(self.impls)
dist_impl.idx = impl_idx
self._impls.append(dist_impl)
def get_impl(self, impl_idx):
return self._impls[impl_idx]
def get_input_compatible_impls(self, dist_op):
compatible_impls = []
for impl in self.impls:
if impl.is_input_compatible(dist_op):
compatible_impls.append(impl)
return compatible_impls
def get_output_compatible_impls(self, dist_op):
compatible_impls = []
for impl in self.impls:
if impl.is_output_compatible(dist_op):
compatible_impls.append(impl)
return compatible_impls
def get_compatible_impls(self, dist_op):
compatible_impls = []
for impl in self.impls:
if impl.is_auto_compatible(dist_op):
compatible_impls.append(impl)
return compatible_impls
class DistributedOperatorImpl(abc.ABC):
def __init__(self, name):
self._name = name
self._type = None
self._idx = None
self._forward_implemented = False
self._backward_implemented = False
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def type(self):
return self._type
@type.setter
def type(self, op_type):
self._type = op_type
@property
def idx(self):
return self._idx
@idx.setter
def idx(self, impl_idx):
self._idx = impl_idx
@abc.abstractmethod
def is_input_compatible(self, dist_op):
raise NotImplementedError("Please Implement this method in Subclass.")
@abc.abstractmethod
def is_output_compatible(self, dist_op):
raise NotImplementedError("Please Implement this method in Subclass.")
@abc.abstractmethod
def is_auto_compatible(self, dist_op):
raise NotImplementedError("Please Implement this method in Subclass.")
@staticmethod
@abc.abstractmethod
def forward(dist_ctx, *args, **kwargs):
raise NotImplementedError("Please Implement this method in Subclass.")
@staticmethod
@abc.abstractmethod
def backward(dist_ctx, *grad_outputs, **kwargs):
raise NotImplementedError("Please Implement this method in Subclass.")
def update_dims_mapping(self, dist_op):
raise NotImplementedError("Please Implement this method in Subclass.")
def register_distributed_operator_impl_container(container):
global _g_distributed_operator_impl_containers
_g_distributed_operator_impl_containers[container.type] = container
def get_distributed_operator_impl_container(op_type):
global _g_distributed_operator_impl_containers
return _g_distributed_operator_impl_containers.get(op_type, None)
def register_distributed_operator_impl(op_type, dist_impl):
dist_op_impl_container = get_distributed_operator_impl_container(op_type)
if dist_op_impl_container is not None:
dist_impl.type = op_type
dist_op_impl_container.register_impl(dist_impl)
else:
assert False, "Must register distributed operator registry first."
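# Hedged sketch (hypothetical op type "demo_op", not a real Paddle operator):
# shows how a container and a concrete DistributedOperatorImpl are wired
# together through the registration helpers above.
class _DemoDistributedOperatorImpl(DistributedOperatorImpl):
    def is_input_compatible(self, dist_op):
        return True

    def is_output_compatible(self, dist_op):
        return True

    def is_auto_compatible(self, dist_op):
        return True

    @staticmethod
    def forward(dist_ctx, *args, **kwargs):
        pass

    @staticmethod
    def backward(dist_ctx, *grad_outputs, **kwargs):
        pass


def _demo_register_and_lookup():
    # register a container for the hypothetical op type, then one impl
    register_distributed_operator_impl_container(
        DistributedOperatorImplContainer("demo_op"))
    register_distributed_operator_impl("demo_op",
                                       _DemoDistributedOperatorImpl("demo_impl"))
    # look the implementation back up by op type and index
    return get_distributed_operator_impl_container("demo_op").get_impl(0)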
def find_best_compatible_distributed_operator_impl(dist_op, fwd=True):
"""
    Here just return the first compatible implementation.
This will be improved by cost model in the future.
"""
op_type = dist_op.serial_op.type
dist_op_impl_container = get_distributed_operator_impl_container(op_type)
dist_op_eltwise_impl_container = get_distributed_operator_impl_container(
"elementwise")
dist_op_default_impl_container = get_distributed_operator_impl_container(
"default")
compatible_impls = []
if fwd:
# First, find impls in the corresponding container
if dist_op_impl_container:
compatible_impls.extend(
dist_op_impl_container.get_input_compatible_impls(dist_op))
# Second, find impls in the elementwise container
if dist_op_eltwise_impl_container and is_elementwise_op(op_type):
compatible_impls.extend(
dist_op_eltwise_impl_container.get_input_compatible_impls(
dist_op))
# Third, find impls in the default container
if dist_op_default_impl_container:
compatible_impls.extend(
dist_op_default_impl_container.get_input_compatible_impls(
dist_op))
else:
# First, find impls in the corresponding container
if dist_op_impl_container:
compatible_impls.extend(
dist_op_impl_container.get_output_compatible_impls(dist_op))
# Second, find impls in the elementwise container
if dist_op_eltwise_impl_container and is_elementwise_op(op_type):
compatible_impls.extend(
dist_op_eltwise_impl_container.get_output_compatible_impls(
dist_op))
# Third, find impls in the default container
if dist_op_default_impl_container:
compatible_impls.extend(
dist_op_default_impl_container.get_output_compatible_impls(
dist_op))
if compatible_impls:
# For now, just return the first compatible impl
best_compatible_impl = compatible_impls[0]
else:
best_compatible_impl = None
return best_compatible_impl
def is_parameter_related(varname, block):
if ".subprog_" in varname:
varname = varname[:varname.index(".subprog_")]
if ".cast_fp" in varname:
varname = varname[:varname.index(".cast_fp")]
assert block.has_var(varname)
var = block.var(varname)
return var.is_parameter
def infer_shape(block, src_var, src_var_dist_attr, op_input_dist_attr):
var_shape = block.var(src_var.name).shape
    var_topology = src_var_dist_attr.process_mesh.topology
var_dims_mapping = src_var_dist_attr.dims_mapping
complete_shape = []
for idx, shape in enumerate(var_shape):
if var_dims_mapping[idx] == -1:
complete_shape.append(shape)
else:
            new_shape = shape * var_topology[var_dims_mapping[idx]]
complete_shape.append(new_shape)
exact_shape = []
input_topology = op_input_dist_attr.process_mesh.topology
input_dims_mapping = op_input_dist_attr.dims_mapping
for idx, shape in enumerate(complete_shape):
if input_dims_mapping[idx] == -1:
exact_shape.append(shape)
else:
new_shape = shape // input_topology[input_dims_mapping[idx]]
exact_shape.append(new_shape)
return exact_shape
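# Worked example for infer_shape() (hypothetical numbers): a local tensor of
# shape (8, 4) with dims_mapping [0, -1] on a process-mesh topology [2] has a
# "complete" (global) shape of (16, 4); re-slicing that global shape with an
# op input whose dims_mapping is [0, -1] over the same topology gives back the
# exact local shape (8, 4).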
def set_comm_op_dist_attr_for_program(new_op, process_mesh, tensor_dist_attr,
ctx):
assert process_mesh is not None
assert tensor_dist_attr is not None
new_op_dist_attr = OperatorDistributedAttribute()
new_op_dist_attr.process_mesh = process_mesh
for input_varname in new_op.desc.input_arg_names():
new_op_dist_attr.set_input_dist_attr(input_varname, tensor_dist_attr)
for output_varname in new_op.desc.output_arg_names():
new_op_dist_attr.set_output_dist_attr(output_varname, tensor_dist_attr)
ctx.set_op_dist_attr_for_program(new_op, new_op_dist_attr)
def naive_copy_op_dist_attr_for_program(new_op, ref_op, ctx):
ref_dist_attr = ctx.get_op_dist_attr_for_program(ref_op)
new_op_dist_attr = OperatorDistributedAttribute()
new_op_dist_attr.process_mesh = ref_dist_attr.process_mesh
for input_name in ref_op.input_names:
assert input_name in new_op.input_names
assert len(ref_op.input(input_name)) == 1
assert len(new_op.input(input_name)) == 1
ref_tensor_dist_attr = ref_dist_attr.get_input_dist_attr(
ref_op.input(input_name)[0])
new_op_dist_attr.set_input_dist_attr(
new_op.input(input_name)[0], ref_tensor_dist_attr)
for output_name in ref_op.output_names:
assert output_name in new_op.output_names
assert len(ref_op.output(output_name)) == 1
assert len(new_op.output(output_name)) == 1
ref_tensor_dist_attr = ref_dist_attr.get_output_dist_attr(
ref_op.output(output_name)[0])
new_op_dist_attr.set_output_dist_attr(
new_op.output(output_name)[0], ref_tensor_dist_attr)
ctx.set_op_dist_attr_for_program(new_op, new_op_dist_attr)
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for page in orm.Page.objects.all():
if page.menu_login_required:
page.limit_visibility_in_menu = 1
else:
page.limit_visibility_in_menu = None
page.save()
def backwards(self, orm):
for page in orm.Page.objects.all():
if page.limit_visibility_in_menu==1:
page.menu_login_required = True
else:
page.menu_login_required = False
page.save()
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPlugin']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_moderate': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'menu_login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderate_descendants': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderate_page': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'cms.pagemoderatorstate': {
'Meta': {'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_moderate': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': ['auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': "orm['auth.User']"}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': "orm['auth.User']"}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('publisher_is_draft', 'language', 'page'),)", 'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
|
|
# -*- coding: utf-8 -*-
"""
ulmo.lcra.hydromet.core
~~~~~~~~~~~~~~~~~~~~~~~
This module provides access to hydrologic and climate data in the Colorado
River Basin (Texas) provided by the `Lower Colorado River Authority`_
`Hydromet`_ web site and web service.
.. _Lower Colorado River Authority: http://www.lcra.org
.. _Hydromet: http://hydromet.lcra.org
"""
import datetime
import logging
import numpy as np
import pandas
import requests
from bs4 import BeautifulSoup
from dateutil.relativedelta import relativedelta
from geojson import Feature, FeatureCollection, Point
from tsgettoolbox.ulmo import util
# configure logging
LOG_FORMAT = "%(message)s"
logging.basicConfig(format=LOG_FORMAT)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
historical_data_url = "https://hydromet.lcra.org/chronhist.aspx"
current_data_url = "https://hydrometdata.lcra.org"
PARAMETERS = {
"stage": "the level of water above a benchmark in feet",
"flow": "streamflow in cubic feet per second",
"pc": "precipitation in inches",
"temp": "air temperature in degrees fahrenheit",
"rhumid": "air relative humidity as percentage",
"cndvty": "water electrical conductivity in micromhos",
"tds": "total suspended solids",
"windsp": "wind speed, miles per hour",
"winddir": "wind direction in degrees azimuth",
}
current_data_services = ["GetLowerBasin", "GetUpperBasin"]
# in the site list by parameter web page, in order to make distinction between
# stage measurements in lake and stream, the LCRA uses 'stage' for stream sites
# and 'lake' for lake sites
site_types = PARAMETERS.copy()
site_types.update({"lake": "stage measurement in lakes"})
# for these dam sites, stage is named head or tail
dam_sites = ["1995", "1999", "2958", "2999", "3963", "3999"]
def get_sites_by_type(site_type):
"""Gets list of the hydromet site codes and description for site.
Parameters
----------
site_type : str
In all but lake sites, this is the parameter code collected at the site.
For lake sites, it is 'lake'. See ``site_types`` and ``PARAMETERS``
Returns
-------
sites_dict: dict
A python dict with four char long site codes mapped to site information.
"""
sites_base_url = "https://hydromet.lcra.org/navgagelist.asp?Stype=%s"
# the url doesn't provide list of sites for the following parameters but
# they are available with the paired parameter. e.g., flow is available
# at stage sites.
if site_type == "winddir":
site_type = "windsp"
if site_type == "flow":
site_type = "stage"
if site_type == "tds":
site_type = "cndvty"
if site_type not in site_types.keys():
return {}
res = requests.get(sites_base_url % site_type)
soup = BeautifulSoup(res.content, "html")
sites_str = [
site.text.replace(" ", "").replace(u"\xa0", "")
for site in soup.findAll("a")
]
sites_dict = {s[:4]: s[7:] for s in sites_str}
return sites_dict
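def _example_stage_sites():
    # Hedged usage sketch (requires network access to hydromet.lcra.org):
    # returns a dict mapping four-character site codes to site descriptions
    # for stream stage/flow gauges.
    return get_sites_by_type("stage")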
def get_all_sites():
"""Returns list of all LCRA hydromet sites as geojson featurecollection."""
sites_url = "https://hydromet.lcra.org/data/datafull.xml"
res = requests.get(sites_url)
soup = BeautifulSoup(res.content, "xml")
rows = soup.findAll("row")
features = [_create_feature(row) for row in rows]
sites = FeatureCollection(features)
return sites
def get_current_data(service, as_geojson=False):
"""fetches the current (near real-time) river stage and flow values from
LCRA web service.
Parameters
----------
service : str
The web service providing data. see `current_data_services`.
Currently we have GetUpperBasin and GetLowerBasin.
as_geojson : 'True' or 'False' (default)
If True the data is returned as geojson featurecollection and if False
data is returned as list of dicts.
Returns
-------
current_values_dicts : a list of dicts or
current_values_geojson : a geojson featurecollection.
"""
request_body_template = (
'<?xml version="1.0" encoding="utf-8"?>\n'
'<soap12:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'xmlns:xsd="http://www.w3.org/2001/XMLSchema" '
'xmlns:soap12="http://www.w3.org/2003/05/soap-envelope">\n '
" <soap12:Body>\n"
' <%s xmlns="https://hydrometdata.lcra.org" />\n'
" </soap12:Body> \n"
"</soap12:Envelope>"
)
if service.lower() == "getupperbasin":
service = "GetUpperBasin"
elif service.lower() == "getlowerbasin":
service = "GetLowerBasin"
else:
log.info("service %s not recognized" % service)
return {}
request_body = request_body_template % service
headers = {"Content-Type": "text/xml; charset=utf-8"}
res = requests.post(current_data_url, data=request_body, headers=headers)
if res.status_code != 200:
log.info("http request failed with status code %s" % res.status_code)
return {}
soup = BeautifulSoup(res.content)
sites_els = soup.findAll("cls%s" % service.lower().replace("get", ""))
current_values_dicts = [_parse_current_values(site_el) for site_el in sites_els]
if as_geojson:
features = []
for value_dict in current_values_dicts:
feature = _feature_for_values_dict(value_dict)
if len(feature):
features.append(feature[0])
if len(features) != len(current_values_dicts):
            log.warn("some of the sites did not have location information")
if features:
current_values_geojson = FeatureCollection(features)
return current_values_geojson
else:
return {}
else:
return current_values_dicts
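# Illustrative usage sketch (not part of the original module):
#
#     lower_basin = get_current_data("GetLowerBasin")
#     # -> list of dicts, one per site, with keys such as 'location',
#     #    'datetime' and the reported measurements
#     lower_basin_geojson = get_current_data("GetLowerBasin", as_geojson=True)
#     # -> geojson FeatureCollection (sites without location info are dropped)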
def get_site_data(
site_code,
parameter_code,
as_dataframe=True,
start_date=None,
end_date=None,
dam_site_location="head",
):
"""Fetches site's parameter data
Parameters
----------
site_code : str
The LCRA site code (four chars long) of the site you want to query data
for.
parameter_code : str
LCRA parameter code. see ``PARAMETERS``
start_date : ``None`` or datetime
Start of a date range for a query.
end_date : ``None`` or datetime
End of a date range for a query.
as_dataframe : ``True`` (default) or ``False``
This determines what format values are returned as. If ``True`` (default)
then the values will be a pandas.DataFrame object with the values
timestamp as the index. If ``False``, the format will be Python
dictionary.
dam_site_location : 'head' (default) or 'tail'
The site location relative to the dam.
Returns
-------
df : pandas.DataFrame or
values_dict : dict
"""
parameter_code = parameter_code.upper()
if parameter_code.lower() not in PARAMETERS.keys():
log.info("%s is not an LCRA parameter" % parameter_code)
return None
initial_request = requests.get(historical_data_url, verify=False)
if initial_request.status_code != 200:
return None
list_request_headers = {
"__EVENTTARGET": "DropDownList1",
"DropDownList1": site_code,
}
list_request = _make_next_request(
historical_data_url, initial_request, list_request_headers
)
if list_request.status_code != 200:
return None
if parameter_code == "STAGE":
if site_code in dam_sites:
parameter_code = dam_site_location.upper()
else:
parameter_code = "STAGE"
elif parameter_code == "RHUMID":
parameter_code = "Rhumid"
# the parameter selection dropdown doesn't have flow. the data comes with stage.
elif parameter_code == "FLOW":
parameter_code = "STAGE"
else:
pass
if start_date is None:
start_date = datetime.date.today()
if end_date is None:
end_date = datetime.date.today() + relativedelta(days=1)
if (end_date - start_date).days < 180:
values_dict = _get_data(
site_code[:4], parameter_code, list_request, start_date, end_date
)
if not values_dict:
return None
else:
values_dict = []
chunks = pandas.np.ceil((end_date - start_date).days / 180.0)
for chunk in pandas.np.arange(chunks) + 1:
request_start_date = start_date + relativedelta(days=180 * (chunk - 1))
chunk_end_date = start_date + relativedelta(days=180 * chunk)
if chunk_end_date >= end_date:
request_end_date = end_date
else:
request_end_date = chunk_end_date
log.info(
"getting chunk: %i, start: %s, end: %s, parameter: %s"
% (chunk, request_start_date, request_end_date, parameter_code)
)
values_chunk = _get_data(
site_code[:4],
parameter_code,
list_request,
request_start_date,
request_end_date,
)
values_dict += values_chunk
df = _values_dict_to_df(values_dict).astype(float)
if not as_dataframe:
return df.to_dict("records")
else:
return df
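# Illustrative usage sketch (not part of the original module); the site code
# "1234" is hypothetical:
#
#     import datetime
#     df = get_site_data(
#         "1234",
#         "stage",
#         start_date=datetime.date(2015, 1, 1),
#         end_date=datetime.date(2015, 6, 1),
#     )
#     # df is a pandas.DataFrame indexed by timestamp; ranges longer than
#     # 180 days are fetched in 180-day chunks and combined.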
def _create_feature(row):
geometry = Point((float(row["e"]), float(row["d"])))
site_props = dict(site_code=row["a"], site_description=row["c"])
site = Feature(geometry=geometry, properties=site_props)
return site
def _feature_for_values_dict(site_values_dict):
sites = get_all_sites()["features"]
site = [
_update_feature_props(site, site_values_dict)
for site in sites
if site["properties"]["site_description"].lower()
== site_values_dict["location"].lower()
]
return site
def _parse_current_values(site_el):
site_value_els = site_el.findChildren()
site_values = {}
for value_el in site_value_els:
if value_el.name.lower() == "datetime":
if value_el.get_text().strip() == "":
site_values[value_el.name.lower()] = None
else:
site_values[value_el.name.lower()] = util.convert_datetime(
value_el.get_text()
)
elif value_el.name.lower() == "location":
site_values[value_el.name.lower()] = value_el.get_text().strip()
else:
if value_el.get_text().strip() == "":
site_values[value_el.name.lower()] = None
else:
site_values[value_el.name.lower()] = float(value_el.get_text())
return site_values
def _values_dict_to_df(values_dict):
if not values_dict:
return pandas.DataFrame({})
df = pandas.DataFrame(values_dict)
df.index = df["Date - Time"].apply(util.convert_datetime)
df.drop("Date - Time", axis=1, inplace=True)
df.sort_index(inplace=True)
df.dropna(axis=1, how="all", inplace=True)
df.dropna(axis=0, how="all", inplace=True)
return df
def _get_row_values(row, columns):
value_els = row.findAll("td")
values = [_parse_val(value_el.get_text()) for value_el in value_els]
return dict(zip(columns, values))
def _get_data(site_code, parameter_code, list_request, start, end):
data_request_headers = {
"Date1": start.strftime("%m/%d/%Y"),
"Date2": end.strftime("%m/%d/%Y"),
"DropDownList1": site_code,
}
data_request_headers["DropDownList2"] = parameter_code
data_request = _make_next_request(
historical_data_url, list_request, data_request_headers
)
if data_request.status_code != 200:
return None
soup = BeautifulSoup(data_request.content, "html.parser")
columns = [col.get_text() for col in soup.findAll("th")]
values_dict = [_get_row_values(row, columns) for row in soup.findAll("tr")[1:]]
return values_dict
def _extract_headers_for_next_request(request):
payload = {}
for tag in BeautifulSoup(request.content, "html.parser").findAll("input"):
tag_dict = dict(tag.attrs)
if tag_dict.get("value", None) == "tabular":
            # skip the 'tabular' input so it is excluded from the next
            # request's payload
continue
# some tags don't have a value and are used w/ JS to toggle a set of checkboxes
payload[tag_dict["name"]] = tag_dict.get("value")
return payload
def _make_next_request(url, previous_request, data):
data_headers = _extract_headers_for_next_request(previous_request)
data_headers.update(data)
return requests.post(url, cookies=previous_request.cookies, data=data_headers)
def _parse_val(val):
    # the &nbsp; entity translates to the following unicode character
if val == u"\xa0":
return None
else:
return val
def _update_feature_props(feature, props):
if "datetime" in props.keys():
props["datetime"] = props["datetime"].strftime("%Y-%m-%d %H:%M:%S")
feature_props = feature["properties"]
feature_props.update(props)
feature["properties"] = feature_props
return feature
|
|
import json
import urllib
import re
import urllib2
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.template import defaultfilters
from dateutil.parser import *
from datetime import datetime
from django.utils.timezone import utc
from exceptions import YahooSearchException, TwitterRestAPIException
from settings import *
from tw_authentication import TwAuthentication
class Place(models.Model):
"""
Place Model
"""
created_at = models.DateTimeField(auto_now_add = True)
name = models.CharField(max_length=200, unique=True)
code = models.CharField(max_length=2, null=True, blank=True, unique=True)
woeid = models.IntegerField(null=True)
class Meta:
ordering = ['name']
verbose_name_plural = u'Places'
def __unicode__(self):
if self.woeid:
return u"%s, %i" % (self.name, self.woeid)
else:
return u"%s" % (self.name)
def get_woeid_from_yahoo(self):
"""
        Search for and return the WOEID number of this place using the Yahoo API. With this number we can obtain
        the top 10 trending topics from Twitter.
"""
if self.woeid is None:
json_data = json.load(urllib2.urlopen(YAHOO_QUERY_WOEID_URL + self.name + YAHOO_QUERY_WOEID_FORMAT))
if json_data['query']['results']['place'][0]['name'] == self.name:
self.woeid = json_data['query']['results']['place'][0]['woeid']
self.save()
else:
raise YahooSearchException(1000)
return self.woeid
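    # Illustrative usage sketch (not part of the original model); the place
    # name and WOEID shown are hypothetical:
    #
    #     place = Place.objects.get(name='London')
    #     place.get_woeid_from_yahoo()
    #     # -> 44418 (stored on the model after the first lookup)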
def get_trends(self, exclude_hashtags = False, get_tweets = True, result_type = 'mixed', count = '15'):
"""
Return the top 10 trending topics from Twitter using Rest API for a specific WOEID.
"""
client_authenticated = TwAuthentication()
if exclude_hashtags:
json_data = client_authenticated.get_client().request(BASE_URL + SEARCH_TRENDS_URL + str(self.woeid) +
EXCLUDE_HASHTAGS)
else:
json_data = client_authenticated.get_client().request(BASE_URL + SEARCH_TRENDS_URL + str(self.woeid))
if json_data[0].status == 200:
tendencias_json = json.loads(json_data[1].replace('null', '"null"'))
cont_importance = 1
for trend_item in tendencias_json[0]['trends']:
new_trend = Trend(
name = u''+trend_item['name'],
query = u''+trend_item['query'],
url = trend_item['url'],
importance = cont_importance,
place = self,
)
new_trend.set_promoted(trend_item['promoted_content'])
new_trend.set_trend_created_at(tendencias_json[0]['created_at'])
new_trend.save()
cont_importance += 1
#Update Tweets
if get_tweets:
new_trend.get_tweets(new_trend.name, self.code, result_type, count)
else:
raise TwitterRestAPIException(json_data[0].status)
class Trend(models.Model):
"""
Trend Model
"""
created_at = models.DateTimeField(auto_now_add = True)
name = models.CharField(max_length=200)
query = models.CharField(max_length=200)
slug = models.SlugField()
url = models.URLField()
trend_created_at = models.DateTimeField(null = True)
importance = models.SmallIntegerField()
promoted = models.BooleanField()
place = models.ForeignKey(Place)
class Meta:
ordering = ['-trend_created_at']
verbose_name_plural = u'Trends'
def get_trend_name_cleaned(self):
return re.sub('[^A-Za-z0-9]+', '', u''+ str(self.name.replace('#','').encode('ascii', 'ignore')))
def __unicode__(self):
return u"%s, %s, %s" % (self.name, self.place.name, self.url)
def set_promoted(self, promoted_field):
"""
        If the Twitter REST API returns null in the promoted field, this trend is not a promoted trend
"""
if promoted_field == 'null':
self.promoted = False
else:
self.promoted = True
def set_trend_created_at(self, datetime_string):
try:
self.trend_created_at = parse(datetime_string).replace(tzinfo=utc)
except ValueError:
self.trend_created_at = datetime.now()
def exist_trend(self, *args, **kwargs):
"""
        A trend is considered the same if the creation DateTime from Twitter is the same and the query, name and place match.
"""
try:
trend_created = Trend.objects.get(trend_created_at = self.trend_created_at,
query = self.query,
name = self.name,
place = self.place,
)
trend_created.importance = self.importance
super(Trend, self).save(*args, **kwargs)
return True
except ObjectDoesNotExist:
return False
def save(self, *args, **kwargs):
"""
        Only save the Trend if it does not already exist in the DB.
"""
if not self.exist_trend():
self.slug = defaultfilters.slugify(self.name)
super(Trend, self).save(*args, **kwargs)
def get_tweets(self, q, lang, result_type, count):
"""
Returns a collection of relevant Tweets matching a specified query.
Parameters:
q. Trend name.
lang. Restricts tweets to the given language. See http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
result_type. mixed: Include both popular and real time results in the response.
recent: return only the most recent results in the response.
popular: return only the most popular results in the response.
count. The number of tweets to return per page, up to a maximum of 100. Defaults to 15
"""
client_authenticated = TwAuthentication()
parameters = {
'q': self.get_trend_name_cleaned(),
'lang': lang,
'result_type': result_type,
'count': count,
}
json_data = client_authenticated.get_client().request(BASE_URL + SEARCH_TWEETS_URL + '?' + urllib.urlencode(parameters))
if json_data[0].status == 200:
tweets_json = json.loads(json_data[1].replace('null', '"null"'))
for tweet_item in tweets_json['statuses']:
new_tweet = Tweet(
tweet_twitter_id = tweet_item['id_str'],
language = tweet_item['lang'],
retweets_count = tweet_item['retweet_count'],
from_username = u''+ str(tweet_item['user']['name'].encode('ascii', 'ignore')),
from_userid = tweet_item['user']['id_str'],
user_screen_name = u''+ str(tweet_item['user']['screen_name'].encode('ascii', 'ignore')),
user_profile_image = tweet_item['user']['profile_image_url'],
tweet = u''+ str(tweet_item['text'].encode('ascii', 'ignore')),
trend = self,
)
new_tweet.set_tweet_published_at(tweet_item['created_at'])
new_tweet.save()
else:
raise TwitterRestAPIException(json_data[0].status)
class Tweet(models.Model):
"""
Tweet Model
"""
created_at = models.DateTimeField(auto_now_add = True)
tweet_twitter_id = models.CharField(max_length=150)
tweet_published_at = models.DateTimeField()
language = models.CharField(max_length=4)
retweets_count = models.IntegerField()
from_username = models.CharField(max_length=150)
from_userid = models.CharField(max_length=50)
user_screen_name = models.CharField(max_length=150)
user_profile_image = models.URLField(max_length=250)
tweet = models.CharField(max_length=160)
trend = models.ForeignKey(Trend)
class Meta:
ordering = ['-tweet_published_at']
verbose_name_plural = u'Tweets'
def __unicode__(self):
return u"%s, %s, %s" % (self.trend.place.name, self.trend.name, self.tweet)
def exist_tweet(self, *args, **kwargs):
"""
        A tweet is considered the same if the trend is the same, the language is the same, it was published at the
        same time and the Twitter id matches.
"""
try:
tweet_created = Tweet.objects.get(trend = self.trend,
tweet_published_at = self.tweet_published_at,
language = self.language,
tweet_twitter_id = self.tweet_twitter_id,
)
tweet_created.retweets_count = self.retweets_count
super(Tweet, self).save(*args, **kwargs)
return True
except ObjectDoesNotExist:
return False
def set_tweet_published_at(self, datetime_string):
try:
self.tweet_published_at = parse(datetime_string).replace(tzinfo=utc)
except ValueError:
self.tweet_published_at = datetime.now()
def save(self, *args, **kwargs):
"""
        Only save the tweet if it does not already exist in the DB.
"""
tweet_created = self.exist_tweet()
if not tweet_created:
super(Tweet, self).save(*args, **kwargs)
def get_mentions(self):
"""
Return a list of users mentioned in tweet
"""
mentions = []
patron_mentions = re.compile('(^|[^@\w])+@([A-Za-z0-9_]+)')
for m in patron_mentions.finditer(self.tweet):
mentions.append(m.group().strip())
return mentions
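    # Illustrative sketch (not part of the original model) of the mention
    # regex above; the tweet text is hypothetical:
    #
    #     Tweet(tweet='Thanks @alice and @bob_99!').get_mentions()
    #     # -> ['@alice', '@bob_99']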
|
|
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Backup driver for IBM Tivoli Storage Manager (TSM).
Implementation of a backup service that uses IBM Tivoli Storage Manager (TSM)
as the backend. The driver uses TSM command line dsmc utility to
run an image backup and restore.
This version supports backup of block devices, e.g., FC, iSCSI, local.
A prerequisite for using the IBM TSM backup service is configuring the
Cinder host for using TSM.
"""
import os
import stat
from oslo.config import cfg
from cinder.backup.driver import BackupDriver
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import utils
LOG = logging.getLogger(__name__)
tsmbackup_service_opts = [
cfg.StrOpt('backup_tsm_volume_prefix',
default='backup',
help='Volume prefix for the backup id when backing up to TSM'),
cfg.StrOpt('backup_tsm_password',
default='password',
help='TSM password for the running username'),
cfg.BoolOpt('backup_tsm_compression',
default=True,
help='Enable or Disable compression for backups'),
]
CONF = cfg.CONF
CONF.register_opts(tsmbackup_service_opts)
class TSMBackupDriver(BackupDriver):
"""Provides backup, restore and delete of volumes backup for TSM."""
DRIVER_VERSION = '1.0.0'
def __init__(self, context, db_driver=None):
self.context = context
self.tsm_password = CONF.backup_tsm_password
self.volume_prefix = CONF.backup_tsm_volume_prefix
super(TSMBackupDriver, self).__init__(db_driver)
def _make_link(self, volume_path, backup_path, vol_id):
"""Create a hard link for the volume block device.
The IBM TSM client performs an image backup on a block device.
The name of the block device is the backup prefix plus the backup id
:param volume_path: real device path name for volume
:param backup_path: path name TSM will use as volume to backup
:param vol_id: id of volume to backup (for reporting)
:raises: InvalidBackup
"""
try:
utils.execute('ln', volume_path, backup_path,
run_as_root=True,
check_exit_code=True)
except processutils.ProcessExecutionError as e:
err = (_('backup: %(vol_id)s Failed to create device hardlink '
'from %(vpath)s to %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': vol_id,
'vpath': volume_path,
'bpath': backup_path,
'out': e.stdout,
'err': e.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
def _check_dsmc_output(self, output, check_attrs):
"""Check dsmc command line utility output.
Parse the output of the dsmc command and make sure that a given
attribute is present, and that it has the proper value.
TSM attribute has the format of "text : value".
:param output: TSM output to parse
:param check_attrs: text to identify in the output
        :returns bool -- indicates whether the required output attributes were found in the output
"""
parsed_attrs = {}
for line in output.split('\n'):
            # parse TSM output: look for lines of the form "msg : value"
key, sep, val = line.partition(':')
if (sep is not None and key is not None and len(val.strip()) > 0):
parsed_attrs[key] = val.strip()
for k, v in check_attrs.iteritems():
if k not in parsed_attrs or parsed_attrs[k] != v:
return False
return True
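    # Illustrative sketch (not part of the original driver) of how the parser
    # above treats dsmc output; the sample text is hypothetical:
    #
    #     output = 'Total number of objects backed up: 1\n...'
    #     self._check_dsmc_output(output,
    #                             {'Total number of objects backed up': '1'})
    #     # -> True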
def _do_backup(self, backup_path, vol_id):
"""Perform the actual backup operation.
:param backup_path: volume path
:param vol_id: volume id
:raises: InvalidBackup
"""
backup_attrs = {'Total number of objects backed up': '1'}
compr_flag = 'yes' if CONF.backup_tsm_compression else 'no'
out, err = utils.execute('dsmc',
'backup',
'image',
'-quiet',
'-compression=%s' % compr_flag,
'-password=%s' % CONF.backup_tsm_password,
backup_path,
run_as_root=True,
check_exit_code=False)
success = self._check_dsmc_output(out, backup_attrs)
if not success:
err = (_('backup: %(vol_id)s Failed to obtain backup '
'success notification from server.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': vol_id,
'out': out,
'err': err})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
def _do_restore(self, restore_path, vol_id):
"""Perform the actual restore operation.
:param restore_path: volume path
:param vol_id: volume id
:raises: InvalidBackup
"""
restore_attrs = {'Total number of objects restored': '1'}
out, err = utils.execute('dsmc',
'restore',
'image',
'-quiet',
'-password=%s' % self.tsm_password,
'-noprompt',
restore_path,
run_as_root=True,
check_exit_code=False)
success = self._check_dsmc_output(out, restore_attrs)
if not success:
err = (_('restore: %(vol_id)s Failed.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': vol_id,
'out': out,
'err': err})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
def _get_volume_realpath(self, volume_file, volume_id):
"""Get the real path for the volume block device.
If the volume is not a block device then issue an
        InvalidBackup exception.
:param volume_file: file object representing the volume
:param volume_id: Volume id for backup or as restore target
:raises: InvalidBackup
:returns str -- real path of volume device
"""
try:
# Get real path
volume_path = os.path.realpath(volume_file.name)
# Verify that path is a block device
volume_mode = os.stat(volume_path).st_mode
if not stat.S_ISBLK(volume_mode):
err = (_('backup: %(vol_id)s Failed. '
'%(path)s is not a block device.')
% {'vol_id': volume_id,
'path': volume_path})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except AttributeError as e:
err = (_('backup: %(vol_id)s Failed. Cannot obtain real path '
'to device %(path)s.')
% {'vol_id': volume_id,
'path': volume_file})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except OSError as e:
err = (_('backup: %(vol_id)s Failed. '
'%(path)s is not a file.')
% {'vol_id': volume_id,
'path': volume_path})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
return volume_path
def _create_device_link_using_backupid(self,
backup_id,
volume_path,
volume_id):
"""Create a consistent hardlink for the volume block device.
Create a consistent hardlink using the backup id so TSM
will be able to backup and restore to the same block device.
:param backup_id: the backup id
:param volume_path: real path of the backup/restore device
:param volume_id: Volume id for backup or as restore target
:raises: InvalidBackup
:returns str -- hardlink path of the volume block device
"""
hardlink_path = utils.make_dev_path('%s-%s' %
(self.volume_prefix,
backup_id))
self._make_link(volume_path, hardlink_path, volume_id)
return hardlink_path
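    # Illustrative sketch (not part of the original driver); the resulting
    # path depends on utils.make_dev_path and is an assumption here:
    #
    #     link = self._create_device_link_using_backupid('abcd', '/dev/sdb', 'vol-1')
    #     # -> e.g. '/dev/backup-abcd', hardlinked to /dev/sdb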
def _cleanup_device_hardlink(self,
hardlink_path,
volume_path,
volume_id):
"""Remove the hardlink for the volume block device.
:param hardlink_path: hardlink to the volume block device
:param volume_path: real path of the backup/restore device
:param volume_id: Volume id for backup or as restore target
"""
try:
utils.execute('rm',
'-f',
hardlink_path,
run_as_root=True)
except processutils.ProcessExecutionError as e:
err = (_('backup: %(vol_id)s Failed to remove backup hardlink'
' from %(vpath)s to %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'vpath': volume_path,
'bpath': hardlink_path,
'out': e.stdout,
'err': e.stderr})
LOG.error(err)
def backup(self, backup, volume_file):
"""Backup the given volume to TSM.
TSM performs an image backup of a volume. The volume_file is
used to determine the path of the block device that TSM will
back-up.
:param backup: backup information for volume
:param volume_file: file object representing the volume
:raises InvalidBackup
"""
backup_id = backup['id']
volume_id = backup['volume_id']
volume_path = self._get_volume_realpath(volume_file, volume_id)
LOG.debug(_('starting backup of volume: %(volume_id)s to TSM,'
' volume path: %(volume_path)s,')
% {'volume_id': volume_id,
'volume_path': volume_path})
backup_path = \
self._create_device_link_using_backupid(backup_id,
volume_path,
volume_id)
try:
self._do_backup(backup_path, volume_id)
except processutils.ProcessExecutionError as e:
err = (_('backup: %(vol_id)s Failed to run dsmc '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': backup_path,
'out': e.stdout,
'err': e.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except exception.Error as e:
err = (_('backup: %(vol_id)s Failed to run dsmc '
'due to invalid arguments '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': backup_path,
'out': e.stdout,
'err': e.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
finally:
self._cleanup_device_hardlink(backup_path,
volume_path,
volume_id)
LOG.debug(_('backup %s finished.') % backup_id)
def restore(self, backup, volume_id, volume_file):
"""Restore the given volume backup from TSM server.
:param backup: backup information for volume
:param volume_id: volume id
:param volume_file: file object representing the volume
:raises InvalidBackup
"""
backup_id = backup['id']
volume_path = self._get_volume_realpath(volume_file, volume_id)
LOG.debug(_('restore: starting restore of backup from TSM'
' to volume %(volume_id)s, '
' backup: %(backup_id)s')
% {'volume_id': volume_id,
'backup_id': backup_id})
restore_path = \
self._create_device_link_using_backupid(backup_id,
volume_path,
volume_id)
try:
self._do_restore(restore_path, volume_id)
except processutils.ProcessExecutionError as e:
err = (_('restore: %(vol_id)s Failed to run dsmc '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': restore_path,
'out': e.stdout,
'err': e.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except exception.Error as e:
err = (_('restore: %(vol_id)s Failed to run dsmc '
'due to invalid arguments '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': restore_path,
'out': e.stdout,
'err': e.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
finally:
self._cleanup_device_hardlink(restore_path,
volume_path,
volume_id)
LOG.debug(_('restore %(backup_id)s to %(volume_id)s finished.')
% {'backup_id': backup_id,
'volume_id': volume_id})
def delete(self, backup):
"""Delete the given backup from TSM server.
:param backup: backup information for volume
:raises InvalidBackup
"""
delete_attrs = {'Total number of objects deleted': '1'}
volume_id = backup['volume_id']
backup_id = backup['id']
LOG.debug('delete started, backup: %s',
backup['id'])
volume_path = utils.make_dev_path('%s-%s' %
(self.volume_prefix, backup_id))
try:
out, err = utils.execute('dsmc',
'delete',
'backup',
'-quiet',
'-noprompt',
'-objtype=image',
'-deltype=all',
'-password=%s' % self.tsm_password,
volume_path,
run_as_root=True,
check_exit_code=False)
except processutils.ProcessExecutionError as e:
err = (_('delete: %(vol_id)s Failed to run dsmc with '
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'out': e.stdout,
'err': e.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except exception.Error as e:
            err = (_('delete: %(vol_id)s Failed to run dsmc '
'due to invalid arguments with '
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'out': e.stdout,
'err': e.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
success = self._check_dsmc_output(out, delete_attrs)
if not success:
err = (_('delete: %(vol_id)s Failed with '
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'out': out,
'err': err})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
LOG.debug(_('delete %s finished') % backup['id'])
def get_backup_driver(context):
return TSMBackupDriver(context)
|
|
data = (
'AUM', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
' // ', # 0x08
' * ', # 0x09
'', # 0x0a
'-', # 0x0b
' / ', # 0x0c
' / ', # 0x0d
' // ', # 0x0e
' -/ ', # 0x0f
' +/ ', # 0x10
' X/ ', # 0x11
' /XX/ ', # 0x12
' /X/ ', # 0x13
', ', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'0', # 0x20
'1', # 0x21
'2', # 0x22
'3', # 0x23
'4', # 0x24
'5', # 0x25
'6', # 0x26
'7', # 0x27
'8', # 0x28
'9', # 0x29
'.5', # 0x2a
'1.5', # 0x2b
'2.5', # 0x2c
'3.5', # 0x2d
'4.5', # 0x2e
'5.5', # 0x2f
'6.5', # 0x30
'7.5', # 0x31
'8.5', # 0x32
'-.5', # 0x33
'+', # 0x34
'*', # 0x35
'^', # 0x36
'_', # 0x37
'', # 0x38
'~', # 0x39
'[?]', # 0x3a
']', # 0x3b
'[[', # 0x3c
']]', # 0x3d
'', # 0x3e
'', # 0x3f
'k', # 0x40
'kh', # 0x41
'g', # 0x42
'gh', # 0x43
'ng', # 0x44
'c', # 0x45
'ch', # 0x46
'j', # 0x47
'[?]', # 0x48
'ny', # 0x49
'tt', # 0x4a
'tth', # 0x4b
'dd', # 0x4c
'ddh', # 0x4d
'nn', # 0x4e
't', # 0x4f
'th', # 0x50
'd', # 0x51
'dh', # 0x52
'n', # 0x53
'p', # 0x54
'ph', # 0x55
'b', # 0x56
'bh', # 0x57
'm', # 0x58
'ts', # 0x59
'tsh', # 0x5a
'dz', # 0x5b
'dzh', # 0x5c
'w', # 0x5d
'zh', # 0x5e
'z', # 0x5f
'\'', # 0x60
'y', # 0x61
'r', # 0x62
'l', # 0x63
'sh', # 0x64
'ssh', # 0x65
's', # 0x66
'h', # 0x67
'a', # 0x68
'kss', # 0x69
'r', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'aa', # 0x71
'i', # 0x72
'ii', # 0x73
'u', # 0x74
'uu', # 0x75
'R', # 0x76
'RR', # 0x77
'L', # 0x78
'LL', # 0x79
'e', # 0x7a
'ee', # 0x7b
'o', # 0x7c
'oo', # 0x7d
'M', # 0x7e
'H', # 0x7f
'i', # 0x80
'ii', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'k', # 0x90
'kh', # 0x91
'g', # 0x92
'gh', # 0x93
'ng', # 0x94
'c', # 0x95
'ch', # 0x96
'j', # 0x97
'[?]', # 0x98
'ny', # 0x99
'tt', # 0x9a
'tth', # 0x9b
'dd', # 0x9c
'ddh', # 0x9d
'nn', # 0x9e
't', # 0x9f
'th', # 0xa0
'd', # 0xa1
'dh', # 0xa2
'n', # 0xa3
'p', # 0xa4
'ph', # 0xa5
'b', # 0xa6
'bh', # 0xa7
'm', # 0xa8
'ts', # 0xa9
'tsh', # 0xaa
'dz', # 0xab
'dzh', # 0xac
'w', # 0xad
'zh', # 0xae
'z', # 0xaf
'\'', # 0xb0
'y', # 0xb1
'r', # 0xb2
'l', # 0xb3
'sh', # 0xb4
'ss', # 0xb5
's', # 0xb6
'h', # 0xb7
'a', # 0xb8
'kss', # 0xb9
'w', # 0xba
'y', # 0xbb
'r', # 0xbc
'[?]', # 0xbd
'X', # 0xbe
' :X: ', # 0xbf
' /O/ ', # 0xc0
' /o/ ', # 0xc1
' \\o\\ ', # 0xc2
' (O) ', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
|
import string
from cachecow.cache import invalidate_namespace
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.db import models
from django.db.models import signals
from django.utils.translation import ugettext, ugettext_lazy as _, pgettext_lazy
from facebook import GraphAPI, GraphAPIError
from canvas import json, bgwork
from canvas.util import papertrail
from canvas.cache_patterns import CachedCall
from canvas.exceptions import ServiceError
from canvas.models import Content, UserInfo, FacebookUser, UserRedis, Visibility
from canvas.upload import upload_from_url
from configuration import Config
from drawquest import knobs
from drawquest.activities import WelcomeActivity
from website.apps.canvas_auth.models import User as CanvasUser, AnonymousUser
class User(CanvasUser):
class Meta:
proxy = True
def __repr__(self):
return '<drawquest user: {}>'.format(self.username)
def delete(self):
"""
        DON'T USE THIS except in extreme circumstances. Instead, just set is_active=False.
Has leftover side-effects like not updating follower/following count. Only use this
if you're prepared to fix it, or do some manual work afterward.
"""
#raise Exception("Remove this exception first if you're sure you want to do this.")
from drawquest.apps.playback.models import Playback
from drawquest.apps.quest_comments.models import QuestComment
from drawquest.apps.stars.models import Unstar
from drawquest.apps.iap.models import IapReceipt
from apps.canvas_auth.models import User as CanvasUser
#self.activity_set.all().update(actor=None)
Playback.objects.filter(viewer=self).delete()
Unstar.objects.filter(user=self).delete()
IapReceipt.objects.filter(purchaser=self).delete()
for comment in self.comments.all():
quest_comment = QuestComment.all_objects.get(pk=comment.pk)
quest_comment.playbacks.all().delete()
comment.delete()
CanvasUser.objects.get(pk=self.pk).delete()
invalidate_namespace('comments')
@classmethod
def validate_username(cls, username, skip_uniqueness_check=False):
""" Returns None if the username is valid and does not exist. """
from drawquest.urls import PROTECTED_URLS
un = username.lower()
if (un in Config['blocked_usernames']
or any(fragment in un for fragment in Config['blocked_username_fragments'])
or any(fragment in un for fragment in Config['autoflag_words'])
or un in PROTECTED_URLS):
return _("Sorry, this username is not allowed.")
if not un:
return _("Please enter a username.")
elif len(un) < 3:
return _("Username must be 3 or more characters.")
elif len(un) > 16:
return _("Username must be 16 characters or less.")
alphabet = string.lowercase + string.uppercase + string.digits + '_'
if not all(char in alphabet for char in username):
return _("Usernames can only contain letters, digits and underscores.")
if not skip_uniqueness_check:
if cls.objects.filter(username__iexact=username):
return _("Sorry! This username is taken. Please pick a different username.")
@classmethod
def upload_avatar_from_url(cls, request, url):
resp = upload_from_url(request, url)
return Content.all_objects.get_or_none(id=resp['content']['id'])
def migrate_facebook_avatar(self, request, facebook_access_token):
from drawquest.models import user_profile
try:
fb = GraphAPI(facebook_access_token)
avatar = fb.get_object('me/picture', type='large', redirect='false')['data']
except (GraphAPIError, IOError,):
return
if avatar.get('is_silhouette'):
return
self.userinfo.avatar = User.upload_avatar_from_url(request, avatar.get('url'))
self.userinfo.save()
self.details.force()
user_profile.delete_cache(self.username)
def migrate_twitter_avatar(self, request, twitter_access_token, twitter_access_token_secret):
from drawquest.models import user_profile
from drawquest.apps.twitter.models import Twitter, TwitterError
try:
avatar_url = Twitter(twitter_access_token, twitter_access_token_secret).avatar_url()
except TwitterError:
return
if avatar_url is None:
return
self.userinfo.avatar = User.upload_avatar_from_url(request, avatar_url)
self.userinfo.save()
self.details.force()
user_profile.delete_cache(self.username)
def _details(self):
avatar_id = self.id % 9
def avatar_url(type_, retina=True):
retina_suffix = '@2x' if retina else ''
return 'https://{}/static/img/drawquest/avatar_default_{}_{}{}.png'.format(settings.DOMAIN, type_, avatar_id, retina_suffix)
ret = {
'id': self.id,
'username': self.username,
'email': self.email,
'avatar_url': 'https://{}/static/img/drawquest/avatar_default.png'.format(settings.DOMAIN),
'avatar_urls': {
'gallery': {
'1x': avatar_url('gallery', retina=False),
'2x': avatar_url('gallery', retina=True),
},
'profile': {
'1x': avatar_url('profile', retina=False),
'2x': avatar_url('profile', retina=True),
},
}
}
try:
if self.userinfo.avatar:
archive_url = self.userinfo.avatar.details().get_absolute_url_for_image_type('archive')
gallery_url = self.userinfo.avatar.details().get_absolute_url_for_image_type('gallery')
ret['avatar_url'] = archive_url
ret['avatar_urls'] = {
'gallery': {
'1x': archive_url,
'2x': archive_url,
},
'profile': {
'1x': archive_url,
'2x': gallery_url,
},
}
except UserInfo.DoesNotExist:
pass
return ret
@classmethod
def details_by_id(cls, user_id, promoter=None):
from drawquest.apps.drawquest_auth.details_models import UserDetails
if promoter is None:
promoter = UserDetails
def inner_call():
return cls.objects.get(id=user_id)._details()
return CachedCall(
'dq:user:{}:details_v11'.format(user_id),
inner_call,
14*24*60*60,
promoter=promoter,
)
@classmethod
def details_by_username(cls, username, promoter=None):
from drawquest.apps.drawquest_auth.details_models import UserDetails
if promoter is None:
promoter = UserDetails
def inner_call():
return cls.objects.get(username=username)._details()
return CachedCall(
'dq:user:{}:details_v2'.format(username),
inner_call,
30*24*60*60,
promoter=promoter,
)
@property
def details(self):
return self.details_by_id(self.id)
def invalidate_details(self):
self.details.force()
self.details_by_username(self.username).force()
def to_client(self, **kwargs):
return self.details().to_client()
def comment_count(self, viewer=None):
from drawquest.apps.quest_comments.models import QuestComment
if viewer is not None and viewer.is_authenticated() and viewer.id == self.id:
return QuestComment.objects.filter(author=self, visibility__in=[Visibility.PUBLIC, Visibility.CURATED]).count()
return QuestComment.objects.filter(author=self, visibility=Visibility.PUBLIC).count()
def violation_count(self):
""" Number of times this user's drawings have been removed. """
from canvas.models import Visibility
from drawquest.apps.quest_comments.models import QuestComment
return QuestComment.objects.filter(author=self, judged=True, visibility=Visibility.DISABLED).count()
def follow(self, user_to_follow):
super(User, self).follow(user_to_follow)
if user_to_follow.redis.new_followers.zcard() > knobs.AUTO_MODERATION['followers']:
user_to_follow.userinfo.trusted = True
user_to_follow.userinfo.save()
def following_ids(self):
return [int(id_) for id_ in self.redis.new_following.zrange(0, -1)]
def activate(self, activator):
if self.id != activator.id and not activator.is_staff:
raise PermissionDenied("Must be staff to deactivate a user.")
self.is_active = True
self.save()
self.details.force()
def deactivate(self, deactivator):
if self.id != deactivator.id and not deactivator.is_staff:
raise PermissionDenied("Must be staff to deactivate a user.")
for comment in self.comments.all():
if comment.is_visible():
comment.moderate_and_save(Visibility.UNPUBLISHED, deactivator, undoing=True)
self.is_active = False
self.save()
self.details.force()
def associate_facebook_account(user, facebook_access_token, request=None):
try:
fb_user = FacebookUser.get_or_create_from_access_token(facebook_access_token, request=request)
except GraphAPIError as e:
papertrail.debug(u'GraphAPIError inside associate_facebook_account: {} (user: {}, token: {})'.format(e.message, user.username, facebook_access_token))
raise ServiceError(_("There appears to be an issue communicating with Facebook. Please try again."))
try:
existing_fb_user = user.facebookuser
if existing_fb_user.fb_uid == fb_user.fb_uid:
return
existing_fb_user.user = None
existing_fb_user.save()
except FacebookUser.DoesNotExist:
pass
fb_user.user = user
fb_user.save()
@bgwork.defer
def notify_friends():
fb_user.notify_friends_of_signup(facebook_access_token)
fb_user.respond_to_apprequest_invites(facebook_access_token)
def user_post_save(sender, instance, created, **kwargs):
if created:
welcome_activity = WelcomeActivity()
UserRedis(instance.id).activity_stream.push(welcome_activity)
UserRedis(instance.id).iphone_activity_stream.push(welcome_activity)
signals.post_save.connect(user_post_save, sender=User)
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from django.apps.registry import Apps, apps
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation
)
from django.contrib.contenttypes import management
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.db import connections, models, router
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.encoding import force_str
from django.utils.six import StringIO
from .models import Author, Article, SchemeIncludedURL
@override_settings(ROOT_URLCONF='contenttypes_tests.urls')
class ContentTypesViewsTests(TestCase):
fixtures = ['testdata.json']
def test_shortcut_with_absolute_url(self):
"Can view a shortcut for an Author object that has a get_absolute_url method"
for obj in Author.objects.all():
short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, obj.pk)
response = self.client.get(short_url)
self.assertRedirects(response, 'http://testserver%s' % obj.get_absolute_url(),
status_code=302, target_status_code=404)
def test_shortcut_with_absolute_url_including_scheme(self):
"""
Can view a shortcut when object's get_absolute_url returns a full URL
        The tested URLs are in fixtures/testdata.json:
"http://...", "https://..." and "//..."
"""
for obj in SchemeIncludedURL.objects.all():
short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(SchemeIncludedURL).id, obj.pk)
response = self.client.get(short_url)
self.assertRedirects(response, obj.get_absolute_url(),
status_code=302,
fetch_redirect_response=False)
def test_shortcut_no_absolute_url(self):
"Shortcuts for an object that has no get_absolute_url method raises 404"
for obj in Article.objects.all():
short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Article).id, obj.pk)
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_wrong_type_pk(self):
short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, 'nobody/expects')
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_shortcut_bad_pk(self):
short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, '42424242')
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_nonint_content_type(self):
an_author = Author.objects.all()[0]
short_url = '/shortcut/%s/%s/' % ('spam', an_author.pk)
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_bad_content_type(self):
an_author = Author.objects.all()[0]
short_url = '/shortcut/%s/%s/' % (42424242, an_author.pk)
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_create_contenttype_on_the_spot(self):
"""
Make sure ContentTypeManager.get_for_model creates the corresponding
content type if it doesn't exist in the database (for some reason).
"""
class ModelCreatedOnTheFly(models.Model):
name = models.CharField()
class Meta:
verbose_name = 'a model created on the fly'
app_label = 'my_great_app'
apps = Apps()
ct = ContentType.objects.get_for_model(ModelCreatedOnTheFly)
self.assertEqual(ct.app_label, 'my_great_app')
self.assertEqual(ct.model, 'modelcreatedonthefly')
self.assertEqual(ct.name, 'a model created on the fly')
class IsolatedModelsTestCase(TestCase):
def setUp(self):
# The unmanaged models need to be removed after the test in order to
# prevent bad interactions with the flush operation in other tests.
self._old_models = apps.app_configs['contenttypes_tests'].models.copy()
def tearDown(self):
apps.app_configs['contenttypes_tests'].models = self._old_models
apps.all_models['contenttypes_tests'] = self._old_models
apps.clear_cache()
class GenericForeignKeyTests(IsolatedModelsTestCase):
def test_str(self):
class Model(models.Model):
field = GenericForeignKey()
expected = "contenttypes_tests.Model.field"
actual = force_str(Model.field)
self.assertEqual(expected, actual)
def test_missing_content_type_field(self):
class TaggedItem(models.Model):
# no content_type field
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
errors = TaggedItem.content_object.check()
expected = [
checks.Error(
"The GenericForeignKey content type references the non-existent field 'TaggedItem.content_type'.",
hint=None,
obj=TaggedItem.content_object,
id='contenttypes.E002',
)
]
self.assertEqual(errors, expected)
def test_invalid_content_type_field(self):
class Model(models.Model):
content_type = models.IntegerField() # should be ForeignKey
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey(
'content_type', 'object_id')
errors = Model.content_object.check()
expected = [
checks.Error(
"'Model.content_type' is not a ForeignKey.",
hint="GenericForeignKeys must use a ForeignKey to 'contenttypes.ContentType' as the 'content_type' field.",
obj=Model.content_object,
id='contenttypes.E003',
)
]
self.assertEqual(errors, expected)
def test_content_type_field_pointing_to_wrong_model(self):
class Model(models.Model):
content_type = models.ForeignKey('self') # should point to ContentType
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey(
'content_type', 'object_id')
errors = Model.content_object.check()
expected = [
checks.Error(
"'Model.content_type' is not a ForeignKey to 'contenttypes.ContentType'.",
hint="GenericForeignKeys must use a ForeignKey to 'contenttypes.ContentType' as the 'content_type' field.",
obj=Model.content_object,
id='contenttypes.E004',
)
]
self.assertEqual(errors, expected)
def test_missing_object_id_field(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType)
# missing object_id field
content_object = GenericForeignKey()
errors = TaggedItem.content_object.check()
expected = [
checks.Error(
"The GenericForeignKey object ID references the non-existent field 'object_id'.",
hint=None,
obj=TaggedItem.content_object,
id='contenttypes.E001',
)
]
self.assertEqual(errors, expected)
def test_field_name_ending_with_underscore(self):
class Model(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object_ = GenericForeignKey(
'content_type', 'object_id')
errors = Model.content_object_.check()
expected = [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=Model.content_object_,
id='fields.E001',
)
]
self.assertEqual(errors, expected)
def test_generic_foreign_key_checks_are_performed(self):
class MyGenericForeignKey(GenericForeignKey):
def check(self, **kwargs):
return ['performed!']
class Model(models.Model):
content_object = MyGenericForeignKey()
errors = checks.run_checks()
self.assertEqual(errors, ['performed!'])
def test_unsaved_instance_on_generic_foreign_key(self):
"""
#10811 -- Assigning an unsaved object to GenericForeignKey
should raise an exception.
"""
class Model(models.Model):
content_type = models.ForeignKey(ContentType, null=True)
object_id = models.PositiveIntegerField(null=True)
content_object = GenericForeignKey('content_type', 'object_id')
author = Author(name='Author')
model = Model()
model.content_object = None # no error here as content_type allows None
with self.assertRaisesMessage(ValueError,
'Cannot assign "%r": "%s" instance isn\'t saved in the database.'
% (author, author._meta.object_name)):
model.content_object = author # raised ValueError here as author is unsaved
author.save()
model.content_object = author # no error because the instance is saved
class GenericRelationshipTests(IsolatedModelsTestCase):
def test_valid_generic_relationship(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Bookmark(models.Model):
tags = GenericRelation('TaggedItem')
errors = Bookmark.tags.field.check()
self.assertEqual(errors, [])
def test_valid_generic_relationship_with_explicit_fields(self):
class TaggedItem(models.Model):
custom_content_type = models.ForeignKey(ContentType)
custom_object_id = models.PositiveIntegerField()
content_object = GenericForeignKey(
'custom_content_type', 'custom_object_id')
class Bookmark(models.Model):
tags = GenericRelation('TaggedItem',
content_type_field='custom_content_type',
object_id_field='custom_object_id')
errors = Bookmark.tags.field.check()
self.assertEqual(errors, [])
def test_pointing_to_missing_model(self):
class Model(models.Model):
rel = GenericRelation('MissingModel')
errors = Model.rel.field.check()
expected = [
checks.Error(
("Field defines a relation with model 'MissingModel', "
"which is either not installed, or is abstract."),
hint=None,
obj=Model.rel.field,
id='fields.E300',
)
]
self.assertEqual(errors, expected)
def test_valid_self_referential_generic_relationship(self):
class Model(models.Model):
rel = GenericRelation('Model')
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey(
'content_type', 'object_id')
errors = Model.rel.field.check()
self.assertEqual(errors, [])
def test_missing_generic_foreign_key(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
class Bookmark(models.Model):
tags = GenericRelation('TaggedItem')
errors = Bookmark.tags.field.check()
expected = [
checks.Error(
("The GenericRelation defines a relation with the model "
"'contenttypes_tests.TaggedItem', but that model does not have a "
"GenericForeignKey."),
hint=None,
obj=Bookmark.tags.field,
id='contenttypes.E004',
)
]
self.assertEqual(errors, expected)
@override_settings(TEST_SWAPPED_MODEL='contenttypes_tests.Replacement')
def test_pointing_to_swapped_model(self):
class Replacement(models.Model):
pass
class SwappedModel(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
swappable = 'TEST_SWAPPED_MODEL'
class Model(models.Model):
rel = GenericRelation('SwappedModel')
errors = Model.rel.field.check()
expected = [
checks.Error(
("Field defines a relation with the model "
"'contenttypes_tests.SwappedModel', "
"which has been swapped out."),
hint="Update the relation to point at 'settings.TEST_SWAPPED_MODEL'.",
obj=Model.rel.field,
id='fields.E301',
)
]
self.assertEqual(errors, expected)
def test_field_name_ending_with_underscore(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class InvalidBookmark(models.Model):
tags_ = GenericRelation('TaggedItem')
errors = InvalidBookmark.tags_.field.check()
expected = [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=InvalidBookmark.tags_.field,
id='fields.E001',
)
]
self.assertEqual(errors, expected)
class UpdateContentTypesTests(TestCase):
def setUp(self):
self.before_count = ContentType.objects.count()
ContentType.objects.create(name='fake', app_label='contenttypes_tests', model='Fake')
self.app_config = apps.get_app_config('contenttypes_tests')
self.old_stdout = sys.stdout
sys.stdout = StringIO()
def tearDown(self):
sys.stdout = self.old_stdout
def test_interactive_true(self):
"""
interactive mode of update_contenttypes() (the default) should delete
stale contenttypes.
"""
management.input = lambda x: force_str("yes")
management.update_contenttypes(self.app_config)
self.assertIn("Deleting stale content type", sys.stdout.getvalue())
self.assertEqual(ContentType.objects.count(), self.before_count)
def test_interactive_false(self):
"""
non-interactive mode of update_contenttypes() shouldn't delete stale
content types.
"""
management.update_contenttypes(self.app_config, interactive=False)
self.assertIn("Stale content types remain.", sys.stdout.getvalue())
self.assertEqual(ContentType.objects.count(), self.before_count + 1)
class TestRouter(object):
def db_for_read(self, model, **hints):
return 'other'
def db_for_write(self, model, **hints):
return 'default'
class ContentTypesMultidbTestCase(TestCase):
def setUp(self):
self.old_routers = router.routers
router.routers = [TestRouter()]
# Whenever a test starts executing, only the "default" database is
# connected. We explicitly connect to the "other" database here. If we
# don't do it, then it will be implicitly connected later when we query
# it, but in that case some database backends may automatically perform
# extra queries upon connecting (notably mysql executes
# "SET SQL_AUTO_IS_NULL = 0"), which will affect assertNumQueries().
connections['other'].ensure_connection()
def tearDown(self):
router.routers = self.old_routers
def test_multidb(self):
"""
Test that, when using multiple databases, we use the db_for_read (see
#20401).
"""
ContentType.objects.clear_cache()
with self.assertNumQueries(0, using='default'), \
self.assertNumQueries(1, using='other'):
ContentType.objects.get_for_model(Author)
|
|
# -*- coding: utf-8 -*-
# An extension module for rst2pdf
# Copyright 2010, Patrick Maupin
# See LICENSE.txt for licensing terms
"""
preprocess is a rst2pdf extension module (invoked by -e preprocess
on the rst2pdf command line).
There is a testcase for this file at rst2pdf/tests/test_preprocess.txt
This preprocesses the source text file before handing it to docutils.
This module serves two purposes:
1) It demonstrates the technique and can be a starting point for similar
user-written processing modules; and
2) It provides a simplified syntax for documents which are targeted only
at rst2pdf, rather than docutils in general.
The design goal of "base rst2pdf" is to be completely compatible with
docutils, such that a file which works as a PDF can also work as HTML,
etc.
Unfortunately, base docutils is a slow-moving target, and does not
make this easy. For example, SVG images do not work properly with
the HTML backend unless you install a patch, and docutils has no
concept of page breaks or additional vertical space (other than
the <hr>).
So, while it would be nice to have documents that render perfectly
with any backend, this goal is hard to achieve for some documents,
and once you are restricted to a particular transformation type,
then you might as well have a slightly nicer syntax for your source
document.
-----------------------------------------------------------------
Preprocessor extensions:
All current extensions except style occupy a single line in the
source file.
``.. include::``
Processes the include file as well. An include file may
either be a restructured text file, OR may be an RSON or
JSON stylesheet. The determination is made by trying to
parse it as RSON. If it passes, it is a stylesheet; if not,
well, we'll let the docutils parser have its way with it.
``.. page::``
Is translated into a raw PageBreak.
``.. space::``
Is translated into a raw Spacer. If only one number given, is
used for vertical space. This is the canonical use case, since
horizontal space is ignored anyway!
``.. style::``
Allows you to create in-line stylesheets. As with other
restructured text components, the stylesheet data must
be indented. Stylesheets are in RSON or JSON.
``.. widths::``
creates a new table style (based on table or the first
non-numeric token) and creates a class using that style
specifically for the next table in the document. (Creates
a .. class::, so you must specify .. widths:: immediately
before the table it applies to. Allows you to set the
widths for the table, using percentages.
``SingleWordAtLeftColumn``
If a single word at the left column is surrounded by
blank lines, the singleword style is automatically applied to
the word. This is a workaround for the broken interaction
between docutils subtitles and bibliographic metadata. (I
found that docutils was referencing my subtitles from inside
the TOC, and that seemed silly. Perhaps there is a better
workaround at a lower level in rst2pdf.)
-----------------------------------------------------------------
Preprocessor operation:
The preprocessor generates a file that has the same name as the source
file, with .build_temp. embedded in the name, and then passes that
file to the restructured text parser.
This file is left on the disk after operation, because any error
messages from docutils will refer to line numbers in it, rather than
in the original source, so debugging could be difficult if the
file were automatically removed.
"""
import os
import re
from rson import loads as rson_loads
from rst2pdf.log import log
# TODO: using StringIO here would not really be overkill; consider switching.
class DummyFile(str):
"""
We could use stringio, but that's really overkill for what we need here.
"""
def read(self):
return self


class Preprocess:
    def __init__(self, sourcef, incfile=False, widthcount=0):
        """
        Process a file and decorate the resulting Preprocess instance with
        self.result (the preprocessed file) and self.styles (extracted
        stylesheet information) for the caller.
        """
        self.widthcount = widthcount
        name = sourcef.name
        source = sourcef.read().replace('\r\n', '\n').replace('\r', '\n')

        # Make the determination if an include file is a stylesheet or
        # another restructured text file, and handle stylesheets appropriately.
        if incfile:
            try:
                self.styles = styles = rson_loads(source)
                substyles = styles.get('styles')
                if substyles is not None:
                    styles['styles'] = dict(substyles)
            except Exception:
                # Not parseable as RSON/JSON; treat it as restructured text.
                pass
            else:
                self.changed = True
                self.keep = False
                return

        # Read the whole file and wrap it in a DummyFile
        self.sourcef = DummyFile(source)
        self.sourcef.name = name

        # Use a regular expression on the source, to take it apart
        # and put it back together again.
        self.source = source = [x for x in self.splitter(source) if x]
        self.result = result = []
        self.styles = {}
        self.changed = False

        # More efficient to pop() a list than to keep taking tokens from [0]
        source.reverse()

        isblank = False
        keywords = self.keywords
        handle_single = keywords['single::']

        while source:
            wasblank = isblank
            isblank = False
            chunk = source.pop()
            result.append(chunk)

            # Only process single lines
            if not chunk.endswith('\n'):
                continue
            result[-1] = chunk[:-1]
            if chunk.index('\n') != len(chunk) - 1:
                continue

            # Parse the line to look for one of our keywords.
            tokens = chunk.split()
            isblank = not tokens
            if len(tokens) >= 2 and tokens[0] == '..' and tokens[1].endswith('::'):
                func = keywords.get(tokens[1])
                if func is None:
                    continue
                chunk = chunk.split('::', 1)[1]
            elif (wasblank and len(tokens) == 1 and
                  chunk[0].isalpha() and tokens[0].isalpha()):
                func = handle_single
                chunk = tokens[0]
            else:
                continue
            result.pop()
            func(self, chunk.strip())

        # Determine if we actually did anything or not.  Just use our source
        # file if not.  Otherwise, write the results to disk (so the user can
        # use them for debugging) and return them.
        if self.changed:
            result.append('')
            result = DummyFile('\n'.join(result))
            result.name = name + '.build_temp'
            self.keep = keep = len(result.strip())
            if keep:
                with open(result.name, 'w') as f:
                    f.write(result)
            self.result = result
        else:
            self.result = self.sourcef

    def handle_include(self, fname):
        # Ugly, violates DRY, etc., but I'm not about to go
        # figure out how to re-use docutils include file
        # path processing!
        for prefix in ('', os.path.dirname(self.sourcef.name)):
            if os.path.exists(os.path.join(prefix, fname)):
                break
        else:
            log.error("Could not find include file %s", fname)
            self.changed = True
            return

        # Recursively call this class to process include files.
        # Extract all the information from the included file.
        with open(os.path.join(prefix, fname), 'r') as f:
            inc = Preprocess(f, True, self.widthcount)
        self.widthcount = inc.widthcount
        if 'styles' in self.styles and 'styles' in inc.styles:
            self.styles['styles'].update(inc.styles.pop('styles'))
        self.styles.update(inc.styles)
        if inc.changed:
            self.changed = True
            if not inc.keep:
                return
            fname = inc.result.name
        self.result.extend(['', '', '.. include:: ' + fname, ''])

    def handle_single(self, word):
        ''' Prepend the singleword class in front of the word.
        '''
        self.changed = True
        self.result.extend(['', '', '.. class:: singleword', '', word, ''])

    def handle_page(self, chunk):
        ''' Insert a raw pagebreak
        '''
        self.changed = True
        self.result.extend(['', '', '.. raw:: pdf', '',
                            ' PageBreak ' + chunk, ''])

    def handle_space(self, chunk):
        ''' Insert a raw space
        '''
        self.changed = True
        if len(chunk.replace(',', ' ').split()) == 1:
            # Only one number given: treat it as vertical space.
            chunk = '0 ' + chunk
        self.result.extend(['', '', '.. raw:: pdf', '',
                            ' Spacer ' + chunk, ''])

    def handle_widths(self, chunk):
        ''' Insert a unique style in the stylesheet, and reference it
            from a .. class:: comment.
        '''
        self.changed = True
        chunk = chunk.replace(',', ' ').replace('%', ' ').split()
        if not chunk:
            log.error('no widths specified in .. widths::')
            return
        parent = chunk[0][0].isalpha() and chunk.pop(0) or 'table'
        values = [float(x) for x in chunk]

        # Normalize the widths into integer percentages that sum to exactly
        # 100, nudging the largest value up or down to absorb rounding error.
        total = sum(values)
        values = [int(round(100 * x / total)) for x in values]
        while True:
            total = sum(values)
            if total > 100:
                values[values.index(max(values))] -= 1
            elif total < 100:
                values[values.index(max(values))] += 1
            else:
                break
        values = ['%s%%' % x for x in values]

        self.widthcount += 1
        stylename = 'embeddedtablewidth%d' % self.widthcount
        self.styles.setdefault('styles', {})[stylename] = dict(
            parent=parent, colWidths=values)
        self.result.extend(['', '', '.. class:: ' + stylename, ''])

    def handle_style(self, chunk):
        ''' Parse through the source until we find lines that are no longer
            indented, then pass our indented lines to the RSON parser.
        '''
        self.changed = True
        if chunk:
            log.error(".. style:: does not recognize string %s" % repr(chunk))
            return
        mystyles = '\n'.join(self.read_indented())
        if not mystyles:
            log.error("Empty .. style:: block found")
        try:
            styles = rson_loads(mystyles)
        except ValueError as e:  # Error parsing the JSON data
            log.critical('Error parsing stylesheet "%s": %s' %
                         (mystyles, str(e)))
        else:
            self.styles.setdefault('styles', {}).update(styles)

    def read_indented(self):
        ''' Read data from source while it is indented (or blank).
            Stop on the first non-indented line, and leave the rest
            on the source.
        '''
        source = self.source
        data = None
        while source and not data:
            data = source and source.pop().splitlines() or []
        data.reverse()
        while data:
            line = data.pop().rstrip()
            if not line or line.lstrip() != line:
                yield line
                continue
            data.append(line)
            break
        data.reverse()
        data.append('')
        source.append('\n'.join(data))
        source.append('\n')

# Automatically generate our keywords from methods prefixed with 'handle_'
__v = Preprocess.__dict__
__keywords = [x[7:] for x in __v if x.startswith('handle_')]
# Generate the regular expression for parsing, and a split function using it.
__blankline = r'^([ \t]*\n)'
__singleword = r'^([A-Za-z]+[ \t]*\n)(?=[ \t]*\n)'
__comment = r'^(\.\.[ \t]+(?:%s)\:\:.*\n)' % '|'.join(__keywords)
__expression = '(?:%s)' % '|'.join([__blankline, __singleword, __comment])
Preprocess.splitter = re.compile(__expression, re.MULTILINE).split
# Once we have used the keywords in our regular expression,
# fix them up for use by the parser.
Preprocess.keywords = dict((x + '::', __v['handle_' + x]) for x in __keywords)
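
# For orientation (an illustrative note, not an exhaustive description):
# with the handlers defined above, the splitter captures three kinds of
# chunks -- blank lines, a lone alphabetic word on a line of its own just
# before a blank line, and directive lines such as ".. space:: 12" or
# ".. widths:: 10 30 60" whose directive name is one of the handle_*
# keywords.  The text between those captures is copied through to the
# result unchanged.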


class MyStyles(str):
    """
    This class conforms to the styles.py processing requirements
    for a stylesheet that is not really a file.  It must be callable(),
    and str(x) must return the name of the stylesheet.
    """

    def __new__(cls, styles):
        self = str.__new__(cls, 'Embedded Preprocess Styles')
        self.data = styles
        return self

    def __call__(self):
        return self.data


def install(createpdf, options):
    """
    This is where we intercept the document conversion.
    Preprocess the restructured text, and insert our new styles (if any).
    """
    data = Preprocess(options.infile)
    options.infile = data.result
    if data.styles:
        options.style.append(MyStyles(data.styles))
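

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the extension itself): exercises the
# Preprocess class on a small throwaway document.  The sample text, file
# names, and printed output below are illustrative assumptions only; rst2pdf
# normally drives this module through install() above.  Note that, like a
# normal run, this leaves a "<name>.build_temp" file behind by design.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import tempfile

    sample = (
        "A heading\n"
        "=========\n"
        "\n"
        "Some introductory text.\n"
        "\n"
        ".. space:: 20\n"
        "\n"
        ".. widths:: 10 30 60\n"
        "\n"
        "More text after the directives.\n"
    )

    # Write the sample to a real file so Preprocess sees a .name attribute.
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
        tmp.write(sample)
        path = tmp.name

    with open(path) as f:
        data = Preprocess(f)

    # data.result is either the original file (nothing changed) or a
    # DummyFile whose contents were also written to "<path>.build_temp".
    print('--- preprocessed source ---')
    print(data.result.read())

    # data.styles should now contain a generated table style, e.g. an entry
    # like 'embeddedtablewidth1' with colWidths ['10%', '30%', '60%'].
    print('--- extracted styles ---')
    print(data.styles)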