repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|
kavigupta/61a-analysis | src/graded_exam.py | 1 | 7215 | """
A set of classes for handling graded exams
"""
from collections import defaultdict
import numpy as np
from tools import cached_property
class ExamQuestion:
"""
A view on a particular question, with optional filtering
"""
def __init__(self, exam_grades, problem, pred=lambda x: True):
self.__exam_grades = exam_grades
self.__problem = problem
self.__pred = pred
def __filter(self, new_pred):
return ExamQuestion(self.__exam_grades, self.__problem,
lambda x: self.__pred(x) and new_pred(x))
def for_grader(self, grader):
"""
Filters on the given grader.
"""
return self.__filter(lambda x: x.grader == grader)
def score_for(self, email):
"""
Return the score for the given EMAIL
"""
return self.__exam_grades._question_score_for(self.__problem, email) # pylint: disable=W0212
@property
def evaluations(self):
"""
Return a list of all evaluations
"""
return (x for x in self.__exam_grades.question_scores_for(self.__problem) if self.__pred(x))
@property
def graders(self):
"""
Return a list of all graders
"""
return set(x.grader for x in self.evaluations)
@property
def __scores(self):
return [x.complete_score for x in self.evaluations]
@property
def std_score(self):
"""
Get the standard deviation of the rubrics
"""
return np.std(self.__scores)
@property
def mean_score(self):
"""
Get the mean of the rubrics
"""
return np.mean(self.__scores)
@property
def emails(self):
"""
Get a list of emails in our evaluations
"""
return (x.email for x in self.evaluations)
class ExamGrades:
"""
A list of all exam grades for a given exam.
"""
def __init__(self, problem_names, location_per_email, evaluation_per_email):
self.__problem_names = problem_names
self.__location_per_email = location_per_email
self.__evaluation_per_email = evaluation_per_email
self.__emails = set(evaluation_per_email.keys())
def by_room(self, seating_chart):
"""
        Input: a seating chart.
        Output: iterable of (room name, ExamGrades restricted to emails in that room).
"""
by_room = defaultdict(lambda: [])
for email in seating_chart.emails:
if email not in self.emails:
continue
current = email, self.__location_per_email[email], self.__evaluation_per_email[email]
by_room[seating_chart.room_for(email)].append(current)
for room, items in by_room.items():
yield room, ExamGrades(self.__problem_names,
{e : l for e, l, _ in items}, {e : ev for e, _, ev in items})
def exam_profile(self, email):
"""
        Returns the exam profile for EMAIL: the flattened list of rubric items
        across every question's evaluation.
"""
return [x
for ev in self.evaluation_for(email).evals
for x in ev.complete_score.rubric_items]
def change_grades(self, new_evals_per_email):
"""
Outputs a new ExamGrades object with the given evaluations per email dictionary.
"""
return ExamGrades(self.__problem_names, self.__location_per_email, new_evals_per_email)
@cached_property
def max_score(self):
"""
        Outputs the maximum score any student achieved on this exam
"""
return max(x.score for x in self.__evaluation_per_email.values())
@cached_property
def mean_score(self):
"""
        Outputs the mean score across all students on this exam
"""
return np.mean([x.score for x in self.__evaluation_per_email.values()])
@staticmethod
def create(problem_names, grades_per_index):
"""
        Create an ExamGrades object from
            problem_names: a list of problem names.
            grades_per_index: a dictionary mapping TimeIndex -> Evaluation
"""
location_per_email = {e.email : i for i, e in grades_per_index.items()}
evaluation_per_email = {e.email : e for i, e in grades_per_index.items()}
return ExamGrades(problem_names, location_per_email, evaluation_per_email)
def __iter__(self):
return iter((name, ExamQuestion(self, name)) for name in self.__problem_names)
def question_scores_for(self, problem):
"""
Get the question scores for the given problem.
"""
p_index = self.__problem_names.index(problem)
for full_grade in self.__evaluation_per_email.values():
yield full_grade.evals[p_index]
def _question_score_for(self, problem, email):
"""
        Get the question score for the given problem and email.
"""
p_index = self.__problem_names.index(problem)
return self.evaluation_for(email).evals[p_index]
@property
def emails(self):
"""
Get a set of emails of students who took this exam
"""
return self.__emails
def evaluation_for(self, email):
"""
Get the evaluation mapped to the given email.
"""
return self.__evaluation_per_email[email]
def remove(self, emails):
"""
Returns a new ExamGrades object with the given iterable of emails filtered out.
"""
return ExamGrades(
self.__problem_names,
{x : y for x, y in self.__location_per_email.items() if x not in emails},
{x : y for x, y in self.__evaluation_per_email.items() if x not in emails})
def __replace(self, updater):
return ExamGrades(
self.__problem_names,
self.__location_per_email,
{x : updater(y) for x, y in self.__evaluation_per_email.items()})
def zero_meaned(self):
"""
Zero means each question score by grader.
"""
def mean_per_question_and_grader():
"""
            Produces an iterable of
                keys : (question, grader) pairs
                values : the mean score given by that grader on that question
"""
for quest, grades in self:
for grader in grades.graders:
by_grader = grades.for_grader(grader)
yield ((quest, grader), by_grader.mean_score)
mpqag = dict(mean_per_question_and_grader())
def updater(elem):
"""
Takes an evaluation and zero means it.
"""
def means():
"""
Returns the means for each question for the given grader.
"""
for que, eva in zip(self.__problem_names, elem.evals):
yield mpqag[(que, eva.grader)]
return elem.zero_mean(means())
return self.__replace(updater)
def time_diff(self, email_a, email_b):
"""
Get the difference between the times at which email_a and email_b were processed (in number
of exams).
"""
return self.__location_per_email[email_a] - self.__location_per_email[email_b]
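# A minimal usage sketch of the classes above (illustrative only; the
# Evaluation objects inside `grades_per_index` come from the surrounding
# project and are hypothetical here):
#
#     grades = ExamGrades.create(["Q1", "Q2"], grades_per_index)
#     for name, question in grades:
#         for grader in question.graders:
#             print(name, grader, question.for_grader(grader).mean_score)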
| gpl-3.0 | -4,514,944,348,679,788,500 | 36.38342 | 100 | 0.573112 | false |
mindbaffle/ATF | Test/FunctionalTests/FsmEditorTestScripts/EditSaveCloseAndReopen.py | 10 | 3066 | #Copyright (c) 2014 Sony Computer Entertainment America LLC. See License.txt.
import sys
sys.path.append("./CommonTestScripts")
import Test
import FsmUtil
from System.IO import File
doc = atfDocService.OpenNewDocument(editor)
states = []
comments = []
transitions = []
print "Add a few states ..."
states.append(FsmUtil.AddNewStateAndVerify(editingContext, 50, 50, "state", 64))
states.append(FsmUtil.AddNewStateAndVerify(editingContext, 300, 100, "statey", 80))
states.append(FsmUtil.AddNewStateAndVerify(editingContext, 100, 200, "super troopers", 100))
print "Add a few comments ..."
comments.append(FsmUtil.AddNewCommentAndVerify(editingContext, 30, 150, "do the "))
comments.append(FsmUtil.AddNewCommentAndVerify(editingContext, 30, 175, "can"))
comments.append(FsmUtil.AddNewCommentAndVerify(editingContext, 30, 200, "can"))
comments.append(FsmUtil.AddNewCommentAndVerify(editingContext, 30, 225, "can"))
print "Add a few transitions ..."
transitions.append(FsmUtil.AddNewTransitionAndVerify(editingContext, states[0], states[1]))
transitions[0].Label = "a -> b"
transitions.append(FsmUtil.AddNewTransitionAndVerify(editingContext, states[1], states[2]))
transitions[1].Label = "b -> c"
transitions.append(FsmUtil.AddNewTransitionAndVerify(editingContext, states[2], states[0]))
transitions[2].Label = "c -> a"
transitions.append(FsmUtil.AddNewTransitionAndVerify(editingContext, states[0], states[2]))
transitions[3].Label = "a -> c"
transitions.append(FsmUtil.AddNewTransitionAndVerify(editingContext, states[0], states[1]))
transitions[4].Label = "a -> b(2)"
filePath = Test.GetNewFilePath("EditAndSave.fsm")
editor.Save(doc, Uri(filePath))
Test.True(File.Exists(filePath), "Verify file saved")
editor.Close(doc)
editor.Open(Uri(filePath))
Test.Equal(states.Count, fsm.States.Count, "Verify states count matches")
Test.Equal(comments.Count, fsm.Annotations.Count, "Verify comments count matches")
Test.Equal(transitions.Count, fsm.Transitions.Count, "Verify transitions count matches")
for i in range(states.Count):
print "Testing state#" + unicode(i)
Test.Equal(states[i].Name, fsm.States[i].Name, "Verify name")
Test.Equal(states[i].Position.X, fsm.States[i].Position.X, "Verify X")
Test.Equal(states[i].Position.Y, fsm.States[i].Position.Y, "Verify Y")
Test.Equal(states[i].Size, fsm.States[i].Size, "Verify size")
for i in range(comments.Count):
print "Testing comment#" + unicode(i)
Test.Equal(comments[i].Text, fsm.Annotations[i].Text, "Verify text")
Test.Equal(comments[i].Location.X, fsm.Annotations[i].Location.X, "Verify X")
Test.Equal(comments[i].Location.Y, fsm.Annotations[i].Location.Y, "Verify Y")
for i in range(transitions.Count):
print "Testing transition#" + unicode(i)
Test.Equal(transitions[i].Label, fsm.Transitions[i].Label, "Verify label")
Test.Equal(transitions[i].FromState.Name, fsm.Transitions[i].FromState.Name, "Verify FromState name")
Test.Equal(transitions[i].ToState.Name, fsm.Transitions[i].ToState.Name, "Verify ToState name")
print Test.SUCCESS
| apache-2.0 | -1,698,312,080,891,285,200 | 43.42029 | 105 | 0.749674 | false |
idlead/scikit-learn | sklearn/linear_model/logistic.py | 9 | 66155 |
"""
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..utils.fixes import expit
from ..utils.multiclass import check_classification_targets
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
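# For reference, the quantities computed above (a restatement of the code, not
# new behavior): with z_i = y_i * (x_i . w + c), sigma the logistic function
# and s_i the sample weights,
#     loss = -sum_i s_i * log(sigma(z_i)) + (alpha / 2) * ||w||^2
#     d loss / d w = X.T @ (s * (sigma(z) - 1) * y) + alpha * w
#     d loss / d c = sum_i s_i * (sigma(z_i) - 1) * y_i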
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
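# For reference, the Hessian applied by Hs above (a restatement of the code,
# not new behavior): with d_i = s_i * sigma(z_i) * (1 - sigma(z_i)) and z_i as
# in _logistic_loss_and_grad,
#     H = X.T @ diag(d) @ X + alpha * I
# plus an extra row/column for the intercept when it is fit; Hs(v) returns the
# product H @ v without ever forming H explicitly.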
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
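# For reference, the loss computed above (a restatement of the code, not new
# behavior): with P = softmax(X @ W.T + intercept) evaluated row-wise in
# log-space for numerical stability,
#     loss = -sum_i s_i * sum_k Y[i, k] * log(P[i, k]) + (alpha / 2) * ||W||_F^2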
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
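# For reference, the gradient computed above (a restatement of the code, not
# new behavior): with diff = s * (P - Y) and P from _multinomial_loss,
#     d loss / d W = diff.T @ X + alpha * W
#     d loss / d intercept = diff.sum(axis=0)
# and the result is returned ravelled into a single 1-D vector.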
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver in ['liblinear', 'sag']:
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=False,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
copy : bool, default False
Whether or not to produce a copy of the data. A copy is not required
anymore. This parameter is deprecated and will be removed in 0.19.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
    You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if copy:
warnings.warn("A copy is not required anymore. The 'copy' parameter "
"is deprecated and will be removed in 0.19.",
DeprecationWarning)
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
if check_input or copy:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg or sag solvers or set "
"class_weight='balanced'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
# for compute_class_weight
# 'auto' is deprecated and will be removed in 0.19
if class_weight in ("auto", "balanced"):
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
lbin = LabelBinarizer()
Y_binarized = lbin.fit_transform(y)
if Y_binarized.shape[1] == 1:
Y_binarized = np.hstack([1 - Y_binarized, Y_binarized])
w0 = np.zeros((Y_binarized.shape[1], n_features + int(fit_intercept)),
order='F')
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
w0 = w0.ravel()
target = Y_binarized
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
coefs = list()
warm_start_sag = {'coef': w0}
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
except:
n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver == 'sag':
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, 'log', 1. / C, max_iter, tol,
verbose, random_state, False, max_squared_sum,
warm_start_sag)
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
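# A minimal usage sketch of the path function above (illustrative only, on a
# small synthetic binary problem; each entry of `coefs` corresponds to one
# value in `Cs`):
#
#     rng = np.random.RandomState(0)
#     X_demo = rng.randn(20, 3)
#     y_demo = (X_demo[:, 0] > 0).astype(np.float64)
#     coefs, Cs, n_iter = logistic_regression_path(X_demo, y_demo,
#                                                  Cs=[0.1, 1., 10.])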
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen on a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
        'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
        raise ValueError("multi_class should be either multinomial or ovr, "
                         "got %s" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
# To deal with object dtypes, we need to convert into an array of floats.
y_test = check_array(y_test, dtype=np.float64, ensure_2d=False)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'* instead of deprecated *class_weight='auto'*.
max_iter : int
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg' and 'lbfgs' handle
multinomial loss; 'sag' and 'liblinear' are limited to
one-versus-rest schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs'
        and 'newton-cg' solvers.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag* solvers.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
else None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver, copy=False,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
        Else use a one-vs-rest approach, i.e. calculate the probability
        of each class assuming it to be positive using the logistic function,
        and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
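# A minimal usage sketch of the estimator above (illustrative only):
#
#     from sklearn.datasets import load_iris
#     iris = load_iris()
#     clf = LogisticRegression(solver='lbfgs', multi_class='multinomial',
#                              C=1.0).fit(iris.data, iris.target)
#     clf.predict_proba(iris.data[:2])   # softmax class probabilities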
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizers. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path, i.e. guess the initial coefficients of the
    present fit to be the coefficients obtained after convergence in the previous
fit, so it is supposed to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
        on a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg' and 'lbfgs' handle
multinomial loss; 'sag' and 'liblinear' are limited to
one-versus-rest schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for 'lbfgs' and
'newton-cg' solvers.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
        It is available only when parameter fit_intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
else None
check_classification_targets(y)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in
['balanced', 'auto']):
# 'auto' is deprecated and will be removed in 0.19
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
# compute the class weights for the entire dataset y
if self.class_weight in ("auto", "balanced"):
classes = np.unique(y)
class_weight = compute_class_weight(self.class_weight, classes, y)
class_weight = dict(zip(classes, class_weight))
else:
class_weight = self.class_weight
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty, copy=False,
class_weight=class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
| bsd-3-clause | 4,298,701,565,378,991,600 | 38.495522 | 81 | 0.602766 | false |
Lattyware/unrpa | unrpa/__init__.py | 1 | 9063 | import itertools
import operator
import os
import pickle
import sys
import traceback
import zlib
from typing import (
Union,
Tuple,
Optional,
Dict,
cast,
Iterable,
Type,
BinaryIO,
FrozenSet,
Sequence,
)
from unrpa.errors import (
OutputDirectoryNotFoundError,
ErrorExtractingFile,
AmbiguousArchiveError,
UnknownArchiveError,
)
from unrpa.versions import official_rpa, alt, zix, unofficial_rpa
from unrpa.versions.version import Version
from unrpa.view import ArchiveView
# Offset, Length
SimpleIndexPart = Tuple[int, int]
SimpleIndexEntry = Iterable[SimpleIndexPart]
# Offset, Length, Prefix
ComplexIndexPart = Tuple[int, int, bytes]
ComplexIndexEntry = Iterable[ComplexIndexPart]
IndexPart = Union[SimpleIndexPart, ComplexIndexPart]
IndexEntry = Iterable[IndexPart]
class TreeNode:
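    """Helper node used to render an archive listing as a tree: each node keeps
    a name plus child TreeNodes built from split path components."""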
def __init__(self, name: str, children: Iterable[Sequence[str]]) -> None:
self.name = name
if children:
self.children = [
TreeNode(
child,
[
subchild[1:]
for subchild in children_of_child
if len(subchild) > 1
],
)
for (child, children_of_child) in itertools.groupby(
children, key=operator.itemgetter(0)
)
]
else:
self.children = []
class UnRPA:
"""Extraction tool for RPA archives."""
name = "unrpa"
error = 0
info = 1
debug = 2
ordered_versions: Tuple[Type[Version], ...] = (
*official_rpa.versions,
*alt.versions,
*zix.versions,
*unofficial_rpa.versions,
)
provided_versions: FrozenSet[Type[Version]] = frozenset(ordered_versions)
def __init__(
self,
filename: str,
verbosity: int = -1,
path: Optional[str] = None,
mkdir: bool = False,
version: Optional[Type[Version]] = None,
continue_on_error: bool = False,
offset_and_key: Optional[Tuple[int, int]] = None,
extra_versions: FrozenSet[Type[Version]] = frozenset(),
) -> None:
self.verbose = verbosity
if path:
self.path = os.path.abspath(path)
else:
self.path = os.getcwd()
self.mkdir = mkdir
self.version = version
self.archive = filename
self.continue_on_error = continue_on_error
self.offset_and_key = offset_and_key
self.tty = sys.stdout.isatty()
self.versions = UnRPA.provided_versions | extra_versions
def log(
self, verbosity: int, human_message: str, machine_message: str = None
) -> None:
if self.tty and self.verbose > verbosity:
print(
human_message if self.tty else machine_message,
file=sys.stderr if verbosity == UnRPA.error else sys.stdout,
)
def extract_files(self) -> None:
self.log(UnRPA.error, f"Extracting files from {self.archive}.")
if self.mkdir:
self.make_directory_structure(self.path)
if not os.path.isdir(self.path):
raise OutputDirectoryNotFoundError(self.path)
version = self.version() if self.version else self.detect_version()
with open(self.archive, "rb") as archive:
index = self.get_index(archive, version)
total_files = len(index)
for file_number, (path, data) in enumerate(index.items()):
try:
self.make_directory_structure(
os.path.join(self.path, os.path.split(path)[0])
)
file_view = self.extract_file(
path, data, file_number, total_files, archive
)
with open(os.path.join(self.path, path), "wb") as output_file:
version.postprocess(file_view, output_file)
except BaseException as error:
if self.continue_on_error:
self.log(
0,
f"Error extracting from the archive, but directed to continue on error. Detail: "
f"{traceback.format_exc()}.",
)
else:
raise ErrorExtractingFile(traceback.format_exc()) from error
def list_files(self) -> None:
self.log(UnRPA.info, f"Listing files in {self.archive}:")
with open(self.archive, "rb") as archive:
paths = self.get_index(archive).keys()
for path in sorted(paths):
print(path)
def list_files_tree(self) -> None:
print(self.archive)
for line in self.tree_lines():
print(line)
def tree(self) -> TreeNode:
with open(self.archive, "rb") as archive:
paths = sorted(self.get_index(archive).keys())
return TreeNode(
self.archive,
[list(reversed(list(self.full_split(path)))) for path in paths],
)
@staticmethod
def full_split(path: str) -> Iterable[str]:
while path:
(path, tail) = os.path.split(path)
yield tail
def tree_lines(
self, current_node: TreeNode = None, prefix: str = ""
) -> Iterable[str]:
if not current_node:
current_node = self.tree()
for child in current_node.children[:-1]:
yield f"{prefix}├--- {child.name}"
yield from self.tree_lines(child, f"{prefix}| ")
if current_node.children:
child = current_node.children[-1]
yield f"{prefix}└--- {child.name}"
yield from self.tree_lines(child, f"{prefix} ")
def extract_file(
self,
name: str,
data: ComplexIndexEntry,
file_number: int,
total_files: int,
archive: BinaryIO,
) -> ArchiveView:
self.log(
UnRPA.info, f"[{file_number / float(total_files):04.2%}] {name:>3}", name
)
offset, length, start = next(iter(data))
return ArchiveView(archive, offset, length, start)
def make_directory_structure(self, name: str) -> None:
self.log(UnRPA.debug, f"Creating directory structure: {name}")
if not os.path.exists(name):
os.makedirs(name)
def get_index(
self, archive: BinaryIO, version: Optional[Version] = None
) -> Dict[str, ComplexIndexEntry]:
if not version:
version = self.version() if self.version else self.detect_version()
offset = 0
key: Optional[int] = None
if self.offset_and_key:
offset, key = self.offset_and_key
else:
offset, key = version.find_offset_and_key(archive)
archive.seek(offset)
index: Dict[bytes, IndexEntry] = pickle.loads(
zlib.decompress(archive.read()), encoding="bytes"
)
if key is not None:
normal_index = UnRPA.deobfuscate_index(key, index)
else:
normal_index = UnRPA.normalise_index(index)
return {
UnRPA.ensure_str_path(path).replace("/", os.sep): data
for path, data in normal_index.items()
}
def detect_version(self) -> Version:
potential = (version() for version in self.versions)
ext = os.path.splitext(self.archive)[1].lower()
with open(self.archive, "rb") as f:
header = f.readline()
detected = {version for version in potential if version.detect(ext, header)}
if len(detected) > 1:
raise AmbiguousArchiveError(detected)
try:
return next(iter(detected))
except StopIteration:
raise UnknownArchiveError(header)
@staticmethod
def ensure_str_path(path: Union[str, bytes]) -> str:
if isinstance(path, str):
return path
else:
return path.decode("utf-8", "replace")
@staticmethod
def deobfuscate_index(
key: int, index: Dict[bytes, IndexEntry]
) -> Dict[bytes, ComplexIndexEntry]:
return {
path: UnRPA.deobfuscate_entry(key, entry) for path, entry in index.items()
}
@staticmethod
def deobfuscate_entry(key: int, entry: IndexEntry) -> ComplexIndexEntry:
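        # The archive index stores each offset and length XOR-ed with the
        # archive key; XOR them again to recover the real values.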
return [
(offset ^ key, length ^ key, start)
for offset, length, start in UnRPA.normalise_entry(entry)
]
@staticmethod
def normalise_index(
index: Dict[bytes, IndexEntry]
) -> Dict[bytes, ComplexIndexEntry]:
return {path: UnRPA.normalise_entry(entry) for path, entry in index.items()}
@staticmethod
def normalise_entry(entry: IndexEntry) -> ComplexIndexEntry:
return [
(*cast(SimpleIndexPart, part), b"")
if len(part) == 2
else cast(ComplexIndexPart, part)
for part in entry
]
| gpl-3.0 | -3,801,496,572,079,539,700 | 31.703971 | 109 | 0.560768 | false |
serpilliere/miasm | miasm/ir/translators/z3_ir.py | 2 | 10482 | from builtins import map
from builtins import range
import imp
import logging
# Raise an ImportError if z3 is not available WITHOUT actually importing it
imp.find_module("z3")
from miasm.ir.translators.translator import Translator
log = logging.getLogger("translator_z3")
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("[%(levelname)-8s]: %(message)s"))
log.addHandler(console_handler)
log.setLevel(logging.WARNING)
class Z3Mem(object):
"""Memory abstraction for TranslatorZ3. Memory elements are only accessed,
never written. To give a concrete value for a given memory cell in a solver,
add "mem32.get(address, size) == <value>" constraints to your equation.
    The endianness of memory accesses is handled according to the "endianness"
attribute.
Note: Will have one memory space for each addressing size used.
    For example, if memory is accessed via 32 bit values and 16 bit values,
    these accesses will not occur in the same address space.
"""
def __init__(self, endianness="<", name="mem"):
"""Initializes a Z3Mem object with a given @name and @endianness.
@endianness: Endianness of memory representation. '<' for little endian,
'>' for big endian.
@name: name of memory Arrays generated. They will be named
name+str(address size) (for example mem32, mem16...).
"""
# Import z3 only on demand
global z3
import z3
if endianness not in ['<', '>']:
raise ValueError("Endianness should be '>' (big) or '<' (little)")
self.endianness = endianness
self.mems = {} # Address size -> memory z3.Array
self.name = name
def get_mem_array(self, size):
"""Returns a z3 Array used internally to represent memory for addresses
of size @size.
@size: integer, size in bit of addresses in the memory to get.
Return a z3 Array: BitVecSort(size) -> BitVecSort(8).
"""
try:
mem = self.mems[size]
except KeyError:
# Lazy instantiation
self.mems[size] = z3.Array(self.name + str(size),
z3.BitVecSort(size),
z3.BitVecSort(8))
mem = self.mems[size]
return mem
def __getitem__(self, addr):
"""One byte memory access. Different address sizes with the same value
will result in different memory accesses.
@addr: a z3 BitVec, the address to read.
Return a z3 BitVec of size 8 bits representing a memory access.
"""
size = addr.size()
mem = self.get_mem_array(size)
return mem[addr]
def get(self, addr, size):
""" Memory access at address @addr of size @size.
@addr: a z3 BitVec, the address to read.
@size: int, size of the read in bits.
Return a z3 BitVec of size @size representing a memory access.
"""
original_size = size
if original_size % 8 != 0:
# Size not aligned on 8bits -> read more than size and extract after
size = ((original_size // 8) + 1) * 8
res = self[addr]
if self.is_little_endian():
for i in range(1, size // 8):
res = z3.Concat(self[addr+i], res)
else:
            for i in range(1, size // 8):
res = z3.Concat(res, self[addr+i])
if size == original_size:
return res
else:
# Size not aligned, extract right sized result
return z3.Extract(original_size-1, 0, res)
def is_little_endian(self):
"""True if this memory is little endian."""
return self.endianness == "<"
def is_big_endian(self):
"""True if this memory is big endian."""
return not self.is_little_endian()
class TranslatorZ3(Translator):
"""Translate a Miasm expression to an equivalent z3 python binding
expression. Memory is abstracted via z3.Array (see Z3Mem).
The result of from_expr will be a z3 Expr.
If you want to interact with the memory abstraction after the translation,
you can instantiate your own Z3Mem, that will be equivalent to the one
used by TranslatorZ3.
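    Example (a minimal sketch, assuming Miasm's ExprInt expression class and
    the from_expr dispatcher inherited from Translator):
        >>> from miasm.expression.expression import ExprInt
        >>> translator = TranslatorZ3()
        >>> z3_value = translator.from_expr(ExprInt(5, 32))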
"""
# Implemented language
__LANG__ = "z3"
# Operations translation
trivial_ops = ["+", "-", "/", "%", "&", "^", "|", "*", "<<"]
def __init__(self, endianness="<", loc_db=None, **kwargs):
"""Instance a Z3 translator
@endianness: (optional) memory endianness
"""
# Import z3 only on demand
global z3
import z3
super(TranslatorZ3, self).__init__(**kwargs)
self._mem = Z3Mem(endianness)
self.loc_db = loc_db
def from_ExprInt(self, expr):
return z3.BitVecVal(int(expr), expr.size)
def from_ExprId(self, expr):
return z3.BitVec(str(expr), expr.size)
def from_ExprLoc(self, expr):
if self.loc_db is None:
# No loc_db, fallback to default name
return z3.BitVec(str(expr), expr.size)
loc_key = expr.loc_key
offset = self.loc_db.get_location_offset(loc_key)
if offset is not None:
return z3.BitVecVal(offset, expr.size)
# fallback to default name
return z3.BitVec(str(loc_key), expr.size)
def from_ExprMem(self, expr):
addr = self.from_expr(expr.ptr)
return self._mem.get(addr, expr.size)
def from_ExprSlice(self, expr):
res = self.from_expr(expr.arg)
res = z3.Extract(expr.stop-1, expr.start, res)
return res
def from_ExprCompose(self, expr):
res = None
for arg in expr.args:
e = z3.Extract(arg.size-1, 0, self.from_expr(arg))
            if res is not None:
res = z3.Concat(e, res)
else:
res = e
return res
def from_ExprCond(self, expr):
cond = self.from_expr(expr.cond)
src1 = self.from_expr(expr.src1)
src2 = self.from_expr(expr.src2)
return z3.If(cond != 0, src1, src2)
def _abs(self, z3_value):
        return z3.If(z3_value >= 0, z3_value, -z3_value)
def _sdivC(self, num_expr, den_expr):
"""Divide (signed) @num by @den (Expr) as C would
See modint.__div__ for implementation choice
"""
num, den = self.from_expr(num_expr), self.from_expr(den_expr)
num_s = self.from_expr(num_expr.signExtend(num_expr.size * 2))
den_s = self.from_expr(den_expr.signExtend(den_expr.size * 2))
result_sign = z3.If(num_s * den_s >= 0,
z3.BitVecVal(1, num.size()),
z3.BitVecVal(-1, num.size()),
)
return z3.UDiv(self._abs(num), self._abs(den)) * result_sign
def from_ExprOp(self, expr):
args = list(map(self.from_expr, expr.args))
res = args[0]
if len(args) > 1:
for arg in args[1:]:
if expr.op in self.trivial_ops:
res = eval("res %s arg" % expr.op)
elif expr.op == ">>":
res = z3.LShR(res, arg)
elif expr.op == "a>>":
res = res >> arg
elif expr.op == "<<<":
res = z3.RotateLeft(res, arg)
elif expr.op == ">>>":
res = z3.RotateRight(res, arg)
elif expr.op == "sdiv":
res = self._sdivC(expr.args[0], expr.args[1])
elif expr.op == "udiv":
res = z3.UDiv(res, arg)
elif expr.op == "smod":
res = res - (arg * (self._sdivC(expr.args[0], expr.args[1])))
elif expr.op == "umod":
res = z3.URem(res, arg)
elif expr.op == "==":
res = z3.If(
args[0] == args[1],
z3.BitVecVal(1, 1),
z3.BitVecVal(0, 1)
)
elif expr.op == "<u":
res = z3.If(
z3.ULT(args[0], args[1]),
z3.BitVecVal(1, 1),
z3.BitVecVal(0, 1)
)
elif expr.op == "<s":
res = z3.If(
args[0] < args[1],
z3.BitVecVal(1, 1),
z3.BitVecVal(0, 1)
)
elif expr.op == "<=u":
res = z3.If(
z3.ULE(args[0], args[1]),
z3.BitVecVal(1, 1),
z3.BitVecVal(0, 1)
)
elif expr.op == "<=s":
res = z3.If(
args[0] <= args[1],
z3.BitVecVal(1, 1),
z3.BitVecVal(0, 1)
)
else:
raise NotImplementedError("Unsupported OP yet: %s" % expr.op)
elif expr.op == 'parity':
arg = z3.Extract(7, 0, res)
res = z3.BitVecVal(1, 1)
for i in range(8):
res = res ^ z3.Extract(i, i, arg)
elif expr.op == '-':
res = -res
elif expr.op == "cnttrailzeros":
size = expr.size
src = res
res = z3.If(src == 0, size, src)
for i in range(size - 1, -1, -1):
res = z3.If((src & (1 << i)) != 0, i, res)
elif expr.op == "cntleadzeros":
size = expr.size
src = res
res = z3.If(src == 0, size, src)
for i in range(size, 0, -1):
index = - i % size
out = size - (index + 1)
res = z3.If((src & (1 << index)) != 0, out, res)
elif expr.op.startswith("zeroExt"):
arg, = expr.args
res = z3.ZeroExt(expr.size - arg.size, self.from_expr(arg))
elif expr.op.startswith("signExt"):
arg, = expr.args
res = z3.SignExt(expr.size - arg.size, self.from_expr(arg))
else:
raise NotImplementedError("Unsupported OP yet: %s" % expr.op)
return res
def from_ExprAssign(self, expr):
src = self.from_expr(expr.src)
dst = self.from_expr(expr.dst)
return (src == dst)
# Register the class
Translator.register(TranslatorZ3)
| gpl-2.0 | 5,558,664,054,745,267,000 | 35.908451 | 81 | 0.515646 | false |
huangkuan/hack | lib/google/api/label_pb2.py | 2 | 3868 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/label.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/api/label.proto',
package='google.api',
syntax='proto3',
serialized_pb=b'\n\x16google/api/label.proto\x12\ngoogle.api\"\x9c\x01\n\x0fLabelDescriptor\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x39\n\nvalue_type\x18\x02 \x01(\x0e\x32%.google.api.LabelDescriptor.ValueType\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\",\n\tValueType\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x42OOL\x10\x01\x12\t\n\x05INT64\x10\x02\x42\x1e\n\x0e\x63om.google.apiB\nLabelProtoP\x01\x62\x06proto3'
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_LABELDESCRIPTOR_VALUETYPE = _descriptor.EnumDescriptor(
name='ValueType',
full_name='google.api.LabelDescriptor.ValueType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STRING', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BOOL', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INT64', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=151,
serialized_end=195,
)
_sym_db.RegisterEnumDescriptor(_LABELDESCRIPTOR_VALUETYPE)
_LABELDESCRIPTOR = _descriptor.Descriptor(
name='LabelDescriptor',
full_name='google.api.LabelDescriptor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.api.LabelDescriptor.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value_type', full_name='google.api.LabelDescriptor.value_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='google.api.LabelDescriptor.description', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_LABELDESCRIPTOR_VALUETYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=39,
serialized_end=195,
)
_LABELDESCRIPTOR.fields_by_name['value_type'].enum_type = _LABELDESCRIPTOR_VALUETYPE
_LABELDESCRIPTOR_VALUETYPE.containing_type = _LABELDESCRIPTOR
DESCRIPTOR.message_types_by_name['LabelDescriptor'] = _LABELDESCRIPTOR
LabelDescriptor = _reflection.GeneratedProtocolMessageType('LabelDescriptor', (_message.Message,), dict(
DESCRIPTOR = _LABELDESCRIPTOR,
__module__ = 'google.api.label_pb2'
# @@protoc_insertion_point(class_scope:google.api.LabelDescriptor)
))
_sym_db.RegisterMessage(LabelDescriptor)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\016com.google.apiB\nLabelProtoP\001')
# @@protoc_insertion_point(module_scope)
| apache-2.0 | -3,283,407,776,802,750,000 | 33.535714 | 415 | 0.72363 | false |
bigswitch/horizon | openstack_dashboard/dashboards/project/firewalls/forms.py | 9 | 16427 | # Copyright 2013, Big Switch Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators
from openstack_dashboard import api
port_validator = validators.validate_port_or_colon_separated_port_range
LOG = logging.getLogger(__name__)
class UpdateRule(forms.SelfHandlingForm):
name = forms.CharField(max_length=80, label=_("Name"), required=False)
description = forms.CharField(
required=False,
max_length=80, label=_("Description"))
protocol = forms.ChoiceField(
label=_("Protocol"), required=False,
choices=[('TCP', _('TCP')), ('UDP', _('UDP')), ('ICMP', _('ICMP')),
('ANY', _('ANY'))],
help_text=_('Protocol for the firewall rule'))
action = forms.ChoiceField(
label=_("Action"), required=False,
choices=[('ALLOW', _('ALLOW')), ('DENY', _('DENY')),
('REJECT', _('REJECT'))],
help_text=_('Action for the firewall rule'))
source_ip_address = forms.IPField(
label=_("Source IP Address/Subnet"),
version=forms.IPv4 | forms.IPv6,
required=False, mask=True,
help_text=_('Source IP address or subnet'))
destination_ip_address = forms.IPField(
label=_('Destination IP Address/Subnet'),
version=forms.IPv4 | forms.IPv6,
required=False, mask=True,
help_text=_('Destination IP address or subnet'))
source_port = forms.CharField(
max_length=80,
label=_("Source Port/Port Range"),
required=False,
validators=[port_validator],
help_text=_('Source port (integer in [1, 65535] or range in a:b)'))
destination_port = forms.CharField(
max_length=80,
label=_("Destination Port/Port Range"),
required=False,
validators=[port_validator],
help_text=_('Destination port (integer in [1, 65535] or range'
' in a:b)'))
ip_version = forms.ChoiceField(
label=_("IP Version"), required=False,
choices=[('4', '4'), ('6', '6')],
help_text=_('IP Version for Firewall Rule'))
shared = forms.BooleanField(label=_("Shared"), required=False)
enabled = forms.BooleanField(label=_("Enabled"), required=False)
failure_url = 'horizon:project:firewalls:index'
def handle(self, request, context):
rule_id = self.initial['rule_id']
name_or_id = context.get('name') or rule_id
if context['protocol'] == 'ANY':
context['protocol'] = None
for f in ['source_ip_address', 'destination_ip_address',
'source_port', 'destination_port']:
if not context[f]:
context[f] = None
try:
rule = api.fwaas.rule_update(request, rule_id, **context)
msg = _('Rule %s was successfully updated.') % name_or_id
LOG.debug(msg)
messages.success(request, msg)
return rule
except Exception as e:
msg = (_('Failed to update rule %(name)s: %(reason)s') %
{'name': name_or_id, 'reason': e})
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class UpdatePolicy(forms.SelfHandlingForm):
name = forms.CharField(max_length=80, label=_("Name"), required=False)
description = forms.CharField(required=False,
max_length=80, label=_("Description"))
shared = forms.BooleanField(label=_("Shared"), required=False)
audited = forms.BooleanField(label=_("Audited"), required=False)
failure_url = 'horizon:project:firewalls:index'
def handle(self, request, context):
policy_id = self.initial['policy_id']
name_or_id = context.get('name') or policy_id
try:
policy = api.fwaas.policy_update(request, policy_id, **context)
msg = _('Policy %s was successfully updated.') % name_or_id
LOG.debug(msg)
messages.success(request, msg)
return policy
except Exception as e:
msg = _('Failed to update policy %(name)s: %(reason)s') % {
'name': name_or_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class UpdateFirewall(forms.SelfHandlingForm):
name = forms.CharField(max_length=80,
label=_("Name"),
required=False)
description = forms.CharField(max_length=80,
label=_("Description"),
required=False)
firewall_policy_id = forms.ChoiceField(label=_("Policy"))
admin_state_up = forms.ChoiceField(choices=[(True, _('UP')),
(False, _('DOWN'))],
label=_("Admin State"))
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(UpdateFirewall, self).__init__(request, *args, **kwargs)
try:
tenant_id = self.request.user.tenant_id
policies = api.fwaas.policy_list_for_tenant(request, tenant_id)
policies = sorted(policies, key=lambda policy: policy.name)
except Exception:
exceptions.handle(request,
_('Unable to retrieve policy list.'))
policies = []
policy_id = kwargs['initial']['firewall_policy_id']
policy_name = [p.name for p in policies if p.id == policy_id][0]
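        # Put the currently assigned policy first so it becomes the default
        # choice in the select widget.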
firewall_policy_id_choices = [(policy_id, policy_name)]
for p in policies:
if p.id != policy_id:
firewall_policy_id_choices.append((p.id, p.name_or_id))
self.fields['firewall_policy_id'].choices = firewall_policy_id_choices
def handle(self, request, context):
firewall_id = self.initial['firewall_id']
name_or_id = context.get('name') or firewall_id
context['admin_state_up'] = (context['admin_state_up'] == 'True')
try:
firewall = api.fwaas.firewall_update(request, firewall_id,
**context)
msg = _('Firewall %s was successfully updated.') % name_or_id
LOG.debug(msg)
messages.success(request, msg)
return firewall
except Exception as e:
msg = _('Failed to update firewall %(name)s: %(reason)s') % {
'name': name_or_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class InsertRuleToPolicy(forms.SelfHandlingForm):
firewall_rule_id = forms.ChoiceField(label=_("Insert Rule"))
insert_before = forms.ChoiceField(label=_("Before"),
required=False)
insert_after = forms.ChoiceField(label=_("After"),
required=False)
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(InsertRuleToPolicy, self).__init__(request, *args, **kwargs)
try:
tenant_id = self.request.user.tenant_id
all_rules = api.fwaas.rule_list_for_tenant(request, tenant_id)
all_rules = sorted(all_rules, key=lambda rule: rule.name_or_id)
available_rules = [r for r in all_rules
if not r.firewall_policy_id]
current_rules = []
for r in kwargs['initial']['firewall_rules']:
r_obj = [rule for rule in all_rules if r == rule.id][0]
current_rules.append(r_obj)
available_choices = [(r.id, r.name_or_id) for r in available_rules]
current_choices = [(r.id, r.name_or_id) for r in current_rules]
except Exception as e:
msg = _('Failed to retrieve available rules: %s') % e
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
self.fields['firewall_rule_id'].choices = available_choices
self.fields['insert_before'].choices = [('', '')] + current_choices
self.fields['insert_after'].choices = [('', '')] + current_choices
def handle(self, request, context):
policy_id = self.initial['policy_id']
policy_name_or_id = self.initial['name'] or policy_id
try:
insert_rule_id = context['firewall_rule_id']
insert_rule = api.fwaas.rule_get(request, insert_rule_id)
body = {'firewall_rule_id': insert_rule_id,
'insert_before': context['insert_before'],
'insert_after': context['insert_after']}
policy = api.fwaas.policy_insert_rule(request, policy_id, **body)
msg = _('Rule %(rule)s was successfully inserted to policy '
'%(policy)s.') % {
'rule': insert_rule.name or insert_rule.id,
'policy': policy_name_or_id}
LOG.debug(msg)
messages.success(request, msg)
return policy
except Exception as e:
msg = _('Failed to insert rule to policy %(name)s: %(reason)s') % {
'name': policy_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class RemoveRuleFromPolicy(forms.SelfHandlingForm):
firewall_rule_id = forms.ChoiceField(label=_("Remove Rule"))
failure_url = 'horizon:project:firewalls:index'
def __init__(self, request, *args, **kwargs):
super(RemoveRuleFromPolicy, self).__init__(request, *args, **kwargs)
try:
tenant_id = request.user.tenant_id
all_rules = api.fwaas.rule_list_for_tenant(request, tenant_id)
current_rules = []
for r in kwargs['initial']['firewall_rules']:
r_obj = [rule for rule in all_rules if r == rule.id][0]
current_rules.append(r_obj)
current_choices = [(r.id, r.name_or_id) for r in current_rules]
except Exception as e:
msg = _('Failed to retrieve current rules in policy %(name)s: '
'%(reason)s') % {'name': self.initial['name'], 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
self.fields['firewall_rule_id'].choices = current_choices
def handle(self, request, context):
policy_id = self.initial['policy_id']
policy_name_or_id = self.initial['name'] or policy_id
try:
remove_rule_id = context['firewall_rule_id']
remove_rule = api.fwaas.rule_get(request, remove_rule_id)
body = {'firewall_rule_id': remove_rule_id}
policy = api.fwaas.policy_remove_rule(request, policy_id, **body)
msg = _('Rule %(rule)s was successfully removed from policy '
'%(policy)s.') % {
'rule': remove_rule.name or remove_rule.id,
'policy': policy_name_or_id}
LOG.debug(msg)
messages.success(request, msg)
return policy
except Exception as e:
msg = _('Failed to remove rule from policy %(name)s: '
'%(reason)s') % {'name': self.initial['name'],
'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class RouterInsertionFormBase(forms.SelfHandlingForm):
def __init__(self, request, *args, **kwargs):
super(RouterInsertionFormBase, self).__init__(request, *args, **kwargs)
try:
router_choices = self.get_router_choices(request, kwargs)
self.fields['router_ids'].choices = router_choices
except Exception as e:
msg = self.init_failure_msg % {'name': self.initial['name'],
'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
@abc.abstractmethod
def get_router_choices(self, request, kwargs):
"""Return a list of selectable routers."""
@abc.abstractmethod
def get_new_router_ids(self, context):
"""Return a new list of router IDs associated with the firewall."""
def handle(self, request, context):
firewall_id = self.initial['firewall_id']
firewall_name_or_id = self.initial['name'] or firewall_id
try:
body = {'router_ids': self.get_new_router_ids(context)}
firewall = api.fwaas.firewall_update(request, firewall_id, **body)
msg = self.success_msg % {'firewall': firewall_name_or_id}
LOG.debug(msg)
messages.success(request, msg)
return firewall
except Exception as e:
msg = self.failure_msg % {'name': firewall_name_or_id, 'reason': e}
LOG.error(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
class AddRouterToFirewall(RouterInsertionFormBase):
router_ids = forms.MultipleChoiceField(
label=_("Add Routers"),
required=False,
widget=forms.ThemableCheckboxSelectMultiple(),
help_text=_("Add selected router(s) to the firewall."))
failure_url = 'horizon:project:firewalls:index'
success_msg = _('Router(s) was/were successfully added to firewall '
'%(firewall)s.')
failure_msg = _('Failed to add router(s) to firewall %(name)s: %(reason)s')
init_failure_msg = _('Failed to retrieve available routers: %(reason)s')
def get_router_choices(self, request, kwargs):
tenant_id = self.request.user.tenant_id
routers_list = api.fwaas.firewall_unassociated_routers_list(
request, tenant_id)
return [(r.id, r.name_or_id) for r in routers_list]
def get_new_router_ids(self, context):
existing_router_ids = self.initial['router_ids']
add_router_ids = context['router_ids']
return add_router_ids + existing_router_ids
class RemoveRouterFromFirewall(RouterInsertionFormBase):
router_ids = forms.MultipleChoiceField(
label=_("Associated Routers"),
required=False,
widget=forms.ThemableCheckboxSelectMultiple(),
help_text=_("Unselect the router(s) to be removed from firewall."))
failure_url = 'horizon:project:firewalls:index'
success_msg = _('Router(s) was successfully removed from firewall '
'%(firewall)s.')
failure_msg = _('Failed to remove router(s) from firewall %(name)s: '
'%(reason)s')
init_failure_msg = _('Failed to retrieve current routers in firewall '
'%(name)s: %(reason)s')
def get_router_choices(self, request, kwargs):
tenant_id = self.request.user.tenant_id
all_routers = api.neutron.router_list(request, tenant_id=tenant_id)
current_routers = [r for r in all_routers
if r['id'] in kwargs['initial']['router_ids']]
return [(r.id, r.name_or_id) for r in current_routers]
def get_new_router_ids(self, context):
# context[router_ids] is router IDs to be kept.
return context['router_ids']
| apache-2.0 | 5,478,903,712,468,556,000 | 41.556995 | 79 | 0.581847 | false |
allqoow/exerciseML | week02_1.py | 1 | 5407 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Author : allqoow
# Contact : [email protected]
# Started on: 20161029(yyyymmdd)
# Project : exerciseML(Exercise for Machine Learning)
# H2.1.A
print "H2.1.A"
import matplotlib.pyplot
import math
import numpy.random
x1List = []
x2List = []
yList = []
colourList = []
with open("applesOranges.csv","r") as openedF:
rawContent = openedF.read()
rawContentByLine = rawContent.split("\n")
for obs in rawContentByLine[1:]:
# For stability reason. It operates only if the input data are good.
if len(obs.split(",")) == 3:
splitRec = obs.split(",")
x1List.append(float(splitRec[0]))
x2List.append(float(splitRec[1]))
yList.append(int(splitRec[2]))
if int(splitRec[2]) == 0:
colourList.append((1,0,0,1)) # red for apples
elif int(splitRec[2]) == 1:
				colourList.append((0,0,1,1)) # blue for oranges
print len(colourList)
matplotlib.pyplot.scatter(x1List,x2List, c=colourList)
matplotlib.pyplot.show()
# H2.1.B
print "\nH2.1.B"
pi = math.pi
wVecList = []
for alpha in range(20):
wVecList.append([math.sin(pi*alpha/20), math.cos(pi*alpha/20)])
#print wVecList
obsVecList = zip(x1List, x2List, yList)
sampleSize = float(len(obsVecList))
bestPerformance = 0
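# Evaluate each candidate weight vector with a linear decision rule:
# classify as class 1 (orange) if w . x > 0 and class 0 (apple) if w . x < 0,
# then score it by the fraction of correctly classified observations.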
for wVec in wVecList:
countCorrect = 0
for obsVec in obsVecList:
if wVec[0]*obsVec[0] + wVec[1]*obsVec[1] > 0:
est = 1
elif wVec[0]*obsVec[0] + wVec[1]*obsVec[1] < 0:
est = 0
if est == int(obsVec[2]):
countCorrect += 1
# evaluation of performance
performance = countCorrect/sampleSize
print str(wVec) + " => " + str(performance)
if bestPerformance < performance:
bestWVec = wVec
bestPerformance = performance
# plotting
matplotlib.pyplot.scatter(x1List,x2List, c=colourList)
x2Vec = [-wVec[0]*(-2),0,-wVec[0]*2]
x1Vec = [wVec[1]*(-2),0,wVec[1]*2]
matplotlib.pyplot.plot(x1Vec, x2Vec)
#matplotlib.pyplot.show()
# H2.1.C
print "\nH2.1.C"
print str(bestWVec) + " => " + str(bestPerformance)
thetaList = [-3 + (x/10.0) for x in range(61)]
bestPerformance = 0
for theta in thetaList:
countCorrect = 0
inputText = ""
for obsVec in obsVecList:
if bestWVec[0]*obsVec[0] + bestWVec[1]*obsVec[1] + theta > 0:
est = 1
elif bestWVec[0]*obsVec[0] + bestWVec[1]*obsVec[1] + theta < 0:
est = 0
if est == int(obsVec[2]):
countCorrect += 1
#print str(obsVec[0]) +","+str(obsVec[1])+","+str(est)+","+str(obsVec[2])
inputText += str(obsVec[0]) +","+str(obsVec[1])+","+str(est)+"\n"
#print inputText
performance = countCorrect/sampleSize
print str(theta) + " => " + str(performance)
if bestPerformance < performance:
bestTheta = theta
bestPerformance = performance
bestInputText = inputText
print bestWVec
print bestTheta
# H2.1.D
with open("applesOrangesEst.txt","w") as res:
alphaList =range(20)
thetaList = [-3 + (x/10.0) for x in range(61)]
writeStr = ""
for obsVec in obsVecList:
if bestWVec[0]*obsVec[0] + bestWVec[1]*obsVec[1] + bestTheta> 0:
est = 1
elif bestWVec[0]*obsVec[0] + bestWVec[1]*obsVec[1] + bestTheta < 0:
est = 0
if est == int(obsVec[2]):
countCorrect += 1
writeStr += str(obsVec[0]) +","+ str(obsVec[1])+","+str(est)+"\n"
res.write(writeStr)
with open("applesOrangesEst.txt","r") as openedF:
x1List2 = []
x2List2 = []
yList2 = []
colourList2 = []
rawContent = openedF.read()
rawContentByLine = rawContent.split("\n")
for obs in rawContentByLine:
		# For stability reasons: process this line only if it is well-formed.
if len(obs.split(",")) == 3:
splitRec = obs.strip().split(",")
x1List2.append(float(splitRec[0]))
x2List2.append(float(splitRec[1]))
#yList2.append(int(splitRec[2]))
if int(splitRec[2]) == 0:
colourList2.append((1,0,0,1)) # red for apples
elif int(splitRec[2]) == 1:
colourList2.append((1,0.5,0,1)) # orange for oranges
bestx2Vec = [(-bestWVec[0])*(-2)-bestTheta,0-bestTheta,(-bestWVec[0])*2-bestTheta]
bestx1Vec = [bestWVec[1]*(-2),0,bestWVec[1]*2]
matplotlib.pyplot.clf()
matplotlib.pyplot.scatter(x1List2,x2List2, c=colourList2)
matplotlib.pyplot.plot(bestx1Vec, bestx2Vec)
matplotlib.pyplot.show()
# H2.1.E
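# Sweep a finer grid over both the direction (alpha) and the offset (theta),
# record the classification performance of each pair in results.txt, and
# plot the grid as a heatmap.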
with open("results.txt","w") as res:
alphaList =range(360)
thetaList = [-3 + (x/40.0) for x in range(241)]
performanceList = []
for alpha in alphaList:
wVec = [math.sin(pi*alpha/80), math.cos(pi*alpha/80)]
for theta in thetaList:
countCorrect = 0
for obsVec in obsVecList:
if wVec[0]*obsVec[0] + wVec[1]*obsVec[1] + theta> 0:
est = 0
elif wVec[0]*obsVec[0] + wVec[1]*obsVec[1] + theta < 0:
est = 1
if est == int(obsVec[2]):
countCorrect += 1
# evaluation of performance
performance = countCorrect/sampleSize
#print "(alpha=" + str(alpha) + ", theta=" + str(theta) + ") => " + str(performance)
performanceList.append(performance)
writeStr = str(alpha) +","+ str(theta)+","+str(performance)+"\n"
res.write(writeStr)
data = numpy.genfromtxt('results.txt',delimiter=',')
alphas=numpy.unique(data[:,0])
thetas=numpy.unique(data[:,1])
Alphas,Thetas = numpy.meshgrid(alphas,thetas)
Performances=data[:,2].reshape(len(thetas),len(alphas))
matplotlib.pyplot.pcolormesh(Alphas,Thetas,Performances)
matplotlib.pyplot.show()
# H2.1.F
# No.
# What if there is a non-linear border (or border-like structure) between classes?
# We cannot distinguish those (two) classes with a line or hyperplane.
# clearing existing data
matplotlib.pyplot.clf() | mit | 3,781,250,274,684,984,300 | 27.919786 | 87 | 0.667468 | false |
applicationdevm/XlsxWriter | xlsxwriter/test/comparison/test_print_options04.py | 8 | 1330 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'print_options04.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = ['xl/printerSettings/printerSettings1.bin',
'xl/worksheets/_rels/sheet1.xml.rels']
self.ignore_elements = {'[Content_Types].xml': ['<Default Extension="bin"'],
'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with print options."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.print_row_col_headers()
worksheet.write('A1', 'Foo')
workbook.close()
self.assertExcelEqual()
| bsd-2-clause | 6,950,561,975,977,381,000 | 28.555556 | 91 | 0.585714 | false |
cjh1/StarCluster | starcluster/volume.py | 14 | 15640 | # Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
import time
import string
from starcluster import utils
from starcluster import static
from starcluster import exception
from starcluster import cluster
from starcluster.utils import print_timing
from starcluster.logger import log
class VolumeCreator(cluster.Cluster):
"""
Handles creating, partitioning, and formatting a new EBS volume.
By default this class will format the entire drive (without partitioning)
using the ext3 filesystem.
    host_instance - EC2 instance to use when formatting the volume. Must exist
    in the same zone as the new volume. If not specified this class will look
    for host instances in the @sc-volumecreator security group. If it can't
    find an instance in the @sc-volumecreator group that matches the zone of
    the new volume, a new instance is launched.
    shutdown_instance - True will shut down the host instance after volume
    creation
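    Illustrative usage sketch, assuming a valid boto EC2 connection object and
    an existing keypair:
        vc = VolumeCreator(ec2_conn, keypair='mykey',
                           key_location='/path/to/mykey.rsa')
        new_vol = vc.create(volume_size=20, volume_zone='us-east-1a',
                            name='mydata')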
"""
def __init__(self, ec2_conn, spot_bid=None, keypair=None,
key_location=None, host_instance=None, device='/dev/sdz',
image_id=static.BASE_AMI_32, instance_type="t1.micro",
shutdown_instance=False, detach_vol=False,
mkfs_cmd='mkfs.ext3 -F', resizefs_cmd='resize2fs', **kwargs):
self._host_instance = host_instance
self._instance = None
self._volume = None
self._aws_block_device = device or '/dev/sdz'
self._real_device = None
self._image_id = image_id or static.BASE_AMI_32
self._instance_type = instance_type or 'm1.small'
self._shutdown = shutdown_instance
self._detach_vol = detach_vol
self._mkfs_cmd = mkfs_cmd
self._resizefs_cmd = resizefs_cmd
self._alias_tmpl = "volhost-%s"
super(VolumeCreator, self).__init__(
ec2_conn=ec2_conn, spot_bid=spot_bid, keyname=keypair,
key_location=key_location, cluster_tag=static.VOLUME_GROUP_NAME,
cluster_size=1, cluster_user="sgeadmin", cluster_shell="bash",
node_image_id=self._image_id, subnet_id=kwargs.get('subnet_id'),
node_instance_type=self._instance_type, force_spot_master=True)
def __repr__(self):
return "<VolumeCreator: %s>" % self._mkfs_cmd
def _get_existing_instance(self, zone):
"""
Returns any existing instance in the @sc-volumecreator group that's
located in zone.
"""
active_states = ['pending', 'running']
i = self._host_instance
if i and self._validate_host_instance(i, zone):
log.info("Using specified host instance %s" % i.id)
return i
for node in self.nodes:
if node.state in active_states and node.placement == zone:
log.info("Using existing instance %s in group %s" %
(node.id, self.cluster_group.name))
return node
def _request_instance(self, zone):
self._instance = self._get_existing_instance(zone)
if not self._instance:
alias = self._alias_tmpl % zone
self._validate_image_and_type(self._image_id, self._instance_type)
log.info(
"No instance in group %s for zone %s, launching one now." %
(self.cluster_group.name, zone))
self._resv = self.create_node(alias, image_id=self._image_id,
instance_type=self._instance_type,
zone=zone)
self.wait_for_cluster(msg="Waiting for volume host to come up...")
self._instance = self.get_node(alias)
else:
s = utils.get_spinner("Waiting for instance %s to come up..." %
self._instance.id)
while not self._instance.is_up():
time.sleep(self.refresh_interval)
s.stop()
return self._instance
def _create_volume(self, size, zone, snapshot_id=None):
vol = self.ec2.create_volume(size, zone, snapshot_id)
self._volume = vol
log.info("New volume id: %s" % vol.id)
self.ec2.wait_for_volume(vol, status='available')
return vol
def _create_snapshot(self, volume):
snap = self.ec2.create_snapshot(volume, wait_for_snapshot=True)
log.info("New snapshot id: %s" % snap.id)
self._snapshot = snap
return snap
def _determine_device(self):
block_dev_map = self._instance.block_device_mapping
for char in string.lowercase[::-1]:
dev = '/dev/sd%s' % char
if not block_dev_map.get(dev):
self._aws_block_device = dev
return self._aws_block_device
def _get_volume_device(self, device=None):
dev = device or self._aws_block_device
inst = self._instance
if inst.ssh.path_exists(dev):
self._real_device = dev
return dev
xvdev = '/dev/xvd' + dev[-1]
if inst.ssh.path_exists(xvdev):
self._real_device = xvdev
return xvdev
raise exception.BaseException("Can't find volume device")
def _attach_volume(self, vol, instance_id, device):
log.info("Attaching volume %s to instance %s..." %
(vol.id, instance_id))
vol.attach(instance_id, device)
self.ec2.wait_for_volume(vol, state='attached')
return self._volume
def _validate_host_instance(self, instance, zone):
if instance.state not in ['pending', 'running']:
raise exception.InstanceNotRunning(instance.id)
if instance.placement != zone:
raise exception.ValidationError(
"specified host instance %s is not in zone %s" %
(instance.id, zone))
return True
def _validate_image_and_type(self, image, itype):
img = self.ec2.get_image_or_none(image)
if not img:
raise exception.ValidationError(
'image %s does not exist' % image)
if itype not in static.INSTANCE_TYPES:
choices = ', '.join(static.INSTANCE_TYPES)
raise exception.ValidationError(
'instance_type must be one of: %s' % choices)
itype_platform = static.INSTANCE_TYPES.get(itype)
img_platform = img.architecture
if img_platform not in itype_platform:
error_msg = "instance_type %(itype)s is for an "
error_msg += "%(iplat)s platform while image_id "
error_msg += "%(img)s is an %(imgplat)s platform"
error_msg %= {'itype': itype, 'iplat': ', '.join(itype_platform),
'img': img.id, 'imgplat': img_platform}
raise exception.ValidationError(error_msg)
def _validate_zone(self, zone):
z = self.ec2.get_zone(zone)
if z.state != 'available':
log.warn('zone %s is not available at this time' % zone)
return True
def _validate_size(self, size):
try:
volume_size = int(size)
if volume_size < 1:
raise exception.ValidationError(
"volume_size must be an integer >= 1")
except ValueError:
raise exception.ValidationError("volume_size must be an integer")
def _validate_device(self, device):
if not utils.is_valid_device(device):
raise exception.ValidationError("volume device %s is not valid" %
device)
def _validate_required_progs(self, progs):
log.info("Checking for required remote commands...")
self._instance.ssh.check_required(progs)
def validate(self, size, zone, device):
self._validate_size(size)
self._validate_zone(zone)
self._validate_device(device)
def is_valid(self, size, zone, device):
try:
self.validate(size, zone, device)
return True
except exception.BaseException, e:
log.error(e.msg)
return False
def _repartition_volume(self):
conn = self._instance.ssh
partmap = self._instance.get_partition_map()
part = self._real_device + '1'
start = partmap.get(part)[0]
conn.execute('echo "%s,,L" | sfdisk -f -uS %s' %
(start, self._real_device), silent=False)
conn.execute('e2fsck -p -f %s' % part, silent=False)
def _format_volume(self):
log.info("Formatting volume...")
self._instance.ssh.execute('%s %s' %
(self._mkfs_cmd, self._real_device),
silent=False)
def _warn_about_volume_hosts(self):
sg = self.ec2.get_group_or_none(static.VOLUME_GROUP)
vol_hosts = []
if sg:
vol_hosts = filter(lambda x: x.state in ['running', 'pending'],
sg.instances())
if self._instance:
vol_hosts.append(self._instance)
vol_hosts = list(set([h.id for h in vol_hosts]))
if vol_hosts:
log.warn("There are still volume hosts running: %s" %
', '.join(vol_hosts))
if not self._instance:
log.warn("Run 'starcluster terminate -f %s' to terminate all "
"volume host instances" % static.VOLUME_GROUP_NAME,
extra=dict(__textwrap__=True))
elif sg:
log.info("No active volume hosts found. Run 'starcluster "
"terminate -f %(g)s' to remove the '%(g)s' group" %
{'g': static.VOLUME_GROUP_NAME},
extra=dict(__textwrap__=True))
def shutdown(self):
vol = self._volume
host = self._instance
if self._detach_vol:
log.info("Detaching volume %s from instance %s" %
(vol.id, host.id))
vol.detach()
else:
log.info("Leaving volume %s attached to instance %s" %
(vol.id, host.id))
if self._shutdown:
log.info("Terminating host instance %s" % host.id)
host.terminate()
else:
log.info("Not terminating host instance %s" %
host.id)
def _delete_new_volume(self):
"""
Should only be used during clean-up in the case of an error
"""
newvol = self._volume
if newvol:
log.error("Detaching and deleting *new* volume: %s" % newvol.id)
if newvol.update() != 'available':
newvol.detach(force=True)
self.ec2.wait_for_volume(newvol, status='available')
newvol.delete()
self._volume = None
@print_timing("Creating volume")
def create(self, volume_size, volume_zone, name=None, tags=None):
try:
self.validate(volume_size, volume_zone, self._aws_block_device)
instance = self._request_instance(volume_zone)
self._validate_required_progs([self._mkfs_cmd.split()[0]])
self._determine_device()
vol = self._create_volume(volume_size, volume_zone)
if tags:
for tag in tags:
tagval = tags.get(tag)
tagmsg = "Adding volume tag: %s" % tag
if tagval:
tagmsg += "=%s" % tagval
log.info(tagmsg)
vol.add_tag(tag, tagval)
if name:
vol.add_tag("Name", name)
self._attach_volume(self._volume, instance.id,
self._aws_block_device)
self._get_volume_device(self._aws_block_device)
self._format_volume()
self.shutdown()
log.info("Your new %sGB volume %s has been created successfully" %
(volume_size, vol.id))
return vol
except Exception:
log.error("Failed to create new volume", exc_info=True)
self._delete_new_volume()
raise
finally:
self._warn_about_volume_hosts()
def _validate_resize(self, vol, size):
self._validate_size(size)
if vol.size > size:
log.warn("You are attempting to shrink an EBS volume. "
"Data loss may occur")
@print_timing("Resizing volume")
def resize(self, vol, size, dest_zone=None):
"""
Resize EBS volume
vol - boto volume object
size - new volume size
dest_zone - zone to create the new resized volume in. this must be
within the original volume's region otherwise a manual copy (rsync)
is required. this is currently not implemented.
"""
try:
self._validate_device(self._aws_block_device)
self._validate_resize(vol, size)
zone = vol.zone
if dest_zone:
self._validate_zone(dest_zone)
zone = dest_zone
host = self._request_instance(zone)
resizefs_exe = self._resizefs_cmd.split()[0]
required = [resizefs_exe]
if resizefs_exe == 'resize2fs':
required.append('e2fsck')
self._validate_required_progs(required)
self._determine_device()
snap = self._create_snapshot(vol)
new_vol = self._create_volume(size, zone, snap.id)
self._attach_volume(new_vol, host.id, self._aws_block_device)
device = self._get_volume_device()
devs = filter(lambda x: x.startswith(device), host.ssh.ls('/dev'))
if len(devs) == 1:
log.info("No partitions found, resizing entire device")
elif len(devs) == 2:
log.info("One partition found, resizing partition...")
self._repartition_volume()
device += '1'
else:
raise exception.InvalidOperation(
"EBS volume %s has more than 1 partition. "
"You must resize this volume manually" % vol.id)
if resizefs_exe == "resize2fs":
log.info("Running e2fsck on new volume")
host.ssh.execute("e2fsck -y -f %s" % device)
log.info("Running %s on new volume" % self._resizefs_cmd)
host.ssh.execute(' '.join([self._resizefs_cmd, device]))
self.shutdown()
return new_vol.id
except Exception:
log.error("Failed to resize volume %s" % vol.id)
self._delete_new_volume()
raise
finally:
snap = self._snapshot
if snap:
log_func = log.info if self._volume else log.error
log_func("Deleting snapshot %s" % snap.id)
snap.delete()
self._warn_about_volume_hosts()
| gpl-3.0 | 88,742,406,173,033,680 | 40.595745 | 79 | 0.564386 | false |
lizardsystem/lizard5-apps | lizard_sticky_twitterized/layers.py | 2 | 6578 | """
Adapter for lizard-sticky-twitterized
"""
from __future__ import division, print_function
import os
import mapnik
from django.conf import settings
from django.contrib.gis.geos import Point
from django.contrib.gis.measure import D
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from lizard_map import workspace
from lizard_map.coordinates import WGS84
from lizard_map.coordinates import wgs84_to_google
from lizard_map.mapnik_helper import add_datasource_point
from lizard_map.models import ICON_ORIGINALS
from lizard_map.symbol_manager import SymbolManager
from lizard_sticky_twitterized.models import StickyTweet
from lizard_map.daterange import current_start_end_dates
ICON_STYLE = {'icon': 'twitter.png',
'mask': ('twitter_mask.png', ),
'color': (0.25, 0.6, 1, 0)}
#Twitter blue
class AdapterStickyTwitterized(workspace.WorkspaceItemAdapter):
def __init__(self, *args, **kwargs):
"""
tags: list or queryset of tags
If no tags are selected, all stickies are selected!
"""
super(AdapterStickyTwitterized, self).__init__(*args, **kwargs)
def style(self):
"""
Make mapnik point style
"""
symbol_manager = SymbolManager(
ICON_ORIGINALS,
os.path.join(settings.MEDIA_ROOT, 'generated_icons'))
output_filename = symbol_manager.get_symbol_transformed(
ICON_STYLE['icon'], **ICON_STYLE)
output_filename_abs = os.path.join(
settings.MEDIA_ROOT, 'generated_icons', output_filename)
point_looks = mapnik.PointSymbolizer()
point_looks.filename = output_filename_abs
point_looks.allow_overlap = True
layout_rule = mapnik.Rule()
layout_rule.symbols.append(point_looks)
point_style = mapnik.Style()
point_style.rules.append(layout_rule)
return point_style
@property
def stickies(self):
"""
Return Stickies.
"""
result = StickyTweet.objects.exclude(
geom=None).exclude(visible=False)
if self.layer_arguments:
result = result.filter(id=self.layer_arguments['id'])
return result
def layer(self, layer_ids=None, request=None):
"""Return a layer with all stickies or stickies with selected
tags
"""
start_end = current_start_end_dates(request)
layers = []
styles = {}
layer = mapnik.Layer("Stickies", WGS84)
layer.datasource = mapnik.MemoryDatasource()
stickies = self.stickies.exclude(time__gte=start_end[1]
).filter(time__gte=start_end[0])
for _id, sticky in enumerate(stickies):
add_datasource_point(layer.datasource,
sticky.geom.x,
sticky.geom.y,
'Name',
'hssd',
_id)
# generate "unique" point style name and append to layer
style_name = "StickyTweets"
styles[style_name] = self.style()
layer.styles.append(style_name)
layers = [layer, ]
return layers, styles
def values(self, identifier, start_date, end_date):
"""Return values in list of dictionaries (datetime, value, unit)
"""
stickies = self.stickies.filter(datetime__gte=start_date,
datetime__lte=end_date)
return [{'datetime': sticky.datetime,
'value': sticky.description,
'unit': ''} for sticky in stickies]
def search(self, google_x, google_y, radius=None):
"""
returns a list of dicts with keys distance, name, shortname,
google_coords, workspace_item, identifier
"""
#from lizard_map.coordinates import google_to_rd
#x, y = google_to_rd(google_x, google_y)
#pnt = Point(x, y, srid=28992) # 900913
pnt = Point(google_x, google_y, srid=900913) # 900913
#print pnt, radius
stickies = self.stickies.filter(
geom__distance_lte=(pnt, D(m=radius * 0.5))).distance(pnt
).order_by('distance')
if stickies:
stickies = [stickies[0]]
result = [{'distance': 0.0,
'name': '%s (%s)' % (sticky.tweet, sticky.twitter_name),
'shortname': str(sticky.tweet),
'object': sticky,
'google_coords': wgs84_to_google(sticky.geom.x,
sticky.geom.y),
'workspace_item': self.workspace_item,
'identifier': {'sticky_id': sticky.id},
} for sticky in stickies]
return result
def location(self, sticky_id, layout=None):
"""
returns location dict.
requires identifier_json
"""
sticky = get_object_or_404(StickyTweet, pk=sticky_id)
identifier = {'sticky_id': sticky.id}
return {
'name': '%s' % (sticky.twitter_name),
'tweet': str(sticky.tweet),
'media_url': str(sticky.media_url),
'workspace_item': self.workspace_item,
'identifier': identifier,
'google_coords': wgs84_to_google(sticky.geom.x, sticky.geom.y),
'object': sticky,
}
def symbol_url(self, identifier=None, start_date=None, end_date=None):
return super(AdapterStickyTwitterized, self).symbol_url(
identifier=identifier,
start_date=start_date,
end_date=end_date,
icon_style=ICON_STYLE)
def html(self, snippet_group=None, identifiers=None, layout_options=None):
"""
Renders stickies
"""
if snippet_group:
snippets = snippet_group.snippets.all()
identifiers = [snippet.identifier for snippet in snippets]
display_group = [
self.location(**identifier) for identifier in identifiers]
add_snippet = False
if layout_options and 'add_snippet' in layout_options:
add_snippet = layout_options['add_snippet']
return render_to_string(
'lizard_sticky_twitterized/popup_sticky_twitterized.html',
{'display_group': display_group,
'add_snippet': add_snippet,
'symbol_url': self.symbol_url()})
| lgpl-3.0 | 4,482,815,261,826,298,400 | 35.955056 | 88 | 0.571602 | false |
katyushacccp/ISN_projet_final | alpha 2.4/modules/mouvements.py | 2 | 11399 | from random import *
from modules.affichage import *
from modules.recherche import *
from time import *
def start():
"""Cette fonction sert à initialiser la variable globale 'compteur' qui va par la suite gérer le temps"""
wFile("compteur","cub",0)
def melangeur(cube,can,nombre):
"""Cette fonction sert à mélanger de manière aléatoire le cube un nombre n de fois, n étant défini par le paramètre 'nombre'"""
for i in range(nombre):
can.after(i*rFile("timeur","cub"),lambda:rotation(cube,can,randint(0,5),choice(["droite","gauche"])))
def rotative(cube,can,face,faceSup,norme):
"""La fonction 'rotative' normalise les différentes fonctions qui gère les mouvements dans le cube.
La fonction a également la notion du temps, pour cela elle s'aide de la variable globale 'compteur'"""
compteur=rFile("compteur","cub")
wFile("compteur","cub",compteur+1)
if norme.upper()=="U":
can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationHaut(cube,can,face,faceSup,"gauche"))
elif norme.upper()=="U'":
can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationHaut(cube,can,face,faceSup,"droite"))
elif norme.upper()=="L":
can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationGauche(cube,can,face,faceSup,"bas"))
elif norme.upper()=="L'":
can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationGauche(cube,can,face,faceSup,"haut"))
elif norme.upper()=="F":
can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotation(cube,can,face,"droite"))
elif norme.upper()=="F'":
can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotation(cube,can,face,"gauche"))
elif norme.upper()=="R":
can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationDroite(cube,can,face,faceSup,"haut"))
elif norme.upper()=="R'":
can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationDroite(cube,can,face,faceSup,"bas"))
elif norme.upper()=="D":
can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationBas(cube,can,face,faceSup,"droite"))
elif norme.upper()=="D'":
can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationBas(cube,can,face,faceSup,"gauche"))
elif norme.upper()=="M":
can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:axe(cube,can,face,faceSup,"bas"))
elif norme.upper()=="M'":
can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:axe(cube,can,face,faceSup,"haut"))
elif norme.upper()=="E":
can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:axe(cube,can,face,faceSup,"droite"))
elif norme.upper()=="E'":
can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:axe(cube,can,face,faceSup,"gauche"))
def rotativeUser(cube,can,face,faceSup,norme):
"""La fonction 'rotaitveUser' est identique à la fonction 'rotative', la gestion du temps en moins.
Cela est donc pratique pour effectuer des mouvements instantanés"""
if norme.upper()=="U":
rotationHaut(cube,can,face,faceSup,"gauche")
elif norme.upper()=="U'":
rotationHaut(cube,can,face,faceSup,"droite")
elif norme.upper()=="L":
rotationGauche(cube,can,face,faceSup,"bas")
elif norme.upper()=="L'":
rotationGauche(cube,can,face,faceSup,"haut")
elif norme.upper()=="F":
rotation(cube,can,face,"droite")
elif norme.upper()=="F'":
rotation(cube,can,face,"gauche")
elif norme.upper()=="R":
rotationDroite(cube,can,face,faceSup,"haut")
elif norme.upper()=="R'":
rotationDroite(cube,can,face,faceSup,"bas")
elif norme.upper()=="D":
rotationBas(cube,can,face,faceSup,"droite")
elif norme.upper()=="D'":
rotationBas(cube,can,face,faceSup,"gauche")
elif norme.upper()=="M":
axe(cube,can,face,faceSup,"bas")
elif norme.upper()=="M'":
axe(cube,can,face,faceSup,"haut")
elif norme.upper()=="E":
axe(cube,can,face,faceSup,"droite")
elif norme.upper()=="E'":
axe(cube,can,face,faceSup,"gauche")
def rotation(cube,can,face,sens):
"""Fonction gérant la rotation d'une face.
Correspond au mouvement F si sens = droite / F' si sens = gauche"""
if sens=="droite":
cube[face][0], cube[face][1], cube[face][2], cube[face][5], cube[face][8], cube[face][7], cube[face][6], cube[face][3] \
= cube[face][6], cube[face][3], cube[face][0], cube[face][1], cube[face][2], cube[face][5], cube[face][8], cube[face][7]
pos=posRel(face)
cube[pos[0][0]][pos[0][1]], cube[pos[0][0]][pos[0][2]], cube[pos[0][0]][pos[0][3]], \
cube[pos[1][0]][pos[1][1]], cube[pos[1][0]][pos[1][2]], cube[pos[1][0]][pos[1][3]], \
cube[pos[2][0]][pos[2][3]], cube[pos[2][0]][pos[2][2]], cube[pos[2][0]][pos[2][1]], \
cube[pos[3][0]][pos[3][3]], cube[pos[3][0]][pos[3][2]], cube[pos[3][0]][pos[3][1]] \
= cube[pos[3][0]][pos[3][3]], cube[pos[3][0]][pos[3][2]], cube[pos[3][0]][pos[3][1]], \
cube[pos[0][0]][pos[0][1]], cube[pos[0][0]][pos[0][2]], cube[pos[0][0]][pos[0][3]], \
cube[pos[1][0]][pos[1][1]], cube[pos[1][0]][pos[1][2]], cube[pos[1][0]][pos[1][3]], \
cube[pos[2][0]][pos[2][3]], cube[pos[2][0]][pos[2][2]], cube[pos[2][0]][pos[2][1]]
elif sens=="gauche":
cube[face][0], cube[face][1], cube[face][2], cube[face][5], cube[face][8], cube[face][7], cube[face][6], cube[face][3] \
= cube[face][2],cube[face][5],cube[face][8],cube[face][7],cube[face][6],cube[face][3],cube[face][0],cube[face][1]
pos=posRel(face)
cube[pos[0][0]][pos[0][1]], cube[pos[0][0]][pos[0][2]], cube[pos[0][0]][pos[0][3]], \
cube[pos[1][0]][pos[1][1]], cube[pos[1][0]][pos[1][2]], cube[pos[1][0]][pos[1][3]], \
cube[pos[2][0]][pos[2][3]], cube[pos[2][0]][pos[2][2]], cube[pos[2][0]][pos[2][1]], \
cube[pos[3][0]][pos[3][3]], cube[pos[3][0]][pos[3][2]], cube[pos[3][0]][pos[3][1]] \
= cube[pos[1][0]][pos[1][1]], cube[pos[1][0]][pos[1][2]], cube[pos[1][0]][pos[1][3]], \
cube[pos[2][0]][pos[2][3]], cube[pos[2][0]][pos[2][2]], cube[pos[2][0]][pos[2][1]], \
cube[pos[3][0]][pos[3][3]], cube[pos[3][0]][pos[3][2]], cube[pos[3][0]][pos[3][1]], \
cube[pos[0][0]][pos[0][1]], cube[pos[0][0]][pos[0][2]], cube[pos[0][0]][pos[0][3]]
actualise(cube,can)
def rotationHaut(cube,can,face,faceSup,sens):
"""Fonction se basant sur la fonction 'rotation' afin de permettre un autre mouvement.
Correspond au mouvement U si sens = gauche / U' si sens = droite"""
if sens=="gauche":
rotation(cube,can,faceSup,"droite")
elif sens=="droite":
rotation(cube,can,faceSup,"gauche")
def rotationBas(cube,can,face,faceSup,sens):
"""Fonction se basant sur la fonction 'rotation' afin de permettre un autre mouvement.
Correspond au mouvement D si sens = droite / D' si sens = gauche"""
faceBas=posRel(faceSup)[4]
rotation(cube,can,faceBas,sens)
def rotationDroite(cube,can,face,faceSup,sens):
"""Fonction se basant sur la fonction 'rotation' afin de permettre un autre mouvement.
Correspond au mouvement R si sens = haut / R' si sens = bas"""
pos=posRel(face)
for i in range(4):
if pos[i][0]==faceSup:
supChiffre=i
faceDroite=posRel(face)[boussole(supChiffre+1)][0]
if sens=="haut":
rotation(cube,can,faceDroite,"droite")
elif sens=="bas":
rotation(cube,can,faceDroite,"gauche")
def rotationGauche(cube,can,face,faceSup,sens):
"""Fonction se basant sur la fonction 'rotation' afin de permettre un autre mouvement.
Correspond au mouvement L si sens = bas / L' si sens = haut"""
pos=posRel(face)
for i in range(4):
if pos[i][0]==faceSup:
supChiffre=i
faceDroite=posRel(face)[boussole(supChiffre-1)][0]
if sens=="haut":
rotation(cube,can,faceDroite,"gauche")
elif sens=="bas":
rotation(cube,can,faceDroite,"droite")
def axe(cube,can,face,faceSup,sens):
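    """Rotation of a middle slice of the cube, oriented relative to 'faceSup'.
    Dispatched from 'rotative': corresponds to M if sens = bas / M' if sens = haut / E if sens = droite / E' if sens = gauche"""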
sensGlobal=[]
pos=posRel(face)
for i in range(4):
if pos[i][0]==faceSup:
sensChiffre=i
sens=int(deReconnaissanceDirection(sens))
sensGlobal=reconnaissanceDirection(sensChiffre+sens)
if sensGlobal=="haut":
cube[face][1], cube[face][4], cube[face][7], \
cube[posRel(face)[2][0]][posRel(face)[2][2]], cube[posRel(face)[2][0]][4], cube[posRel(face)[2][0]][arreteOppose(posRel(face)[2][2])], \
cube[posRel(face)[4]][7], cube[posRel(face)[4]][4], cube[posRel(face)[4]][1], \
cube[posRel(face)[0][0]][arreteOppose(posRel(face)[0][2])], cube[posRel(face)[0][0]][4], cube[posRel(face)[0][0]][posRel(face)[0][2]] \
= cube[posRel(face)[2][0]][posRel(face)[2][2]], cube[posRel(face)[2][0]][4], cube[posRel(face)[2][0]][arreteOppose(posRel(face)[2][2])], \
cube[posRel(face)[4]][7], cube[posRel(face)[4]][4], cube[posRel(face)[4]][1], \
cube[posRel(face)[0][0]][arreteOppose(posRel(face)[0][2])], cube[posRel(face)[0][0]][4], cube[posRel(face)[0][0]][posRel(face)[0][2]], \
cube[face][1], cube[face][4], cube[face][7]
elif sensGlobal=="droite":
cube[face][3], cube[face][4], cube[face][5], \
cube[posRel(face)[1][0]][posRel(face)[1][2]], cube[posRel(face)[1][0]][4], cube[posRel(face)[1][0]][arreteOppose(posRel(face)[1][2])], \
cube[posRel(face)[4]][3], cube[posRel(face)[4]][4], cube[posRel(face)[4]][5], \
cube[posRel(face)[3][0]][arreteOppose(posRel(face)[3][2])], cube[posRel(face)[3][0]][4], cube[posRel(face)[3][0]][posRel(face)[3][2]] \
= cube[posRel(face)[3][0]][arreteOppose(posRel(face)[3][2])], cube[posRel(face)[3][0]][4], cube[posRel(face)[3][0]][posRel(face)[3][2]], \
cube[face][3], cube[face][4], cube[face][5], \
cube[posRel(face)[1][0]][posRel(face)[1][2]], cube[posRel(face)[1][0]][4], cube[posRel(face)[1][0]][arreteOppose(posRel(face)[1][2])], \
cube[posRel(face)[4]][3], cube[posRel(face)[4]][4], cube[posRel(face)[4]][5]
elif sensGlobal=="gauche":
cube[face][3], cube[face][4], cube[face][5], \
cube[posRel(face)[1][0]][posRel(face)[1][2]], cube[posRel(face)[1][0]][4], cube[posRel(face)[1][0]][arreteOppose(posRel(face)[1][2])], \
cube[posRel(face)[4]][3], cube[posRel(face)[4]][4], cube[posRel(face)[4]][5], \
cube[posRel(face)[3][0]][arreteOppose(posRel(face)[3][2])], cube[posRel(face)[3][0]][4], cube[posRel(face)[3][0]][posRel(face)[3][2]] \
= cube[posRel(face)[1][0]][posRel(face)[1][2]], cube[posRel(face)[1][0]][4], cube[posRel(face)[1][0]][arreteOppose(posRel(face)[1][2])], \
cube[posRel(face)[4]][3], cube[posRel(face)[4]][4], cube[posRel(face)[4]][5], \
cube[posRel(face)[3][0]][arreteOppose(posRel(face)[3][2])], cube[posRel(face)[3][0]][4], cube[posRel(face)[3][0]][posRel(face)[3][2]], \
cube[face][3], cube[face][4], cube[face][5]
elif sensGlobal=="bas":
cube[face][1], cube[face][4], cube[face][7], \
cube[posRel(face)[2][0]][posRel(face)[2][2]], cube[posRel(face)[2][0]][4], cube[posRel(face)[2][0]][arreteOppose(posRel(face)[2][2])], \
cube[posRel(face)[4]][7], cube[posRel(face)[4]][4], cube[posRel(face)[4]][1], \
cube[posRel(face)[0][0]][arreteOppose(posRel(face)[0][2])], cube[posRel(face)[0][0]][4], cube[posRel(face)[0][0]][posRel(face)[0][2]] \
= cube[posRel(face)[0][0]][arreteOppose(posRel(face)[0][2])], cube[posRel(face)[0][0]][4], cube[posRel(face)[0][0]][posRel(face)[0][2]], \
cube[face][1], cube[face][4], cube[face][7], \
cube[posRel(face)[2][0]][posRel(face)[2][2]], cube[posRel(face)[2][0]][4], cube[posRel(face)[2][0]][arreteOppose(posRel(face)[2][2])], \
cube[posRel(face)[4]][7], cube[posRel(face)[4]][4], cube[posRel(face)[4]][1]
actualise(cube,can) | cc0-1.0 | -5,517,878,145,422,332,000 | 49.990868 | 140 | 0.627811 | false |
printedheart/h2o-3 | h2o-py/tests/testdir_algos/deeplearning/pyunit_offsets_and_distributionsDeeplearning.py | 4 | 1662 | import sys
sys.path.insert(1, "../../../")
import h2o, tests
def offsets_and_distributions():
# cars
cars = h2o.upload_file(h2o.locate("smalldata/junit/cars_20mpg.csv"))
cars = cars[cars["economy_20mpg"].isna() == 0]
cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
offset = h2o.H2OFrame(python_obj=[[.5] for x in range(398)])
offset.set_name(0,"x1")
cars = cars.cbind(offset)
# insurance
insurance = h2o.import_file(h2o.locate("smalldata/glm_test/insurance.csv"))
insurance["offset"] = insurance["Holders"].log()
# bernoulli - offset not supported
#dl = h2o.deeplearning(x=cars[2:8], y=cars["economy_20mpg"], distribution="bernoulli", offset_column="x1",
# training_frame=cars)
#predictions = dl.predict(cars)
# gamma
dl = h2o.deeplearning(x=insurance[0:3], y=insurance["Claims"], distribution="gamma", offset_column="offset", training_frame=insurance)
predictions = dl.predict(insurance)
# gaussian
dl = h2o.deeplearning(x=insurance[0:3], y=insurance["Claims"], distribution="gaussian", offset_column="offset", training_frame=insurance)
predictions = dl.predict(insurance)
# poisson
dl = h2o.deeplearning(x=insurance[0:3], y=insurance["Claims"], distribution="poisson", offset_column="offset", training_frame=insurance)
predictions = dl.predict(insurance)
# tweedie
dl = h2o.deeplearning(x=insurance.names[0:3], y="Claims", distribution="tweedie", offset_column="offset", training_frame=insurance)
predictions = dl.predict(insurance)
if __name__ == "__main__":
tests.run_test(sys.argv, offsets_and_distributions)
| apache-2.0 | -9,110,588,149,647,135,000 | 39.536585 | 141 | 0.67148 | false |
Strilanc/qbp_np_younes_test | mixed_state.py | 1 | 4660 | import random
from classical_state import ClassicalState
from pure_state import PureState
class MixedState:
"""
A probability distribution of pure states.
"""
def __init__(self, pure_to_probability_map):
err = abs(1 - sum(pure_to_probability_map.values()))
if err > 0.00001:
raise ValueError("Probabilities must sum to 1.")
# Discard negligible probabilities as an approximation/optimization.
self.distribution = {
pure_state: p
for pure_state, p in pure_to_probability_map.items()
if p >= 0.000001 # Discard impossible states (w/ rounding-error)
}
def measure(self, predicate):
"""
:param predicate: Takes classical states and returns hashable keys.
Determines which states the measurement distinguishes between.
States mapped to the same result by the predicate will not be split
into separate parts of the resulting mixed state.
:return: A mixed state of possible measurement results.
"""
return MixedState({
pure_2: p2 * p1
for pure_1, p1 in self.distribution.items()
for pure_2, p2 in pure_1.measure(predicate).distribution.items()
})
def post_select(self, predicate):
"""
:param predicate: Determines which classical states to keep. All
non-matching states are discarded out of the mixed state and its
superpositions (the resulting mixed state is renormalized to compensate
for the missing weight).
:return: A (probability, MixedState) pair with the probability of the
desired predicate being satisfied (the post-selection's "power") and the
        renormalized mixed state of the matching values.
>>> a, b, c = ClassicalState(0), ClassicalState(1), ClassicalState(2)
>>> p, q = PureState({a : 1}), PureState({b : 1})
>>> r = PureState({c : 0.8, b : 0.6})
>>> MixedState({p: 0.25, q: 0.75}).post_select(lambda c: c.bit(0))
(0.75, MixedState({PureState({ClassicalState(1): (1+0j)}): 1.0}))
>>> MixedState({p: 0.25, q: 0.75}).post_select(lambda c: not c.bit(0))
(0.25, MixedState({PureState({ClassicalState(0): (1+0j)}): 1.0}))
>>> MixedState({r: 1}).post_select(lambda c: c.bit(0))
(0.36, MixedState({PureState({ClassicalState(1): (1+0j)}): 1.0}))
>>> MixedState({r: 1}).post_select(lambda c: not c.bit(0))
(0.6400000000000001, MixedState({PureState({ClassicalState(2): (1+0j)}): 1.0}))
# disabled due to non-deterministic ordering of dictionaries
# >>> MixedState({p: 0.2, q: 0.3, r: 0.5}).post_select(lambda c: c.bit(0))
# (0.48, MixedState({PureState({ClassicalState(1): (1+0j)}): 0.375, PureState({ClassicalState(1): (1+0j)}): 0.625}))
"""
filtered = {
filtered_state: p_hit * p_state
for pure_state, p_state in self.distribution.items()
for p_hit, filtered_state in [pure_state.post_select(predicate)]
if p_hit * p_state != 0
}
remaining_weight = sum(filtered.values())
if remaining_weight == 0:
return 0, None
normalized = MixedState({pure_state: p / remaining_weight
for pure_state, p in filtered.items()})
return remaining_weight, normalized
def unitary_transform(self, op):
"""
:param: op Maps inputs to a superposition of outputs.
Must be a unitary operation (i.e. length preserving in all cases).
:return: The resulting mixed state, after the operation has been applied
to each pure state within the mixed state.
"""
return MixedState({pure_state.unitary_transform(op): p
for pure_state, p in self.distribution.items()})
def collapsed(self):
"""
Picks a classical state at random, with frequency proportional to
the containing mixed state probabilities and containing superpositions'
amplitudes' squared magnitudes.
:return: A classical state from the mixed state.
"""
t = random.random()
for pure_state, p in self.distribution.items():
t -= p
if t <= 0.000001:
return pure_state.collapsed()
raise AssertionError("Probabilities didn't sum to 1")
def __str__(self):
return "\n".join("{0:.1%}: {1}".format(p, pure_state)
for pure_state, p in self.distribution.items())
def __repr__(self):
return "MixedState(" + repr(self.distribution) + ")"
| apache-2.0 | -7,675,349,186,284,217,000 | 43.807692 | 124 | 0.600858 | false |
candrews/portage | pym/portage/proxy/objectproxy.py | 16 | 2758 | # Copyright 2008-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
__all__ = ['ObjectProxy']
class ObjectProxy(object):
"""
Object that acts as a proxy to another object, forwarding
attribute accesses and method calls. This can be useful
for implementing lazy initialization.
"""
__slots__ = ()
def _get_target(self):
raise NotImplementedError(self)
def __getattribute__(self, attr):
result = object.__getattribute__(self, '_get_target')()
return getattr(result, attr)
def __setattr__(self, attr, value):
result = object.__getattribute__(self, '_get_target')()
setattr(result, attr, value)
def __call__(self, *args, **kwargs):
result = object.__getattribute__(self, '_get_target')()
return result(*args, **kwargs)
def __enter__(self):
return object.__getattribute__(self, '_get_target')().__enter__()
def __exit__(self, exc_type, exc_value, traceback):
return object.__getattribute__(self, '_get_target')().__exit__(
exc_type, exc_value, traceback)
def __setitem__(self, key, value):
object.__getattribute__(self, '_get_target')()[key] = value
def __getitem__(self, key):
return object.__getattribute__(self, '_get_target')()[key]
def __delitem__(self, key):
del object.__getattribute__(self, '_get_target')()[key]
def __contains__(self, key):
return key in object.__getattribute__(self, '_get_target')()
def __iter__(self):
return iter(object.__getattribute__(self, '_get_target')())
def __len__(self):
return len(object.__getattribute__(self, '_get_target')())
def __repr__(self):
return repr(object.__getattribute__(self, '_get_target')())
def __str__(self):
return str(object.__getattribute__(self, '_get_target')())
def __add__(self, other):
return self.__str__() + other
def __hash__(self):
return hash(object.__getattribute__(self, '_get_target')())
def __ge__(self, other):
return object.__getattribute__(self, '_get_target')() >= other
def __gt__(self, other):
return object.__getattribute__(self, '_get_target')() > other
def __le__(self, other):
return object.__getattribute__(self, '_get_target')() <= other
def __lt__(self, other):
return object.__getattribute__(self, '_get_target')() < other
def __eq__(self, other):
return object.__getattribute__(self, '_get_target')() == other
def __ne__(self, other):
return object.__getattribute__(self, '_get_target')() != other
def __bool__(self):
return bool(object.__getattribute__(self, '_get_target')())
if sys.hexversion < 0x3000000:
__nonzero__ = __bool__
def __unicode__(self):
return unicode(object.__getattribute__(self, '_get_target')())
def __int__(self):
return int(object.__getattribute__(self, '_get_target')())
| gpl-2.0 | -4,750,186,335,476,187,000 | 27.142857 | 67 | 0.637418 | false |
arpadpe/plover | plover/virtualstenomachine/machine/stentura.py | 1 | 20424 | # Copyright (c) 2011 Hesky Fisher
# See LICENSE.txt for details.
# Many thanks to a steno geek for help with the protocol.
# TODO: Come up with a mechanism to communicate back to the engine when there
# is a connection error.
# TODO: Address any generic exceptions still left.
"""Thread-based monitoring of a stenotype machine using the stentura protocol.
"""
"""
The stentura protocol uses packets to communicate with the machine. A
request packet is sent to the machine and a response packet is received. If
no response is received after a one second timeout then the same packet
should be sent again. The machine may hold off on responding to a READC
packet for up to 500ms if there are no new strokes.
Each request packet should have a sequence number that is one higher than
the previously sent packet modulo 256. The response packet will have the
same sequence number. Each packet consists of a header followed by an
optional data section. All multibyte fields are little endian.
The request packet header is structured as follows:
- SOH: 1 byte. Always set to ASCII SOH (0x1).
- seq: 1 byte. The sequence number of this packet.
- length: 2 bytes. The total length of the packet, including the data
section, in bytes.
- action: 2 bytes. The action requested. See actions below.
- p1: 2 bytes. Parameter 1. The values for the parameters depend on the
action.
- p2: 2 bytes. Parameter 2.
- p3: 2 bytes. Parameter 3.
- p4: 2 bytes. Parameter 4.
- p5: 2 bytes. Parameter 5.
- checksum: 2 bytes. The CRC is computed over the packet from seq through
p5. The specific CRC algorithm used is described above in the Crc class.
The request header can be followed by a data section. The meaning of the
data section depends on the action:
- data: variable length.
- crc: 2 bytes. A CRC over just the data section.
The response packet header is structured as follows:
- SOH: 1 byte. Always set to ASCII SOH (0x1).
- seq: 1 byte. The sequence number of the request packet.
- length: 2 bytes. The total length of the packet, including the data
section, in bytes.
- action: 2 bytes. The action of the request packet.
- error: 2 bytes. The error code. Zero if no error.
- p1: 2 bytes. Parameter 1. The values of the parameters depend on the
action.
- p2: 2 bytes. Parameter 2.
- checksum: 2 bytes. The CRC is computed over the packet from seq through
p2.
The response header can be followed by a data section, whose meaning is
dependent on the action. The structure is the same as in request packets.
The stentura machine has a concept of drives and files. The first (and
possibly only) drive is called A. Each file consists of a set of one or
more blocks. Each block is 512 bytes long.
In addition to regular files, there is a realtime file whose name is
'REALTIME.000'. All strokes typed are appended to this file. Subsequent
reads from the realtime file ignore positional arguments and only return
all the strokes since the last read action. However, opening the file again
and reading from the beginning will result in all the same strokes being
read again. The only reliable way to jump to the end is to do a full,
sequential, read to the end before processing any strokes. I'm told that on
some machines sending a READC without an OPEN will just read from the
realtime file.
The contents of the files are a sequence of strokes. Each stroke consists
of four bytes. Each byte has the two most significant bits set to one. The
rest of the byte is a bitmask indicating which keys were pressed during the
stroke. The format is as follows: 11^#STKP 11WHRAO* 11EUFRPB 11LGTSDZ. ^ is
something called a stenomark. I'm not sure what that is. # is the number
bar.
Note: Only OPEN and READC are needed to get strokes as they are typed from
the realtime file.
Actions and their packets:
All unmentioned parameters should be zero and unless explicitly mentioned
the packet should have no data section.
RESET (0x14):
Unknown.
DISKSTATUS (0x7):
Unknown.
p1 is set to the ASCII value corresponding to the drive letter, e.g. 'A'.
GETDOS (0x18):
Returns the DOS filenames for the files in the requested drive.
p1 is set to the ASCII value corresponding to the drive letter, e.g. 'A'.
p2 is set to one to return the name of the realtime file (which is always
'REALTIME.000').
p3 controls which page to return, with 20 filenames per page.
The return packet contains a data section that is 512 bytes long. The first
bytes seems to be one. The filename for the first file starts at offset 32.
My guess would be that the other filenames would exist at a fixed offset of
24 bytes apart. So first filename is at 32, second is at 56, third at 80,
etc. There seems to be some meta data stored after the filename but I don't
know what it means.
DELETE (0x3):
Deletes the specified files. NOP on realtime file.
p1 is set to the ASCII value corresponding to the drive letter, e.g. 'A'.
The filename is specified in the data section.
OPEN (0xA):
Opens a file for reading. This action is sticky and causes this file to be
the current file for all following READC packets.
p1 is set to the ASCII value corresponding to the drive letter, e.g. 'A'.
The filename is specified in the data section.
I'm told that if there is an error opening the realtime file then no
strokes have been written yet.
TODO: Check that and implement workaround.
READC (0xB):
Reads characters from the currently opened file.
p1 is set to 1, I'm not sure why.
p3 is set to the maximum number of bytes to read but should probably be
512.
p4 is set to the block number.
p5 is set to the starting byte offset within the block.
It's possible that the machine will ignore the positional arguments to
READC when reading from the realtime file and just return successive values
for each call.
The response will have the number of bytes read in p1 (but the same is
deducible from the length). The data section will have the contents read
from the file.
CLOSE (0x2):
Closes the current file.
p1 is set to one, I don't know why.
TERM (0x15):
Unknown.
DIAG (0x19):
Unknown.
"""
import array
import itertools
import struct
import time
import threading
import Queue
from plover.virtualstenomachine.machine.base import VirtualStenotypeBase
class _ProtocolViolationException(Exception):
"""Something has happened that is doesn't follow the protocol."""
pass
class _StopException(Exception):
"""The thread was asked to stop."""
pass
_CRC_TABLE = [
0x0000, 0xc0c1, 0xc181, 0x0140, 0xc301, 0x03c0, 0x0280, 0xc241,
0xc601, 0x06c0, 0x0780, 0xc741, 0x0500, 0xc5c1, 0xc481, 0x0440,
0xcc01, 0x0cc0, 0x0d80, 0xcd41, 0x0f00, 0xcfc1, 0xce81, 0x0e40,
0x0a00, 0xcac1, 0xcb81, 0x0b40, 0xc901, 0x09c0, 0x0880, 0xc841,
0xd801, 0x18c0, 0x1980, 0xd941, 0x1b00, 0xdbc1, 0xda81, 0x1a40,
0x1e00, 0xdec1, 0xdf81, 0x1f40, 0xdd01, 0x1dc0, 0x1c80, 0xdc41,
0x1400, 0xd4c1, 0xd581, 0x1540, 0xd701, 0x17c0, 0x1680, 0xd641,
0xd201, 0x12c0, 0x1380, 0xd341, 0x1100, 0xd1c1, 0xd081, 0x1040,
0xf001, 0x30c0, 0x3180, 0xf141, 0x3300, 0xf3c1, 0xf281, 0x3240,
0x3600, 0xf6c1, 0xf781, 0x3740, 0xf501, 0x35c0, 0x3480, 0xf441,
0x3c00, 0xfcc1, 0xfd81, 0x3d40, 0xff01, 0x3fc0, 0x3e80, 0xfe41,
0xfa01, 0x3ac0, 0x3b80, 0xfb41, 0x3900, 0xf9c1, 0xf881, 0x3840,
0x2800, 0xe8c1, 0xe981, 0x2940, 0xeb01, 0x2bc0, 0x2a80, 0xea41,
0xee01, 0x2ec0, 0x2f80, 0xef41, 0x2d00, 0xedc1, 0xec81, 0x2c40,
0xe401, 0x24c0, 0x2580, 0xe541, 0x2700, 0xe7c1, 0xe681, 0x2640,
0x2200, 0xe2c1, 0xe381, 0x2340, 0xe101, 0x21c0, 0x2080, 0xe041,
0xa001, 0x60c0, 0x6180, 0xa141, 0x6300, 0xa3c1, 0xa281, 0x6240,
0x6600, 0xa6c1, 0xa781, 0x6740, 0xa501, 0x65c0, 0x6480, 0xa441,
0x6c00, 0xacc1, 0xad81, 0x6d40, 0xaf01, 0x6fc0, 0x6e80, 0xae41,
0xaa01, 0x6ac0, 0x6b80, 0xab41, 0x6900, 0xa9c1, 0xa881, 0x6840,
0x7800, 0xb8c1, 0xb981, 0x7940, 0xbb01, 0x7bc0, 0x7a80, 0xba41,
0xbe01, 0x7ec0, 0x7f80, 0xbf41, 0x7d00, 0xbdc1, 0xbc81, 0x7c40,
0xb401, 0x74c0, 0x7580, 0xb541, 0x7700, 0xb7c1, 0xb681, 0x7640,
0x7200, 0xb2c1, 0xb381, 0x7340, 0xb101, 0x71c0, 0x7080, 0xb041,
0x5000, 0x90c1, 0x9181, 0x5140, 0x9301, 0x53c0, 0x5280, 0x9241,
0x9601, 0x56c0, 0x5780, 0x9741, 0x5500, 0x95c1, 0x9481, 0x5440,
0x9c01, 0x5cc0, 0x5d80, 0x9d41, 0x5f00, 0x9fc1, 0x9e81, 0x5e40,
0x5a00, 0x9ac1, 0x9b81, 0x5b40, 0x9901, 0x59c0, 0x5880, 0x9841,
0x8801, 0x48c0, 0x4980, 0x8941, 0x4b00, 0x8bc1, 0x8a81, 0x4a40,
0x4e00, 0x8ec1, 0x8f81, 0x4f40, 0x8d01, 0x4dc0, 0x4c80, 0x8c41,
0x4400, 0x84c1, 0x8581, 0x4540, 0x8701, 0x47c0, 0x4680, 0x8641,
0x8201, 0x42c0, 0x4380, 0x8341, 0x4100, 0x81c1, 0x8081, 0x4040
]
def _crc(data):
"""Compute the Crc algorithm used by the stentura protocol.
This algorithm is described by the Rocksoft^TM Model CRC Algorithm as
follows:
Name : "CRC-16"
Width : 16
Poly : 8005
Init : 0000
RefIn : True
RefOut : True
XorOut : 0000
Check : BB3D
Args:
- data: The data to checksum. The data should be an iterable that returns
bytes
Returns: The computed crc for the data.
"""
checksum = 0
for b in data:
if isinstance(b, str):
b = ord(b)
checksum = (_CRC_TABLE[(checksum ^ b) & 0xff] ^
((checksum >> 8) & 0xff))
return checksum
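# Sanity check for the table-driven implementation above: per the Rocksoft
# model parameters in the docstring (Check: BB3D), _crc(b'123456789') should
# return 0xBB3D.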
def _write_to_buffer(buf, offset, data):
"""Write data to buf at offset.
Extends the size of buf as needed.
Args:
- buf: The buffer. Should be of type array('B')
- offset. The offset at which to start writing.
- data: An iterable containing the data to write.
"""
if len(buf) < offset + len(data):
buf.extend([0] * (offset + len(data) - len(buf)))
for i, v in enumerate(data, offset):
if isinstance(v, str):
v = ord(v)
buf[i] = v
# Helper table for parsing strokes of the form:
# 11^#STKP 11WHRAO* 11EUFRPB 11LGTSDZ
_STENO_KEY_CHART = ('^', '#', 'S-', 'T-', 'K-', 'P-', # Byte #1
'W-', 'H-', 'R-', 'A-', 'O-', '*', # Byte #2
'-E', '-U', '-F', '-R', '-P', '-B', # Byte #3
'-L', '-G', '-T', '-S', '-D', '-Z') # Byte #4
def _parse_stroke(a, b, c, d):
"""Parse a stroke and return a list of keys pressed.
Args:
- a: The first byte.
- b: The second byte.
- c: The third byte.
- d: The fourth byte.
Returns: A sequence with all the keys pressed in the stroke.
e.g. ['S-', 'A-', '-T']
"""
fullstroke = (((a & 0x3f) << 18) | ((b & 0x3f) << 12) |
((c & 0x3f) << 6) | d & 0x3f)
return [_STENO_KEY_CHART[i] for i in xrange(24)
if (fullstroke & (1 << (23 - i)))]
def _parse_strokes(data):
"""Parse strokes from a buffer and return a sequence of strokes.
Args:
- data: A byte buffer.
Returns: A sequence of strokes. Each stroke is a sequence of pressed keys.
Throws:
- _ProtocolViolationException if the data doesn't follow the protocol.
"""
strokes = []
if (len(data) % 4 != 0):
raise _ProtocolViolationException(
"Data size is not divisible by 4: %d" % (len(data)))
for b in data:
if (ord(b) & 0b11000000) != 0b11000000:
raise _ProtocolViolationException("Data is not stroke: 0x%X" % (b))
for a, b, c, d in itertools.izip(*([iter(data)] * 4)):
strokes.append(_parse_stroke(ord(a), ord(b), ord(c), ord(d)))
return strokes
# Actions
_CLOSE = 0x2
_DELETE = 0x3
_DIAG = 0x19
_DISKSTATUS = 0x7
_GETDOS = 0x18
_OPEN = 0xA
_READC = 0xB
_RESET = 0x14
_TERM = 0x15
# Compiled struct for writing request headers.
_REQUEST_STRUCT = struct.Struct('<2B5H')
_SHORT_STRUCT = struct.Struct('<H')
def _make_response(buf, action, seq, error=0, p1=0, p2=0, data=None):
"""Create a request packet.
Args:
- buf: The buffer used for the packet. Should be array.array('B') and will
be extended as needed.
- action: The action for the packet.
- seq: The sequence number for the packet.
    - error: The error code for the packet (default: 0).
    - p1 - p2: Parameter N for the packet (default: 0).
- data: The data to add to the packet as a sequence of bytes, if any
(default: None).
Returns: A buffer as a slice of the passed in buf that holds the packet.
"""
length = 14
if data:
length += len(data) + 2 # +2 for the data CRC.
if len(buf) < length:
buf.extend([0] * (length - len(buf)))
_REQUEST_STRUCT.pack_into(buf, 0, 1, seq, length, action, error, p1, p2)
crc = _crc(buffer(buf, 1, 11))
_SHORT_STRUCT.pack_into(buf, 12, crc)
crc = _crc(buffer(buf, 1, 13))
if data:
_write_to_buffer(buf, 14, data)
crc = _crc(data)
_SHORT_STRUCT.pack_into(buf, length - 2, crc)
return buffer(buf, 0, length)
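# The request-building helpers below (_make_open, _make_read, _make_reset)
# call _make_request(), which is not defined in this module.  The following
# sketch is a reconstruction from the request header layout described in the
# module docstring (SOH, seq, length, action, p1..p5, CRC over seq..p5, then
# an optional data section followed by its own CRC); treat it as an assumed
# implementation rather than verbatim original code.
_REQUEST_HEADER_STRUCT = struct.Struct('<2B7H')
def _make_request(buf, action, seq, p1=0, p2=0, p3=0, p4=0, p5=0, data=None):
    """Create a request packet in buf and return it as a buffer slice."""
    length = 18
    if data:
        length += len(data) + 2  # +2 for the data CRC.
    if len(buf) < length:
        buf.extend([0] * (length - len(buf)))
    _REQUEST_HEADER_STRUCT.pack_into(buf, 0, 1, seq, length, action,
                                     p1, p2, p3, p4, p5)
    crc = _crc(buffer(buf, 1, 15))  # CRC over seq through p5.
    _SHORT_STRUCT.pack_into(buf, 16, crc)
    if data:
        _write_to_buffer(buf, 18, data)
        crc = _crc(data)
        _SHORT_STRUCT.pack_into(buf, length - 2, crc)
    return buffer(buf, 0, length)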
def _make_open(buf, seq, drive, filename):
"""Make a packet with the OPEN command.
Args:
- buf: The buffer to use of type array.array('B'). Will be extended if
needed.
- seq: The sequence number of the packet.
- drive: The letter of the drive (probably 'A').
- filename: The name of the file (probably 'REALTIME.000').
Returns: A buffer as a slice of the passed in buf that holds the packet.
"""
return _make_request(buf, _OPEN, seq, p1=ord(drive), data=filename)
def _make_read(buf, seq, block, byte, length=512):
"""Make a packet with the READC command.
Args:
- buf: The buffer to use of type array.array('B'). Will be extended if
needed.
- seq: The sequence number of the packet.
- block: The index of the file block to read.
- byte: The byte offset within the block at which to start reading.
- length: The number of bytes to read, max 512 (default: 512).
Returns: A buffer as a slice of the passed in buf that holds the packet.
"""
return _make_request(buf, _READC, seq, p1=1, p3=length, p4=block, p5=byte)
def _make_reset(buf, seq):
"""Make a packet with the RESET command.
Args:
- buf: The buffer to use of type array.array('B'). Will be extended if
needed.
- seq: The sequence number of the packet.
Returns: A buffer as a slice of the passed in buf that holds the packet.
"""
return _make_request(buf, _RESET, seq)
def _validate_response(packet):
"""Validate a response packet.
Args:
- packet: The packet to validate.
Returns: True if the packet is valid, False otherwise.
"""
if len(packet) < 18:
return False
length = _SHORT_STRUCT.unpack(buffer(packet, 2, 2))[0]
if length != len(packet):
return False
if _crc(buffer(packet, 1, 17)) != 0:
return False
if length > 14:
if length < 17:
return False
if _crc(buffer(packet, 18)) != 0:
return False
return True
def _read_data(port, buf, offset):
"""Read data off the serial port and into port at offset.
Args:
- port: The serial port to read.
- buf: The buffer to write.
- offset: The offset into the buffer to write.
    Returns: The number of bytes read (zero when nothing is waiting).
"""
num_bytes = port.inWaiting()
if num_bytes > 0:
bytes = port.read(num_bytes)
_write_to_buffer(buf, offset, bytes)
return num_bytes
def _read_packet(port, buf):
"""Read a full packet from the port.
    Reads from the port until a full packet is received.
Args:
- port: The port to read.
- buf: The buffer to write.
Returns: A buffer as a slice of buf holding the packet.
Raises:
    _ProtocolViolationException: If the packet doesn't conform to the protocol.
"""
bytes_read = 0
while bytes_read < 4:
bytes_read += _read_data(port, buf, bytes_read)
packet_length = _SHORT_STRUCT.unpack_from(buf, 2)[0]
while bytes_read < packet_length:
bytes_read += _read_data(port, buf, bytes_read)
packet = buffer(buf, 0, bytes_read)
if not _validate_response(packet):
raise _ProtocolViolationException()
return buffer(buf, 0, bytes_read)
def _write_to_port(port, data):
"""Write data to a port.
Args:
- port: The port to write.
- data: The data to write
"""
while data:
data = buffer(data, port.write(data))
def _loop(port, q, timeout=.500):
"""Loop to a requester
Send strokes when available.
Send empty return packets at a given interval when no new strokes (usually 500ms).
Args:
- port: The serial port.
- q: The queue used for the strokes.
- timeout: The timeout between empty heartbeat response packets.
"""
request_buffer, response_buffer, data_buffer = array.array('B'), array.array('B'), array.array('B')
while True:
packet = _read_packet(port, request_buffer)
if _SHORT_STRUCT.unpack(buffer(packet, 4, 2))[0] == _OPEN:
response = _make_response(response_buffer, _SHORT_STRUCT.unpack(buffer(packet, 4, 2))[0], ord(packet[1]),)
_write_to_port(port, response)
# expected a full read to the end of file
packet = _read_packet(port, request_buffer)
response = _make_response(response_buffer, _SHORT_STRUCT.unpack(buffer(packet, 4, 2))[0], ord(packet[1]))
_write_to_port(port, response)
elif _SHORT_STRUCT.unpack(buffer(packet, 4, 2))[0] == _READC:
try:
key_sets = q.get(block=True, timeout=timeout)
data_buffer = array.array('B')
_write_to_buffer(data_buffer, 0, key_sets)
# don't send huge amounts in one packet
while not q.empty() and len(data_buffer) < 256:
key_sets = q.get()
_write_to_buffer(data_buffer, len(data_buffer), key_sets)
response = _make_response(response_buffer, _SHORT_STRUCT.unpack(buffer(packet, 4, 2))[0], ord(packet[1]), p1=len(data_buffer), data=data_buffer)
_write_to_port(port, response)
# Send a packet with p1=0 to signal end of strokes
packet = _read_packet(port, request_buffer)
response = _make_response(response_buffer, _SHORT_STRUCT.unpack(buffer(packet, 4, 2))[0], ord(packet[1]))
_write_to_port(port, response)
except Queue.Empty:
response = _make_response(response_buffer, _SHORT_STRUCT.unpack(buffer(packet, 4, 2))[0], ord(packet[1]))
_write_to_port(port, response)
class VirtualStenotypeStentura(VirtualStenotypeBase):
"""Stentura interface.
This class implements the send method.
"""
def __init__(self, params):
VirtualStenotypeBase.__init__(self, params)
self.params = params
def start(self):
"""Override base class start method. Do not call directly."""
VirtualStenotypeBase.start(self)
try:
self.q = Queue.Queue()
t = threading.Thread(target=_loop, args=(self.serial_port, self.q))
t.daemon = True
t.start()
except _StopException:
pass
except Exception, e:
self._error(e)
def send(self, key_stroke):
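        """Encode a space-separated steno stroke string (e.g. 'S- A- -T')
        into the four-byte stentura stroke format and queue it for _loop to
        send as READC response data."""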
self.key_sets = [ 0xc0, 0xc0, 0xc0, 0xc0]
keys = key_stroke.split(' ')
for key in keys:
if self._add_to_key_sets(key):
continue
char_index = -1
for char in key:
char_index += 1
if char in _STENO_KEY_CHART:
self._add_to_key_sets(char)
elif char_index < len(key) / 2:
if self._add_to_key_sets(char + '-'):
continue
elif self._add_to_key_sets('-' + char):
continue
else:
if self._add_to_key_sets('-' + char):
continue
elif self._add_to_key_sets(char + '-'):
continue
self.q.put(self.key_sets)
def _add_to_key_sets(self, key):
if key not in _STENO_KEY_CHART:
return False
index = _STENO_KEY_CHART.index(key)
key_set, key_index = divmod(index, 6)
key_code = 1 << 5 - key_index
self.key_sets[key_set] |= key_code
return True
def __repr__(self):
return "VirtualStenotypeStentura(%s)" % self.params
| gpl-2.0 | 2,355,993,840,544,893,000 | 33.616949 | 160 | 0.655014 | false |
PolicyStat/django | tests/forms_tests/models.py | 23 | 3809 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import datetime
import tempfile
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
temp_storage_location = tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR'])
temp_storage = FileSystemStorage(location=temp_storage_location)
class BoundaryModel(models.Model):
positive_integer = models.PositiveIntegerField(null=True, blank=True)
callable_default_value = 0
def callable_default():
global callable_default_value
callable_default_value = callable_default_value + 1
return callable_default_value
class Defaults(models.Model):
name = models.CharField(max_length=255, default='class default value')
def_date = models.DateField(default=datetime.date(1980, 1, 1))
value = models.IntegerField(default=42)
callable_default = models.IntegerField(default=callable_default)
class ChoiceModel(models.Model):
"""For ModelChoiceField and ModelMultipleChoiceField tests."""
CHOICES = [
('', 'No Preference'),
('f', 'Foo'),
('b', 'Bar'),
]
INTEGER_CHOICES = [
(None, 'No Preference'),
(1, 'Foo'),
(2, 'Bar'),
]
STRING_CHOICES_WITH_NONE = [
(None, 'No Preference'),
('f', 'Foo'),
('b', 'Bar'),
]
name = models.CharField(max_length=10)
choice = models.CharField(max_length=2, blank=True, choices=CHOICES)
choice_string_w_none = models.CharField(
max_length=2, blank=True, null=True, choices=STRING_CHOICES_WITH_NONE)
choice_integer = models.IntegerField(choices=INTEGER_CHOICES, blank=True,
null=True)
@python_2_unicode_compatible
class ChoiceOptionModel(models.Model):
"""Destination for ChoiceFieldModel's ForeignKey.
Can't reuse ChoiceModel because error_message tests require that it have no instances."""
name = models.CharField(max_length=10)
class Meta:
ordering = ('name',)
def __str__(self):
return 'ChoiceOption %d' % self.pk
class ChoiceFieldModel(models.Model):
"""Model with ForeignKey to another model, for testing ModelForm
generation with ModelChoiceField."""
choice = models.ForeignKey(ChoiceOptionModel, blank=False,
default=lambda: ChoiceOptionModel.objects.get(name='default'))
choice_int = models.ForeignKey(ChoiceOptionModel, blank=False, related_name='choice_int',
default=lambda: 1)
multi_choice = models.ManyToManyField(ChoiceOptionModel, blank=False, related_name='multi_choice',
default=lambda: ChoiceOptionModel.objects.filter(name='default'))
multi_choice_int = models.ManyToManyField(ChoiceOptionModel, blank=False, related_name='multi_choice_int',
default=lambda: [1])
class OptionalMultiChoiceModel(models.Model):
multi_choice = models.ManyToManyField(ChoiceOptionModel, blank=False, related_name='not_relevant',
default=lambda: ChoiceOptionModel.objects.filter(name='default'))
multi_choice_optional = models.ManyToManyField(ChoiceOptionModel, blank=True,
related_name='not_relevant2')
class FileModel(models.Model):
file = models.FileField(storage=temp_storage, upload_to='tests')
@python_2_unicode_compatible
class Group(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return '%s' % self.name
class Cheese(models.Model):
name = models.CharField(max_length=100)
class Article(models.Model):
content = models.TextField()
| bsd-3-clause | -2,897,814,414,692,491,300 | 31.836207 | 110 | 0.658966 | false |
j5shi/ST3_Config | Packages/Anaconda/anaconda_lib/jsonclient.py | 6 | 3055 | # -*- coding: utf8 -*-
# Copyright (C) 2013 - Oscar Campos <[email protected]>
# This program is Free Software see LICENSE file for details
"""Minimalist standard library Asynchronous JSON Client
"""
import sys
import uuid
import logging
import traceback
try:
import sublime
except ImportError:
try:
import ujson as json
except ImportError:
import json
from .callback import Callback
from .ioloop import EventHandler
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.DEBUG)
class AsynClient(EventHandler):
"""Asynchronous JSON connection to anaconda server
"""
def __init__(self, port, host='localhost'):
EventHandler.__init__(self, (host, port))
self.callbacks = {}
self.rbuffer = []
def ready_to_write(self):
"""I am ready to send some data?
"""
return True if self.outbuffer else False
def handle_read(self, data):
"""Called when data is ready to be read
"""
self.rbuffer.append(data)
def add_callback(self, callback):
"""Add a new callback to the callbacks dictionary
The hex representation of the callback's uuid4 is used as index. In
case that the callback is a regular callable and not a Callback
class instance, a new uuid4 code is created on the fly.
"""
if not isinstance(callback, Callback):
hexid = uuid.uuid4().hex
else:
hexid = callback.hexid
self.callbacks[hexid] = callback
return hexid
def pop_callback(self, hexid):
"""Remove and return a callback callable from the callback dictionary
"""
return self.callbacks.pop(hexid)
def process_message(self):
"""Called when a full line has been read from the socket
"""
message = b''.join(self.rbuffer)
self.rbuffer = []
try:
data = sublime.decode_value(message.decode('utf8'))
except (NameError, ValueError):
data = json.loads(message.replace('\t', ' ' * 8).decode('utf8'))
callback = self.pop_callback(data.pop('uid'))
if callback is None:
logger.error(
'Received {} from the JSONServer but there is not callback '
'to handle it. Aborting....'.format(message)
)
try:
callback(data)
except Exception as error:
logging.error(error)
for traceback_line in traceback.format_exc().splitlines():
logging.error(traceback_line)
def send_command(self, callback, **data):
"""Send the given command that should be handled bu the given callback
"""
data['uid'] = self.add_callback(callback)
try:
self.push(
bytes('{}\r\n'.format(sublime.encode_value(data)), 'utf8')
)
except NameError:
self.push(bytes('{}\r\n'.format(json.dumps(data)), 'utf8'))
| mit | -2,131,888,267,098,840,600 | 26.772727 | 78 | 0.602291 | false |
uni-peter-zheng/tp-libvirt | libvirt/tests/src/virsh_cmd/pool/virsh_pool_create_as.py | 7 | 2051 | import os
import logging
from autotest.client.shared import error
from virttest import libvirt_storage
from virttest import virsh
def run(test, params, env):
'''
Test the command virsh pool-create-as
(1) Call virsh pool-create-as
(2) Call virsh -c remote_uri pool-create-as
(3) Call virsh pool-create-as with an unexpected option
'''
# Run test case
if not params.has_key('pool_name') or not params.has_key('pool_target'):
logging.error("Please give a 'name' and 'target'")
pool_options = params.get('pool_options', '')
pool_name = params.get('pool_name')
pool_type = params.get('pool_type')
pool_target = params.get('pool_target')
if not os.path.isdir(pool_target):
if os.path.isfile(pool_target):
logging.error('<target> must be a directory')
else:
os.makedirs(pool_target)
logging.info('Creating a %s type pool %s', pool_type, pool_name)
status = virsh.pool_create_as(pool_name, pool_type, pool_target,
extra=pool_options, uri=virsh.canonical_uri())
# Check status_error
status_error = params.get('status_error')
if status_error == 'yes':
if status:
raise error.TestFail("%d not a expected command return value"
% status)
else:
logging.info("It's an expected error")
elif status_error == 'no':
result = virsh.pool_info(pool_name, uri=virsh.canonical_uri())
if result.exit_status:
raise error.TestFail('Failed to check pool information')
else:
logging.info('Pool %s is running', pool_name)
if not status:
                raise error.TestFail('%d not an expected command return value'
% status)
else:
logging.info('Succeed to create pool %s', pool_name)
# Clean up
libvirt_pool = libvirt_storage.StoragePool()
if libvirt_pool.pool_exists(pool_name):
libvirt_pool.delete_pool(pool_name)
| gpl-2.0 | -5,506,974,995,019,713,000 | 33.762712 | 80 | 0.606046 | false |
iniverno/RnR-LLC | simics-3.0-install/simics-3.0.31/amd64-linux/lib/python2.4/telnetlib.py | 8 | 21567 | """TELNET client class.
Based on RFC 854: TELNET Protocol Specification, by J. Postel and
J. Reynolds
Example:
>>> from telnetlib import Telnet
>>> tn = Telnet('www.python.org', 79) # connect to finger port
>>> tn.write('guido\r\n')
>>> print tn.read_all()
Login Name TTY Idle When Where
guido Guido van Rossum pts/2 <Dec 2 11:10> snag.cnri.reston..
>>>
Note that read_all() won't read until eof -- it just reads some data
-- but it guarantees to read at least one byte unless EOF is hit.
It is possible to pass a Telnet object to select.select() in order to
wait until more data is available. Note that in this case,
read_eager() may return '' even if there was data on the socket,
because the protocol negotiation may have eaten the data. This is why
EOFError is needed in some cases to distinguish between "no data" and
"connection closed" (since the socket also appears ready for reading
when it is closed).
To do:
- option negotiation
- timeout should be intrinsic to the connection object instead of an
option on one of the read calls only
"""
# Imported modules
import sys
import socket
import select
__all__ = ["Telnet"]
# Tunable parameters
DEBUGLEVEL = 0
# Telnet protocol defaults
TELNET_PORT = 23
# Telnet protocol characters (don't change)
IAC = chr(255) # "Interpret As Command"
DONT = chr(254)
DO = chr(253)
WONT = chr(252)
WILL = chr(251)
theNULL = chr(0)
SE = chr(240) # Subnegotiation End
NOP = chr(241) # No Operation
DM = chr(242) # Data Mark
BRK = chr(243) # Break
IP = chr(244) # Interrupt process
AO = chr(245) # Abort output
AYT = chr(246) # Are You There
EC = chr(247) # Erase Character
EL = chr(248) # Erase Line
GA = chr(249) # Go Ahead
SB = chr(250) # Subnegotiation Begin
# Telnet protocol options code (don't change)
# These ones all come from arpa/telnet.h
BINARY = chr(0) # 8-bit data path
ECHO = chr(1) # echo
RCP = chr(2) # prepare to reconnect
SGA = chr(3) # suppress go ahead
NAMS = chr(4) # approximate message size
STATUS = chr(5) # give status
TM = chr(6) # timing mark
RCTE = chr(7) # remote controlled transmission and echo
NAOL = chr(8) # negotiate about output line width
NAOP = chr(9) # negotiate about output page size
NAOCRD = chr(10) # negotiate about CR disposition
NAOHTS = chr(11) # negotiate about horizontal tabstops
NAOHTD = chr(12) # negotiate about horizontal tab disposition
NAOFFD = chr(13) # negotiate about formfeed disposition
NAOVTS = chr(14) # negotiate about vertical tab stops
NAOVTD = chr(15) # negotiate about vertical tab disposition
NAOLFD = chr(16) # negotiate about output LF disposition
XASCII = chr(17) # extended ascii character set
LOGOUT = chr(18) # force logout
BM = chr(19) # byte macro
DET = chr(20) # data entry terminal
SUPDUP = chr(21) # supdup protocol
SUPDUPOUTPUT = chr(22) # supdup output
SNDLOC = chr(23) # send location
TTYPE = chr(24) # terminal type
EOR = chr(25) # end or record
TUID = chr(26) # TACACS user identification
OUTMRK = chr(27) # output marking
TTYLOC = chr(28) # terminal location number
VT3270REGIME = chr(29) # 3270 regime
X3PAD = chr(30) # X.3 PAD
NAWS = chr(31) # window size
TSPEED = chr(32) # terminal speed
LFLOW = chr(33) # remote flow control
LINEMODE = chr(34) # Linemode option
XDISPLOC = chr(35) # X Display Location
OLD_ENVIRON = chr(36) # Old - Environment variables
AUTHENTICATION = chr(37) # Authenticate
ENCRYPT = chr(38) # Encryption option
NEW_ENVIRON = chr(39) # New - Environment variables
# the following ones come from
# http://www.iana.org/assignments/telnet-options
# Unfortunately, that document does not assign identifiers
# to all of them, so we are making them up
TN3270E = chr(40) # TN3270E
XAUTH = chr(41) # XAUTH
CHARSET = chr(42) # CHARSET
RSP = chr(43) # Telnet Remote Serial Port
COM_PORT_OPTION = chr(44) # Com Port Control Option
SUPPRESS_LOCAL_ECHO = chr(45) # Telnet Suppress Local Echo
TLS = chr(46) # Telnet Start TLS
KERMIT = chr(47) # KERMIT
SEND_URL = chr(48) # SEND-URL
FORWARD_X = chr(49) # FORWARD_X
PRAGMA_LOGON = chr(138) # TELOPT PRAGMA LOGON
SSPI_LOGON = chr(139) # TELOPT SSPI LOGON
PRAGMA_HEARTBEAT = chr(140) # TELOPT PRAGMA HEARTBEAT
EXOPL = chr(255) # Extended-Options-List
NOOPT = chr(0)
class Telnet:
"""Telnet interface class.
An instance of this class represents a connection to a telnet
server. The instance is initially not connected; the open()
method must be used to establish a connection. Alternatively, the
host name and optional port number can be passed to the
constructor, too.
Don't try to reopen an already connected instance.
This class has many read_*() methods. Note that some of them
raise EOFError when the end of the connection is read, because
they can return an empty string for other reasons. See the
individual doc strings.
read_until(expected, [timeout])
Read until the expected string has been seen, or a timeout is
hit (default is no timeout); may block.
read_all()
Read all data until EOF; may block.
read_some()
Read at least one byte or EOF; may block.
read_very_eager()
Read all data available already queued or on the socket,
without blocking.
read_eager()
Read either data already queued or some data available on the
socket, without blocking.
read_lazy()
Read all data in the raw queue (processing it first), without
doing any socket I/O.
read_very_lazy()
Reads all data in the cooked queue, without doing any socket
I/O.
read_sb_data()
Reads available data between SB ... SE sequence. Don't block.
set_option_negotiation_callback(callback)
Each time a telnet option is read on the input flow, this callback
(if set) is called with the following parameters :
callback(telnet socket, command, option)
option will be chr(0) when there is no option.
No other action is done afterwards by telnetlib.
"""
def __init__(self, host=None, port=0):
"""Constructor.
When called without arguments, create an unconnected instance.
With a hostname argument, it connects the instance; a port
number is optional.
"""
self.debuglevel = DEBUGLEVEL
self.host = host
self.port = port
self.sock = None
self.rawq = ''
self.irawq = 0
self.cookedq = ''
self.eof = 0
self.iacseq = '' # Buffer for IAC sequence.
self.sb = 0 # flag for SB and SE sequence.
self.sbdataq = ''
self.option_callback = None
if host is not None:
self.open(host, port)
def open(self, host, port=0):
"""Connect to a host.
The optional second argument is the port number, which
defaults to the standard telnet port (23).
Don't try to reopen an already connected instance.
"""
self.eof = 0
if not port:
port = TELNET_PORT
self.host = host
self.port = port
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
self.sock.connect(sa)
except socket.error, msg:
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
def __del__(self):
"""Destructor -- close the connection."""
self.close()
def msg(self, msg, *args):
"""Print a debug message, when the debug level is > 0.
If extra arguments are present, they are substituted in the
message using the standard string formatting operator.
"""
if self.debuglevel > 0:
print 'Telnet(%s,%d):' % (self.host, self.port),
if args:
print msg % args
else:
print msg
def set_debuglevel(self, debuglevel):
"""Set the debug level.
The higher it is, the more debug output you get (on sys.stdout).
"""
self.debuglevel = debuglevel
def close(self):
"""Close the connection."""
if self.sock:
self.sock.close()
self.sock = 0
self.eof = 1
self.iacseq = ''
self.sb = 0
def get_socket(self):
"""Return the socket object used internally."""
return self.sock
def fileno(self):
"""Return the fileno() of the socket object used internally."""
return self.sock.fileno()
def write(self, buffer):
"""Write a string to the socket, doubling any IAC characters.
Can block if the connection is blocked. May raise
socket.error if the connection is closed.
"""
if IAC in buffer:
buffer = buffer.replace(IAC, IAC+IAC)
self.msg("send %r", buffer)
self.sock.sendall(buffer)
def read_until(self, match, timeout=None):
"""Read until a given string is encountered or until timeout.
When no match is found, return whatever is available instead,
possibly the empty string. Raise EOFError if the connection
is closed and no cooked data is available.
"""
n = len(match)
self.process_rawq()
i = self.cookedq.find(match)
if i >= 0:
i = i+n
buf = self.cookedq[:i]
self.cookedq = self.cookedq[i:]
return buf
s_reply = ([self], [], [])
s_args = s_reply
if timeout is not None:
s_args = s_args + (timeout,)
while not self.eof and select.select(*s_args) == s_reply:
i = max(0, len(self.cookedq)-n)
self.fill_rawq()
self.process_rawq()
i = self.cookedq.find(match, i)
if i >= 0:
i = i+n
buf = self.cookedq[:i]
self.cookedq = self.cookedq[i:]
return buf
return self.read_very_lazy()
def read_all(self):
"""Read all data until EOF; block until connection closed."""
self.process_rawq()
while not self.eof:
self.fill_rawq()
self.process_rawq()
buf = self.cookedq
self.cookedq = ''
return buf
def read_some(self):
"""Read at least one byte of cooked data unless EOF is hit.
Return '' if EOF is hit. Block if no data is immediately
available.
"""
self.process_rawq()
while not self.cookedq and not self.eof:
self.fill_rawq()
self.process_rawq()
buf = self.cookedq
self.cookedq = ''
return buf
def read_very_eager(self):
"""Read everything that's possible without blocking in I/O (eager).
Raise EOFError if connection closed and no cooked data
available. Return '' if no cooked data available otherwise.
Don't block unless in the midst of an IAC sequence.
"""
self.process_rawq()
while not self.eof and self.sock_avail():
self.fill_rawq()
self.process_rawq()
return self.read_very_lazy()
def read_eager(self):
"""Read readily available data.
Raise EOFError if connection closed and no cooked data
available. Return '' if no cooked data available otherwise.
Don't block unless in the midst of an IAC sequence.
"""
self.process_rawq()
while not self.cookedq and not self.eof and self.sock_avail():
self.fill_rawq()
self.process_rawq()
return self.read_very_lazy()
def read_lazy(self):
"""Process and return data that's already in the queues (lazy).
Raise EOFError if connection closed and no data available.
Return '' if no cooked data available otherwise. Don't block
unless in the midst of an IAC sequence.
"""
self.process_rawq()
return self.read_very_lazy()
def read_very_lazy(self):
"""Return any data available in the cooked queue (very lazy).
Raise EOFError if connection closed and no data available.
Return '' if no cooked data available otherwise. Don't block.
"""
buf = self.cookedq
self.cookedq = ''
if not buf and self.eof and not self.rawq:
raise EOFError, 'telnet connection closed'
return buf
def read_sb_data(self):
"""Return any data available in the SB ... SE queue.
Return '' if no SB ... SE available. Should only be called
after seeing a SB or SE command. When a new SB command is
found, old unread SB data will be discarded. Don't block.
"""
buf = self.sbdataq
self.sbdataq = ''
return buf
def set_option_negotiation_callback(self, callback):
"""Provide a callback function called after each receipt of a telnet option."""
self.option_callback = callback
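    # A sketch of a negotiation callback that refuses every option the peer
    # proposes (illustrative only; `tn` stands for an assumed, already
    # connected Telnet instance):
    #
    #     def refuse_all(sock, cmd, opt):
    #         if cmd in (DO, DONT):
    #             sock.sendall(IAC + WONT + opt)
    #         elif cmd in (WILL, WONT):
    #             sock.sendall(IAC + DONT + opt)
    #
    #     tn.set_option_negotiation_callback(refuse_all)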
def process_rawq(self):
"""Transfer from raw queue to cooked queue.
Set self.eof when connection is closed. Don't block unless in
the midst of an IAC sequence.
"""
buf = ['', '']
try:
while self.rawq:
c = self.rawq_getchar()
if not self.iacseq:
if c == theNULL:
continue
if c == "\021":
continue
if c != IAC:
buf[self.sb] = buf[self.sb] + c
continue
else:
self.iacseq += c
elif len(self.iacseq) == 1:
'IAC: IAC CMD [OPTION only for WILL/WONT/DO/DONT]'
if c in (DO, DONT, WILL, WONT):
self.iacseq += c
continue
self.iacseq = ''
if c == IAC:
buf[self.sb] = buf[self.sb] + c
else:
if c == SB: # SB ... SE start.
self.sb = 1
self.sbdataq = ''
elif c == SE:
self.sb = 0
self.sbdataq = self.sbdataq + buf[1]
buf[1] = ''
if self.option_callback:
# Callback is supposed to look into
# the sbdataq
self.option_callback(self.sock, c, NOOPT)
else:
# We can't offer automatic processing of
# suboptions. Alas, we should not get any
# unless we did a WILL/DO before.
self.msg('IAC %d not recognized' % ord(c))
elif len(self.iacseq) == 2:
cmd = self.iacseq[1]
self.iacseq = ''
opt = c
if cmd in (DO, DONT):
self.msg('IAC %s %d',
cmd == DO and 'DO' or 'DONT', ord(opt))
if self.option_callback:
self.option_callback(self.sock, cmd, opt)
else:
self.sock.sendall(IAC + WONT + opt)
elif cmd in (WILL, WONT):
self.msg('IAC %s %d',
cmd == WILL and 'WILL' or 'WONT', ord(opt))
if self.option_callback:
self.option_callback(self.sock, cmd, opt)
else:
self.sock.sendall(IAC + DONT + opt)
except EOFError: # raised by self.rawq_getchar()
self.iacseq = '' # Reset on EOF
self.sb = 0
pass
self.cookedq = self.cookedq + buf[0]
self.sbdataq = self.sbdataq + buf[1]
def rawq_getchar(self):
"""Get next char from raw queue.
Block if no data is immediately available. Raise EOFError
when connection is closed.
"""
if not self.rawq:
self.fill_rawq()
if self.eof:
raise EOFError
c = self.rawq[self.irawq]
self.irawq = self.irawq + 1
if self.irawq >= len(self.rawq):
self.rawq = ''
self.irawq = 0
return c
def fill_rawq(self):
"""Fill raw queue from exactly one recv() system call.
Block if no data is immediately available. Set self.eof when
connection is closed.
"""
if self.irawq >= len(self.rawq):
self.rawq = ''
self.irawq = 0
# The buffer size should be fairly small so as to avoid quadratic
# behavior in process_rawq() above
buf = self.sock.recv(50)
self.msg("recv %r", buf)
self.eof = (not buf)
self.rawq = self.rawq + buf
def sock_avail(self):
"""Test whether data is available on the socket."""
return select.select([self], [], [], 0) == ([self], [], [])
def interact(self):
"""Interaction function, emulates a very dumb telnet client."""
if sys.platform == "win32":
self.mt_interact()
return
while 1:
rfd, wfd, xfd = select.select([self, sys.stdin], [], [])
if self in rfd:
try:
text = self.read_eager()
except EOFError:
print '*** Connection closed by remote host ***'
break
if text:
sys.stdout.write(text)
sys.stdout.flush()
if sys.stdin in rfd:
line = sys.stdin.readline()
if not line:
break
self.write(line)
def mt_interact(self):
"""Multithreaded version of interact()."""
import thread
thread.start_new_thread(self.listener, ())
while 1:
line = sys.stdin.readline()
if not line:
break
self.write(line)
def listener(self):
"""Helper for mt_interact() -- this executes in the other thread."""
while 1:
try:
data = self.read_eager()
except EOFError:
print '*** Connection closed by remote host ***'
return
if data:
sys.stdout.write(data)
else:
sys.stdout.flush()
def expect(self, list, timeout=None):
"""Read until one from a list of a regular expressions matches.
The first argument is a list of regular expressions, either
compiled (re.RegexObject instances) or uncompiled (strings).
The optional second argument is a timeout, in seconds; default
is no timeout.
Return a tuple of three items: the index in the list of the
first regular expression that matches; the match object
returned; and the text read up till and including the match.
If EOF is read and no text was read, raise EOFError.
Otherwise, when nothing matches, return (-1, None, text) where
text is the text received so far (may be the empty string if a
timeout happened).
If a regular expression ends with a greedy match (e.g. '.*')
or if more than one expression can match the same input, the
        results are non-deterministic, and may depend on the I/O timing.
"""
re = None
list = list[:]
indices = range(len(list))
for i in indices:
if not hasattr(list[i], "search"):
if not re: import re
list[i] = re.compile(list[i])
while 1:
self.process_rawq()
for i in indices:
m = list[i].search(self.cookedq)
if m:
e = m.end()
text = self.cookedq[:e]
self.cookedq = self.cookedq[e:]
return (i, m, text)
if self.eof:
break
if timeout is not None:
r, w, x = select.select([self.fileno()], [], [], timeout)
if not r:
break
self.fill_rawq()
text = self.read_very_lazy()
if not text and self.eof:
raise EOFError
return (-1, None, text)
def test():
"""Test program for telnetlib.
Usage: python telnetlib.py [-d] ... [host [port]]
Default host is localhost; default port is 23.
"""
debuglevel = 0
while sys.argv[1:] and sys.argv[1] == '-d':
debuglevel = debuglevel+1
del sys.argv[1]
host = 'localhost'
if sys.argv[1:]:
host = sys.argv[1]
port = 0
if sys.argv[2:]:
portstr = sys.argv[2]
try:
port = int(portstr)
except ValueError:
port = socket.getservbyname(portstr, 'tcp')
tn = Telnet()
tn.set_debuglevel(debuglevel)
tn.open(host, port)
tn.interact()
tn.close()
if __name__ == '__main__':
test()
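# A minimal usage sketch for the Telnet class above. The host, prompts and
# credentials are illustrative assumptions, not part of the original module,
# and the helper is never called automatically.
def _example_session(host='localhost'):
    tn = Telnet(host)                  # connects to the default telnet port (23)
    tn.read_until('login: ')           # block until the login prompt appears
    tn.write('guest\n')
    tn.read_until('Password: ')
    tn.write('guest\n')
    tn.write('ls\n')
    tn.write('exit\n')
    return tn.read_all()               # everything sent up to EOF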
| gpl-2.0 | -2,779,999,684,196,099,000 | 31.926718 | 87 | 0.557797 | false |
Elico-Corp/openerp-7.0 | l10n_cn_report_invoice/report/invoice.py | 4 | 4817 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2011 Elico Corp. All Rights Reserved.
# Author: LIN Yu <[email protected]>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from report import report_sxw
from lxml import etree
from openerp.osv import osv, fields
from tools.translate import _
class invoice(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context=None):
super(invoice, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
# 'get_partner_ref': self.get_partner_ref,
'tag': self.tag,
'get_product_code': self._get_product_code,
'multiply': self._multiply,
'get_product_desc': self.get_product_desc,
'get_product_desc_en': self.get_product_desc_en,
'get_product_unit': self.get_product_unit,
})
# def get_partner_ref(self, partner, product):
# result = ''
# ref_obj = self.pool.get('product.partner.related.fields')
# ref_obj_ids = ref_obj.search(
# self.cr, self.uid [('partner_id', '=', partner),
# ('product_id', '=', product)])
# for ref_obj_id in ref_obj.browse(self.cr, self.uid, ref_obj_ids):
# result = ref_obj_id.name + " " + ref_obj_id.value
# return result
def _get_product_code(self, product_id, partner_id):
        product_obj = self.pool.get('product.product')
return product_obj._product_code(self.cr, self.uid, [product_id], name=None, arg=None, context={'partner_id': partner_id})[product_id]
def _multiply(self, one, two):
return one * two
def get_product_desc(self, product_id):
        # get the Chinese description from name, default_code and uom
trans_obj = self.pool.get('ir.translation')
#name
trans_ids = trans_obj.search(self.cr, self.uid, [('name','=','product.template,name'),('res_id','=',product_id.id)])
if trans_ids and trans_ids[0]:
desc = trans_obj.read(self.cr, self.uid, trans_ids,['value'])[0]['value']
else:
            desc = u'无'  # Chinese for "none" / "not available"
# #desc
# trans_ids = trans_obj.search(self.cr, self.uid, [('field','=','product.template,description'),('res_id','=',product_id.id)])
# if trans_ids and trans_ids[0]:
# desc += trans_obj.read(self.cr, self.uid, trans_ids,['value'])[0]['value']
if product_id.default_code:
desc = '[' + product_id.default_code + ']' + ' ' + desc
#uom
# trans_ids = trans_obj.search(self.cr, self.uid, [('name','=','product.uom,name'),('res_id','=',product_id.uom_id.id)])
# if trans_ids and trans_ids[0]:
# desc += ' ' + '(' + trans_obj.read(self.cr, self.uid, trans_ids,['value'])[0]['value'] + ')'
# if product_id.uom_id and product_id.uom_id.name:
# desc = desc + ' ' + '(' + product_id.uom_id.name + ')'
return desc
def get_product_desc_en(self, product_id):
desc = product_id.name or ''
# if product_id.uom_id and product_id.uom_id.name:
# desc = desc + ' ' + '(' + product_id.uom_id.name + ')'
return desc
def get_product_unit(self, uos_id):
trans_obj = self.pool.get('ir.translation')
print uos_id
print uos_id.name
desc = uos_id.name
trans_ids = trans_obj.search(self.cr, self.uid, [('name','=','product.uom,name'),('res_id','=',uos_id.id)])
if trans_ids and trans_ids[0]:
desc += '/' + trans_obj.read(self.cr, self.uid, trans_ids,['value'])[0]['value']
# desc = product_id.joomla_unit or ''
# if product_id.joomla_unit_cn:
# desc = desc + ' / ' + product_id.joomla_unit_cn
return desc
report_sxw.report_sxw(
'report.invoice.elico', 'account.invoice',
'addons/elico_bilingual_reports/report/elico_invoice.rml',
parser=invoice)
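# Illustrative note (an assumption, not part of the original module): the
# helpers registered in localcontext above are meant to be called from the
# RML template named in the registration, e.g.
#
#     <para>[[ get_product_code(l.product_id.id, o.partner_id.id) ]]</para>
#     <para>[[ get_product_desc(l.product_id) ]] / [[ get_product_desc_en(l.product_id) ]]</para>
#
# The exact tags and variable names depend on elico_invoice.rml.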
| agpl-3.0 | -1,293,217,834,812,588,800 | 42.772727 | 142 | 0.566771 | false |
girving/tensorflow | tensorflow/contrib/distributions/python/ops/logistic.py | 21 | 8036 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Logistic distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.util import deprecation
class Logistic(distribution.Distribution):
"""The Logistic distribution with location `loc` and `scale` parameters.
#### Mathematical details
The cumulative density function of this distribution is:
```none
cdf(x; mu, sigma) = 1 / (1 + exp(-(x - mu) / sigma))
```
where `loc = mu` and `scale = sigma`.
The Logistic distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Logistic(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar Logistic distribution.
dist = tfd.Logistic(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Logistics.
# The first has mean 1 and scale 11, the second 2 and 22.
dist = tfd.Logistic(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
# Arguments are broadcast when possible.
# Define a batch of two scalar valued Logistics.
# Both have mean 1, but different scales.
dist = tfd.Logistic(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Logistic"):
"""Construct Logistic distributions with mean and scale `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor, the means of the distribution(s).
scale: Floating point tensor, the scales of the distribution(s). Must
contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: The name to give Ops created by the initializer.
Raises:
TypeError: if loc and scale are different dtypes.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Logistic, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the location."""
return self._loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(), self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
uniform = random_ops.random_uniform(
shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
dtype=self.dtype,
seed=seed)
sampled = math_ops.log(uniform) - math_ops.log1p(-1. * uniform)
return sampled * self.scale + self.loc
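  # The transform above is inverse-CDF sampling: for U ~ Uniform(0, 1),
  # logit(U) = log(U) - log(1 - U) is standard-logistic distributed, so
  # loc + scale * logit(U) ~ Logistic(loc, scale). A quick sanity sketch
  # (illustrative; assumes an eager/session context outside this class):
  #
  #   dist = Logistic(loc=1., scale=2.)
  #   s = dist.sample(10000)   # the sample mean should be close to loc == 1.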
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _log_cdf(self, x):
return -nn_ops.softplus(-self._z(x))
def _cdf(self, x):
return math_ops.sigmoid(self._z(x))
def _log_survival_function(self, x):
return -nn_ops.softplus(self._z(x))
def _survival_function(self, x):
return math_ops.sigmoid(-self._z(x))
def _log_unnormalized_prob(self, x):
z = self._z(x)
return - z - 2. * nn_ops.softplus(-z)
def _log_normalization(self):
return math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast sigma.
scale = self.scale * array_ops.ones_like(self.loc)
return 2 + math_ops.log(scale)
def _mean(self):
return self.loc * array_ops.ones_like(self.scale)
def _stddev(self):
return self.scale * array_ops.ones_like(self.loc) * math.pi / math.sqrt(3)
def _mode(self):
return self._mean()
def _z(self, x):
"""Standardize input `x` to a unit logistic."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
| apache-2.0 | 4,780,611,357,599,853,000 | 33.34188 | 80 | 0.665132 | false |
morenopc/edx-platform | lms/djangoapps/course_wiki/utils.py | 9 | 3481 | """
Utility functions for course_wiki.
"""
from django.core.exceptions import ObjectDoesNotExist
from xmodule import modulestore
import courseware
def user_is_article_course_staff(user, article):
"""
The root of a course wiki is /<course_number>. This means in case there
are two courses which have the same course_number they will end up with
the same course wiki root e.g. MITx/Phy101/Spring and HarvardX/Phy101/Fall
will share /Phy101.
This looks at the course wiki root of the article and returns True if
the user belongs to a group whose name starts with 'instructor_' or
'staff_' and contains '/<course_wiki_root_slug>/'. So if the user is
staff on course MITx/Phy101/Spring they will be in
'instructor_MITx/Phy101/Spring' or 'staff_MITx/Phy101/Spring' groups and
so this will return True.
"""
wiki_slug = article_course_wiki_root_slug(article)
if wiki_slug is None:
return False
# The wiki expects article slugs to contain at least one non-digit so if
# the course number is just a number the course wiki root slug is set to
# be '<course_number>_'. This means slug '202_' can belong to either
# course numbered '202_' or '202' and so we need to consider both.
courses = modulestore.django.modulestore().get_courses_for_wiki(wiki_slug)
if any(courseware.access.has_access(user, 'staff', course, course.course_key) for course in courses):
return True
if (wiki_slug.endswith('_') and slug_is_numerical(wiki_slug[:-1])):
courses = modulestore.django.modulestore().get_courses_for_wiki(wiki_slug[:-1])
if any(courseware.access.has_access(user, 'staff', course, course.course_key) for course in courses):
return True
return False
def slug_is_numerical(slug):
"""Returns whether the slug can be interpreted as a number."""
try:
float(slug)
except ValueError:
return False
return True
def course_wiki_slug(course):
"""Returns the slug for the course wiki root."""
slug = course.wiki_slug
# Django-wiki expects article slug to be non-numerical. In case the
# course number is numerical append an underscore.
if slug_is_numerical(slug):
slug = slug + "_"
return slug
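# Illustrative examples of the slug rule above (the course objects are
# assumed, not defined in this module):
#
#   course.wiki_slug == 'Phy101'  ->  'Phy101'
#   course.wiki_slug == '202'     ->  '202_'   (numerical, so '_' is appended)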
def article_course_wiki_root_slug(article):
"""
We assume the second level ancestor is the course wiki root. Examples:
/ returns None
/Phy101 returns 'Phy101'
/Phy101/Mechanics returns 'Phy101'
/Chem101/Metals/Iron returns 'Chem101'
    Note that someone can create an article /some-random-article/sub-article on the
wiki. In this case this function will return 'some-random-article' even
if no course with course number 'some-random-article' exists.
"""
try:
urlpath = article.urlpath_set.get()
except ObjectDoesNotExist:
return None
# Ancestors of /Phy101/Mechanics/Acceleration/ is a list of URLPaths
# ['Root', 'Phy101', 'Mechanics']
ancestors = urlpath.cached_ancestors
course_wiki_root_urlpath = None
if len(ancestors) == 0: # It is the wiki root article.
course_wiki_root_urlpath = None
elif len(ancestors) == 1: # It is a course wiki root article.
course_wiki_root_urlpath = urlpath
else: # It is an article inside a course wiki.
course_wiki_root_urlpath = ancestors[1]
if course_wiki_root_urlpath is not None:
return course_wiki_root_urlpath.slug
return None
| agpl-3.0 | -6,235,791,304,864,043,000 | 32.796117 | 109 | 0.685435 | false |
berserkerbernhard/Lidskjalv | code/cg_itemsmenu.py | 1 | 1649 | from databaseconnection import DatabaseConnection
from tools import widget_menu, result_to_menulist
import sys
class CableGuyItemsMenu():
def __init__(self):
self.menulist = []
self.menudata = {
'_text': '',
'_backtitle': 'Developed by Bernhard',
'_title': 'Lidskjalv - Cable guy -Main menu'
}
def main(self):
print("Lidskjalv - Cable guy - items menu")
self.buildmenu()
while True:
code, tag = widget_menu(
self.menulist,
self.menudata)
print(code, tag)
if code == "ok":
if tag == "Q":
break
def get_items(self):
lc = DatabaseConnection()
lc.get_db_connection()
s = lc.get_schemas()
print(s)
conn = lc.get_db_connection()
conn.autocommit = True
cur = conn.cursor()
cur.execute("select * from dvlpmnt_cg.items;")
res = cur.fetchall()
conn.close()
headers = [
"item_id",
"item_name",
"item_type",
"item_serial_number",
"item_description",
"item_created",
"connector_id",
"connector_name",
"connector_type",
"connector_description",
"connector_created",
"connected_to_i"]
returnlist = result_to_menulist(res, headers, 0)
return returnlist
def buildmenu(self):
self.get_items()
self.menulist.append(["-", "----"])
self.menulist.append(["Q", "Quit"])
| gpl-3.0 | 3,975,272,956,468,921,000 | 26.483333 | 56 | 0.494845 | false |
mjfarmer/scada_py | env/lib/python2.7/site-packages/twisted/web/error.py | 4 | 12294 | # -*- test-case-name: twisted.web.test.test_error -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Exception definitions for L{twisted.web}.
"""
from __future__ import division, absolute_import
try:
from future_builtins import ascii
except ImportError:
pass
__all__ = [
'Error', 'PageRedirect', 'InfiniteRedirection', 'RenderError',
'MissingRenderMethod', 'MissingTemplateLoader', 'UnexposedMethodError',
'UnfilledSlot', 'UnsupportedType', 'FlattenerError',
'RedirectWithNoLocation',
]
from collections import Sequence
from twisted.web._responses import RESPONSES
from twisted.python.compat import unicode, nativeString, intToBytes
def _codeToMessage(code):
"""
Returns the response message corresponding to an HTTP code, or None
if the code is unknown or unrecognized.
@type code: L{bytes}
@param code: Refers to an HTTP status code, for example C{http.NOT_FOUND}.
@return: A string message or none
@rtype: L{bytes}
"""
try:
return RESPONSES.get(int(code))
except (ValueError, AttributeError):
return None
class Error(Exception):
"""
A basic HTTP error.
@type status: L{bytes}
@ivar status: Refers to an HTTP status code, for example C{http.NOT_FOUND}.
@type message: L{bytes}
    @ivar message: A short error message, for example "NOT FOUND".
@type response: L{bytes}
@ivar response: A complete HTML document for an error page.
"""
def __init__(self, code, message=None, response=None):
"""
Initializes a basic exception.
@type code: L{bytes} or L{int}
@param code: Refers to an HTTP status code (for example, 200) either as
an integer or a bytestring representing such. If no C{message} is
given, C{code} is mapped to a descriptive bytestring that is used
instead.
@type message: L{bytes}
@param message: A short error message, for example "NOT FOUND".
@type response: L{bytes}
@param response: A complete HTML document for an error page.
"""
message = message or _codeToMessage(code)
Exception.__init__(self, code, message, response)
if isinstance(code, int):
# If we're given an int, convert it to a bytestring
# downloadPage gives a bytes, Agent gives an int, and it worked by
# accident previously, so just make it keep working.
code = intToBytes(code)
self.status = code
self.message = message
self.response = response
def __str__(self):
return nativeString(self.status + b" " + self.message)
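# A short usage sketch for Error (illustrative; the status code shown and the
# calling code are assumptions, not part of this module):
#
#     try:
#         raise Error(404)            # message defaults to the standard phrase
#     except Error as e:
#         print(str(e))               # -> "404 Not Found"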
class PageRedirect(Error):
"""
A request resulted in an HTTP redirect.
@type location: L{bytes}
@ivar location: The location of the redirect which was not followed.
"""
def __init__(self, code, message=None, response=None, location=None):
"""
Initializes a page redirect exception.
@type code: L{bytes}
@param code: Refers to an HTTP status code, for example
C{http.NOT_FOUND}. If no C{message} is given, C{code} is mapped to a
descriptive string that is used instead.
@type message: L{bytes}
@param message: A short error message, for example "NOT FOUND".
@type response: L{bytes}
@param response: A complete HTML document for an error page.
@type location: L{bytes}
@param location: The location response-header field value. It is an
absolute URI used to redirect the receiver to a location other than
the Request-URI so the request can be completed.
"""
Error.__init__(self, code, message, response)
if self.message and location:
self.message = self.message + b" to " + location
self.location = location
class InfiniteRedirection(Error):
"""
HTTP redirection is occurring endlessly.
@type location: L{bytes}
@ivar location: The first URL in the series of redirections which was
not followed.
"""
def __init__(self, code, message=None, response=None, location=None):
"""
Initializes an infinite redirection exception.
@type code: L{bytes}
@param code: Refers to an HTTP status code, for example
C{http.NOT_FOUND}. If no C{message} is given, C{code} is mapped to a
descriptive string that is used instead.
@type message: L{bytes}
@param message: A short error message, for example "NOT FOUND".
@type response: L{bytes}
@param response: A complete HTML document for an error page.
@type location: L{bytes}
@param location: The location response-header field value. It is an
absolute URI used to redirect the receiver to a location other than
the Request-URI so the request can be completed.
"""
Error.__init__(self, code, message, response)
if self.message and location:
self.message = self.message + b" to " + location
self.location = location
class RedirectWithNoLocation(Error):
"""
Exception passed to L{ResponseFailed} if we got a redirect without a
C{Location} header field.
@type uri: L{bytes}
@ivar uri: The URI which failed to give a proper location header
field.
@since: 11.1
"""
def __init__(self, code, message, uri):
"""
Initializes a page redirect exception when no location is given.
@type code: L{bytes}
@param code: Refers to an HTTP status code, for example
C{http.NOT_FOUND}. If no C{message} is given, C{code} is mapped to
a descriptive string that is used instead.
@type message: L{bytes}
@param message: A short error message.
@type uri: L{bytes}
@param uri: The URI which failed to give a proper location header
field.
"""
Error.__init__(self, code, message)
self.message = self.message + b" to " + uri
self.uri = uri
class UnsupportedMethod(Exception):
"""
Raised by a resource when faced with a strange request method.
RFC 2616 (HTTP 1.1) gives us two choices when faced with this situation:
If the type of request is known to us, but not allowed for the requested
resource, respond with NOT_ALLOWED. Otherwise, if the request is something
we don't know how to deal with in any case, respond with NOT_IMPLEMENTED.
When this exception is raised by a Resource's render method, the server
will make the appropriate response.
This exception's first argument MUST be a sequence of the methods the
resource *does* support.
"""
allowedMethods = ()
def __init__(self, allowedMethods, *args):
Exception.__init__(self, allowedMethods, *args)
self.allowedMethods = allowedMethods
if not isinstance(allowedMethods, Sequence):
raise TypeError(
"First argument must be a sequence of supported methods, "
"but my first argument is not a sequence.")
class SchemeNotSupported(Exception):
"""
The scheme of a URI was not one of the supported values.
"""
class RenderError(Exception):
"""
Base exception class for all errors which can occur during template
rendering.
"""
class MissingRenderMethod(RenderError):
"""
Tried to use a render method which does not exist.
@ivar element: The element which did not have the render method.
@ivar renderName: The name of the renderer which could not be found.
"""
def __init__(self, element, renderName):
RenderError.__init__(self, element, renderName)
self.element = element
self.renderName = renderName
def __repr__(self):
return '%r: %r had no render method named %r' % (
self.__class__.__name__, self.element, self.renderName)
class MissingTemplateLoader(RenderError):
"""
L{MissingTemplateLoader} is raised when trying to render an Element without
a template loader, i.e. a C{loader} attribute.
@ivar element: The Element which did not have a document factory.
"""
def __init__(self, element):
RenderError.__init__(self, element)
self.element = element
def __repr__(self):
return '%r: %r had no loader' % (self.__class__.__name__,
self.element)
class UnexposedMethodError(Exception):
"""
Raised on any attempt to get a method which has not been exposed.
"""
class UnfilledSlot(Exception):
"""
During flattening, a slot with no associated data was encountered.
"""
class UnsupportedType(Exception):
"""
During flattening, an object of a type which cannot be flattened was
encountered.
"""
class FlattenerError(Exception):
"""
An error occurred while flattening an object.
@ivar _roots: A list of the objects on the flattener's stack at the time
the unflattenable object was encountered. The first element is least
deeply nested object and the last element is the most deeply nested.
"""
def __init__(self, exception, roots, traceback):
self._exception = exception
self._roots = roots
self._traceback = traceback
Exception.__init__(self, exception, roots, traceback)
def _formatRoot(self, obj):
"""
Convert an object from C{self._roots} to a string suitable for
inclusion in a render-traceback (like a normal Python traceback, but
can include "frame" source locations which are not in Python source
files).
@param obj: Any object which can be a render step I{root}.
Typically, L{Tag}s, strings, and other simple Python types.
@return: A string representation of C{obj}.
@rtype: L{str}
"""
# There's a circular dependency between this class and 'Tag', although
# only for an isinstance() check.
from twisted.web.template import Tag
if isinstance(obj, (bytes, str, unicode)):
# It's somewhat unlikely that there will ever be a str in the roots
# list. However, something like a MemoryError during a str.replace
# call (eg, replacing " with ") could possibly cause this.
# Likewise, UTF-8 encoding a unicode string to a byte string might
# fail like this.
if len(obj) > 40:
if isinstance(obj, unicode):
ellipsis = u'<...>'
else:
ellipsis = b'<...>'
return ascii(obj[:20] + ellipsis + obj[-20:])
else:
return ascii(obj)
elif isinstance(obj, Tag):
if obj.filename is None:
return 'Tag <' + obj.tagName + '>'
else:
return "File \"%s\", line %d, column %d, in \"%s\"" % (
obj.filename, obj.lineNumber,
obj.columnNumber, obj.tagName)
else:
return ascii(obj)
def __repr__(self):
"""
Present a string representation which includes a template traceback, so
we can tell where this error occurred in the template, as well as in
Python.
"""
# Avoid importing things unnecessarily until we actually need them;
# since this is an 'error' module we should be extra paranoid about
# that.
from traceback import format_list
if self._roots:
roots = ' ' + '\n '.join([
self._formatRoot(r) for r in self._roots]) + '\n'
else:
roots = ''
if self._traceback:
traceback = '\n'.join([
line
for entry in format_list(self._traceback)
for line in entry.splitlines()]) + '\n'
else:
traceback = ''
return (
'Exception while flattening:\n' +
roots + traceback +
self._exception.__class__.__name__ + ': ' +
str(self._exception) + '\n')
def __str__(self):
return repr(self)
| gpl-3.0 | 1,664,815,873,751,067,600 | 30.767442 | 80 | 0.612413 | false |
supermarcos/log2bq | mapreduce/mapreduce_pipeline.py | 25 | 8815 | #!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipelines for mapreduce library."""
from __future__ import with_statement
__all__ = [
"MapPipeline",
"MapperPipeline",
"MapreducePipeline",
"ReducePipeline",
"ShufflePipeline",
]
from mapreduce.lib import pipeline
from mapreduce.lib.pipeline import common as pipeline_common
from mapreduce.lib import files
from mapreduce.lib.files import file_service_pb
from mapreduce import base_handler
from mapreduce import context
from mapreduce import errors
from mapreduce import input_readers
from mapreduce import mapper_pipeline
from mapreduce import operation
from mapreduce import output_writers
from mapreduce import shuffler
from mapreduce import util
# MapperPipeline was extracted into mapper_pipeline.py only to remove a
# dependency cycle with shuffler.py; re-import the public names here.
MapperPipeline = mapper_pipeline.MapperPipeline
ShufflePipeline = shuffler.ShufflePipeline
class MapPipeline(base_handler.PipelineBase):
"""Runs the map stage of MapReduce.
Iterates over input reader and outputs data into key/value format
for shuffler consumption.
Args:
job_name: mapreduce job name as string.
mapper_spec: specification of map handler function as string.
input_reader_spec: input reader specification as string.
params: mapper and input reader parameters as dict.
shards: number of shards to start as int.
Returns:
list of filenames written to by this mapper, one for each shard.
"""
def run(self,
job_name,
mapper_spec,
input_reader_spec,
params,
shards=None):
yield MapperPipeline(
job_name + "-map",
mapper_spec,
input_reader_spec,
output_writer_spec=
output_writers.__name__ + ".KeyValueBlobstoreOutputWriter",
params=params,
shards=shards)
class _ReducerReader(input_readers.RecordsReader):
"""Reader to read KeyValues records files from Files API."""
expand_parameters = True
def __init__(self, filenames, position):
super(_ReducerReader, self).__init__(filenames, position)
self.current_key = None
self.current_values = None
def __iter__(self):
ctx = context.get()
combiner = None
if ctx:
combiner_spec = ctx.mapreduce_spec.mapper.params.get("combiner_spec")
if combiner_spec:
combiner = util.handler_for_name(combiner_spec)
self.current_key = None
self.current_values = None
for binary_record in super(_ReducerReader, self).__iter__():
proto = file_service_pb.KeyValues()
proto.ParseFromString(binary_record)
if self.current_key is None:
self.current_key = proto.key()
self.current_values = []
else:
assert proto.key() == self.current_key, (
"inconsistent key sequence. Expected %s but got %s" %
(self.current_key, proto.key()))
if combiner:
combiner_result = combiner(
self.current_key, proto.value_list(), self.current_values)
if not util.is_generator(combiner_result):
raise errors.BadCombinerOutputError(
"Combiner %s should yield values instead of returning them (%s)" %
(combiner, combiner_result))
self.current_values = []
for value in combiner_result:
if isinstance(value, operation.Operation):
value(ctx)
else:
# with combiner current values always come from combiner
self.current_values.append(value)
else:
# without combiner we just accumulate values.
self.current_values.extend(proto.value_list())
if not proto.partial():
key = self.current_key
values = self.current_values
# This is final value, don't try to serialize it.
self.current_key = None
self.current_values = None
yield (key, values)
else:
yield input_readers.ALLOW_CHECKPOINT
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
result = super(_ReducerReader, self).to_json()
result["current_key"] = self.current_key
result["current_values"] = self.current_values
return result
@classmethod
def from_json(cls, json):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
result = super(_ReducerReader, cls).from_json(json)
result.current_key = json["current_key"]
result.current_values = json["current_values"]
return result
class ReducePipeline(base_handler.PipelineBase):
"""Runs the reduce stage of MapReduce.
Merge-reads input files and runs reducer function on them.
Args:
job_name: mapreduce job name as string.
reader_spec: specification of reduce function.
output_writer_spec: specification of output write to use with reduce
function.
params: mapper parameters to use as dict.
filenames: list of filenames to reduce.
combiner_spec: Optional. Specification of a combine function. If not
supplied, no combine step will take place. The combine function takes a
key, list of values and list of previously combined results. It yields
combined values that might be processed by another combiner call, but will
eventually end up in reducer. The combiner output key is assumed to be the
same as the input key.
Returns:
filenames from output writer.
"""
def run(self,
job_name,
reducer_spec,
output_writer_spec,
params,
filenames,
combiner_spec=None):
new_params = dict(params or {})
new_params.update({
"files": filenames
})
if combiner_spec:
new_params.update({
"combiner_spec": combiner_spec,
})
yield mapper_pipeline.MapperPipeline(
job_name + "-reduce",
reducer_spec,
__name__ + "._ReducerReader",
output_writer_spec,
new_params)
class MapreducePipeline(base_handler.PipelineBase):
"""Pipeline to execute MapReduce jobs.
Args:
job_name: job name as string.
mapper_spec: specification of mapper to use.
reducer_spec: specification of reducer to use.
input_reader_spec: specification of input reader to read data from.
output_writer_spec: specification of output writer to save reduce output to.
mapper_params: parameters to use for mapper phase.
reducer_params: parameters to use for reduce phase.
shards: number of shards to use as int.
combiner_spec: Optional. Specification of a combine function. If not
supplied, no combine step will take place. The combine function takes a
key, list of values and list of previously combined results. It yields
combined values that might be processed by another combiner call, but will
eventually end up in reducer. The combiner output key is assumed to be the
same as the input key.
Returns:
filenames from output writer.
"""
def run(self,
job_name,
mapper_spec,
reducer_spec,
input_reader_spec,
output_writer_spec=None,
mapper_params=None,
reducer_params=None,
shards=None,
combiner_spec=None):
map_pipeline = yield MapPipeline(job_name,
mapper_spec,
input_reader_spec,
params=mapper_params,
shards=shards)
shuffler_pipeline = yield ShufflePipeline(
job_name, map_pipeline)
reducer_pipeline = yield ReducePipeline(
job_name,
reducer_spec,
output_writer_spec,
reducer_params,
shuffler_pipeline,
combiner_spec=combiner_spec)
with pipeline.After(reducer_pipeline):
all_temp_files = yield pipeline_common.Extend(
map_pipeline, shuffler_pipeline)
yield mapper_pipeline._CleanupPipeline(all_temp_files)
yield pipeline_common.Return(reducer_pipeline)
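# A word-count style sketch of how MapreducePipeline is typically wired up.
# The handler paths, the input reader / output writer choices and their
# parameters below are illustrative assumptions, not part of this module.
def _sample_word_count_map(data):
  """Map handler sketch: emit (word, '') for every word in an input line."""
  (unused_offset, line) = data
  for word in line.split():
    yield (word, "")
def _sample_word_count_reduce(key, values):
  """Reduce handler sketch: emit 'word: count' for every distinct word."""
  yield "%s: %d\n" % (key, len(values))
def _start_sample_word_count(blob_key):
  """Start the sketched job; returns the pipeline id for status tracking."""
  job = MapreducePipeline(
      "word_count",
      "main._sample_word_count_map",
      "main._sample_word_count_reduce",
      "mapreduce.input_readers.BlobstoreZipInputReader",
      "mapreduce.output_writers.BlobstoreOutputWriter",
      mapper_params={"blob_key": blob_key},
      reducer_params={"mime_type": "text/plain"},
      shards=16)
  job.start()
  return job.pipeline_id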
| apache-2.0 | -8,248,822,486,977,271,000 | 30.938406 | 80 | 0.663868 | false |
Mabrosche/turbulenz_engine | docs/source/hub/conf.py | 10 | 6399 | # -*- coding: utf-8 -*-
#
# Turbulenz Hub User Guide documentation build configuration file, created
# manually 20th May 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Turbulenz Hub User Guide'
copyright = u'2012, Turbulenz Limited'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = ['web']
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'TurbulenzHubUserGuide'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'TurbulenzHubUserGuide.tex', u'Turbulenz Hub User Guide',
u'Turbulenz Limited', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| mit | -431,316,289,491,959,400 | 31.984536 | 80 | 0.71433 | false |
sacharya/nova | nova/tests/virt/vmwareapi/db_fakes.py | 6 | 3397 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stubouts, mocks and fixtures for the test suite
"""
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import utils
def stub_out_db_instance_api(stubs):
"""Stubs out the db API for creating Instances."""
INSTANCE_TYPES = {
'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),
'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),
'm1.medium':
dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),
'm1.xlarge':
dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}
class FakeModel(object):
"""Stubs out for model."""
def __init__(self, values):
self.values = values
def __getattr__(self, name):
return self.values[name]
def get(self, attr):
try:
return self.__getattr__(attr)
except KeyError:
return None
def __getitem__(self, key):
if key in self.values:
return self.values[key]
else:
raise NotImplementedError()
def fake_instance_create(context, values):
"""Stubs out the db.instance_create method."""
type_data = INSTANCE_TYPES[values['instance_type']]
base_options = {
'name': values['name'],
'id': values['id'],
'uuid': values['uuid'],
'reservation_id': utils.generate_uid('r'),
'image_ref': values.get('image_ref'),
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
'vm_state': vm_states.BUILDING,
'task_state': task_states.SCHEDULING,
'user_id': values['user_id'],
'project_id': values['project_id'],
'instance_type': values['instance_type'],
'memory_mb': type_data['memory_mb'],
'vcpus': type_data['vcpus'],
'mac_addresses': [{'address': values['mac_address']}],
'root_gb': type_data['root_gb'],
'node': values['node'],
'metadata': []
}
return base_options
def fake_flavor_get_all(context, inactive=0, filters=None):
return INSTANCE_TYPES.values()
def fake_flavor_get_by_name(context, name):
return INSTANCE_TYPES[name]
stubs.Set(db, 'instance_create', fake_instance_create)
stubs.Set(db, 'flavor_get_all', fake_flavor_get_all)
stubs.Set(db, 'flavor_get_by_name', fake_flavor_get_by_name)
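# A sketch of how a test case would typically install these stubs (the
# stubout fixture and the test class name are illustrative assumptions):
#
#     import stubout
#     from nova.tests.virt.vmwareapi import db_fakes
#
#     def setUp(self):
#         super(VMwareAPITestCase, self).setUp()
#         self.stubs = stubout.StubOutForTesting()
#         db_fakes.stub_out_db_instance_api(self.stubs)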
| apache-2.0 | 677,161,019,840,651,100 | 33.663265 | 78 | 0.594348 | false |
demonchild2112/travis-test | grr/server/grr_response_server/gui/selenium_tests/hunt_archive_test.py | 2 | 12012 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Test the hunt_view interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
import mock
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import file_store
from grr_response_server.flows.general import collectors
from grr_response_server.flows.general import export as flow_export
from grr_response_server.gui import archive_generator
from grr_response_server.gui import gui_test_lib
from grr.test_lib import test_lib
class TestHuntArchiving(gui_test_lib.GRRSeleniumHuntTest):
"""Test the hunt archive download functionality."""
def testDoesNotShowGenerateArchiveButtonForNonExportableRDFValues(self):
values = [rdf_client.Process(pid=1), rdf_client.Process(pid=42423)]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.WaitUntil(self.IsTextPresent, "42423")
self.WaitUntilNot(self.IsTextPresent,
"Files referenced in this collection can be downloaded")
def testDoesNotShowGenerateArchiveButtonWhenResultCollectionIsEmpty(self):
hunt_id, _ = self.CreateGenericHuntWithCollection([])
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.WaitUntil(self.IsTextPresent, "Value")
self.WaitUntilNot(self.IsTextPresent,
"Files referenced in this collection can be downloaded")
def testShowsGenerateArchiveButtonForFileFinderHunt(self):
stat_entry = rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar", pathtype=rdf_paths.PathSpec.PathType.OS))
values = [rdf_file_finder.FileFinderResult(stat_entry=stat_entry)]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.WaitUntil(self.IsTextPresent,
"Files referenced in this collection can be downloaded")
def testShowsGenerateArchiveButtonForArtifactDownloaderHunt(self):
stat_entry = rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar", pathtype=rdf_paths.PathSpec.PathType.OS))
values = [
collectors.ArtifactFilesDownloaderResult(downloaded_file=stat_entry)
]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.WaitUntil(self.IsTextPresent,
"Files referenced in this collection can be downloaded")
def testExportCommandIsShownForStatEntryResults(self):
stat_entry = rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar", pathtype=rdf_paths.PathSpec.PathType.OS))
values = [rdf_file_finder.FileFinderResult(stat_entry=stat_entry)]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/#/hunts/%s/results" % hunt_id)
self.Click("link=Show export command")
self.WaitUntil(
self.IsTextPresent, "/usr/bin/grr_api_shell 'http://localhost:8000/' "
"--exec_code 'grrapi.Hunt(\"%s\").GetFilesArchive()."
"WriteToFile(\"./hunt_results_%s.zip\")'" %
(hunt_id, hunt_id.replace(":", "_")))
def testExportCommandIsNotShownWhenNoResults(self):
hunt_id, _ = self.CreateGenericHuntWithCollection([])
self.Open("/#/hunts/%s/results" % hunt_id)
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-results:contains('Value')")
self.WaitUntilNot(self.IsTextPresent, "Show export command")
def testExportCommandIsNotShownForNonFileResults(self):
values = [rdf_client.Process(pid=1), rdf_client.Process(pid=42423)]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/#/hunts/%s/results" % hunt_id)
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-results:contains('Value')")
self.WaitUntilNot(self.IsTextPresent, "Show export command")
def testHuntAuthorizationIsRequiredToGenerateResultsArchive(self):
stat_entry = rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar", pathtype=rdf_paths.PathSpec.PathType.OS))
values = [rdf_file_finder.FileFinderResult(stat_entry=stat_entry)]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.Click("css=button.DownloadButton")
self.WaitUntil(self.IsTextPresent, "Create a new approval request")
def testGenerateZipButtonGetsDisabledAfterClick(self):
hunt_id = self._CreateHuntWithDownloadedFile()
self.RequestAndGrantHuntApproval(hunt_id)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.Click("css=button.DownloadButton")
self.WaitUntil(self.IsElementPresent, "css=button.DownloadButton[disabled]")
self.WaitUntil(self.IsTextPresent, "Generation has started")
def testShowsNotificationWhenArchiveGenerationIsDone(self):
hunt_id = self._CreateHuntWithDownloadedFile()
self.RequestAndGrantHuntApproval(hunt_id)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.Click("css=button.DownloadButton")
self.WaitUntil(self.IsTextPresent, "Generation has started")
self.WaitUntil(self.IsUserNotificationPresent,
"Downloaded archive of hunt %s" % hunt_id)
# Check that the archive generating flow does not end with an error.
self.WaitUntilNot(self.IsUserNotificationPresent, "terminated due to error")
def testShowsErrorMessageIfArchiveStreamingFailsBeforeFirstChunkIsSent(self):
hunt_id = self._CreateHuntWithDownloadedFile()
self.RequestAndGrantHuntApproval(hunt_id)
def RaisingStub(*unused_args, **unused_kwargs):
raise RuntimeError("something went wrong")
with utils.Stubber(archive_generator.CollectionArchiveGenerator, "Generate",
RaisingStub):
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.Click("css=button.DownloadButton")
self.WaitUntil(self.IsTextPresent,
"Can't generate archive: Unknown error")
self.WaitUntil(self.IsUserNotificationPresent,
"Archive generation failed for hunt")
def testShowsNotificationIfArchiveStreamingFailsInProgress(self):
hunt_id = self._CreateHuntWithDownloadedFile()
self.RequestAndGrantHuntApproval(hunt_id)
def RaisingStub(*unused_args, **unused_kwargs):
yield b"foo"
yield b"bar"
raise RuntimeError("something went wrong")
with utils.Stubber(archive_generator.CollectionArchiveGenerator, "Generate",
RaisingStub):
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.Click("css=button.DownloadButton")
self.WaitUntil(self.IsUserNotificationPresent,
"Archive generation failed for hunt")
# There will be no failure message, as we can't get a status from an
# iframe that triggers the download.
self.WaitUntilNot(self.IsTextPresent,
"Can't generate archive: Unknown error")
def testDoesNotShowPerFileDownloadButtonForNonExportableRDFValues(self):
values = [rdf_client.Process(pid=1), rdf_client.Process(pid=42423)]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.WaitUntil(self.IsTextPresent, "42423")
self.WaitUntilNot(
self.IsElementPresent,
"css=grr-results-collection button:has(span.glyphicon-download)")
def testShowsPerFileDownloadButtonForFileFinderHunt(self):
stat_entry = rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar", pathtype=rdf_paths.PathSpec.PathType.OS))
values = [rdf_file_finder.FileFinderResult(stat_entry=stat_entry)]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.WaitUntil(
self.IsElementPresent,
"css=grr-results-collection button:has(span.glyphicon-download)")
def testShowsPerFileDownloadButtonForArtifactDownloaderHunt(self):
stat_entry = rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar", pathtype=rdf_paths.PathSpec.PathType.OS))
values = [
collectors.ArtifactFilesDownloaderResult(downloaded_file=stat_entry)
]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.WaitUntil(
self.IsElementPresent,
"css=grr-results-collection button:has(span.glyphicon-download)")
def testHuntAuthorizationIsRequiredToDownloadSingleHuntFile(self):
hunt_id = self._CreateHuntWithDownloadedFile()
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.Click("css=grr-results-collection button:has(span.glyphicon-download)")
self.WaitUntil(self.IsTextPresent, "Create a new approval request")
def testDownloadsSingleHuntFileIfAuthorizationIsPresent(self):
hunt_id = self._CreateHuntWithDownloadedFile()
results = self.GetHuntResults(hunt_id)
self.RequestAndGrantHuntApproval(hunt_id)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
fd = file_store.OpenFile(flow_export.CollectionItemToClientPath(results[0]))
with mock.patch.object(fd.__class__, "Read") as mock_obj:
self.Click(
"css=grr-results-collection button:has(span.glyphicon-download)")
self.WaitUntil(lambda: mock_obj.called)
def testDisplaysErrorMessageIfSingleHuntFileCanNotBeRead(self):
hunt_id = self._CreateHuntWithDownloadedFile()
results = self.GetHuntResults(hunt_id)
original_result = results[0]
payload = original_result.payload.Copy()
payload.pathspec.path += "blah"
client_id = self.SetupClients(1)[0]
self.AddResultsToHunt(hunt_id, client_id, [payload])
self.RequestAndGrantHuntApproval(hunt_id)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.Click(
"css=grr-results-collection button:has(span.glyphicon-download):last")
self.WaitUntil(self.IsTextPresent, "Couldn't download the file.")
if __name__ == "__main__":
app.run(test_lib.main)
| apache-2.0 | 6,818,393,420,998,139,000 | 37.5 | 80 | 0.698718 | false |
GheRivero/ansible | packaging/release/versionhelper/version_helper.py | 3 | 5870 | from __future__ import absolute_import, division, print_function
__metaclass__ = type
import argparse
import os
import re
import sys
from packaging.version import Version, VERSION_PATTERN
class AnsibleVersionMunger(object):
tag_offsets = dict(
dev=0,
a=100,
b=200,
rc=1000
)
# TODO: allow overrides here for packaging bump etc
def __init__(self, raw_version, revision=None, codename=None):
self._raw_version = raw_version
self._revision = revision
self._parsed_version = Version(raw_version)
self._codename = codename
self._parsed_regex_match = re.match(VERSION_PATTERN, raw_version, re.VERBOSE | re.IGNORECASE)
@property
def deb_version(self):
v = self._parsed_version
match = self._parsed_regex_match
if v.is_prerelease:
if match.group('pre'):
tag_value = match.group('pre')
tag_type = match.group('pre_l')
tag_ver = match.group('pre_n')
elif match.group('dev'):
tag_type = "dev"
tag_value = match.group('dev')
tag_ver = match.group('dev_n')
else:
raise Exception("unknown prerelease type for version {0}".format(self._raw_version))
elif v.is_postrelease:
raise Exception("post-release identifiers are not supported")
else:
tag_type = None
tag_value = ''
tag_ver = 0
# not a pre/post/dev release, just return base version
if not tag_type:
return '{base_version}'.format(base_version=self.base_version)
# it is a pre/dev release, include the tag value with a ~
return '{base_version}~{tag_value}'.format(base_version=self.base_version, tag_value=tag_value)
@property
def deb_release(self):
return '1' if self._revision is None else str(self._revision)
@property
def rpm_release(self):
v = self._parsed_version
match = self._parsed_regex_match
if v.is_prerelease:
if match.group('pre'):
tag_value = match.group('pre')
tag_type = match.group('pre_l')
tag_ver = match.group('pre_n')
elif match.group('dev'):
tag_type = "dev"
tag_value = match.group('dev')
tag_ver = match.group('dev_n')
else:
raise Exception("unknown prerelease type for version {0}".format(self._raw_version))
elif v.is_postrelease:
raise Exception("post-release identifiers are not supported")
else:
tag_type = None
tag_value = ''
tag_ver = 0
# not a pre/post/dev release, just append revision (default 1)
if not tag_type:
if self._revision is None:
self._revision = 1
return '{revision}'.format(revision=self._revision)
# cleanse tag value in case it starts with .
tag_value = tag_value.strip('.')
# coerce to int and None == 0
tag_ver = int(tag_ver if tag_ver else 0)
if self._revision is None:
tag_offset = self.tag_offsets.get(tag_type)
if tag_offset is None:
raise Exception('no tag offset defined for tag {0}'.format(tag_type))
pkgrel = '0.{0}'.format(tag_offset + tag_ver)
else:
pkgrel = self._revision
return '{pkgrel}.{tag_value}'.format(pkgrel=pkgrel, tag_value=tag_value)
@property
def raw(self):
return self._raw_version
# return the x.y.z version without any other modifiers present
@property
def base_version(self):
return self._parsed_version.base_version
# return the x.y version without any other modifiers present
@property
def major_version(self):
        return re.match(r'^(\d+\.\d+)', self._raw_version).group(1)
@property
def codename(self):
return self._codename if self._codename else "UNKNOWN"
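# Added illustration (not part of the original file): assuming a raw version
# string such as "2.5.0b1" and the tag_offsets defined above, the munger would
# produce roughly the following packaging fields (exact values depend on
# VERSION_PATTERN's named groups):
#
#   v = AnsibleVersionMunger("2.5.0b1")
#   v.base_version   # "2.5.0"
#   v.major_version  # "2.5"
#   v.deb_version    # "2.5.0~b1"
#   v.deb_release    # "1"
#   v.rpm_release    # "0.201.b1"  (offset 200 for "b" plus tag number 1)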
def main():
parser = argparse.ArgumentParser(description='Extract/transform Ansible versions to various packaging formats')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--raw', action='store_true')
group.add_argument('--majorversion', action='store_true')
group.add_argument('--baseversion', action='store_true')
group.add_argument('--debversion', action='store_true')
group.add_argument('--debrelease', action='store_true')
group.add_argument('--rpmrelease', action='store_true')
group.add_argument('--codename', action='store_true')
group.add_argument('--all', action='store_true')
parser.add_argument('--revision', action='store', default='auto')
args = parser.parse_args()
mydir = os.path.dirname(__file__)
release_loc = os.path.normpath(mydir + '/../../../lib')
sys.path.insert(0, release_loc)
from ansible import release
rev = None
if args.revision != 'auto':
rev = args.revision
v_raw = release.__version__
codename = release.__codename__
v = AnsibleVersionMunger(v_raw, revision=rev, codename=codename)
if args.raw:
print(v.raw)
elif args.baseversion:
print(v.base_version)
elif args.majorversion:
print(v.major_version)
elif args.debversion:
print(v.deb_version)
elif args.debrelease:
print(v.deb_release)
elif args.rpmrelease:
print(v.rpm_release)
elif args.codename:
print(v.codename)
elif args.all:
props = [name for (name, impl) in vars(AnsibleVersionMunger).items() if isinstance(impl, property)]
for propname in props:
print('{0}: {1}'.format(propname, getattr(v, propname)))
if __name__ == '__main__':
main()
| gpl-3.0 | -6,339,623,118,046,274,000 | 31.252747 | 115 | 0.593186 | false |
enikesha/pacioli | pacioli/accounting/valuations.py | 1 | 7986 | # Copyright (c) 2014, Satoshi Nakamoto Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import uuid
import datetime
from dateutil import parser
from collections import OrderedDict
from sqlalchemy.sql import func
from pacioli import db, models
import pacioli.accounting.rates as rates
class Partial:
def __init__(self, date, tside, amount, currency, ledger, journal_entry_id, rate):
self.date = date
self.tside = tside
self.amount = amount
self.currency = currency
self.ledger = ledger
self.journal_entry_id = journal_entry_id
self.rate = rate
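# Worked example (added for clarity, not from the original source). Assume two
# debit layers are in inventory: 1.0 BTC acquired at a $100 rate (older) and
# 1.0 BTC acquired at a $300 rate (newer), followed by a credit (sale) of
# 1.0 BTC at a $250 rate:
#   - 'fifo' relieves the $100 layer first -> gain = 250 - 100 = $150
#   - 'lifo' relieves the $300 layer first -> loss = 250 - 300 = -$50
#   - 'hifo' keeps the inventory sorted by rate and relieves the highest-rate
#     ($300) layer first, which minimizes the reported gain.
# In the code below the amounts are tracked in satoshis (1 BTC = 100,000,000),
# which is why layer amounts are divided by 100000000 when priced in USD.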
def calculate_bitcoin_gains(method):
usdtransactions = db.session \
.query(models.LedgerEntries) \
.filter(models.LedgerEntries.currency == 'usd') \
.delete()
transactions = db.session \
.query(models.LedgerEntries) \
.join(models.Subaccounts)\
.join(models.Accounts)\
.filter(models.Accounts.name == 'Bitcoins') \
.filter(models.LedgerEntries.currency == 'satoshis') \
.order_by(models.LedgerEntries.date.desc())\
.all()
inventory = []
while transactions:
tx = transactions.pop()
print('transaction')
print(tx.date)
print(tx.amount)
print(tx.tside)
tx_rate = rates.getRate(tx.date)
if tx.tside == 'debit':
inventory.insert(0, tx)
tx.rate = tx_rate
amount = tx.amount*tx_rate/100000000
debit_ledger_entry_id = str(uuid.uuid4())
debit_ledger_entry = models.LedgerEntries(
id=debit_ledger_entry_id,
date=tx.date,
tside="debit",
ledger=tx.ledger,
amount=amount,
currency="usd",
journal_entry_id=tx.journal_entry_id)
db.session.add(debit_ledger_entry)
credit_ledger_entry_id = str(uuid.uuid4())
credit_ledger_entry = models.LedgerEntries(
id=credit_ledger_entry_id,
date=tx.date,
tside="credit",
ledger="Revenues",
amount=amount,
currency="usd",
journal_entry_id=tx.journal_entry_id)
db.session.add(credit_ledger_entry)
db.session.commit()
if method == 'hifo':
inventory.sort(key=lambda x: x.rate)
elif tx.tside == 'credit':
if method in ['fifo','hifo']:
layer = inventory.pop()
elif method == 'lifo':
layer = inventory.pop(0)
print('layer')
print(layer.date)
print(layer.amount)
# layer_rate = rates.getRate(layer.date)
layer_rate = layer.rate
layer_costbasis = layer_rate*layer.amount/100000000
if tx.amount > layer.amount:
satoshis_sold = layer.amount
salevalue = satoshis_sold * tx_rate/100000000
costbasis = satoshis_sold * layer_rate/100000000
gain = salevalue - costbasis
residual_amount = tx.amount - satoshis_sold
new_tx = Partial(
date=tx.date,
tside=tx.tside,
amount=residual_amount,
currency=tx.currency,
ledger=tx.ledger,
journal_entry_id=tx.journal_entry_id,
rate=tx_rate)
print('new transaction')
print(new_tx.date)
print(new_tx.amount)
transactions.append(new_tx)
elif tx.amount < layer.amount:
satoshis_sold = tx.amount
salevalue = tx_rate * satoshis_sold/100000000
costbasis = layer_rate * satoshis_sold/100000000
gain = salevalue - costbasis
residual_amount = layer.amount - satoshis_sold
new_layer = Partial(
date = layer.date,
tside = layer.tside,
amount = residual_amount,
currency = layer.currency,
ledger = layer.ledger,
journal_entry_id = layer.journal_entry_id,
rate = layer.rate)
print('new layer')
print(new_layer.date)
print(new_layer.amount)
inventory.append(new_layer)
elif tx.amount == layer.amount:
satoshis_sold = tx.amount
salevalue = tx_rate * satoshis_sold/100000000
costbasis = layer_rate * satoshis_sold/100000000
gain = salevalue - costbasis
if gain:
if gain > 0:
gain_tside = 'credit'
gain_ledger = 'Gains from the Sale of Bitcoins'
elif gain < 0:
gain_tside = 'debit'
gain_ledger = 'Losses from the Sale of Bitcoins'
gain = abs(gain)
                gain_ledger_entry_id = str(uuid.uuid4())
                gain_ledger_entry = models.LedgerEntries(
                    id=gain_ledger_entry_id,
date=tx.date,
tside=gain_tside,
ledger=gain_ledger,
amount=gain,
currency="usd",
journal_entry_id=tx.journal_entry_id)
db.session.add(gain_ledger_entry)
debit_ledger_entry_id = str(uuid.uuid4())
debit_ledger_entry = models.LedgerEntries(
id=debit_ledger_entry_id,
date=tx.date,
tside="debit",
ledger="Expenses",
amount=salevalue,
currency="usd",
journal_entry_id=tx.journal_entry_id)
db.session.add(debit_ledger_entry)
credit_ledger_entry_id = str(uuid.uuid4())
credit_ledger_entry = models.LedgerEntries(
id=credit_ledger_entry_id,
date=tx.date,
tside="credit",
ledger=tx.ledger,
amount=costbasis,
currency="usd",
journal_entry_id=tx.journal_entry_id)
db.session.add(credit_ledger_entry)
db.session.commit()
| bsd-3-clause | 6,157,885,550,311,714,000 | 41.705882 | 757 | 0.555096 | false |
toastedcornflakes/scikit-learn | sklearn/svm/tests/test_svm.py | 2 | 34260 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError
from sklearn.multiclass import OneVsRestClassifier
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(svr.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
def test_linearsvr_fit_sampleweight():
# check correct result when sample_weight is 1
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
n_samples = len(diabetes.target)
unit_weight = np.ones(n_samples)
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=unit_weight)
score1 = lsvr.score(diabetes.data, diabetes.target)
lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=random_weight)
score3 = lsvr_unflat.score(diabetes.data, diabetes.target,
sample_weight=random_weight)
X_flat = np.repeat(diabetes.data, random_weight, axis=0)
y_flat = np.repeat(diabetes.target, random_weight, axis=0)
lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat)
score4 = lsvr_flat.score(X_flat, y_flat)
assert_almost_equal(score3, score4, 2)
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check shape of decision_function with decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf = svm.SVC(kernel='linear', C=0.1).fit(X_train, y_train)
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_predict():
# Test SVR's decision_function
# Sanity check, test that predict implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
    # We take as dataset the two-dimensional projection of iris so
    # that it is not separable and remove half of the samples from
    # one class to unbalance it.
    # We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='macro')
<= metrics.f1_score(y, y_pred_balanced,
average='macro'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_hinge", "loss='l2'", "1.0"),
svm.LinearSVC(loss="l2").fit, X, y)
# LinearSVR
# loss l1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l1", "epsilon_insensitive", "loss='l1'",
"1.0"),
svm.LinearSVR(loss="l1").fit, X, y)
# loss l2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
def test_linear_svx_uppercase_loss_penalty_raises_error():
# Check if Upper case notation raises error at _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported",
svm.LinearSVC(loss="SQuared_hinge").fit, X, y)
assert_raise_message(ValueError, ("The combination of penalty='L2'"
" and loss='squared_hinge' is not supported"),
svm.LinearSVC(penalty="L2").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_linearsvc_fit_sampleweight():
# check correct result when sample_weight is 1
n_samples = len(X)
unit_weight = np.ones(n_samples)
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf_unitweight = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=unit_weight)
# check if same as sample_weight=None
assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvc_unflat = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=random_weight)
pred1 = lsvc_unflat.predict(T)
X_flat = np.repeat(X, random_weight, axis=0)
y_flat = np.repeat(Y, random_weight, axis=0)
lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat)
pred2 = lsvc_flat.predict(T)
assert_array_equal(pred1, pred2)
assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
def test_decision_function_shape_two_class():
for n_classes in [2, 3]:
X, y = make_blobs(centers=n_classes, random_state=0)
for estimator in [svm.SVC, svm.NuSVC]:
clf = OneVsRestClassifier(estimator(
decision_function_shape="ovr")).fit(X, y)
assert_equal(len(clf.predict(X)), len(y))
| bsd-3-clause | -7,728,644,145,610,328,000 | 35.957929 | 97 | 0.624869 | false |
elkingtonmcb/h2o-2 | py/testdir_single_jvm/notest_parse_500_cols_spill.py | 9 | 4144 | import unittest, random, sys, time, os
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_exec as h2e
def write_syn_dataset(csvPathname, rowCount, colCount, SEED):
    # seeded random generator (each row reuses one random value across all columns)
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
for i in range(rowCount):
rowData = []
# just reuse the same col data, since we're just parsing
# don't want to compress?
# r = random.random()
r = random.randint(1,1500)
for j in range(colCount):
rowData.append(r)
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
java_extra_args='-XX:+PrintGCDetails'
h2o.init(1, java_heap_GB=10, java_extra_args=java_extra_args)
@classmethod
def tearDownClass(cls):
## print "sleeping 3600"
# h2o.sleep(3600)
h2o.tear_down_cloud()
def test_parse_500_cols_spill_fvec(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(100000, 500, 'cA', 500, 500),
]
h2b.browseTheCloud()
for (rowCount, colCount, orig_hex_key, timeoutSecs, timeoutSecs2) in tryList:
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = 'syn_' + str(SEEDPERFILE) + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
csvPathPattern = SYNDATASETS_DIR + '/' + '*syn*csv*'
# create sym links
# multifile = 100
multifile = 50
# there is already one file. assume it's the "0" case
for p in range(1, multifile):
csvPathnameLink = csvPathname + "_" + str(p)
os.symlink(csvFilename, csvPathnameLink)
print "\nCreating random", csvPathname
write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE)
# for trial in range(5):
# try to pass with 2?
for trial in range(2):
hex_key = orig_hex_key + str(trial)
start = time.time()
parseResult = h2i.import_parse(path=csvPathPattern, hex_key=hex_key, delete_on_done=1,
timeoutSecs=timeoutSecs, retryDelaySecs=3, doSummary=False)
print "Parse:", parseResult['destination_key'], "took", time.time() - start, "seconds"
start = time.time()
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=timeoutSecs2)
print "Inspect:", parseResult['destination_key'], "took", time.time() - start, "seconds"
h2o_cmd.infoFromInspect(inspect, csvPathname)
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols']), \
" byteSize:", "{:,}".format(inspect['byteSize'])
# should match # of cols in header or ??
self.assertEqual(inspect['numCols'], colCount,
"parse created result with the wrong number of cols %s %s" % (inspect['numCols'], colCount))
self.assertEqual(inspect['numRows'], rowCount * multifile,
"parse created result with the wrong number of rows (header shouldn't count) %s %s" % \
(inspect['numRows'], rowCount * multifile))
# h2i.delete_keys_at_all_nodes()
if __name__ == '__main__':
h2o.unit_main()
# kevin@Kevin-Ubuntu4:~/h2o/py/testdir_single_jvm/sandbox/ice.W5qa_K/ice54321$ time ls -R * > /dev/null
#
# real 0m6.900s
# user 0m6.260s
# sys 0m0.628s
# kevin@Kevin-Ubuntu4:~/h2o/py/testdir_single_jvm/sandbox/ice.W5qa_K/ice54321$ ls -R * | wc -l
# 651847
#
# eventually you can hit os limits on # of files in a directory.
| apache-2.0 | -8,404,751,599,536,395,000 | 38.466667 | 112 | 0.566844 | false |
smallyear/linuxLearn | salt/salt/tops/ext_nodes.py | 3 | 2521 | # -*- coding: utf-8 -*-
'''
External Nodes Classifier
=========================
The External Nodes Classifier is a master tops subsystem that retrieves mapping
information from major configuration management systems. One of the most common
external nodes classifiers system is provided by Cobbler and is called
``cobbler-ext-nodes``.
The cobbler-ext-nodes command can be used with this configuration:
.. code-block:: yaml
master_tops:
ext_nodes: cobbler-ext-nodes
It is noteworthy that the Salt system does not directly ingest the data
sent from the ``cobbler-ext-nodes`` command, but converts the data into
information that is used by a Salt top file.
Any command can replace the call to 'cobbler-ext-nodes' above, but currently the
data must be formatted in the same way that the standard 'cobbler-ext-nodes'
does.
See (admittedly degenerate and probably not complete) example:
.. code-block:: yaml
classes:
- basepackages
- database
The above essentially is the same as a top.sls containing the following:
.. code-block:: yaml
base:
'*':
- basepackages
- database
'''
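# Added illustration (not part of the original module): given output like the
# docstring's example from the configured command, ``top()`` returns a mapping
# of environment to class list. The minion id below is hypothetical:
#
#   $ cobbler-ext-nodes myminion
#   classes:
#     - basepackages
#     - database
#
#   top(opts={'id': 'myminion'})  ->  {'base': ['basepackages', 'database']}
#
# With no ``environment`` key in the output, the default 'base' environment is
# used, mirroring the top.sls equivalent shown above.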
from __future__ import absolute_import
# Import python libs
import logging
import subprocess
# Import third party libs
import yaml
log = logging.getLogger(__name__)
def __virtual__():
'''
Only run if properly configured
'''
if __opts__['master_tops'].get('ext_nodes'):
return True
return False
def top(**kwargs):
'''
Run the command configured
'''
if 'id' not in kwargs['opts']:
return {}
cmd = '{0} {1}'.format(
__opts__['master_tops']['ext_nodes'],
kwargs['opts']['id']
)
ndata = yaml.safe_load(
subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE
).communicate()[0])
if not ndata:
log.info('master_tops ext_nodes call did not return any data')
ret = {}
if 'environment' in ndata:
env = ndata['environment']
else:
env = 'base'
if 'classes' in ndata:
if isinstance(ndata['classes'], dict):
ret[env] = list(ndata['classes'])
elif isinstance(ndata['classes'], list):
ret[env] = ndata['classes']
else:
return ret
else:
log.info('master_tops ext_nodes call did not have a dictionary with a "classes" key.')
return ret
| apache-2.0 | 7,257,270,866,178,661,000 | 23.475728 | 94 | 0.612852 | false |
jazkarta/edx-platform-for-isc | lms/djangoapps/bulk_email/tasks.py | 7 | 34332 | """
This module contains celery task functions for handling the sending of bulk email
to a course.
"""
import re
import random
import json
from time import sleep
import dogstats_wrapper as dog_stats_api
from smtplib import SMTPServerDisconnected, SMTPDataError, SMTPConnectError, SMTPException
from boto.ses.exceptions import (
SESAddressNotVerifiedError,
SESIdentityNotVerifiedError,
SESDomainNotConfirmedError,
SESAddressBlacklistedError,
SESDailyQuotaExceededError,
SESMaxSendingRateExceededError,
SESDomainEndsWithDotError,
SESLocalAddressCharacterError,
SESIllegalAddressError,
)
from boto.exception import AWSConnectionError
from celery import task, current_task
from celery.utils.log import get_task_logger
from celery.states import SUCCESS, FAILURE, RETRY
from celery.exceptions import RetryTaskError
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import EmailMultiAlternatives, get_connection
from django.core.urlresolvers import reverse
from bulk_email.models import (
CourseEmail, Optout, CourseEmailTemplate,
SEND_TO_MYSELF, SEND_TO_ALL, TO_OPTIONS,
)
from courseware.courses import get_course, course_image_url
from student.roles import CourseStaffRole, CourseInstructorRole
from instructor_task.models import InstructorTask
from instructor_task.subtasks import (
SubtaskStatus,
queue_subtasks_for_query,
check_subtask_is_valid,
update_subtask_status,
)
from util.query import use_read_replica_if_available
log = get_task_logger(__name__)
# Errors that an individual email is failing to be sent, and should just
# be treated as a fail.
SINGLE_EMAIL_FAILURE_ERRORS = (
SESAddressBlacklistedError, # Recipient's email address has been temporarily blacklisted.
SESDomainEndsWithDotError, # Recipient's email address' domain ends with a period/dot.
SESIllegalAddressError, # Raised when an illegal address is encountered.
SESLocalAddressCharacterError, # An address contained a control or whitespace character.
)
# Exceptions that, if caught, should cause the task to be re-tried.
# These errors will be caught a limited number of times before the task fails.
LIMITED_RETRY_ERRORS = (
SMTPConnectError,
SMTPServerDisconnected,
AWSConnectionError,
)
# Errors that indicate that a mailing task should be retried without limit.
# An example is if email is being sent too quickly, but may succeed if sent
# more slowly. When caught by a task, it triggers an exponential backoff and retry.
# Retries happen continuously until the email is sent.
# Note that the SMTPDataErrors here are only those within the 4xx range.
# Those not in this range (i.e. in the 5xx range) are treated as hard failures
# and thus like SINGLE_EMAIL_FAILURE_ERRORS.
INFINITE_RETRY_ERRORS = (
SESMaxSendingRateExceededError, # Your account's requests/second limit has been exceeded.
SMTPDataError,
)
# Errors that are known to indicate an inability to send any more emails,
# and should therefore not be retried. For example, exceeding a quota for emails.
# Also, any SMTP errors that are not explicitly enumerated above.
BULK_EMAIL_FAILURE_ERRORS = (
SESAddressNotVerifiedError, # Raised when a "Reply-To" address has not been validated in SES yet.
SESIdentityNotVerifiedError, # Raised when an identity has not been verified in SES yet.
SESDomainNotConfirmedError, # Raised when domain ownership is not confirmed for DKIM.
SESDailyQuotaExceededError, # 24-hour allotment of outbound email has been exceeded.
SMTPException,
)
def _get_recipient_queryset(user_id, to_option, course_id, course_location):
"""
Returns a query set of email recipients corresponding to the requested to_option category.
`to_option` is either SEND_TO_MYSELF, SEND_TO_STAFF, or SEND_TO_ALL.
Recipients who are in more than one category (e.g. enrolled in the course and are staff or self)
will be properly deduped.
"""
if to_option not in TO_OPTIONS:
log.error("Unexpected bulk email TO_OPTION found: %s", to_option)
raise Exception("Unexpected bulk email TO_OPTION found: {0}".format(to_option))
if to_option == SEND_TO_MYSELF:
recipient_qset = User.objects.filter(id=user_id)
else:
staff_qset = CourseStaffRole(course_id).users_with_role()
instructor_qset = CourseInstructorRole(course_id).users_with_role()
recipient_qset = (staff_qset | instructor_qset).distinct()
if to_option == SEND_TO_ALL:
# We also require students to have activated their accounts to
# provide verification that the provided email address is valid.
enrollment_qset = User.objects.filter(
is_active=True,
courseenrollment__course_id=course_id,
courseenrollment__is_active=True
)
# Now we do some queryset sidestepping to avoid doing a DISTINCT
# query across the course staff and the enrolled students, which
# forces the creation of a temporary table in the db.
unenrolled_staff_qset = recipient_qset.exclude(
courseenrollment__course_id=course_id, courseenrollment__is_active=True
)
# use read_replica if available:
unenrolled_staff_qset = use_read_replica_if_available(unenrolled_staff_qset)
unenrolled_staff_ids = [user.id for user in unenrolled_staff_qset]
recipient_qset = enrollment_qset | User.objects.filter(id__in=unenrolled_staff_ids)
# again, use read_replica if available to lighten the load for large queries
return use_read_replica_if_available(recipient_qset)
def _get_course_email_context(course):
"""
Returns context arguments to apply to all emails, independent of recipient.
"""
course_id = course.id.to_deprecated_string()
course_title = course.display_name
course_url = 'https://{}{}'.format(
settings.SITE_NAME,
reverse('course_root', kwargs={'course_id': course_id})
)
image_url = 'https://{}{}'.format(settings.SITE_NAME, course_image_url(course))
email_context = {
'course_title': course_title,
'course_url': course_url,
'course_image_url': image_url,
'account_settings_url': 'https://{}{}'.format(settings.SITE_NAME, reverse('dashboard')),
'platform_name': settings.PLATFORM_NAME,
}
return email_context
def perform_delegate_email_batches(entry_id, course_id, task_input, action_name):
"""
Delegates emails by querying for the list of recipients who should
get the mail, chopping up into batches of no more than settings.BULK_EMAIL_EMAILS_PER_TASK
in size, and queueing up worker jobs.
"""
entry = InstructorTask.objects.get(pk=entry_id)
# Get inputs to use in this task from the entry.
user_id = entry.requester.id
task_id = entry.task_id
# Perfunctory check, since expansion is made for convenience of other task
# code that doesn't need the entry_id.
if course_id != entry.course_id:
format_msg = u"Course id conflict: explicit value %r does not match task value %r"
log.warning(u"Task %s: " + format_msg, task_id, course_id, entry.course_id)
raise ValueError(format_msg % (course_id, entry.course_id))
# Fetch the CourseEmail.
email_id = task_input['email_id']
try:
email_obj = CourseEmail.objects.get(id=email_id)
except CourseEmail.DoesNotExist:
# The CourseEmail object should be committed in the view function before the task
# is submitted and reaches this point.
log.warning(u"Task %s: Failed to get CourseEmail with id %s", task_id, email_id)
raise
# Check to see if email batches have already been defined. This seems to
# happen sometimes when there is a loss of connection while a task is being
# queued. When this happens, the same task gets called again, and a whole
# new raft of subtasks gets queued up. We will assume that if subtasks
# have already been defined, there is no need to redefine them below.
# So we just return right away. We don't raise an exception, because we want
# the current task to be marked with whatever it had been marked with before.
if len(entry.subtasks) > 0 and len(entry.task_output) > 0:
log.warning(u"Task %s has already been processed for email %s! InstructorTask = %s", task_id, email_id, entry)
progress = json.loads(entry.task_output)
return progress
# Sanity check that course for email_obj matches that of the task referencing it.
if course_id != email_obj.course_id:
format_msg = u"Course id conflict: explicit value %r does not match email value %r"
log.warning(u"Task %s: " + format_msg, task_id, course_id, email_obj.course_id)
raise ValueError(format_msg % (course_id, email_obj.course_id))
# Fetch the course object.
course = get_course(course_id)
if course is None:
msg = u"Task %s: course not found: %s"
log.error(msg, task_id, course_id)
raise ValueError(msg % (task_id, course_id))
# Get arguments that will be passed to every subtask.
to_option = email_obj.to_option
global_email_context = _get_course_email_context(course)
def _create_send_email_subtask(to_list, initial_subtask_status):
"""Creates a subtask to send email to a given recipient list."""
subtask_id = initial_subtask_status.task_id
new_subtask = send_course_email.subtask(
(
entry_id,
email_id,
to_list,
global_email_context,
initial_subtask_status.to_dict(),
),
task_id=subtask_id,
routing_key=settings.BULK_EMAIL_ROUTING_KEY,
)
return new_subtask
recipient_qset = _get_recipient_queryset(user_id, to_option, course_id, course.location)
recipient_fields = ['profile__name', 'email']
log.info(u"Task %s: Preparing to queue subtasks for sending emails for course %s, email %s, to_option %s",
task_id, course_id, email_id, to_option)
progress = queue_subtasks_for_query(
entry,
action_name,
_create_send_email_subtask,
recipient_qset,
recipient_fields,
settings.BULK_EMAIL_EMAILS_PER_TASK,
)
# We want to return progress here, as this is what will be stored in the
# AsyncResult for the parent task as its return value.
# The AsyncResult will then be marked as SUCCEEDED, and have this return value as its "result".
# That's okay, for the InstructorTask will have the "real" status, and monitoring code
# should be using that instead.
return progress
@task(default_retry_delay=settings.BULK_EMAIL_DEFAULT_RETRY_DELAY, max_retries=settings.BULK_EMAIL_MAX_RETRIES) # pylint: disable=not-callable
def send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status_dict):
"""
Sends an email to a list of recipients.
Inputs are:
* `entry_id`: id of the InstructorTask object to which progress should be recorded.
* `email_id`: id of the CourseEmail model that is to be emailed.
* `to_list`: list of recipients. Each is represented as a dict with the following keys:
- 'profile__name': full name of User.
- 'email': email address of User.
- 'pk': primary key of User model.
* `global_email_context`: dict containing values that are unique for this email but the same
for all recipients of this email. This dict is to be used to fill in slots in email
template. It does not include 'name' and 'email', which will be provided by the to_list.
* `subtask_status_dict` : dict containing values representing current status. Keys are:
'task_id' : id of subtask. This is used to pass task information across retries.
'attempted' : number of attempts -- should equal succeeded plus failed
'succeeded' : number that succeeded in processing
'skipped' : number that were not processed.
'failed' : number that failed during processing
'retried_nomax' : number of times the subtask has been retried for conditions that
should not have a maximum count applied
'retried_withmax' : number of times the subtask has been retried for conditions that
should have a maximum count applied
'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)
Most values will be zero on initial call, but may be different when the task is
invoked as part of a retry.
Sends to all addresses contained in to_list that are not also in the Optout table.
Emails are sent multi-part, in both plain text and html. Updates InstructorTask object
with status information (sends, failures, skips) and updates number of subtasks completed.
"""
subtask_status = SubtaskStatus.from_dict(subtask_status_dict)
current_task_id = subtask_status.task_id
num_to_send = len(to_list)
log.info(u"Preparing to send email %s to %d recipients as subtask %s for instructor task %d: context = %s, status=%s",
email_id, num_to_send, current_task_id, entry_id, global_email_context, subtask_status)
# Check that the requested subtask is actually known to the current InstructorTask entry.
# If this fails, it throws an exception, which should fail this subtask immediately.
# This can happen when the parent task has been run twice, and results in duplicate
# subtasks being created for the same InstructorTask entry. This can happen when Celery
# loses its connection to its broker, and any current tasks get requeued.
# We hope to catch this condition in perform_delegate_email_batches() when it's the parent
# task that is resubmitted, but just in case we fail to do so there, we check here as well.
# There is also a possibility that this task will be run twice by Celery, for the same reason.
# To deal with that, we need to confirm that the task has not already been completed.
check_subtask_is_valid(entry_id, current_task_id, subtask_status)
send_exception = None
new_subtask_status = None
try:
course_title = global_email_context['course_title']
with dog_stats_api.timer('course_email.single_task.time.overall', tags=[_statsd_tag(course_title)]):
new_subtask_status, send_exception = _send_course_email(
entry_id,
email_id,
to_list,
global_email_context,
subtask_status,
)
except Exception:
# Unexpected exception. Try to write out the failure to the entry before failing.
log.exception("Send-email task %s for email %s: failed unexpectedly!", current_task_id, email_id)
# We got here for really unexpected reasons. Since we don't know how far
# the task got in emailing, we count all recipients as having failed.
# It at least keeps the counts consistent.
subtask_status.increment(failed=num_to_send, state=FAILURE)
update_subtask_status(entry_id, current_task_id, subtask_status)
raise
if send_exception is None:
# Update the InstructorTask object that is storing its progress.
log.info("Send-email task %s for email %s: succeeded", current_task_id, email_id)
update_subtask_status(entry_id, current_task_id, new_subtask_status)
elif isinstance(send_exception, RetryTaskError):
# If retrying, a RetryTaskError needs to be returned to Celery.
        # We assume that the progress made before the retry condition
# was encountered has already been updated before the retry call was made,
# so we only log here.
log.warning("Send-email task %s for email %s: being retried", current_task_id, email_id)
raise send_exception # pylint: disable=raising-bad-type
else:
log.error("Send-email task %s for email %s: failed: %s", current_task_id, email_id, send_exception)
update_subtask_status(entry_id, current_task_id, new_subtask_status)
raise send_exception # pylint: disable=raising-bad-type
# return status in a form that can be serialized by Celery into JSON:
log.info("Send-email task %s for email %s: returning status %s", current_task_id, email_id, new_subtask_status)
return new_subtask_status.to_dict()
def _filter_optouts_from_recipients(to_list, course_id):
"""
Filters a recipient list based on student opt-outs for a given course.
Returns the filtered recipient list, as well as the number of optouts
removed from the list.
"""
optouts = Optout.objects.filter(
course_id=course_id,
user__in=[i['pk'] for i in to_list]
).values_list('user__email', flat=True)
optouts = set(optouts)
# Only count the num_optout for the first time the optouts are calculated.
# We assume that the number will not change on retries, and so we don't need
# to calculate it each time.
num_optout = len(optouts)
to_list = [recipient for recipient in to_list if recipient['email'] not in optouts]
return to_list, num_optout
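# Illustrative usage (hypothetical data, not from the original file): given
# to_list = [{'pk': 1, 'email': 'a@example.com', 'profile__name': 'A'}] and an
# Optout row for 'a@example.com' in this course, the call returns ([], 1).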
def _get_source_address(course_id, course_title):
"""
Calculates an email address to be used as the 'from-address' for sent emails.
Makes a unique from name and address for each course, e.g.
"COURSE_TITLE" Course Staff <[email protected]>
"""
course_title_no_quotes = re.sub(r'"', '', course_title)
# For the email address, get the course. Then make sure that it can be used
# in an email address, by substituting a '_' anywhere a non-(ascii, period, or dash)
# character appears.
from_addr = u'"{0}" Course Staff <{1}-{2}>'.format(
course_title_no_quotes,
re.sub(r"[^\w.-]", '_', course_id.course),
settings.BULK_EMAIL_DEFAULT_FROM_EMAIL
)
return from_addr
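# Illustrative example (hypothetical values): with course_title = 'Intro "CS"',
# course_id.course = 'CS-101.x' and BULK_EMAIL_DEFAULT_FROM_EMAIL set to
# 'no-reply@example.com', this returns
# '"Intro CS" Course Staff <CS-101.x-no-reply@example.com>'.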
def _send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status):
"""
Performs the email sending task.
Sends an email to a list of recipients.
Inputs are:
* `entry_id`: id of the InstructorTask object to which progress should be recorded.
* `email_id`: id of the CourseEmail model that is to be emailed.
* `to_list`: list of recipients. Each is represented as a dict with the following keys:
- 'profile__name': full name of User.
- 'email': email address of User.
- 'pk': primary key of User model.
* `global_email_context`: dict containing values that are unique for this email but the same
for all recipients of this email. This dict is to be used to fill in slots in email
template. It does not include 'name' and 'email', which will be provided by the to_list.
* `subtask_status` : object of class SubtaskStatus representing current status.
Sends to all addresses contained in to_list that are not also in the Optout table.
Emails are sent multi-part, in both plain text and html.
Returns a tuple of two values:
* First value is a SubtaskStatus object which represents current progress at the end of this call.
* Second value is an exception returned by the innards of the method, indicating a fatal error.
In this case, the number of recipients that were not sent have already been added to the
'failed' count above.
"""
# Get information from current task's request:
task_id = subtask_status.task_id
try:
course_email = CourseEmail.objects.get(id=email_id)
except CourseEmail.DoesNotExist as exc:
log.exception("Task %s: could not find email id:%s to send.", task_id, email_id)
raise
# Exclude optouts (if not a retry):
# Note that we don't have to do the optout logic at all if this is a retry,
# because we have presumably already performed the optout logic on the first
# attempt. Anyone on the to_list on a retry has already passed the filter
# that existed at that time, and we don't need to keep checking for changes
# in the Optout list.
if subtask_status.get_retry_count() == 0:
to_list, num_optout = _filter_optouts_from_recipients(to_list, course_email.course_id)
subtask_status.increment(skipped=num_optout)
course_title = global_email_context['course_title']
subject = "[" + course_title + "] " + course_email.subject
# use the email from address in the CourseEmail, if it is present, otherwise compute it
from_addr = course_email.from_addr if course_email.from_addr else \
_get_source_address(course_email.course_id, course_title)
# use the CourseEmailTemplate that was associated with the CourseEmail
course_email_template = course_email.get_template()
try:
connection = get_connection()
connection.open()
# Define context values to use in all course emails:
email_context = {'name': '', 'email': ''}
email_context.update(global_email_context)
while to_list:
# Update context with user-specific values from the user at the end of the list.
# At the end of processing this user, they will be popped off of the to_list.
# That way, the to_list will always contain the recipients remaining to be emailed.
# This is convenient for retries, which will need to send to those who haven't
# yet been emailed, but not send to those who have already been sent to.
current_recipient = to_list[-1]
email = current_recipient['email']
email_context['email'] = email
email_context['name'] = current_recipient['profile__name']
email_context['user_id'] = current_recipient['pk']
email_context['course_id'] = course_email.course_id
# Construct message content using templates and context:
plaintext_msg = course_email_template.render_plaintext(course_email.text_message, email_context)
html_msg = course_email_template.render_htmltext(course_email.html_message, email_context)
# Create email:
email_msg = EmailMultiAlternatives(
subject,
plaintext_msg,
from_addr,
[email],
connection=connection
)
email_msg.attach_alternative(html_msg, 'text/html')
# Throttle if we have gotten the rate limiter. This is not very high-tech,
# but if a task has been retried for rate-limiting reasons, then we sleep
# for a period of time between all emails within this task. Choice of
# the value depends on the number of workers that might be sending email in
# parallel, and what the SES throttle rate is.
if subtask_status.retried_nomax > 0:
sleep(settings.BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS)
try:
log.debug('Email with id %s to be sent to %s', email_id, email)
with dog_stats_api.timer('course_email.single_send.time.overall', tags=[_statsd_tag(course_title)]):
connection.send_messages([email_msg])
except SMTPDataError as exc:
# According to SMTP spec, we'll retry error codes in the 4xx range. 5xx range indicates hard failure.
if exc.smtp_code >= 400 and exc.smtp_code < 500:
# This will cause the outer handler to catch the exception and retry the entire task.
raise exc
else:
# This will fall through and not retry the message.
log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc.smtp_error)
dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
subtask_status.increment(failed=1)
except SINGLE_EMAIL_FAILURE_ERRORS as exc:
# This will fall through and not retry the message.
log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc)
dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
subtask_status.increment(failed=1)
else:
dog_stats_api.increment('course_email.sent', tags=[_statsd_tag(course_title)])
if settings.BULK_EMAIL_LOG_SENT_EMAILS:
log.info('Email with id %s sent to %s', email_id, email)
else:
log.debug('Email with id %s sent to %s', email_id, email)
subtask_status.increment(succeeded=1)
# Pop the user that was emailed off the end of the list only once they have
# successfully been processed. (That way, if there were a failure that
# needed to be retried, the user is still on the list.)
to_list.pop()
except INFINITE_RETRY_ERRORS as exc:
dog_stats_api.increment('course_email.infinite_retry', tags=[_statsd_tag(course_title)])
# Increment the "retried_nomax" counter, update other counters with progress to date,
# and set the state to RETRY:
subtask_status.increment(retried_nomax=1, state=RETRY)
return _submit_for_retry(
entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=True
)
except LIMITED_RETRY_ERRORS as exc:
# Errors caught here cause the email to be retried. The entire task is actually retried
# without popping the current recipient off of the existing list.
# Errors caught are those that indicate a temporary condition that might succeed on retry.
dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])
# Increment the "retried_withmax" counter, update other counters with progress to date,
# and set the state to RETRY:
subtask_status.increment(retried_withmax=1, state=RETRY)
return _submit_for_retry(
entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False
)
except BULK_EMAIL_FAILURE_ERRORS as exc:
dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
num_pending = len(to_list)
log.exception('Task %s: email with id %d caused send_course_email task to fail with "fatal" exception. %d emails unsent.',
task_id, email_id, num_pending)
# Update counters with progress to date, counting unsent emails as failures,
# and set the state to FAILURE:
subtask_status.increment(failed=num_pending, state=FAILURE)
return subtask_status, exc
except Exception as exc:
# Errors caught here cause the email to be retried. The entire task is actually retried
# without popping the current recipient off of the existing list.
# These are unexpected errors. Since they might be due to a temporary condition that might
# succeed on retry, we give them a retry.
dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])
log.exception('Task %s: email with id %d caused send_course_email task to fail with unexpected exception. Generating retry.',
task_id, email_id)
# Increment the "retried_withmax" counter, update other counters with progress to date,
# and set the state to RETRY:
subtask_status.increment(retried_withmax=1, state=RETRY)
return _submit_for_retry(
entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False
)
else:
# All went well. Update counters with progress to date,
# and set the state to SUCCESS:
subtask_status.increment(state=SUCCESS)
# Successful completion is marked by an exception value of None.
return subtask_status, None
finally:
# Clean up at the end.
connection.close()
def _get_current_task():
"""
Stub to make it easier to test without actually running Celery.
This is a wrapper around celery.current_task, which provides access
to the top of the stack of Celery's tasks. When running tests, however,
it doesn't seem to work to mock current_task directly, so this wrapper
is used to provide a hook to mock in tests, while providing the real
`current_task` in production.
"""
return current_task
def _submit_for_retry(entry_id, email_id, to_list, global_email_context, current_exception, subtask_status, skip_retry_max=False):
"""
Helper function to requeue a task for retry, using the new version of arguments provided.
Inputs are the same as for running a task, plus two extra indicating the state at the time of retry.
These include the `current_exception` that the task encountered that is causing the retry attempt,
and the `subtask_status` that is to be returned. A third extra argument `skip_retry_max`
indicates whether the current retry should be subject to a maximum test.
Returns a tuple of two values:
* First value is a dict which represents current progress. Keys are:
'task_id' : id of subtask. This is used to pass task information across retries.
'attempted' : number of attempts -- should equal succeeded plus failed
'succeeded' : number that succeeded in processing
'skipped' : number that were not processed.
'failed' : number that failed during processing
'retried_nomax' : number of times the subtask has been retried for conditions that
should not have a maximum count applied
'retried_withmax' : number of times the subtask has been retried for conditions that
should have a maximum count applied
'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)
* Second value is an exception returned by the innards of the method. If the retry was
successfully submitted, this value will be the RetryTaskError that retry() returns.
Otherwise, it (ought to be) the current_exception passed in.
"""
task_id = subtask_status.task_id
log.info("Task %s: Successfully sent to %s users; failed to send to %s users (and skipped %s users)",
task_id, subtask_status.succeeded, subtask_status.failed, subtask_status.skipped)
# Calculate time until we retry this task (in seconds):
# The value for max_retries is increased by the number of times an "infinite-retry" exception
# has been retried. We want the regular retries to trigger max-retry checking, but not these
# special retries. So we count them separately.
max_retries = _get_current_task().max_retries + subtask_status.retried_nomax
base_delay = _get_current_task().default_retry_delay
if skip_retry_max:
# once we reach five retries, don't increase the countdown further.
retry_index = min(subtask_status.retried_nomax, 5)
exception_type = 'sending-rate'
# if we have a cap, after all, apply it now:
if hasattr(settings, 'BULK_EMAIL_INFINITE_RETRY_CAP'):
retry_cap = settings.BULK_EMAIL_INFINITE_RETRY_CAP + subtask_status.retried_withmax
max_retries = min(max_retries, retry_cap)
else:
retry_index = subtask_status.retried_withmax
exception_type = 'transient'
# Skew the new countdown value by a random factor, so that not all
# retries are deferred by the same amount.
countdown = ((2 ** retry_index) * base_delay) * random.uniform(.75, 1.25)
log.warning('Task %s: email with id %d not delivered due to %s error %s, retrying send to %d recipients in %s seconds (with max_retry=%s)',
task_id, email_id, exception_type, current_exception, len(to_list), countdown, max_retries)
# we make sure that we update the InstructorTask with the current subtask status
# *before* actually calling retry(), to be sure that there is no race
# condition between this update and the update made by the retried task.
update_subtask_status(entry_id, task_id, subtask_status)
# Now attempt the retry. If it succeeds, it returns a RetryTaskError that
# needs to be returned back to Celery. If it fails, we return the existing
# exception.
try:
send_course_email.retry(
args=[
entry_id,
email_id,
to_list,
global_email_context,
subtask_status.to_dict(),
],
exc=current_exception,
countdown=countdown,
max_retries=max_retries,
throw=True,
)
except RetryTaskError as retry_error:
# If the retry call is successful, update with the current progress:
log.exception(u'Task %s: email with id %d caused send_course_email task to retry.',
task_id, email_id)
return subtask_status, retry_error
except Exception as retry_exc:
# If there are no more retries, because the maximum has been reached,
# we expect the original exception to be raised. We catch it here
# (and put it in retry_exc just in case it's different, but it shouldn't be),
# and update status as if it were any other failure. That means that
# the recipients still in the to_list are counted as failures.
log.exception(u'Task %s: email with id %d caused send_course_email task to fail to retry. To list: %s',
task_id, email_id, [i['email'] for i in to_list])
num_failed = len(to_list)
        subtask_status.increment(failed=num_failed, state=FAILURE)
return subtask_status, retry_exc
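# Illustrative backoff arithmetic for the countdown computed in _submit_for_retry
# above (assuming a default_retry_delay of 30s and a third "transient" retry,
# i.e. retry_index == 3): (2 ** 3) * 30 = 240 seconds, skewed by the random
# factor to somewhere between 180 and 300 seconds.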
def _statsd_tag(course_title):
"""
Prefix the tag we will use for DataDog.
The tag also gets modified by our dogstats_wrapper code.
"""
return u"course_email:{0}".format(course_title)
| agpl-3.0 | -6,050,697,556,803,473,000 | 48.186246 | 143 | 0.673541 | false |
ywarezk/nerdeez-tastypie | tests/core/tests/api.py | 6 | 7489 | from django.contrib.auth.models import User
from django.http import HttpRequest
from django.test import TestCase
from tastypie.api import Api
from tastypie.exceptions import NotRegistered, BadRequest
from tastypie.resources import Resource, ModelResource
from tastypie.serializers import Serializer
from core.models import Note
class NoteResource(ModelResource):
class Meta:
resource_name = 'notes'
queryset = Note.objects.filter(is_active=True)
class UserResource(ModelResource):
class Meta:
resource_name = 'users'
queryset = User.objects.all()
class ApiTestCase(TestCase):
urls = 'core.tests.api_urls'
def test_register(self):
api = Api()
self.assertEqual(len(api._registry), 0)
api.register(NoteResource())
self.assertEqual(len(api._registry), 1)
self.assertEqual(sorted(api._registry.keys()), ['notes'])
api.register(UserResource())
self.assertEqual(len(api._registry), 2)
self.assertEqual(sorted(api._registry.keys()), ['notes', 'users'])
api.register(UserResource())
self.assertEqual(len(api._registry), 2)
self.assertEqual(sorted(api._registry.keys()), ['notes', 'users'])
self.assertEqual(len(api._canonicals), 2)
api.register(UserResource(), canonical=False)
self.assertEqual(len(api._registry), 2)
self.assertEqual(sorted(api._registry.keys()), ['notes', 'users'])
self.assertEqual(len(api._canonicals), 2)
def test_global_registry(self):
api = Api()
self.assertEqual(len(api._registry), 0)
api.register(NoteResource())
self.assertEqual(len(api._registry), 1)
self.assertEqual(sorted(api._registry.keys()), ['notes'])
api.register(UserResource())
self.assertEqual(len(api._registry), 2)
self.assertEqual(sorted(api._registry.keys()), ['notes', 'users'])
api.register(UserResource())
self.assertEqual(len(api._registry), 2)
self.assertEqual(sorted(api._registry.keys()), ['notes', 'users'])
self.assertEqual(len(api._canonicals), 2)
api.register(UserResource(), canonical=False)
self.assertEqual(len(api._registry), 2)
self.assertEqual(sorted(api._registry.keys()), ['notes', 'users'])
self.assertEqual(len(api._canonicals), 2)
def test_unregister(self):
api = Api()
api.register(NoteResource())
api.register(UserResource(), canonical=False)
self.assertEqual(sorted(api._registry.keys()), ['notes', 'users'])
self.assertEqual(len(api._canonicals), 1)
api.unregister('users')
self.assertEqual(len(api._registry), 1)
self.assertEqual(sorted(api._registry.keys()), ['notes'])
self.assertEqual(len(api._canonicals), 1)
api.unregister('notes')
self.assertEqual(len(api._registry), 0)
self.assertEqual(sorted(api._registry.keys()), [])
api.unregister('users')
self.assertEqual(len(api._registry), 0)
self.assertEqual(sorted(api._registry.keys()), [])
def test_canonical_resource_for(self):
api = Api()
note_resource = NoteResource()
user_resource = UserResource()
api.register(note_resource)
api.register(user_resource)
self.assertEqual(len(api._canonicals), 2)
self.assertEqual(isinstance(api.canonical_resource_for('notes'), NoteResource), True)
api_2 = Api()
api.unregister(user_resource._meta.resource_name)
self.assertRaises(NotRegistered, api.canonical_resource_for, 'users')
def test_urls(self):
api = Api()
api.register(NoteResource())
api.register(UserResource())
patterns = api.urls
self.assertEqual(len(patterns), 3)
self.assertEqual(sorted([pattern.name for pattern in patterns if hasattr(pattern, 'name')]), ['api_v1_top_level'])
self.assertEqual([[pattern.name for pattern in include.url_patterns if hasattr(pattern, 'name')] for include in patterns if hasattr(include, 'reverse_dict')], [['api_dispatch_list', 'api_get_schema', 'api_get_multiple', 'api_dispatch_detail'], ['api_dispatch_list', 'api_get_schema', 'api_get_multiple', 'api_dispatch_detail']])
api = Api(api_name='v2')
api.register(NoteResource())
api.register(UserResource())
patterns = api.urls
self.assertEqual(len(patterns), 3)
self.assertEqual(sorted([pattern.name for pattern in patterns if hasattr(pattern, 'name')]), ['api_v2_top_level'])
self.assertEqual([[pattern.name for pattern in include.url_patterns if hasattr(pattern, 'name')] for include in patterns if hasattr(include, 'reverse_dict')], [['api_dispatch_list', 'api_get_schema', 'api_get_multiple', 'api_dispatch_detail'], ['api_dispatch_list', 'api_get_schema', 'api_get_multiple', 'api_dispatch_detail']])
def test_top_level(self):
api = Api()
api.register(NoteResource())
api.register(UserResource())
request = HttpRequest()
resp = api.top_level(request)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content.decode('utf-8'), '{"notes": {"list_endpoint": "/api/v1/notes/", "schema": "/api/v1/notes/schema/"}, "users": {"list_endpoint": "/api/v1/users/", "schema": "/api/v1/users/schema/"}}')
def test_top_level_jsonp(self):
api = Api()
api.register(NoteResource())
api.register(UserResource())
request = HttpRequest()
request.META = {'HTTP_ACCEPT': 'text/javascript'}
request.GET = {'callback': 'foo'}
resp = api.top_level(request)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp['content-type'].split(';')[0], 'text/javascript')
self.assertEqual(resp.content.decode('utf-8'), 'foo({"notes": {"list_endpoint": "/api/v1/notes/", "schema": "/api/v1/notes/schema/"}, "users": {"list_endpoint": "/api/v1/users/", "schema": "/api/v1/users/schema/"}})')
request = HttpRequest()
request.META = {'HTTP_ACCEPT': 'text/javascript'}
request.GET = {'callback': ''}
try:
resp = api.top_level(request)
self.fail("Broken callback didn't fail!")
except BadRequest:
# Regression: We expect this, which is fine, but this used to
# be an import error.
pass
def test_custom_api_serializer(self):
"""Confirm that an Api can use a custom serializer"""
# Origin: https://github.com/toastdriven/django-tastypie/pull/817
class JSONSerializer(Serializer):
formats = ('json', )
api = Api(serializer_class=JSONSerializer)
api.register(NoteResource())
request = HttpRequest()
request.META = {'HTTP_ACCEPT': 'text/javascript'}
resp = api.top_level(request)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp['content-type'], 'application/json',
msg="Expected application/json response but received %s" % resp['content-type'])
request = HttpRequest()
request.META = {'HTTP_ACCEPT': 'application/xml'}
resp = api.top_level(request)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp['content-type'], 'application/json',
msg="Expected application/json response but received %s" % resp['content-type'])
| bsd-3-clause | 7,010,847,839,818,804,000 | 39.923497 | 336 | 0.630658 | false |
sam-m888/gprime | gprime/filters/rules/family/_hassourcecount.py | 1 | 1794 | #
# gPrime - A web-based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from .._hassourcecountbase import HasSourceCountBase
#-------------------------------------------------------------------------
# "Families having sources"
#-------------------------------------------------------------------------
class HasSourceCount(HasSourceCountBase):
"""Families with sources"""
name = _('Families with <count> sources')
description = _("Matches families with a certain number of sources connected to it")
| gpl-2.0 | -539,494,350,069,998,900 | 38 | 88 | 0.557971 | false |
tectronics/enso | enso/commands/manager.py | 7 | 10908 | # Copyright (c) 2008, Humanized, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# enso.commands.manager
#
# ----------------------------------------------------------------------------
"""
The CommandManager singleton.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
import logging
from enso.commands.interfaces import CommandExpression, CommandObject
from enso.commands.interfaces import AbstractCommandFactory
from enso.commands.factories import GenericPrefixFactory
# ----------------------------------------------------------------------------
# The Command Manager
# ----------------------------------------------------------------------------
class CommandManager:
"""
Provides an interface to register and retrieve all commands.
Allows client code to register new command implementations, find
suggestions, and retrieve command objects.
"""
__instance = None
@classmethod
def get( cls ):
if not cls.__instance:
cls.__instance = cls()
return cls.__instance
CMD_KEY = CommandExpression( "{all named commands}" )
def __init__( self ):
"""
Initializes the command manager.
"""
self.__cmdObjReg = CommandObjectRegistry()
self.__cmdFactoryDict = {
self.CMD_KEY : self.__cmdObjReg,
}
def registerCommand( self, cmdName, cmdObj ):
"""
Called to register a new command with the command manager.
"""
try:
cmdExpr = CommandExpression( cmdName )
except AssertionError, why:
logging.error( "Could not register %s : %s "
% ( cmdName, why ) )
raise
if cmdExpr.hasArgument():
# The command expression has an argument; it is a command
# with an argument.
assert isinstance( cmdObj, AbstractCommandFactory )
assert not self.__cmdFactoryDict.has_key( cmdExpr )
self.__cmdFactoryDict[ cmdExpr ] = cmdObj
else:
# The command expression has no argument; it is a
# simple command with an exact name.
assert isinstance( cmdObj, CommandObject ), \
"Could not register %s" % cmdName
self.__cmdObjReg.addCommandObj( cmdObj, cmdExpr )
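    # Illustrative examples (hypothetical command names, not from this file): a
    # plain command such as "empty recycle bin" is registered as a CommandObject
    # and goes into the CommandObjectRegistry, whereas an expression with an
    # argument such as "open {file}" must be registered with an
    # AbstractCommandFactory that resolves the argument text at runtime.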
def unregisterCommand( self, cmdName ):
cmdFound = False
for cmdExpr in self.__cmdFactoryDict.keys():
if str(cmdExpr) == cmdName:
del self.__cmdFactoryDict[cmdExpr]
cmdFound = True
break
if not cmdFound:
self.__cmdObjReg.removeCommandObj( cmdName )
cmdFound = True
if not cmdFound:
raise RuntimeError( "Command '%s' does not exist." % cmdName )
def getCommandExpression( self, commandName ):
"""
        Returns the unique command expression that is associated with
commandName. For example, if commandName is 'open emacs', and
the command expression was 'open {file}', then a command expression
object for 'open {file}' will be returned.
"""
commands = []
for expr in self.__cmdFactoryDict.iterkeys():
if expr.matches( commandName ):
# This expression matches commandName; try to fetch a
# command object from the corresponding factory.
cmd = self.__cmdFactoryDict[expr].getCommandObj( commandName )
if expr == self.CMD_KEY and cmd != None:
commands.append( ( commandName, commandName ) )
elif cmd != None:
# The factory returned a non-nil command object.
# Make sure that nothing else has matched this
# commandName.
commands.append( (expr.getPrefix(), expr) )
if len(commands) == 0:
return None
else:
# If there are several matching commands, return only
# the alphabetically first.
commands.sort( lambda a,b : cmp( a[0], b[0] ) )
return commands[0][1]
def getCommand( self, commandName ):
"""
Returns the unique command with commandName, based on the
registered CommandObjects and the registered CommandFactories.
If no command matches, returns None explicitly.
"""
commands = []
for expr in self.__cmdFactoryDict.iterkeys():
if expr.matches( commandName ):
# This expression matches commandName; try to fetch a
# command object from the corresponding factory.
cmd = self.__cmdFactoryDict[expr].getCommandObj( commandName )
if cmd != None:
# The factory returned a non-nil command object.
commands.append( ( expr, cmd ) )
if len( commands ) == 0:
return None
else:
# If there are several matching commands, return only
# the alphabetically first.
prefixes = [ (e.getPrefix(),c) for (e,c) in commands ]
prefixes.sort( lambda a,b : cmp( a[0], b[0] ) )
return prefixes[0][1]
def autoComplete( self, userText ):
"""
Returns the best match it can find to userText, or None.
"""
completions = []
# Check each of the command factories for a match.
for expr in self.__cmdFactoryDict.iterkeys():
if expr.matches( userText ):
cmdFact = self.__cmdFactoryDict[expr]
completion = cmdFact.autoComplete( userText )
if completion != None:
completions.append( completion )
if len( completions ) == 0:
return None
else:
completions.sort( lambda a,b : cmp( a.toText(), b.toText() ) )
return completions[0]
def retrieveSuggestions( self, userText ):
"""
Returns an unsorted list of suggestions.
"""
suggestions = []
# Extend the suggestions using each of the command factories
for expr in self.__cmdFactoryDict.iterkeys():
if expr.matches( userText ):
factory = self.__cmdFactoryDict[expr]
suggestions += factory.retrieveSuggestions( userText )
return suggestions
def getCommands( self ):
"""
Returns a dictionary of command expression strings and their
associated implementations (command objects or factories).
"""
# Get a dictionary form of the command object registry:
cmdDict = self.__cmdObjReg.getDict()
# Extend the dictionary to cover the command factories.
for expr in self.__cmdFactoryDict.keys():
if expr == self.CMD_KEY:
# This is the command object registry; pass.
pass
else:
# Cast the expression as a string.
cmdDict[ str(expr) ] = self.__cmdFactoryDict[expr]
return cmdDict
# ----------------------------------------------------------------------------
# A CommandObject Registry
# ----------------------------------------------------------------------------
class CommandAlreadyRegisteredError( Exception ):
"""
Error raised when someone tries to register two commands under
the same name with the registry.
"""
pass
class CommandObjectRegistry( GenericPrefixFactory ):
"""
Class for efficiently storing and searching a large number of
commands (where each command is an object with a corresponding
command name).
"""
PREFIX = ""
def __init__( self ):
"""
Initialize the command registry.
"""
GenericPrefixFactory.__init__( self )
self.__cmdObjDict = {}
self.__dictTouched = False
def update( self ):
pass
def getDict( self ):
return self.__cmdObjDict
def addCommandObj( self, command, cmdExpr ):
"""
Adds command to the registry under the name str(cmdExpr).
"""
assert isinstance( cmdExpr, CommandExpression )
assert not cmdExpr.hasArgument()
cmdName = str(cmdExpr)
if self.__cmdObjDict.has_key( cmdName ):
raise CommandAlreadyRegisteredError()
self.__cmdObjDict[ cmdName ] = command
self.__dictTouched = True
self._addPostfix( cmdName )
def removeCommandObj( self, cmdExpr ):
cmdFound = False
if self.__cmdObjDict.has_key( cmdExpr ):
del self.__cmdObjDict[cmdExpr]
cmdFound = True
if cmdFound:
self.__dictTouched = True
self._removePostfix( cmdExpr )
else:
raise RuntimeError( "Command object '%s' not found." % cmdExpr )
def getCommandObj( self, cmdNameString ):
"""
Returns the object corresponding to cmdNameString.
NOTE: This will raise a KeyError if cmdNameString is not a
valid command name.
"""
try:
return self.__cmdObjDict[ cmdNameString ]
except KeyError:
return None
| bsd-3-clause | 5,743,637,388,559,139,000 | 32.981308 | 79 | 0.567198 | false |
obarquero/intro_machine_learning_udacity | Projects/ud120-projects-master/decision_tree/dt_author_id.py | 1 | 1243 | #!/usr/bin/python
"""
This is the code to accompany the Lesson 3 (decision tree) mini-project.
Use a Decision Tree to identify emails from the Enron corpus by author:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
#########################################################
### your code goes here ###
from sklearn import tree
from sklearn.metrics import accuracy_score
clf = tree.DecisionTreeClassifier(min_samples_split=40)
#train
t0 = time()
clf.fit(features_train,labels_train)
print "training time:", round(time()-t0,3), "s"
#predict labels for the test data set
t0 = time()
y_pred = clf.predict(features_test)
print "predicting time:", round(time()-t0,3), "s"
#compute the accuracy
acc = accuracy_score(labels_test,y_pred)
print "accuracy: ", round(acc,4)
#test_time = time.clock() - start_time
#########################################################
| gpl-2.0 | 616,414,235,818,283,400 | 23.86 | 79 | 0.654063 | false |
RedHatQE/cfme_tests | cfme/control/explorer/conditions.py | 1 | 12415 | # -*- coding: utf-8 -*-
import attr
from navmazing import NavigateToAttribute
from navmazing import NavigateToSibling
from widgetastic.utils import WaitFillViewStrategy
from widgetastic.widget import Text
from widgetastic.widget import TextInput
from widgetastic.widget import Widget
from widgetastic_patternfly import Button
from widgetastic_patternfly import Input
from . import ControlExplorerView
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.utils import ParamClassName
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.appliance.implementations.ui import navigator
from cfme.utils.blockers import BZ
from cfme.utils.pretty import Pretty
from cfme.utils.update import Updateable
from widgetastic_manageiq.expression_editor import ExpressionEditor
class Expression(Widget):
ROOT = "div#condition_info_div"
def __init__(self, parent, type_, logger=None):
Widget.__init__(self, parent, logger=logger)
if type_ not in ["Scope", "Expression"]:
raise ValueError("Type should be Scope or Expression only")
else:
self.type = type_
def __locator__(self):
return self.ROOT
@property
def text_list(self):
return self.browser.element(self).text.split("\n")
@property
def text(self):
"""
        In the Condition details view, Scope and Expression don't have any locator, so we
        have to scrape the whole text of the parent div and split it by "\\n". After that,
        text_list contains something like this:
.. code-block:: python
[u'Scope',
u'COUNT OF VM and Instance.Files > 150',
u'Expression',
u'VM and Instance : Boot Time BEFORE "03/04/2014 00:00"',
u'Notes',
u'No notes have been entered.',
u'Assigned to Policies',
u'This Condition is not assigned to any Policies.']
        To get the value of Scope or Expression, we first find its index in the list and then
        take the next member.
"""
index = self.text_list.index(self.type)
return self.text_list[index + 1]
def read(self):
return self.text
class ConditionsAllView(ControlExplorerView):
title = Text("#explorer_title_text")
@property
def is_displayed(self):
return (
self.in_control_explorer and
# there is a BZ 1683697 that some Condition view is shown for All Conditions
self.title.text == "All Conditions" if not BZ(1683697).blocks else True and
self.conditions.is_opened and
self.conditions.tree.currently_selected == ["All Conditions"]
)
class ConditionFormCommon(ControlExplorerView):
title = Text("#explorer_title_text")
description = Input(name="description")
scope = ExpressionEditor("//button[normalize-space(.)='Define Scope']")
expression = ExpressionEditor("//button[normalize-space(.)='Define Expression']")
notes = TextInput(name="notes")
cancel_button = Button("Cancel")
class NewConditionView(ConditionFormCommon):
add_button = Button("Add")
@property
def is_displayed(self):
expected_tree = [
"All Conditions",
"{} Conditions".format(self.context["object"].TREE_NODE)
]
return (
self.in_control_explorer and
self.title.text == "Adding a new Condition" and
self.conditions.is_opened and
self.conditions.tree.currently_selected == expected_tree
)
class ConditionClassAllView(ControlExplorerView):
title = Text("#explorer_title_text")
@property
def is_displayed(self):
return (
self.in_control_explorer and
self.title.text == "All {} Conditions".format(self.context["object"].FIELD_VALUE) and
self.conditions.is_opened and
self.conditions.tree.currently_selected ==
["All Conditions", "{} Conditions".format(self.context["object"].TREE_NODE)]
)
class EditConditionView(ConditionFormCommon):
fill_strategy = WaitFillViewStrategy()
title = Text("#explorer_title_text")
save_button = Button("Save")
cancel_button = Button("Cancel")
reset_button = Button("Reset")
@property
def is_displayed(self):
return (
self.in_control_explorer and
self.title.text == 'Editing {} Condition "{}"'.format(
self.context["object"].FIELD_VALUE,
self.context["object"].description
) and
self.conditions.is_opened and
self.conditions.tree.currently_selected == [
"All Conditions",
"{} Conditions".format(self.context["object"].TREE_NODE),
self.context["object"].description
]
)
class ConditionDetailsView(ControlExplorerView):
title = Text("#explorer_title_text")
scope = Expression("Scope")
expression = Expression("Expression")
@property
def is_displayed(self):
return (
self.in_control_explorer and
self.title.text == '{} Condition "{}"'.format(self.context["object"].FIELD_VALUE,
self.context["object"].description) and
self.conditions.is_opened
# TODO add in a check against the tree once BZ 1683697 is fixed
)
class ConditionPolicyDetailsView(ControlExplorerView):
title = Text("#explorer_title_text")
@property
def is_displayed(self):
return (
self.in_control_explorer and
self.title.text == '{} Condition "{}"'.format(
self.context["object"].context_policy.PRETTY,
self.context["object"].description) and
self.policies.is_opened and
self.policies.tree.currently_selected == [
"All Policies",
"{} Policies".format(self.context["object"].context_policy.TYPE),
"{} {} Policies".format(self.context["object"].context_policy.TREE_NODE,
self.context["object"].context_policy.TYPE),
self.context["object"].context_policy.description,
self.context["object"].description
]
)
@attr.s
class BaseCondition(BaseEntity, Updateable, Pretty):
TREE_NODE = None
PRETTY = None
FIELD_VALUE = None
_param_name = ParamClassName('description')
description = attr.ib()
expression = attr.ib(default=None)
scope = attr.ib(default=None)
notes = attr.ib(default=None)
def update(self, updates):
"""Update this Condition in UI.
Args:
updates: Provided by update() context manager.
"""
view = navigate_to(self, "Edit")
view.fill(updates)
view.save_button.click()
view = self.create_view(ConditionDetailsView, override=updates, wait="10s")
view.flash.assert_success_message(
'Condition "{}" was saved'.format(updates.get("description", self.description))
)
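    # Illustrative usage (a sketch, assuming the Updateable machinery from
    # cfme.utils.update): callers typically drive this through the update()
    # context manager, e.g.
    #     with update(condition):
    #         condition.description = "new description"
    # which collects the changed attributes and passes them here as `updates`.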
def delete(self, cancel=False):
"""Delete this Condition in UI.
Args:
cancel: Whether to cancel the deletion (default False).
"""
view = navigate_to(self, "Details")
view.configuration.item_select("Delete this {} Condition".format(self.FIELD_VALUE),
handle_alert=not cancel)
if cancel:
assert view.is_displayed
view.flash.assert_no_error()
else:
view = self.create_view(ConditionClassAllView, wait="20s")
view.flash.assert_success_message('Condition "{}": Delete successful'.format(
self.description))
def read_expression(self):
view = navigate_to(self, "Details")
assert view.is_displayed
return view.expression.text
def read_scope(self):
view = navigate_to(self, "Details")
assert view.is_displayed
return view.scope.text
@property
def exists(self):
"""Check existence of this Condition.
Returns: :py:class:`bool` signalizing the presence of the Condition in the database.
"""
try:
self.appliance.rest_api.collections.conditions.get(description=self.description)
return True
except ValueError:
return False
@attr.s
class ConditionCollection(BaseCollection):
ENTITY = BaseCondition
def create(self, condition_class, description, expression=None, scope=None, notes=None):
condition = condition_class(self, description, expression=expression, scope=scope,
notes=notes)
view = navigate_to(condition, "Add")
# first fill description, expression, and notes
view.fill({
"description": condition.description,
"expression": condition.expression,
"scope": condition.scope,
"notes": condition.notes
})
view.wait_displayed()
view.add_button.click()
view = condition.create_view(ConditionDetailsView, wait="10s")
view.flash.assert_success_message('Condition "{}" was added'.format(condition.description))
return condition
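    # Illustrative usage (hypothetical values): something along the lines of
    #     collection.create(VMCondition, "vm-cond", scope="...", notes="demo")
    # fills the Add form, submits it and returns the new VMCondition entity;
    # the expression/scope strings are whatever the ExpressionEditor widget accepts.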
def all(self):
raise NotImplementedError
@navigator.register(ConditionCollection, "All")
class AllConditions(CFMENavigateStep):
VIEW = ConditionsAllView
prerequisite = NavigateToAttribute("appliance.server", "ControlExplorer")
def step(self, *args, **kwargs):
self.prerequisite_view.conditions.tree.click_path("All Conditions")
@navigator.register(BaseCondition, "Add")
class ConditionNew(CFMENavigateStep):
VIEW = NewConditionView
prerequisite = NavigateToAttribute("parent", "All")
def step(self, *args, **kwargs):
self.prerequisite_view.conditions.tree.click_path(
"All Conditions",
"{} Conditions".format(self.obj.TREE_NODE)
)
self.prerequisite_view.configuration.item_select(
"Add a New {} Condition".format(self.obj.PRETTY))
@navigator.register(BaseCondition, "Edit")
class ConditionEdit(CFMENavigateStep):
VIEW = EditConditionView
prerequisite = NavigateToSibling("Details")
def step(self, *args, **kwargs):
self.view.conditions.tree.click_path(
"All Conditions",
"{} Conditions".format(self.obj.TREE_NODE),
self.obj.description
)
self.prerequisite_view.configuration.item_select("Edit this Condition")
@navigator.register(BaseCondition, "Details")
class ConditionDetails(CFMENavigateStep):
VIEW = ConditionDetailsView
prerequisite = NavigateToAttribute("parent", "All")
def step(self, *args, **kwargs):
self.prerequisite_view.conditions.tree.click_path(
"All Conditions",
"{} Conditions".format(self.obj.TREE_NODE),
self.obj.description
)
@navigator.register(BaseCondition, "Details in policy")
class PolicyConditionDetails(CFMENavigateStep):
VIEW = ConditionPolicyDetailsView
prerequisite = NavigateToAttribute("appliance.server", "ControlExplorer")
def step(self, *args, **kwargs):
self.prerequisite_view.policies.tree.click_path(
"All Policies",
"{} Policies".format(self.obj.context_policy.TYPE),
"{} {} Policies".format(
self.obj.context_policy.TREE_NODE,
self.obj.context_policy.TYPE
),
self.obj.context_policy.description,
self.obj.description
)
class HostCondition(BaseCondition):
TREE_NODE = "Host"
PRETTY = FIELD_VALUE = "Host / Node"
class VMCondition(BaseCondition):
FIELD_VALUE = TREE_NODE = "VM and Instance"
PRETTY = "VM"
class ReplicatorCondition(BaseCondition):
TREE_NODE = "Replicator"
PRETTY = FIELD_VALUE = "Container Replicator"
class PodCondition(BaseCondition):
TREE_NODE = "Pod"
PRETTY = FIELD_VALUE = "Container Pod"
class ContainerNodeCondition(BaseCondition):
TREE_NODE = PRETTY = FIELD_VALUE = "Container Node"
class ContainerImageCondition(BaseCondition):
TREE_NODE = PRETTY = FIELD_VALUE = "Container Image"
class ProviderCondition(BaseCondition):
TREE_NODE = PRETTY = FIELD_VALUE = "Provider"
| gpl-2.0 | -5,987,522,244,085,307,000 | 31.163212 | 99 | 0.638824 | false |
gautamMalu/rootfs_xen_arndale | usr/lib/python3.4/locale.py | 3 | 74534 | """ Locale support.
The module provides low-level access to the C lib's locale APIs
and adds high level number formatting APIs as well as a locale
aliasing engine to complement these.
The aliasing engine includes support for many commonly used locale
names and maps them to values suitable for passing to the C lib's
setlocale() function. It also includes default encodings for all
supported locale names.
"""
import sys
import encodings
import encodings.aliases
import re
import collections
from builtins import str as _builtin_str
import functools
# Try importing the _locale module.
#
# If this fails, fall back on a basic 'C' locale emulation.
# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before
# trying the import. So __all__ is also fiddled at the end of the file.
__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error",
"setlocale", "resetlocale", "localeconv", "strcoll", "strxfrm",
"str", "atof", "atoi", "format", "format_string", "currency",
"normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY",
"LC_NUMERIC", "LC_ALL", "CHAR_MAX"]
def _strcoll(a,b):
""" strcoll(string,string) -> int.
Compares two strings according to the locale.
"""
return (a > b) - (a < b)
def _strxfrm(s):
""" strxfrm(string) -> string.
Returns a string that can be used for locale-aware comparisons.
"""
return s
try:
from _locale import *
except ImportError:
# Locale emulation
CHAR_MAX = 127
LC_ALL = 6
LC_COLLATE = 3
LC_CTYPE = 0
LC_MESSAGES = 5
LC_MONETARY = 4
LC_NUMERIC = 1
LC_TIME = 2
Error = ValueError
def localeconv():
""" localeconv() -> dict.
Returns numeric and monetary locale-specific parameters.
"""
# 'C' locale default values
return {'grouping': [127],
'currency_symbol': '',
'n_sign_posn': 127,
'p_cs_precedes': 127,
'n_cs_precedes': 127,
'mon_grouping': [],
'n_sep_by_space': 127,
'decimal_point': '.',
'negative_sign': '',
'positive_sign': '',
'p_sep_by_space': 127,
'int_curr_symbol': '',
'p_sign_posn': 127,
'thousands_sep': '',
'mon_thousands_sep': '',
'frac_digits': 127,
'mon_decimal_point': '',
'int_frac_digits': 127}
def setlocale(category, value=None):
""" setlocale(integer,string=None) -> string.
Activates/queries locale processing.
"""
if value not in (None, '', 'C'):
raise Error('_locale emulation only supports "C" locale')
return 'C'
# These may or may not exist in _locale, so be sure to set them.
if 'strxfrm' not in globals():
strxfrm = _strxfrm
if 'strcoll' not in globals():
strcoll = _strcoll
_localeconv = localeconv
# With this dict, you can override some items of localeconv's return value.
# This is useful for testing purposes.
_override_localeconv = {}
@functools.wraps(_localeconv)
def localeconv():
d = _localeconv()
if _override_localeconv:
d.update(_override_localeconv)
return d
### Number formatting APIs
# Author: Martin von Loewis
# improved by Georg Brandl
# Iterate over grouping intervals
def _grouping_intervals(grouping):
last_interval = None
for interval in grouping:
# if grouping is -1, we are done
if interval == CHAR_MAX:
return
# 0: re-use last group ad infinitum
if interval == 0:
if last_interval is None:
raise ValueError("invalid grouping")
while True:
yield last_interval
yield interval
last_interval = interval
#perform the grouping from right to left
def _group(s, monetary=False):
conv = localeconv()
thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
grouping = conv[monetary and 'mon_grouping' or 'grouping']
if not grouping:
return (s, 0)
if s[-1] == ' ':
stripped = s.rstrip()
right_spaces = s[len(stripped):]
s = stripped
else:
right_spaces = ''
left_spaces = ''
groups = []
for interval in _grouping_intervals(grouping):
if not s or s[-1] not in "0123456789":
# only non-digit characters remain (sign, spaces)
left_spaces = s
s = ''
break
groups.append(s[-interval:])
s = s[:-interval]
if s:
groups.append(s)
groups.reverse()
return (
left_spaces + thousands_sep.join(groups) + right_spaces,
len(thousands_sep) * (len(groups) - 1)
)
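# Illustrative behaviour, assuming an en_US-style locale where thousands_sep
# is "," and grouping is [3, 3, 0]:
#   _group("1234567") -> ("1,234,567", 2)
# The second element is the number of separator characters inserted, so the
# caller can strip the same amount of padding again afterwards.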
# Strip a given amount of excess padding from the given string
def _strip_padding(s, amount):
lpos = 0
while amount and s[lpos] == ' ':
lpos += 1
amount -= 1
rpos = len(s) - 1
while amount and s[rpos] == ' ':
rpos -= 1
amount -= 1
return s[lpos:rpos+1]
_percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
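# For example, the pattern matches the single specifier in "%10.2f" (with
# modifiers "10.2") and in "%(total)d" (with the key group set to "total").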
def format(percent, value, grouping=False, monetary=False, *additional):
"""Returns the locale-aware substitution of a %? specifier
(percent).
additional is for format strings which contain one or more
'*' modifiers."""
# this is only for one-percent-specifier strings and this should be checked
match = _percent_re.match(percent)
    if not match or len(match.group()) != len(percent):
raise ValueError(("format() must be given exactly one %%char "
"format specifier, %s not valid") % repr(percent))
return _format(percent, value, grouping, monetary, *additional)
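# Illustrative usage; the outputs assume an en_US.UTF-8 locale is active:
#   setlocale(LC_ALL, "en_US.UTF-8")
#   format("%d", 1234567, grouping=True)        # -> '1,234,567'
#   format("%.2f", 1234567.891, grouping=True)  # -> '1,234,567.89'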
def _format(percent, value, grouping=False, monetary=False, *additional):
if additional:
formatted = percent % ((value,) + additional)
else:
formatted = percent % value
# floats and decimal ints need special action!
if percent[-1] in 'eEfFgG':
seps = 0
parts = formatted.split('.')
if grouping:
parts[0], seps = _group(parts[0], monetary=monetary)
decimal_point = localeconv()[monetary and 'mon_decimal_point'
or 'decimal_point']
formatted = decimal_point.join(parts)
if seps:
formatted = _strip_padding(formatted, seps)
elif percent[-1] in 'diu':
seps = 0
if grouping:
formatted, seps = _group(formatted, monetary=monetary)
if seps:
formatted = _strip_padding(formatted, seps)
return formatted
def format_string(f, val, grouping=False):
"""Formats a string in the same way that the % formatting would use,
but takes the current locale into account.
Grouping is applied if the third parameter is true."""
percents = list(_percent_re.finditer(f))
new_f = _percent_re.sub('%s', f)
if isinstance(val, collections.Mapping):
new_val = []
for perc in percents:
if perc.group()[-1]=='%':
new_val.append('%')
else:
new_val.append(format(perc.group(), val, grouping))
else:
if not isinstance(val, tuple):
val = (val,)
new_val = []
i = 0
for perc in percents:
if perc.group()[-1]=='%':
new_val.append('%')
else:
starcount = perc.group('modifiers').count('*')
new_val.append(_format(perc.group(),
val[i],
grouping,
False,
*val[i+1:i+1+starcount]))
i += (1 + starcount)
val = tuple(new_val)
return new_f % val
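# Illustrative usage (again assuming en_US.UTF-8):
#   format_string("%d items for %s", (1234567, "Alice"), grouping=True)
#   # -> '1,234,567 items for Alice'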
def currency(val, symbol=True, grouping=False, international=False):
"""Formats val according to the currency settings
in the current locale."""
conv = localeconv()
# check for illegal values
digits = conv[international and 'int_frac_digits' or 'frac_digits']
if digits == 127:
raise ValueError("Currency formatting is not possible using "
"the 'C' locale.")
s = format('%%.%if' % digits, abs(val), grouping, monetary=True)
# '<' and '>' are markers if the sign must be inserted between symbol and value
s = '<' + s + '>'
if symbol:
smb = conv[international and 'int_curr_symbol' or 'currency_symbol']
precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes']
separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space']
if precedes:
s = smb + (separated and ' ' or '') + s
else:
s = s + (separated and ' ' or '') + smb
sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
sign = conv[val<0 and 'negative_sign' or 'positive_sign']
if sign_pos == 0:
s = '(' + s + ')'
elif sign_pos == 1:
s = sign + s
elif sign_pos == 2:
s = s + sign
elif sign_pos == 3:
s = s.replace('<', sign)
elif sign_pos == 4:
s = s.replace('>', sign)
else:
# the default if nothing specified;
# this should be the most fitting sign position
s = sign + s
return s.replace('<', '').replace('>', '')
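# Illustrative usage (assuming en_US.UTF-8; the exact symbols and placement
# come from the active locale):
#   currency(1234.57)                 # -> '$1234.57'
#   currency(1234.57, grouping=True)  # -> '$1,234.57'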
def str(val):
"""Convert float to integer, taking the locale into account."""
return format("%.12g", val)
def atof(string, func=float):
"Parses a string as a float according to the locale settings."
#First, get rid of the grouping
ts = localeconv()['thousands_sep']
if ts:
string = string.replace(ts, '')
#next, replace the decimal point with a dot
dd = localeconv()['decimal_point']
if dd:
string = string.replace(dd, '.')
#finally, parse the string
return func(string)
def atoi(str):
"Converts a string to an integer according to the locale settings."
return atof(str, int)
def _test():
setlocale(LC_ALL, "")
#do grouping
    s1 = format("%d", 123456789, 1)
print(s1, "is", atoi(s1))
#standard formatting
s1 = str(3.14)
print(s1, "is", atof(s1))
### Locale name aliasing engine
# Author: Marc-Andre Lemburg, [email protected]
# Various tweaks by Fredrik Lundh <[email protected]>
# store away the low-level version of setlocale (it's
# overridden below)
_setlocale = setlocale
def _replace_encoding(code, encoding):
if '.' in code:
langname = code[:code.index('.')]
else:
langname = code
# Convert the encoding to a C lib compatible encoding string
norm_encoding = encodings.normalize_encoding(encoding)
#print('norm encoding: %r' % norm_encoding)
norm_encoding = encodings.aliases.aliases.get(norm_encoding.lower(),
norm_encoding)
#print('aliased encoding: %r' % norm_encoding)
encoding = norm_encoding
norm_encoding = norm_encoding.lower()
if norm_encoding in locale_encoding_alias:
encoding = locale_encoding_alias[norm_encoding]
else:
norm_encoding = norm_encoding.replace('_', '')
norm_encoding = norm_encoding.replace('-', '')
if norm_encoding in locale_encoding_alias:
encoding = locale_encoding_alias[norm_encoding]
#print('found encoding %r' % encoding)
return langname + '.' + encoding
def _append_modifier(code, modifier):
if modifier == 'euro':
if '.' not in code:
return code + '.ISO8859-15'
_, _, encoding = code.partition('.')
if encoding in ('ISO8859-15', 'UTF-8'):
return code
if encoding == 'ISO8859-1':
return _replace_encoding(code, 'ISO8859-15')
return code + '@' + modifier
def normalize(localename):
""" Returns a normalized locale code for the given locale
name.
The returned locale code is formatted for use with
setlocale().
If normalization fails, the original name is returned
unchanged.
If the given encoding is not known, the function defaults to
the default encoding for the locale code just like setlocale()
does.
"""
# Normalize the locale name and extract the encoding and modifier
code = localename.lower()
if ':' in code:
# ':' is sometimes used as encoding delimiter.
code = code.replace(':', '.')
if '@' in code:
code, modifier = code.split('@', 1)
else:
modifier = ''
if '.' in code:
langname, encoding = code.split('.')[:2]
else:
langname = code
encoding = ''
# First lookup: fullname (possibly with encoding and modifier)
lang_enc = langname
if encoding:
norm_encoding = encoding.replace('-', '')
norm_encoding = norm_encoding.replace('_', '')
lang_enc += '.' + norm_encoding
lookup_name = lang_enc
if modifier:
lookup_name += '@' + modifier
code = locale_alias.get(lookup_name, None)
if code is not None:
return code
#print('first lookup failed')
if modifier:
# Second try: fullname without modifier (possibly with encoding)
code = locale_alias.get(lang_enc, None)
if code is not None:
#print('lookup without modifier succeeded')
if '@' not in code:
return _append_modifier(code, modifier)
if code.split('@', 1)[1].lower() == modifier:
return code
#print('second lookup failed')
if encoding:
# Third try: langname (without encoding, possibly with modifier)
lookup_name = langname
if modifier:
lookup_name += '@' + modifier
code = locale_alias.get(lookup_name, None)
if code is not None:
#print('lookup without encoding succeeded')
if '@' not in code:
return _replace_encoding(code, encoding)
code, modifier = code.split('@', 1)
return _replace_encoding(code, encoding) + '@' + modifier
if modifier:
# Fourth try: langname (without encoding and modifier)
code = locale_alias.get(langname, None)
if code is not None:
#print('lookup without modifier and encoding succeeded')
if '@' not in code:
code = _replace_encoding(code, encoding)
return _append_modifier(code, modifier)
code, defmod = code.split('@', 1)
if defmod.lower() == modifier:
return _replace_encoding(code, encoding) + '@' + defmod
return localename
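# Illustrative lookups against the locale_alias table below:
#   normalize('en_US')       -> 'en_US.ISO8859-1'
#   normalize('en_US.UTF-8') -> 'en_US.UTF-8'
#   normalize('de_DE@euro')  -> 'de_DE.ISO8859-15'
# Unknown names such as 'klingon' are returned unchanged.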
def _parse_localename(localename):
""" Parses the locale code for localename and returns the
result as tuple (language code, encoding).
The localename is normalized and passed through the locale
alias engine. A ValueError is raised in case the locale name
cannot be parsed.
The language code corresponds to RFC 1766. code and encoding
can be None in case the values cannot be determined or are
unknown to this implementation.
"""
code = normalize(localename)
if '@' in code:
# Deal with locale modifiers
code, modifier = code.split('@', 1)
if modifier == 'euro' and '.' not in code:
# Assume Latin-9 for @euro locales. This is bogus,
# since some systems may use other encodings for these
# locales. Also, we ignore other modifiers.
return code, 'iso-8859-15'
if '.' in code:
return tuple(code.split('.')[:2])
elif code == 'C':
return None, None
raise ValueError('unknown locale: %s' % localename)
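# For example (given the alias table below):
#   _parse_localename('en_US.UTF-8') -> ('en_US', 'UTF-8')
#   _parse_localename('de_DE@euro')  -> ('de_DE', 'ISO8859-15')
#   _parse_localename('C')           -> (None, None)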
def _build_localename(localetuple):
""" Builds a locale code from the given tuple (language code,
encoding).
No aliasing or normalizing takes place.
"""
try:
language, encoding = localetuple
if language is None:
language = 'C'
if encoding is None:
return language
else:
return language + '.' + encoding
except (TypeError, ValueError):
raise TypeError('Locale must be None, a string, or an iterable of two strings -- language code, encoding.')
def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')):
""" Tries to determine the default locale settings and returns
them as tuple (language code, encoding).
According to POSIX, a program which has not called
setlocale(LC_ALL, "") runs using the portable 'C' locale.
Calling setlocale(LC_ALL, "") lets it use the default locale as
defined by the LANG variable. Since we don't want to interfere
with the current locale setting we thus emulate the behavior
in the way described above.
To maintain compatibility with other platforms, not only the
LANG variable is tested, but a list of variables given as
envvars parameter. The first found to be defined will be
used. envvars defaults to the search path used in GNU gettext;
it must always contain the variable name 'LANG'.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
try:
# check if it's supported by the _locale module
import _locale
code, encoding = _locale._getdefaultlocale()
except (ImportError, AttributeError):
pass
else:
# make sure the code/encoding values are valid
if sys.platform == "win32" and code and code[:2] == "0x":
# map windows language identifier to language name
code = windows_locale.get(int(code, 0))
# ...add other platform-specific processing here, if
# necessary...
return code, encoding
# fall back on POSIX behaviour
import os
lookup = os.environ.get
for variable in envvars:
localename = lookup(variable,None)
if localename:
if variable == 'LANGUAGE':
localename = localename.split(':')[0]
break
else:
localename = 'C'
return _parse_localename(localename)
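# Illustrative result on a typical POSIX system with LANG=en_US.UTF-8 set and
# no LC_* overrides:
#   getdefaultlocale() -> ('en_US', 'UTF-8')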
def getlocale(category=LC_CTYPE):
""" Returns the current setting for the given locale category as
tuple (language code, encoding).
        category may be one of the LC_* values except LC_ALL. It
defaults to LC_CTYPE.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
localename = _setlocale(category)
if category == LC_ALL and ';' in localename:
raise TypeError('category LC_ALL is not supported')
return _parse_localename(localename)
def setlocale(category, locale=None):
""" Set the locale for the given category. The locale can be
a string, an iterable of two strings (language code and encoding),
or None.
Iterables are converted to strings using the locale aliasing
engine. Locale strings are passed directly to the C lib.
category may be given as one of the LC_* values.
"""
if locale and not isinstance(locale, _builtin_str):
# convert to string
locale = normalize(_build_localename(locale))
return _setlocale(category, locale)
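# Illustrative usage:
#   setlocale(LC_ALL, 'de_DE.UTF-8')       # plain string, passed through
#   setlocale(LC_ALL, ('de_DE', 'UTF-8'))  # iterable, normalized first
#   setlocale(LC_ALL)                      # query the current setting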
def resetlocale(category=LC_ALL):
""" Sets the locale for category to the default setting.
The default setting is determined by calling
getdefaultlocale(). category defaults to LC_ALL.
"""
_setlocale(category, _build_localename(getdefaultlocale()))
if sys.platform.startswith("win"):
# On Win32, this will return the ANSI code page
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using."""
import _bootlocale
return _bootlocale.getpreferredencoding(False)
else:
# On Unix, if CODESET is available, use that.
try:
CODESET
except NameError:
# Fall back to parsing environment variables :-(
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
by looking at environment variables."""
res = getdefaultlocale()[1]
if res is None:
# LANG not set, default conservatively to ASCII
res = 'ascii'
return res
else:
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
according to the system configuration."""
import _bootlocale
if do_setlocale:
oldloc = setlocale(LC_CTYPE)
try:
setlocale(LC_CTYPE, "")
except Error:
pass
result = _bootlocale.getpreferredencoding(False)
if do_setlocale:
setlocale(LC_CTYPE, oldloc)
return result
### Database
#
# The following data was extracted from the locale.alias file which
# comes with X11 and then hand edited removing the explicit encoding
# definitions and adding some more aliases. The file is usually
# available as /usr/lib/X11/locale/locale.alias.
#
#
# The local_encoding_alias table maps lowercase encoding alias names
# to C locale encoding names (case-sensitive). Note that normalize()
# first looks up the encoding in the encodings.aliases dictionary and
# then applies this mapping to find the correct C lib name for the
# encoding.
#
locale_encoding_alias = {
# Mappings for non-standard encoding names used in locale names
'437': 'C',
'c': 'C',
'en': 'ISO8859-1',
'jis': 'JIS7',
'jis7': 'JIS7',
'ajec': 'eucJP',
'koi8c': 'KOI8-C',
'microsoftcp1251': 'CP1251',
'microsoftcp1255': 'CP1255',
'microsoftcp1256': 'CP1256',
'88591': 'ISO8859-1',
'88592': 'ISO8859-2',
'88595': 'ISO8859-5',
'885915': 'ISO8859-15',
# Mappings from Python codec names to C lib encoding names
'ascii': 'ISO8859-1',
'latin_1': 'ISO8859-1',
'iso8859_1': 'ISO8859-1',
'iso8859_10': 'ISO8859-10',
'iso8859_11': 'ISO8859-11',
'iso8859_13': 'ISO8859-13',
'iso8859_14': 'ISO8859-14',
'iso8859_15': 'ISO8859-15',
'iso8859_16': 'ISO8859-16',
'iso8859_2': 'ISO8859-2',
'iso8859_3': 'ISO8859-3',
'iso8859_4': 'ISO8859-4',
'iso8859_5': 'ISO8859-5',
'iso8859_6': 'ISO8859-6',
'iso8859_7': 'ISO8859-7',
'iso8859_8': 'ISO8859-8',
'iso8859_9': 'ISO8859-9',
'iso2022_jp': 'JIS7',
'shift_jis': 'SJIS',
'tactis': 'TACTIS',
'euc_jp': 'eucJP',
'euc_kr': 'eucKR',
'utf_8': 'UTF-8',
'koi8_r': 'KOI8-R',
'koi8_u': 'KOI8-U',
'cp1251': 'CP1251',
'cp1255': 'CP1255',
'cp1256': 'CP1256',
# XXX This list is still incomplete. If you know more
# mappings, please file a bug report. Thanks.
}
for k, v in sorted(locale_encoding_alias.items()):
k = k.replace('_', '')
locale_encoding_alias.setdefault(k, v)
#
# The locale_alias table maps lowercase alias names to C locale names
# (case-sensitive). Encodings are always separated from the locale
# name using a dot ('.'); they should only be given in case the
# language name is needed to interpret the given encoding alias
# correctly (CJK codes often have this need).
#
# Note that the normalize() function which uses this tables
# removes '_' and '-' characters from the encoding part of the
# locale name before doing the lookup. This saves a lot of
# space in the table.
#
# MAL 2004-12-10:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.4
# and older):
#
# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1'
# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP'
# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13'
# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13'
# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11'
# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312'
# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5'
# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5'
#
# MAL 2008-05-30:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.5
# and older):
#
# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2'
# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2'
# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8'
# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
#
# AP 2010-04-12:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.6.5
# and older):
#
# updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'
# updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'
# updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
# updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
# updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin'
# updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin'
# updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8'
# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
#
# SS 2013-12-20:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 3.3.3
# and older):
#
# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'sd' -> '[email protected]' to 'sd_IN.UTF-8'
# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8'
# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
#
# SS 2014-10-01:
# Updated alias mapping with glibc 2.19 supported locales.
locale_alias = {
'a3': 'az_AZ.KOI8-C',
'a3_az': 'az_AZ.KOI8-C',
'a3_az.koic': 'az_AZ.KOI8-C',
'aa_dj': 'aa_DJ.ISO8859-1',
'aa_er': 'aa_ER.UTF-8',
'aa_et': 'aa_ET.UTF-8',
'af': 'af_ZA.ISO8859-1',
'af_za': 'af_ZA.ISO8859-1',
'am': 'am_ET.UTF-8',
'am_et': 'am_ET.UTF-8',
'american': 'en_US.ISO8859-1',
'an_es': 'an_ES.ISO8859-15',
'ar': 'ar_AA.ISO8859-6',
'ar_aa': 'ar_AA.ISO8859-6',
'ar_ae': 'ar_AE.ISO8859-6',
'ar_bh': 'ar_BH.ISO8859-6',
'ar_dz': 'ar_DZ.ISO8859-6',
'ar_eg': 'ar_EG.ISO8859-6',
'ar_in': 'ar_IN.UTF-8',
'ar_iq': 'ar_IQ.ISO8859-6',
'ar_jo': 'ar_JO.ISO8859-6',
'ar_kw': 'ar_KW.ISO8859-6',
'ar_lb': 'ar_LB.ISO8859-6',
'ar_ly': 'ar_LY.ISO8859-6',
'ar_ma': 'ar_MA.ISO8859-6',
'ar_om': 'ar_OM.ISO8859-6',
'ar_qa': 'ar_QA.ISO8859-6',
'ar_sa': 'ar_SA.ISO8859-6',
'ar_sd': 'ar_SD.ISO8859-6',
'ar_sy': 'ar_SY.ISO8859-6',
'ar_tn': 'ar_TN.ISO8859-6',
'ar_ye': 'ar_YE.ISO8859-6',
'arabic': 'ar_AA.ISO8859-6',
'as': 'as_IN.UTF-8',
'as_in': 'as_IN.UTF-8',
'ast_es': 'ast_ES.ISO8859-15',
'ayc_pe': 'ayc_PE.UTF-8',
'az': 'az_AZ.ISO8859-9E',
'az_az': 'az_AZ.ISO8859-9E',
'az_az.iso88599e': 'az_AZ.ISO8859-9E',
'be': 'be_BY.CP1251',
'be@latin': 'be_BY.UTF-8@latin',
'be_bg.utf8': 'bg_BG.UTF-8',
'be_by': 'be_BY.CP1251',
'be_by@latin': 'be_BY.UTF-8@latin',
'bem_zm': 'bem_ZM.UTF-8',
'ber_dz': 'ber_DZ.UTF-8',
'ber_ma': 'ber_MA.UTF-8',
'bg': 'bg_BG.CP1251',
'bg_bg': 'bg_BG.CP1251',
'bho_in': 'bho_IN.UTF-8',
'bn_bd': 'bn_BD.UTF-8',
'bn_in': 'bn_IN.UTF-8',
'bo_cn': 'bo_CN.UTF-8',
'bo_in': 'bo_IN.UTF-8',
'bokmal': 'nb_NO.ISO8859-1',
'bokm\xe5l': 'nb_NO.ISO8859-1',
'br': 'br_FR.ISO8859-1',
'br_fr': 'br_FR.ISO8859-1',
'brx_in': 'brx_IN.UTF-8',
'bs': 'bs_BA.ISO8859-2',
'bs_ba': 'bs_BA.ISO8859-2',
'bulgarian': 'bg_BG.CP1251',
'byn_er': 'byn_ER.UTF-8',
'c': 'C',
'c-french': 'fr_CA.ISO8859-1',
'c.ascii': 'C',
'c.en': 'C',
'c.iso88591': 'en_US.ISO8859-1',
'c.utf8': 'en_US.UTF-8',
'c_c': 'C',
'c_c.c': 'C',
'ca': 'ca_ES.ISO8859-1',
'ca_ad': 'ca_AD.ISO8859-1',
'ca_es': 'ca_ES.ISO8859-1',
'ca_es@valencia': 'ca_ES.ISO8859-15@valencia',
'ca_fr': 'ca_FR.ISO8859-1',
'ca_it': 'ca_IT.ISO8859-1',
'catalan': 'ca_ES.ISO8859-1',
'cextend': 'en_US.ISO8859-1',
'chinese-s': 'zh_CN.eucCN',
'chinese-t': 'zh_TW.eucTW',
'crh_ua': 'crh_UA.UTF-8',
'croatian': 'hr_HR.ISO8859-2',
'cs': 'cs_CZ.ISO8859-2',
'cs_cs': 'cs_CZ.ISO8859-2',
'cs_cz': 'cs_CZ.ISO8859-2',
'csb_pl': 'csb_PL.UTF-8',
'cv_ru': 'cv_RU.UTF-8',
'cy': 'cy_GB.ISO8859-1',
'cy_gb': 'cy_GB.ISO8859-1',
'cz': 'cs_CZ.ISO8859-2',
'cz_cz': 'cs_CZ.ISO8859-2',
'czech': 'cs_CZ.ISO8859-2',
'da': 'da_DK.ISO8859-1',
'da_dk': 'da_DK.ISO8859-1',
'danish': 'da_DK.ISO8859-1',
'dansk': 'da_DK.ISO8859-1',
'de': 'de_DE.ISO8859-1',
'de_at': 'de_AT.ISO8859-1',
'de_be': 'de_BE.ISO8859-1',
'de_ch': 'de_CH.ISO8859-1',
'de_de': 'de_DE.ISO8859-1',
'de_li.utf8': 'de_LI.UTF-8',
'de_lu': 'de_LU.ISO8859-1',
'deutsch': 'de_DE.ISO8859-1',
'doi_in': 'doi_IN.UTF-8',
'dutch': 'nl_NL.ISO8859-1',
'dutch.iso88591': 'nl_BE.ISO8859-1',
'dv_mv': 'dv_MV.UTF-8',
'dz_bt': 'dz_BT.UTF-8',
'ee': 'ee_EE.ISO8859-4',
'ee_ee': 'ee_EE.ISO8859-4',
'eesti': 'et_EE.ISO8859-1',
'el': 'el_GR.ISO8859-7',
'el_cy': 'el_CY.ISO8859-7',
'el_gr': 'el_GR.ISO8859-7',
'el_gr@euro': 'el_GR.ISO8859-15',
'en': 'en_US.ISO8859-1',
'en_ag': 'en_AG.UTF-8',
'en_au': 'en_AU.ISO8859-1',
'en_be': 'en_BE.ISO8859-1',
'en_bw': 'en_BW.ISO8859-1',
'en_ca': 'en_CA.ISO8859-1',
'en_dk': 'en_DK.ISO8859-1',
'en_dl.utf8': 'en_DL.UTF-8',
'en_gb': 'en_GB.ISO8859-1',
'en_hk': 'en_HK.ISO8859-1',
'en_ie': 'en_IE.ISO8859-1',
'en_in': 'en_IN.ISO8859-1',
'en_ng': 'en_NG.UTF-8',
'en_nz': 'en_NZ.ISO8859-1',
'en_ph': 'en_PH.ISO8859-1',
'en_sg': 'en_SG.ISO8859-1',
'en_uk': 'en_GB.ISO8859-1',
'en_us': 'en_US.ISO8859-1',
'en_us@euro@euro': 'en_US.ISO8859-15',
'en_za': 'en_ZA.ISO8859-1',
'en_zm': 'en_ZM.UTF-8',
'en_zw': 'en_ZW.ISO8859-1',
'en_zw.utf8': 'en_ZS.UTF-8',
'eng_gb': 'en_GB.ISO8859-1',
'english': 'en_EN.ISO8859-1',
'english_uk': 'en_GB.ISO8859-1',
'english_united-states': 'en_US.ISO8859-1',
'english_united-states.437': 'C',
'english_us': 'en_US.ISO8859-1',
'eo': 'eo_XX.ISO8859-3',
'eo.utf8': 'eo.UTF-8',
'eo_eo': 'eo_EO.ISO8859-3',
'eo_us.utf8': 'eo_US.UTF-8',
'eo_xx': 'eo_XX.ISO8859-3',
'es': 'es_ES.ISO8859-1',
'es_ar': 'es_AR.ISO8859-1',
'es_bo': 'es_BO.ISO8859-1',
'es_cl': 'es_CL.ISO8859-1',
'es_co': 'es_CO.ISO8859-1',
'es_cr': 'es_CR.ISO8859-1',
'es_cu': 'es_CU.UTF-8',
'es_do': 'es_DO.ISO8859-1',
'es_ec': 'es_EC.ISO8859-1',
'es_es': 'es_ES.ISO8859-1',
'es_gt': 'es_GT.ISO8859-1',
'es_hn': 'es_HN.ISO8859-1',
'es_mx': 'es_MX.ISO8859-1',
'es_ni': 'es_NI.ISO8859-1',
'es_pa': 'es_PA.ISO8859-1',
'es_pe': 'es_PE.ISO8859-1',
'es_pr': 'es_PR.ISO8859-1',
'es_py': 'es_PY.ISO8859-1',
'es_sv': 'es_SV.ISO8859-1',
'es_us': 'es_US.ISO8859-1',
'es_uy': 'es_UY.ISO8859-1',
'es_ve': 'es_VE.ISO8859-1',
'estonian': 'et_EE.ISO8859-1',
'et': 'et_EE.ISO8859-15',
'et_ee': 'et_EE.ISO8859-15',
'eu': 'eu_ES.ISO8859-1',
'eu_es': 'eu_ES.ISO8859-1',
'eu_fr': 'eu_FR.ISO8859-1',
'fa': 'fa_IR.UTF-8',
'fa_ir': 'fa_IR.UTF-8',
'fa_ir.isiri3342': 'fa_IR.ISIRI-3342',
'ff_sn': 'ff_SN.UTF-8',
'fi': 'fi_FI.ISO8859-15',
'fi_fi': 'fi_FI.ISO8859-15',
'fil_ph': 'fil_PH.UTF-8',
'finnish': 'fi_FI.ISO8859-1',
'fo': 'fo_FO.ISO8859-1',
'fo_fo': 'fo_FO.ISO8859-1',
'fr': 'fr_FR.ISO8859-1',
'fr_be': 'fr_BE.ISO8859-1',
'fr_ca': 'fr_CA.ISO8859-1',
'fr_ch': 'fr_CH.ISO8859-1',
'fr_fr': 'fr_FR.ISO8859-1',
'fr_lu': 'fr_LU.ISO8859-1',
'fran\xe7ais': 'fr_FR.ISO8859-1',
'fre_fr': 'fr_FR.ISO8859-1',
'french': 'fr_FR.ISO8859-1',
'french.iso88591': 'fr_CH.ISO8859-1',
'french_france': 'fr_FR.ISO8859-1',
'fur_it': 'fur_IT.UTF-8',
'fy_de': 'fy_DE.UTF-8',
'fy_nl': 'fy_NL.UTF-8',
'ga': 'ga_IE.ISO8859-1',
'ga_ie': 'ga_IE.ISO8859-1',
'galego': 'gl_ES.ISO8859-1',
'galician': 'gl_ES.ISO8859-1',
'gd': 'gd_GB.ISO8859-1',
'gd_gb': 'gd_GB.ISO8859-1',
'ger_de': 'de_DE.ISO8859-1',
'german': 'de_DE.ISO8859-1',
'german.iso88591': 'de_CH.ISO8859-1',
'german_germany': 'de_DE.ISO8859-1',
'gez_er': 'gez_ER.UTF-8',
'gez_et': 'gez_ET.UTF-8',
'gl': 'gl_ES.ISO8859-1',
'gl_es': 'gl_ES.ISO8859-1',
'greek': 'el_GR.ISO8859-7',
'gu_in': 'gu_IN.UTF-8',
'gv': 'gv_GB.ISO8859-1',
'gv_gb': 'gv_GB.ISO8859-1',
'ha_ng': 'ha_NG.UTF-8',
'he': 'he_IL.ISO8859-8',
'he_il': 'he_IL.ISO8859-8',
'hebrew': 'he_IL.ISO8859-8',
'hi': 'hi_IN.ISCII-DEV',
'hi_in': 'hi_IN.ISCII-DEV',
'hi_in.isciidev': 'hi_IN.ISCII-DEV',
'hne': 'hne_IN.UTF-8',
'hne_in': 'hne_IN.UTF-8',
'hr': 'hr_HR.ISO8859-2',
'hr_hr': 'hr_HR.ISO8859-2',
'hrvatski': 'hr_HR.ISO8859-2',
'hsb_de': 'hsb_DE.ISO8859-2',
'ht_ht': 'ht_HT.UTF-8',
'hu': 'hu_HU.ISO8859-2',
'hu_hu': 'hu_HU.ISO8859-2',
'hungarian': 'hu_HU.ISO8859-2',
'hy_am': 'hy_AM.UTF-8',
'hy_am.armscii8': 'hy_AM.ARMSCII_8',
'ia': 'ia.UTF-8',
'ia_fr': 'ia_FR.UTF-8',
'icelandic': 'is_IS.ISO8859-1',
'id': 'id_ID.ISO8859-1',
'id_id': 'id_ID.ISO8859-1',
'ig_ng': 'ig_NG.UTF-8',
'ik_ca': 'ik_CA.UTF-8',
'in': 'id_ID.ISO8859-1',
'in_id': 'id_ID.ISO8859-1',
'is': 'is_IS.ISO8859-1',
'is_is': 'is_IS.ISO8859-1',
'iso-8859-1': 'en_US.ISO8859-1',
'iso-8859-15': 'en_US.ISO8859-15',
'iso8859-1': 'en_US.ISO8859-1',
'iso8859-15': 'en_US.ISO8859-15',
'iso_8859_1': 'en_US.ISO8859-1',
'iso_8859_15': 'en_US.ISO8859-15',
'it': 'it_IT.ISO8859-1',
'it_ch': 'it_CH.ISO8859-1',
'it_it': 'it_IT.ISO8859-1',
'italian': 'it_IT.ISO8859-1',
'iu': 'iu_CA.NUNACOM-8',
'iu_ca': 'iu_CA.NUNACOM-8',
'iu_ca.nunacom8': 'iu_CA.NUNACOM-8',
'iw': 'he_IL.ISO8859-8',
'iw_il': 'he_IL.ISO8859-8',
'iw_il.utf8': 'iw_IL.UTF-8',
'ja': 'ja_JP.eucJP',
'ja_jp': 'ja_JP.eucJP',
'ja_jp.euc': 'ja_JP.eucJP',
'ja_jp.mscode': 'ja_JP.SJIS',
'ja_jp.pck': 'ja_JP.SJIS',
'japan': 'ja_JP.eucJP',
'japanese': 'ja_JP.eucJP',
'japanese-euc': 'ja_JP.eucJP',
'japanese.euc': 'ja_JP.eucJP',
'jp_jp': 'ja_JP.eucJP',
'ka': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS',
'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY',
'kk_kz': 'kk_KZ.RK1048',
'kl': 'kl_GL.ISO8859-1',
'kl_gl': 'kl_GL.ISO8859-1',
'km_kh': 'km_KH.UTF-8',
'kn': 'kn_IN.UTF-8',
'kn_in': 'kn_IN.UTF-8',
'ko': 'ko_KR.eucKR',
'ko_kr': 'ko_KR.eucKR',
'ko_kr.euc': 'ko_KR.eucKR',
'kok_in': 'kok_IN.UTF-8',
'korean': 'ko_KR.eucKR',
'korean.euc': 'ko_KR.eucKR',
'ks': 'ks_IN.UTF-8',
'ks_in': 'ks_IN.UTF-8',
'[email protected]': 'ks_IN.UTF-8@devanagari',
'ku_tr': 'ku_TR.ISO8859-9',
'kw': 'kw_GB.ISO8859-1',
'kw_gb': 'kw_GB.ISO8859-1',
'ky': 'ky_KG.UTF-8',
'ky_kg': 'ky_KG.UTF-8',
'lb_lu': 'lb_LU.UTF-8',
'lg_ug': 'lg_UG.ISO8859-10',
'li_be': 'li_BE.UTF-8',
'li_nl': 'li_NL.UTF-8',
'lij_it': 'lij_IT.UTF-8',
'lithuanian': 'lt_LT.ISO8859-13',
'lo': 'lo_LA.MULELAO-1',
'lo_la': 'lo_LA.MULELAO-1',
'lo_la.cp1133': 'lo_LA.IBM-CP1133',
'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133',
'lo_la.mulelao1': 'lo_LA.MULELAO-1',
'lt': 'lt_LT.ISO8859-13',
'lt_lt': 'lt_LT.ISO8859-13',
'lv': 'lv_LV.ISO8859-13',
'lv_lv': 'lv_LV.ISO8859-13',
'mag_in': 'mag_IN.UTF-8',
'mai': 'mai_IN.UTF-8',
'mai_in': 'mai_IN.UTF-8',
'mg_mg': 'mg_MG.ISO8859-15',
'mhr_ru': 'mhr_RU.UTF-8',
'mi': 'mi_NZ.ISO8859-1',
'mi_nz': 'mi_NZ.ISO8859-1',
'mk': 'mk_MK.ISO8859-5',
'mk_mk': 'mk_MK.ISO8859-5',
'ml': 'ml_IN.UTF-8',
'ml_in': 'ml_IN.UTF-8',
'mn_mn': 'mn_MN.UTF-8',
'mni_in': 'mni_IN.UTF-8',
'mr': 'mr_IN.UTF-8',
'mr_in': 'mr_IN.UTF-8',
'ms': 'ms_MY.ISO8859-1',
'ms_my': 'ms_MY.ISO8859-1',
'mt': 'mt_MT.ISO8859-3',
'mt_mt': 'mt_MT.ISO8859-3',
'my_mm': 'my_MM.UTF-8',
'nan_tw@latin': 'nan_TW.UTF-8@latin',
'nb': 'nb_NO.ISO8859-1',
'nb_no': 'nb_NO.ISO8859-1',
'nds_de': 'nds_DE.UTF-8',
'nds_nl': 'nds_NL.UTF-8',
'ne_np': 'ne_NP.UTF-8',
'nhn_mx': 'nhn_MX.UTF-8',
'niu_nu': 'niu_NU.UTF-8',
'niu_nz': 'niu_NZ.UTF-8',
'nl': 'nl_NL.ISO8859-1',
'nl_aw': 'nl_AW.UTF-8',
'nl_be': 'nl_BE.ISO8859-1',
'nl_nl': 'nl_NL.ISO8859-1',
'nn': 'nn_NO.ISO8859-1',
'nn_no': 'nn_NO.ISO8859-1',
'no': 'no_NO.ISO8859-1',
'no@nynorsk': 'ny_NO.ISO8859-1',
'no_no': 'no_NO.ISO8859-1',
'no_no.iso88591@bokmal': 'no_NO.ISO8859-1',
'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1',
'norwegian': 'no_NO.ISO8859-1',
'nr': 'nr_ZA.ISO8859-1',
'nr_za': 'nr_ZA.ISO8859-1',
'nso': 'nso_ZA.ISO8859-15',
'nso_za': 'nso_ZA.ISO8859-15',
'ny': 'ny_NO.ISO8859-1',
'ny_no': 'ny_NO.ISO8859-1',
'nynorsk': 'nn_NO.ISO8859-1',
'oc': 'oc_FR.ISO8859-1',
'oc_fr': 'oc_FR.ISO8859-1',
'om_et': 'om_ET.UTF-8',
'om_ke': 'om_KE.ISO8859-1',
'or': 'or_IN.UTF-8',
'or_in': 'or_IN.UTF-8',
'os_ru': 'os_RU.UTF-8',
'pa': 'pa_IN.UTF-8',
'pa_in': 'pa_IN.UTF-8',
'pa_pk': 'pa_PK.UTF-8',
'pap_an': 'pap_AN.UTF-8',
'pd': 'pd_US.ISO8859-1',
'pd_de': 'pd_DE.ISO8859-1',
'pd_us': 'pd_US.ISO8859-1',
'ph': 'ph_PH.ISO8859-1',
'ph_ph': 'ph_PH.ISO8859-1',
'pl': 'pl_PL.ISO8859-2',
'pl_pl': 'pl_PL.ISO8859-2',
'polish': 'pl_PL.ISO8859-2',
'portuguese': 'pt_PT.ISO8859-1',
'portuguese_brazil': 'pt_BR.ISO8859-1',
'posix': 'C',
'posix-utf2': 'C',
'pp': 'pp_AN.ISO8859-1',
'pp_an': 'pp_AN.ISO8859-1',
'ps_af': 'ps_AF.UTF-8',
'pt': 'pt_PT.ISO8859-1',
'pt_br': 'pt_BR.ISO8859-1',
'pt_pt': 'pt_PT.ISO8859-1',
'ro': 'ro_RO.ISO8859-2',
'ro_ro': 'ro_RO.ISO8859-2',
'romanian': 'ro_RO.ISO8859-2',
'ru': 'ru_RU.UTF-8',
'ru_ru': 'ru_RU.UTF-8',
'ru_ua': 'ru_UA.KOI8-U',
'rumanian': 'ro_RO.ISO8859-2',
'russian': 'ru_RU.ISO8859-5',
'rw': 'rw_RW.ISO8859-1',
'rw_rw': 'rw_RW.ISO8859-1',
'sa_in': 'sa_IN.UTF-8',
'sat_in': 'sat_IN.UTF-8',
'sc_it': 'sc_IT.UTF-8',
'sd': 'sd_IN.UTF-8',
'sd_in': 'sd_IN.UTF-8',
'[email protected]': 'sd_IN.UTF-8@devanagari',
'sd_pk': 'sd_PK.UTF-8',
'se_no': 'se_NO.UTF-8',
'serbocroatian': 'sr_RS.UTF-8@latin',
'sh': 'sr_RS.UTF-8@latin',
'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2',
'sh_hr': 'sh_HR.ISO8859-2',
'sh_hr.iso88592': 'hr_HR.ISO8859-2',
'sh_sp': 'sr_CS.ISO8859-2',
'sh_yu': 'sr_RS.UTF-8@latin',
'shs_ca': 'shs_CA.UTF-8',
'si': 'si_LK.UTF-8',
'si_lk': 'si_LK.UTF-8',
'sid_et': 'sid_ET.UTF-8',
'sinhala': 'si_LK.UTF-8',
'sk': 'sk_SK.ISO8859-2',
'sk_sk': 'sk_SK.ISO8859-2',
'sl': 'sl_SI.ISO8859-2',
'sl_cs': 'sl_CS.ISO8859-2',
'sl_si': 'sl_SI.ISO8859-2',
'slovak': 'sk_SK.ISO8859-2',
'slovene': 'sl_SI.ISO8859-2',
'slovenian': 'sl_SI.ISO8859-2',
'so_dj': 'so_DJ.ISO8859-1',
'so_et': 'so_ET.UTF-8',
'so_ke': 'so_KE.ISO8859-1',
'so_so': 'so_SO.ISO8859-1',
'sp': 'sr_CS.ISO8859-5',
'sp_yu': 'sr_CS.ISO8859-5',
'spanish': 'es_ES.ISO8859-1',
'spanish_spain': 'es_ES.ISO8859-1',
'sq': 'sq_AL.ISO8859-2',
'sq_al': 'sq_AL.ISO8859-2',
'sq_mk': 'sq_MK.UTF-8',
'sr': 'sr_RS.UTF-8',
'sr@cyrillic': 'sr_RS.UTF-8',
'sr@latn': 'sr_CS.UTF-8@latin',
'sr_cs': 'sr_CS.UTF-8',
'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2',
'sr_cs@latn': 'sr_CS.UTF-8@latin',
'sr_me': 'sr_ME.UTF-8',
'sr_rs': 'sr_RS.UTF-8',
'sr_rs@latn': 'sr_RS.UTF-8@latin',
'sr_sp': 'sr_CS.ISO8859-2',
'sr_yu': 'sr_RS.UTF-8@latin',
'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251',
'sr_yu.iso88592': 'sr_CS.ISO8859-2',
'sr_yu.iso88595': 'sr_CS.ISO8859-5',
'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5',
'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251',
'sr_yu.utf8': 'sr_RS.UTF-8',
'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8',
'sr_yu@cyrillic': 'sr_RS.UTF-8',
'ss': 'ss_ZA.ISO8859-1',
'ss_za': 'ss_ZA.ISO8859-1',
'st': 'st_ZA.ISO8859-1',
'st_za': 'st_ZA.ISO8859-1',
'sv': 'sv_SE.ISO8859-1',
'sv_fi': 'sv_FI.ISO8859-1',
'sv_se': 'sv_SE.ISO8859-1',
'sw_ke': 'sw_KE.UTF-8',
'sw_tz': 'sw_TZ.UTF-8',
'swedish': 'sv_SE.ISO8859-1',
'szl_pl': 'szl_PL.UTF-8',
'ta': 'ta_IN.TSCII-0',
'ta_in': 'ta_IN.TSCII-0',
'ta_in.tscii': 'ta_IN.TSCII-0',
'ta_in.tscii0': 'ta_IN.TSCII-0',
'ta_lk': 'ta_LK.UTF-8',
'te': 'te_IN.UTF-8',
'te_in': 'te_IN.UTF-8',
'tg': 'tg_TJ.KOI8-C',
'tg_tj': 'tg_TJ.KOI8-C',
'th': 'th_TH.ISO8859-11',
'th_th': 'th_TH.ISO8859-11',
'th_th.tactis': 'th_TH.TIS620',
'th_th.tis620': 'th_TH.TIS620',
'thai': 'th_TH.ISO8859-11',
'ti_er': 'ti_ER.UTF-8',
'ti_et': 'ti_ET.UTF-8',
'tig_er': 'tig_ER.UTF-8',
'tk_tm': 'tk_TM.UTF-8',
'tl': 'tl_PH.ISO8859-1',
'tl_ph': 'tl_PH.ISO8859-1',
'tn': 'tn_ZA.ISO8859-15',
'tn_za': 'tn_ZA.ISO8859-15',
'tr': 'tr_TR.ISO8859-9',
'tr_cy': 'tr_CY.ISO8859-9',
'tr_tr': 'tr_TR.ISO8859-9',
'ts': 'ts_ZA.ISO8859-1',
'ts_za': 'ts_ZA.ISO8859-1',
'tt': 'tt_RU.TATAR-CYR',
'tt_ru': 'tt_RU.TATAR-CYR',
'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR',
'tt_ru@iqtelif': 'tt_RU.UTF-8@iqtelif',
'turkish': 'tr_TR.ISO8859-9',
'ug_cn': 'ug_CN.UTF-8',
'uk': 'uk_UA.KOI8-U',
'uk_ua': 'uk_UA.KOI8-U',
'univ': 'en_US.UTF-8',
'universal': 'en_US.UTF-8',
'universal.utf8@ucs4': 'en_US.UTF-8',
'unm_us': 'unm_US.UTF-8',
'ur': 'ur_PK.CP1256',
'ur_in': 'ur_IN.UTF-8',
'ur_pk': 'ur_PK.CP1256',
'uz': 'uz_UZ.UTF-8',
'uz_uz': 'uz_UZ.UTF-8',
'uz_uz@cyrillic': 'uz_UZ.UTF-8',
've': 've_ZA.UTF-8',
've_za': 've_ZA.UTF-8',
'vi': 'vi_VN.TCVN',
'vi_vn': 'vi_VN.TCVN',
'vi_vn.tcvn': 'vi_VN.TCVN',
'vi_vn.tcvn5712': 'vi_VN.TCVN',
'vi_vn.viscii': 'vi_VN.VISCII',
'vi_vn.viscii111': 'vi_VN.VISCII',
'wa': 'wa_BE.ISO8859-1',
'wa_be': 'wa_BE.ISO8859-1',
'wae_ch': 'wae_CH.UTF-8',
'wal_et': 'wal_ET.UTF-8',
'wo_sn': 'wo_SN.UTF-8',
'xh': 'xh_ZA.ISO8859-1',
'xh_za': 'xh_ZA.ISO8859-1',
'yi': 'yi_US.CP1255',
'yi_us': 'yi_US.CP1255',
'yo_ng': 'yo_NG.UTF-8',
'yue_hk': 'yue_HK.UTF-8',
'zh': 'zh_CN.eucCN',
'zh_cn': 'zh_CN.gb2312',
'zh_cn.big5': 'zh_TW.big5',
'zh_cn.euc': 'zh_CN.eucCN',
'zh_hk': 'zh_HK.big5hkscs',
'zh_hk.big5hk': 'zh_HK.big5hkscs',
'zh_sg': 'zh_SG.GB2312',
'zh_sg.gbk': 'zh_SG.GBK',
'zh_tw': 'zh_TW.big5',
'zh_tw.euc': 'zh_TW.eucTW',
'zh_tw.euctw': 'zh_TW.eucTW',
'zu': 'zu_ZA.ISO8859-1',
'zu_za': 'zu_ZA.ISO8859-1',
}
#
# This maps Windows language identifiers to locale strings.
#
# This list has been updated from
# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp
# to include every locale up to Windows Vista.
#
# NOTE: this mapping is incomplete. If your language is missing, please
# submit a bug report to the Python bug tracker at http://bugs.python.org/
# Make sure you include the missing language identifier and the suggested
# locale code.
#
windows_locale = {
0x0436: "af_ZA", # Afrikaans
0x041c: "sq_AL", # Albanian
0x0484: "gsw_FR",# Alsatian - France
0x045e: "am_ET", # Amharic - Ethiopia
0x0401: "ar_SA", # Arabic - Saudi Arabia
0x0801: "ar_IQ", # Arabic - Iraq
0x0c01: "ar_EG", # Arabic - Egypt
0x1001: "ar_LY", # Arabic - Libya
0x1401: "ar_DZ", # Arabic - Algeria
0x1801: "ar_MA", # Arabic - Morocco
0x1c01: "ar_TN", # Arabic - Tunisia
0x2001: "ar_OM", # Arabic - Oman
0x2401: "ar_YE", # Arabic - Yemen
0x2801: "ar_SY", # Arabic - Syria
0x2c01: "ar_JO", # Arabic - Jordan
0x3001: "ar_LB", # Arabic - Lebanon
0x3401: "ar_KW", # Arabic - Kuwait
0x3801: "ar_AE", # Arabic - United Arab Emirates
0x3c01: "ar_BH", # Arabic - Bahrain
0x4001: "ar_QA", # Arabic - Qatar
0x042b: "hy_AM", # Armenian
0x044d: "as_IN", # Assamese - India
0x042c: "az_AZ", # Azeri - Latin
0x082c: "az_AZ", # Azeri - Cyrillic
0x046d: "ba_RU", # Bashkir
    0x042d: "eu_ES", # Basque
0x0423: "be_BY", # Belarusian
    0x0445: "bn_IN", # Bengali
0x201a: "bs_BA", # Bosnian - Cyrillic
0x141a: "bs_BA", # Bosnian - Latin
0x047e: "br_FR", # Breton - France
0x0402: "bg_BG", # Bulgarian
# 0x0455: "my_MM", # Burmese - Not supported
0x0403: "ca_ES", # Catalan
0x0004: "zh_CHS",# Chinese - Simplified
0x0404: "zh_TW", # Chinese - Taiwan
0x0804: "zh_CN", # Chinese - PRC
0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R.
0x1004: "zh_SG", # Chinese - Singapore
0x1404: "zh_MO", # Chinese - Macao S.A.R.
0x7c04: "zh_CHT",# Chinese - Traditional
0x0483: "co_FR", # Corsican - France
0x041a: "hr_HR", # Croatian
0x101a: "hr_BA", # Croatian - Bosnia
0x0405: "cs_CZ", # Czech
0x0406: "da_DK", # Danish
0x048c: "gbz_AF",# Dari - Afghanistan
0x0465: "div_MV",# Divehi - Maldives
0x0413: "nl_NL", # Dutch - The Netherlands
0x0813: "nl_BE", # Dutch - Belgium
0x0409: "en_US", # English - United States
0x0809: "en_GB", # English - United Kingdom
0x0c09: "en_AU", # English - Australia
0x1009: "en_CA", # English - Canada
0x1409: "en_NZ", # English - New Zealand
0x1809: "en_IE", # English - Ireland
0x1c09: "en_ZA", # English - South Africa
0x2009: "en_JA", # English - Jamaica
    0x2409: "en_CB", # English - Caribbean
0x2809: "en_BZ", # English - Belize
0x2c09: "en_TT", # English - Trinidad
0x3009: "en_ZW", # English - Zimbabwe
0x3409: "en_PH", # English - Philippines
0x4009: "en_IN", # English - India
0x4409: "en_MY", # English - Malaysia
0x4809: "en_IN", # English - Singapore
0x0425: "et_EE", # Estonian
0x0438: "fo_FO", # Faroese
0x0464: "fil_PH",# Filipino
0x040b: "fi_FI", # Finnish
0x040c: "fr_FR", # French - France
0x080c: "fr_BE", # French - Belgium
0x0c0c: "fr_CA", # French - Canada
0x100c: "fr_CH", # French - Switzerland
0x140c: "fr_LU", # French - Luxembourg
0x180c: "fr_MC", # French - Monaco
0x0462: "fy_NL", # Frisian - Netherlands
0x0456: "gl_ES", # Galician
0x0437: "ka_GE", # Georgian
0x0407: "de_DE", # German - Germany
0x0807: "de_CH", # German - Switzerland
0x0c07: "de_AT", # German - Austria
0x1007: "de_LU", # German - Luxembourg
0x1407: "de_LI", # German - Liechtenstein
0x0408: "el_GR", # Greek
0x046f: "kl_GL", # Greenlandic - Greenland
0x0447: "gu_IN", # Gujarati
0x0468: "ha_NG", # Hausa - Latin
0x040d: "he_IL", # Hebrew
0x0439: "hi_IN", # Hindi
0x040e: "hu_HU", # Hungarian
0x040f: "is_IS", # Icelandic
0x0421: "id_ID", # Indonesian
0x045d: "iu_CA", # Inuktitut - Syllabics
0x085d: "iu_CA", # Inuktitut - Latin
0x083c: "ga_IE", # Irish - Ireland
0x0410: "it_IT", # Italian - Italy
0x0810: "it_CH", # Italian - Switzerland
0x0411: "ja_JP", # Japanese
0x044b: "kn_IN", # Kannada - India
0x043f: "kk_KZ", # Kazakh
0x0453: "kh_KH", # Khmer - Cambodia
0x0486: "qut_GT",# K'iche - Guatemala
0x0487: "rw_RW", # Kinyarwanda - Rwanda
0x0457: "kok_IN",# Konkani
0x0412: "ko_KR", # Korean
0x0440: "ky_KG", # Kyrgyz
0x0454: "lo_LA", # Lao - Lao PDR
0x0426: "lv_LV", # Latvian
0x0427: "lt_LT", # Lithuanian
0x082e: "dsb_DE",# Lower Sorbian - Germany
0x046e: "lb_LU", # Luxembourgish
0x042f: "mk_MK", # FYROM Macedonian
0x043e: "ms_MY", # Malay - Malaysia
0x083e: "ms_BN", # Malay - Brunei Darussalam
0x044c: "ml_IN", # Malayalam - India
0x043a: "mt_MT", # Maltese
0x0481: "mi_NZ", # Maori
0x047a: "arn_CL",# Mapudungun
0x044e: "mr_IN", # Marathi
0x047c: "moh_CA",# Mohawk - Canada
0x0450: "mn_MN", # Mongolian - Cyrillic
0x0850: "mn_CN", # Mongolian - PRC
0x0461: "ne_NP", # Nepali
0x0414: "nb_NO", # Norwegian - Bokmal
0x0814: "nn_NO", # Norwegian - Nynorsk
0x0482: "oc_FR", # Occitan - France
0x0448: "or_IN", # Oriya - India
0x0463: "ps_AF", # Pashto - Afghanistan
0x0429: "fa_IR", # Persian
0x0415: "pl_PL", # Polish
0x0416: "pt_BR", # Portuguese - Brazil
0x0816: "pt_PT", # Portuguese - Portugal
0x0446: "pa_IN", # Punjabi
0x046b: "quz_BO",# Quechua (Bolivia)
0x086b: "quz_EC",# Quechua (Ecuador)
0x0c6b: "quz_PE",# Quechua (Peru)
0x0418: "ro_RO", # Romanian - Romania
0x0417: "rm_CH", # Romansh
0x0419: "ru_RU", # Russian
0x243b: "smn_FI",# Sami Finland
0x103b: "smj_NO",# Sami Norway
0x143b: "smj_SE",# Sami Sweden
0x043b: "se_NO", # Sami Northern Norway
0x083b: "se_SE", # Sami Northern Sweden
0x0c3b: "se_FI", # Sami Northern Finland
0x203b: "sms_FI",# Sami Skolt
0x183b: "sma_NO",# Sami Southern Norway
0x1c3b: "sma_SE",# Sami Southern Sweden
0x044f: "sa_IN", # Sanskrit
0x0c1a: "sr_SP", # Serbian - Cyrillic
0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic
0x081a: "sr_SP", # Serbian - Latin
0x181a: "sr_BA", # Serbian - Bosnia Latin
0x045b: "si_LK", # Sinhala - Sri Lanka
0x046c: "ns_ZA", # Northern Sotho
0x0432: "tn_ZA", # Setswana - Southern Africa
0x041b: "sk_SK", # Slovak
0x0424: "sl_SI", # Slovenian
0x040a: "es_ES", # Spanish - Spain
0x080a: "es_MX", # Spanish - Mexico
0x0c0a: "es_ES", # Spanish - Spain (Modern)
0x100a: "es_GT", # Spanish - Guatemala
0x140a: "es_CR", # Spanish - Costa Rica
0x180a: "es_PA", # Spanish - Panama
0x1c0a: "es_DO", # Spanish - Dominican Republic
0x200a: "es_VE", # Spanish - Venezuela
0x240a: "es_CO", # Spanish - Colombia
0x280a: "es_PE", # Spanish - Peru
0x2c0a: "es_AR", # Spanish - Argentina
0x300a: "es_EC", # Spanish - Ecuador
0x340a: "es_CL", # Spanish - Chile
0x380a: "es_UR", # Spanish - Uruguay
0x3c0a: "es_PY", # Spanish - Paraguay
0x400a: "es_BO", # Spanish - Bolivia
0x440a: "es_SV", # Spanish - El Salvador
0x480a: "es_HN", # Spanish - Honduras
0x4c0a: "es_NI", # Spanish - Nicaragua
0x500a: "es_PR", # Spanish - Puerto Rico
0x540a: "es_US", # Spanish - United States
# 0x0430: "", # Sutu - Not supported
0x0441: "sw_KE", # Swahili
0x041d: "sv_SE", # Swedish - Sweden
0x081d: "sv_FI", # Swedish - Finland
0x045a: "syr_SY",# Syriac
0x0428: "tg_TJ", # Tajik - Cyrillic
0x085f: "tmz_DZ",# Tamazight - Latin
0x0449: "ta_IN", # Tamil
0x0444: "tt_RU", # Tatar
0x044a: "te_IN", # Telugu
0x041e: "th_TH", # Thai
0x0851: "bo_BT", # Tibetan - Bhutan
0x0451: "bo_CN", # Tibetan - PRC
0x041f: "tr_TR", # Turkish
0x0442: "tk_TM", # Turkmen - Cyrillic
0x0480: "ug_CN", # Uighur - Arabic
0x0422: "uk_UA", # Ukrainian
0x042e: "wen_DE",# Upper Sorbian - Germany
0x0420: "ur_PK", # Urdu
0x0820: "ur_IN", # Urdu - India
0x0443: "uz_UZ", # Uzbek - Latin
0x0843: "uz_UZ", # Uzbek - Cyrillic
0x042a: "vi_VN", # Vietnamese
0x0452: "cy_GB", # Welsh
0x0488: "wo_SN", # Wolof - Senegal
0x0434: "xh_ZA", # Xhosa - South Africa
0x0485: "sah_RU",# Yakut - Cyrillic
0x0478: "ii_CN", # Yi - PRC
0x046a: "yo_NG", # Yoruba - Nigeria
0x0435: "zu_ZA", # Zulu
}
def _print_locale():
""" Test function.
"""
categories = {}
def _init_categories(categories=categories):
for k,v in globals().items():
if k[:3] == 'LC_':
categories[k] = v
_init_categories()
del categories['LC_ALL']
print('Locale defaults as determined by getdefaultlocale():')
print('-'*72)
lang, enc = getdefaultlocale()
print('Language: ', lang or '(undefined)')
print('Encoding: ', enc or '(undefined)')
print()
print('Locale settings on startup:')
print('-'*72)
for name,category in categories.items():
print(name, '...')
lang, enc = getlocale(category)
print(' Language: ', lang or '(undefined)')
print(' Encoding: ', enc or '(undefined)')
print()
print()
print('Locale settings after calling resetlocale():')
print('-'*72)
resetlocale()
for name,category in categories.items():
print(name, '...')
lang, enc = getlocale(category)
print(' Language: ', lang or '(undefined)')
print(' Encoding: ', enc or '(undefined)')
print()
try:
setlocale(LC_ALL, "")
except:
print('NOTE:')
print('setlocale(LC_ALL, "") does not support the default locale')
print('given in the OS environment variables.')
else:
print()
print('Locale settings after calling setlocale(LC_ALL, ""):')
print('-'*72)
for name,category in categories.items():
print(name, '...')
lang, enc = getlocale(category)
print(' Language: ', lang or '(undefined)')
print(' Encoding: ', enc or '(undefined)')
print()
###
try:
LC_MESSAGES
except NameError:
pass
else:
__all__.append("LC_MESSAGES")
if __name__=='__main__':
print('Locale aliasing:')
print()
_print_locale()
print()
print('Number formatting:')
print()
_test()
| gpl-2.0 | 850,608,073,741,775,200 | 43.47136 | 115 | 0.434607 | false |
kennedyshead/home-assistant | homeassistant/components/neato/api.py | 2 | 1898 | """API for Neato Botvac bound to Home Assistant OAuth."""
from asyncio import run_coroutine_threadsafe
import pybotvac
from homeassistant import config_entries, core
from homeassistant.helpers import config_entry_oauth2_flow
class ConfigEntryAuth(pybotvac.OAuthSession):
"""Provide Neato Botvac authentication tied to an OAuth2 based config entry."""
def __init__(
self,
hass: core.HomeAssistant,
config_entry: config_entries.ConfigEntry,
implementation: config_entry_oauth2_flow.AbstractOAuth2Implementation,
) -> None:
"""Initialize Neato Botvac Auth."""
self.hass = hass
self.session = config_entry_oauth2_flow.OAuth2Session(
hass, config_entry, implementation
)
super().__init__(self.session.token, vendor=pybotvac.Neato())
def refresh_tokens(self) -> str:
"""Refresh and return new Neato Botvac tokens using Home Assistant OAuth2 session."""
run_coroutine_threadsafe(
self.session.async_ensure_token_valid(), self.hass.loop
).result()
return self.session.token["access_token"]
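        # run_coroutine_threadsafe is used above because this method runs in a
        # worker thread while the OAuth2 session lives on the event loop; the
        # call blocks until the token has been validated or refreshed.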
class NeatoImplementation(config_entry_oauth2_flow.LocalOAuth2Implementation):
"""Neato implementation of LocalOAuth2Implementation.
We need this class because we have to add client_secret and scope to the authorization request.
"""
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
return {"client_secret": self.client_secret}
async def async_generate_authorize_url(self, flow_id: str) -> str:
"""Generate a url for the user to authorize.
We must make sure that the plus signs are not encoded.
"""
url = await super().async_generate_authorize_url(flow_id)
return f"{url}&scope=public_profile+control_robots+maps"
| apache-2.0 | 1,394,616,307,001,043,700 | 35.5 | 99 | 0.683878 | false |
facebookresearch/ParlAI | parlai/agents/hugging_face/dict.py | 1 | 7732 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from abc import ABC, abstractmethod, abstractproperty
from collections import defaultdict
from typing import List
from parlai.core.dict import DictionaryAgent
from parlai.core.opt import Opt
from parlai.utils.io import PathManager
try:
from transformers import GPT2Tokenizer, T5TokenizerFast
except ImportError:
raise ImportError(
"Need to install Hugging Face transformers repository. "
"Try `pip install transformers`."
)
SPECIAL_TOKENS = {"bos_token": "<bos>", "eos_token": "<eos>", "pad_token": "<pad>"}
NO_OP = "x"
class HuggingFaceDictionaryAgent(DictionaryAgent, ABC):
"""
Use Hugging Face tokenizers.
"""
def __init__(self, opt: Opt, shared=None):
if not shared:
self.hf_tokenizer = self.get_tokenizer(opt)
self.tok2ind = self.hf_tokenizer.get_vocab()
self.ind2tok = {v: k for k, v in self.tok2ind.items()}
else:
self.hf_tokenizer = shared['hf_tokenizer']
self.tok2ind = shared['tok2ind']
self.ind2tok = shared['ind2tok']
self.freq = defaultdict(int)
for tok in self.tok2ind:
self.freq[tok] = 1
self.minfreq = opt.get('dict_minfreq', DictionaryAgent.default_minfreq)
self._unk_token_idx = self.hf_tokenizer.unk_token_id
self.override_special_tokens(opt)
self.lower = opt.get('dict_lower', DictionaryAgent.default_lower)
self.tokenizer = 'hf'
self.opt = opt
self.max_length = (
self.opt.get('text_truncate') or self.hf_tokenizer.model_max_length
)
@abstractmethod
def get_tokenizer(self, opt):
"""
Instantiate the HuggingFace tokenizer for your model.
"""
pass
@abstractmethod
    def override_special_tokens(self, opt):
"""
Override the special tokens for your tokenizer.
"""
pass
@abstractproperty
def add_special_tokens(self) -> bool:
"""
Whether to add special tokens when tokenizing.
"""
@abstractproperty
def skip_decode_special_tokens(self) -> bool:
"""
Whether to skip special tokens when converting tokens to text.
"""
def share(self):
shared = super().share()
shared['hf_tokenizer'] = self.hf_tokenizer
shared['ind2tok'] = self.ind2tok
shared['tok2ind'] = self.tok2ind
return shared
def format_text(self, text: str) -> str:
"""
Format text prior to encoding with tokenizer.
"""
return text
def txt2vec(self, text, vec_type=list):
return self.hf_tokenizer.encode(
self.format_text(text),
add_special_tokens=self.add_special_tokens,
max_length=self.max_length,
pad_to_max_length=False,
truncation='longest_first',
)
def vec2txt(self, vec, **kwargs):
return self.hf_tokenizer.decode(
vec, skip_special_tokens=self.skip_decode_special_tokens, **kwargs
)
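    # Round-trip sketch (ids depend on the concrete tokenizer; the values shown
    # are illustrative for GPT-2 BPE):
    #   vec = agent.txt2vec("hello world")  # -> e.g. [31373, 995]
    #   txt = agent.vec2txt(vec)            # -> "hello world"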
def act(self):
return {}
class Gpt2DictionaryAgent(HuggingFaceDictionaryAgent):
def is_prebuilt(self):
"""
Indicates whether the dictionary is fixed, and does not require building.
"""
return True
@property
def add_special_tokens(self) -> bool:
"""
Whether to add special tokens when tokenizing.
"""
return True
@property
def skip_decode_special_tokens(self) -> bool:
"""
Whether to skip special tokens when converting tokens to text.
"""
return False
def get_tokenizer(self, opt):
"""
Instantiate tokenizer.
"""
model_sz = opt["gpt2_size"]
if model_sz == "small":
model_key = "gpt2"
elif model_sz == "distilgpt2":
model_key = "distilgpt2"
else:
model_key = f"gpt2-{model_sz}"
# check if datapath has the files that hugging face code looks for
hf_dir = os.path.join(opt["datapath"], "hf", model_key)
if all(
PathManager.exists(os.path.join(hf_dir, file_name))
for file_name in ["merges.txt", "vocab.json"]
):
fle_key = PathManager.get_local_path(hf_dir, recursive=True)
else:
fle_key = model_key
return GPT2Tokenizer.from_pretrained(fle_key)
def add_additional_special_tokens(self, additional_special_tokens: List[str]):
"""
Add additional special tokens to the dictionary.
"""
self.additional_special_tokens = additional_special_tokens
self.hf_tokenizer.add_special_tokens(
{'additional_special_tokens': additional_special_tokens}
)
for tok in self.additional_special_tokens:
self.add_token(tok)
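        # Illustrative usage (the token strings are assumptions):
        #   dict_agent.add_additional_special_tokens(
        #       ["__knowledge__", "__endknowledge__"])
        # Each new token is registered with the HF tokenizer and with this dict.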
def _define_special_tokens(self, opt):
if opt["add_special_tokens"]:
            # Add additional start/end/pad tokens
self.hf_tokenizer.add_special_tokens(SPECIAL_TOKENS)
self.start_token = SPECIAL_TOKENS["bos_token"]
self.end_token = SPECIAL_TOKENS["eos_token"]
self.null_token = SPECIAL_TOKENS["pad_token"]
else:
# Only special token is end of text
self.start_token = NO_OP # hack, we cut off the start token
self.end_token = "<|endoftext|>"
self.null_token = "<|endoftext|>"
def override_special_tokens(self, opt):
# define special tokens
self._define_special_tokens(opt)
# now override
self.start_idx = self.hf_tokenizer.convert_tokens_to_ids([self.start_token])[0]
self.end_idx = self.hf_tokenizer.convert_tokens_to_ids([self.end_token])[0]
self.null_idx = self.hf_tokenizer.convert_tokens_to_ids([self.null_token])[0]
# set tok2ind for special tokens
self.tok2ind[self.end_token] = self.end_idx
self.tok2ind[self.start_token] = self.start_idx
self.tok2ind[self.null_token] = self.null_idx
# set ind2tok for special tokens
self.ind2tok[self.end_idx] = self.end_token
self.ind2tok[self.start_idx] = self.start_token
self.ind2tok[self.null_idx] = self.null_token
class DialoGPTDictionaryAgent(Gpt2DictionaryAgent):
def get_tokenizer(self, opt):
"""
Instantiate tokenizer.
"""
model_sz = opt["gpt2_size"]
fle_key = f"microsoft/DialoGPT-{model_sz}"
return GPT2Tokenizer.from_pretrained(fle_key)
class T5DictionaryAgent(HuggingFaceDictionaryAgent):
def get_tokenizer(self, opt):
return T5TokenizerFast.from_pretrained(opt['t5_model_arch'], truncation=True)
@property
def add_special_tokens(self) -> bool:
"""
Whether to add special tokens when tokenizing.
"""
return True
@property
def skip_decode_special_tokens(self) -> bool:
"""
Whether to skip special tokens when converting tokens to text.
"""
return True
def override_special_tokens(self, opt):
# now override
self.start_token = self.hf_tokenizer.pad_token
self.end_token = self.hf_tokenizer.eos_token
self.null_token = self.hf_tokenizer.pad_token
self.unk_token = self.hf_tokenizer.unk_token
self._unk_token_idx = self.hf_tokenizer.unk_token_id
self.start_idx = self[self.start_token]
self.end_idx = self[self.end_token]
self.null_idx = self[self.null_token]
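# Illustrative sketch (added note): T5 reuses the tokenizer's built-in tokens, so
# nothing new is added to the vocabulary; e.g. (the model name is an assumption)
#   tok = T5TokenizerFast.from_pretrained("t5-small")
#   tok.pad_token, tok.eos_token, tok.unk_token  # '<pad>', '</s>', '<unk>'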
| mit | -7,326,710,980,904,250,000 | 30.950413 | 87 | 0.609027 | false |
GuardianRG/CuckooSploit | analyzer/windows/modules/packages/generic.py | 6 | 1111 | # Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from random import randint
from lib.common.abstracts import Package
class Generic(Package):
"""Generic analysis package.
The sample is started using START command in a cmd.exe prompt.
"""
PATHS = [
("SystemRoot", "system32", "cmd.exe"),
]
def start(self, path):
cmd_path = self.get_path("cmd.exe")
# Create random cmd.exe window title.
rand_title = "".join([chr(randint(0, 128)) for i in xrange(0, randint(1, 10))])
# START syntax.
# See: https://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/start.mspx?mfr=true
# start ["title"] [/dPath] [/i] [/min] [/max] [{/separate | /shared}]
# [{/low | /normal | /high | /realtime | /abovenormal | belownormal}]
# [/wait] [/b] [FileName] [parameters]
cmd_args = "/c start /wait \"{0}\" \"{1}\"".format(rand_title, path)
return self.execute(cmd_path, cmd_args)
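# Illustrative example (added note): for a sample dropped at C:\sample.exe the
# analyzer ends up executing roughly
#   cmd.exe /c start /wait "<random title>" "C:\sample.exe"
# so cmd.exe only returns once the started process exits.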
| gpl-3.0 | -3,756,068,470,696,723,500 | 40.148148 | 114 | 0.621962 | false |
Miiha/FilmAnalyzerKit | analyzer/minio_upload.py | 1 | 2543 | import glob
from tqdm import tqdm
from analyzer.project import Project, StoragePath
from minio import Minio
from minio.policy import Policy
from minio.error import ResponseError
from analyzer.utils import env
from analyzer.path_utils import filename
from os.path import join
BUCKET_NAME = "thesis-video-data"
STORE_HOST = env("STORE_HOST", "localhost")
ACCESS_KEY = env("STORE_ACCESS_KEY")
SECRET_KEY = env("STORE_SECRET_KEY")
class Uploader(object):
def __init__(self):
if STORE_HOST is None:
raise Exception("Missing minio host info")
if ACCESS_KEY is None or SECRET_KEY is None:
raise Exception("Missing minio credentials")
self.minio_client = Minio(STORE_HOST + ':9000',
access_key=ACCESS_KEY,
secret_key=SECRET_KEY,
secure=False)
try:
if not self.minio_client.bucket_exists(BUCKET_NAME):
self.minio_client.make_bucket(BUCKET_NAME, location="us-east-1")
self.minio_client.set_bucket_policy(BUCKET_NAME, "", Policy.READ_ONLY)
except ResponseError as err:
print(err)
def upload_frames(self, project):
source_path = project.folder_path(Project.Folder.frames)
remote_path = project.folder_path(Project.Folder.frames, storage_env=StoragePath.remote)
self.upload_images(source_path, remote_path)
def upload_keyframes(self, project):
source_path = project.folder_path(Project.Folder.keyframes)
remote_path = project.folder_path(Project.Folder.keyframes, storage_env=StoragePath.remote)
self.upload_images(source_path, remote_path)
def upload_slices(self, project):
source_path = project.folder_path(Project.Folder.spatio)
remote_path = project.folder_path(Project.Folder.spatio, storage_env=StoragePath.remote)
self.upload_images(source_path, remote_path)
def upload_keyframe_thumbnails(self, project):
source_path = project.folder_path(Project.Folder.keyframe_thumbnails)
remote_path = project.folder_path(Project.Folder.keyframe_thumbnails, storage_env=StoragePath.remote)
self.upload_images(source_path, remote_path)
def upload_images(self, source_path, destination_path):
image_paths = sorted(glob.glob(join(source_path, "*.jpg")))
progress_bar = tqdm(total=len(image_paths), desc="upload")
for path in image_paths:
remote_path = join(destination_path, filename(path))
try:
self.minio_client.fput_object(BUCKET_NAME, remote_path, path)
except ResponseError as error:
print("Upload failed")
print(error)
progress_bar.update()
progress_bar.close()
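# Illustrative usage sketch (added note); assumes STORE_HOST, STORE_ACCESS_KEY and
# STORE_SECRET_KEY are set in the environment and `project` is an analyzer Project:
#   uploader = Uploader()
#   uploader.upload_keyframes(project)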
| mit | -8,729,186,016,562,689,000 | 32.025974 | 103 | 0.726308 | false |
Vagab0nd/SiCKRAGE | lib3/imdb/parser/http/listParser.py | 1 | 4108 | # Copyright 2004-2018 Davide Alberani <[email protected]>
# 2008-2018 H. Turgut Uyar <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the classes (and the instances) that are used to parse
the contents of a list.
For example, when you want to parse the list "Golden Globes 2020: Trending Titles"
the corresponding url would be:
https://www.imdb.com/list/ls091843609/
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import string
from imdb.utils import analyze_title
from .piculet import Path, Rule, Rules, reducers
from .utils import DOMParserBase, analyze_imdbid
non_numeric_chars = ''.join(set(string.printable) - set(string.digits))
class DOMHTMLListParser(DOMParserBase):
"""A parser for the title search page."""
rules = [
Rule(
key='chart',
extractor=Rules(
foreach='//div[@class="lister-item mode-detail"]',
rules=[
Rule(
key='link',
extractor=Path('.//h3[@class="lister-item-header"]/a/@href')
),
Rule(
key='rank',
extractor=Path('.//span[@class="lister-item-index unbold text-primary"]/text()',
reduce=reducers.first,
transform=lambda x: int(''.join(i for i in x if i.isdigit())))
),
Rule(
key='rating',
extractor=Path('.//span[@class="ipl-rating-star__rating"]/text()',
reduce=reducers.first,
transform=lambda x: round(float(x), 1))
),
Rule(
key='movieID',
extractor=Path('.//h3[@class="lister-item-header"]/a/@href')
),
Rule(
key='title',
extractor=Path('.//h3[@class="lister-item-header"]/a/text()')
),
Rule(
key='year',
extractor=Path('.//span[@class="lister-item-year text-muted unbold"]/text()',
transform=lambda x: int(''.join(i for i in x if i.isdigit())[:4]) )
),
Rule(
key='votes',
extractor=Path('.//span[@name="nv"]/@data-value', reduce=reducers.first,
transform=int)
)
]
)
)
]
def postprocess_data(self, data):
if (not data) or ('chart' not in data):
return []
movies = []
for entry in data['chart']:
if ('movieID' not in entry) or ('rank' not in entry) or ('title' not in entry):
continue
movie_id = analyze_imdbid(entry['movieID'])  # parse the href URL and extract the numeric id
if movie_id is None:
continue
del entry['movieID']
title = analyze_title(entry['title'])
entry.update(title)
movies.append((movie_id, entry))
return movies
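# Illustrative result shape (added note): feeding a list page to
# DOMHTMLListParser().parse(html) typically yields entries such as
#   ('0111161', {'rank': 1, 'rating': 9.3, 'votes': 2300000,
#                'title': 'The Shawshank Redemption', 'kind': 'movie', 'year': 1994})
# where the keys come from the rules above plus analyze_title(); the values here
# are only an example.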
_OBJECTS = {
'list_parser': ((DOMHTMLListParser,), None)
}
| gpl-3.0 | -8,093,793,847,827,971,000 | 36.345455 | 107 | 0.51777 | false |
clemenshage/grslra | experiments/4_grpca/runtime/plot_runtime_eval.py | 1 | 1432 | from matplotlib import pyplot as plt
import numpy as np
import matplotlib
matplotlib.rcParams.update({'font.size': 24})
matplotlib.rcParams.update({'text.usetex': True})
colors = ['k', 'b', 'g', 'r', 'm']
data = np.load('result_subsampling.npz')
times = data["times"]
dims = data["dims"]
paramfiles = data["paramfiles"]
nofparamfiles = len(paramfiles)
plt.figure(figsize=(15, 5))
plt.hold(True)
for j in xrange(nofparamfiles):
plt.loglog(dims**2, times[:, j], linewidth=3, color=colors[j], label="$\\textrm{" + paramfiles[j] + "}$")
axes = plt.gca()
axes.set_ylim([1,1e4])
axes.set_xlim([dims[0]**2,dims[-1]**2])
plt.grid(b=True, which='both', color='0.65', linestyle='-')
plt.tight_layout()
plt.ylabel('time (s)')
plt.xlabel('$m^2$')
plt.legend()
plt.savefig('subsampling_eval.pdf', dpi=200)
plt.close()
data = np.load('result_smmprod.npz')
times = data["times"]
dims = data["dims"]
paramfiles = data["paramfiles"]
nofparamfiles = len(paramfiles)
plt.figure(figsize=(15, 5))
plt.hold(True)
for j in xrange(nofparamfiles):
plt.loglog(dims[:10]**2, times[:10, j], linewidth=3, color=colors[j], label="$\\textrm{" + paramfiles[j] + "}$")
axes = plt.gca()
axes.set_ylim([1,1e4])
axes.set_xlim([dims[0]**2,dims[9]**2])
plt.grid(b=True, which='both', color='0.65', linestyle='-')
plt.tight_layout()
plt.ylabel('time (s)')
plt.xlabel('$m^2$')
plt.legend()
plt.savefig('smmprod_eval.pdf', dpi=200)
plt.close() | mit | -5,522,453,626,326,070,000 | 24.589286 | 116 | 0.65852 | false |
lukas-ke/faint-graphics-editor | build-sys/build_sys/build_sys.py | 1 | 9425 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2013 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
from optparse import OptionParser
from build_sys.util.util import print_timing
import build_sys.util.util as util
import os
import sys
import build_sys.gen_build_info as gen_build_info
import build_sys.gen_help as gen_help
import build_sys.gen_nsis as gen_nsis
import build_sys.dependencies as depend
from os.path import abspath
unknown_version_str = "unknown"
def fail(message):
print("Error: %s" % message)
exit(1)
def parse_bool(config, group, name):
value = config.get(group, name)
if value.lower() in ['1', 'true', 'yes', 'y']:
return True
if value.lower() in ['0', 'false', 'no', 'n']:
return False
print("Error: %s should be 1 or 0 in build.cfg" % name)
exit(1)
def parse_command_line():
optParser = OptionParser()
optParser.add_option("", "--rebuild",
action="store_true",
dest="do_rebuild",
default=False,
help="Rebuild and relink")
optParser.add_option("", "--version",
dest="version",
default=unknown_version_str,
help="Application version number")
optParser.add_option("", "--debug",
action="store_true",
dest="debug",
default=False,
help="Build with debug symbols")
optParser.add_option("", "--filthy",
action="store_true",
dest="filthy",
default=False,
help="Allow building unclean installer")
optParser.add_option("", "--timed",
action="store_true",
dest="timed",
default=False,
help="Time the build")
optParser.add_option("", "--stdout",
action="store_true",
dest="use_stdout",
default=False,
help="Print compilation output on standard out")
return optParser.parse_args()
def clean_file_name(f):
return abspath(f).replace('\\', '/')
def versioned(cmd_opts):
return cmd_opts.version != unknown_version_str
def versioned_unfilthy(cmd_opts):
return versioned(cmd_opts) and not cmd_opts.filthy
def check_valid_release(cmd_opts, opts, clean_build):
if not clean_build:
if versioned_unfilthy(cmd_opts):
fail("Build with --version requires empty output folder "
"(build/objs-release)")
if versioned_unfilthy(cmd_opts):
if gen_build_info.working_copy_modified(opts.project_root):
fail("Build with --version requires unmodified working copy.")
return
if versioned(cmd_opts) and opts.makensis_exe is None:
fail("Build with --version requires path to nsis in build.cfg")
def prepare_out_dir(obj_root):
"""Recreates the output-dir. Returns true if the folder was recreated
or empty. (this signifies a clean build)"""
if not os.path.exists(obj_root):
os.mkdir(obj_root)
return True
elif len(os.listdir(obj_root)) == 0:
return True
else:
return False
def _ext_cpp_to_obj(f, obj_ext):
return f.replace('.cpp', obj_ext).replace('.c', obj_ext)
def _to_obj(obj_root, cpp, obj_ext):
return os.path.join(obj_root,
os.path.basename(cpp).replace('.cpp', obj_ext).replace('.c', obj_ext))
def _modified_directly(source_files, obj_root, obj_ext):
"""Returns the cpp-files which are modified more recently than their
object files.
"""
modified = []
for cpp in source_files:
obj_file = _to_obj(obj_root, cpp, obj_ext)
if util.changed(cpp, obj_file):
modified.append(cpp)
return set(modified)
def _modified_dependencies(deps, obj_root, obj_ext, ignore):
"""Returns the cpp-files for which an object file is older than a
header it depends on.
"""
modified_deps = []
for incl in deps:
for cpp in deps.get(incl, []):
cpp = clean_file_name(cpp)
objFile = _to_obj(obj_root, cpp, obj_ext)
if cpp not in ignore and util.changed(incl, objFile):
modified_deps.append(cpp)
return set(modified_deps)
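# Illustrative example (added note, not in the original build script): if deps maps
# "util.hh" -> ["util.cpp", "app.cpp"] and util.hh is newer than app.obj while
# app.cpp itself is unchanged, _modified_dependencies() schedules app.cpp for
# recompilation even though _modified_directly() would not.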
def build(opts, cmdline):
"""Build according by the build options and command line options and
arguments.
"""
# Check that the parameters are valid
opts.verify()
if opts.compiler is None:
fail("No compiler specified")
if opts.compiler == 'msvc':
import build_sys.compile_msw as compile_impl
compile = compile_impl.compile
link = compile_impl.link
else:
import build_sys.compile_linux as compile_impl
if opts.compiler == 'clang':
compile = compile_impl.compile_clang
link = compile_impl.link_clang
elif opts.compiler == 'gcc':
compile = compile_impl.compile_gcc
link = compile_impl.link_gcc
elif opts.compiler == 'iwyu':
compile = compile_impl.compile_iwyu
link = compile_impl.link_iwyu
else:
fail("Unsupported compiler (%s) specified." % opts.compiler)
obj_ext = compile_impl.obj_ext
create_installer = compile_impl.create_installer
cmd_opts, args = cmdline
opts.set_debug_compile(cmd_opts.debug)
objRoot = opts.get_obj_root()
opts.timed = cmd_opts.timed
clean_build = prepare_out_dir(objRoot)
check_valid_release(cmd_opts, opts, clean_build)
do_compile = True
do_link = True
do_rebuild = cmd_opts.do_rebuild
err = open('err.txt', 'w')
if cmd_opts.use_stdout:
out = sys.stdout
else:
out = open('out.txt', 'w')
modified = ()
to_compile = set()
depsChanged = set()
timed = util.timed if opts.timed else util.not_timed
if opts.check_deps:
print("* Generating dependencies.")
deps = timed(depend.get_flat_header_dependencies, opts.project_root,
opts.source_folders)
else:
deps = []
modified = set()
depModified = set()
opts.source_files = list(set([os.path.abspath(f) for f in opts.source_files]))
opts.source_files = [clean_file_name(f) for f in opts.source_files]
compile_unchanged = False
if do_compile:
if do_rebuild:
to_compile = opts.source_files
else:
modified = _modified_directly(opts.source_files, objRoot, obj_ext)
to_compile = to_compile.union(modified)
depModified = _modified_dependencies(deps, objRoot, obj_ext,
ignore=to_compile)
to_compile = to_compile.union(depModified)
if len(to_compile) > 0:
if opts.create_build_info:
print("* Generating Build-info.")
to_compile.add(clean_file_name(gen_build_info.run(opts.project_root,
cmd_opts.version)))
print("* Compiling (%s)." % opts.compiler)
print(' %d cpp %s modified + %d indirectly (of %d total).' %
(len(modified),
"files" if len(modified) == 0 or len(modified) > 1 else "file",
len(depModified),
len(opts.source_files)))
timed(compile, list(sorted(list(to_compile))), opts,
out, err, debug=cmd_opts.debug)
else:
print("* Nothing to compile.")
compile_unchanged = True
obj_files = util.joined(opts.get_obj_root(),
util.list_by_ext(opts.get_obj_root(), obj_ext))
obj_files.extend([o + obj_ext for o in opts.extra_objs])
if compile_unchanged:
if util.any_changed(obj_files, opts.get_out_path()):
do_link = True
else:
print("* Not relinking either.") # Fixme: Tidy up
do_link = False
if do_link:
print("* Linking (%d object files)." % len(obj_files))
timed(link, obj_files, opts, out, err, debug=cmd_opts.debug)
return 0
def build_installer(version, makensis_exe):
import build_sys.compile_msw as compile_impl
print("* Generating %s installer." % version)
with working_dir("../installer"):
nsiFile = gen_nsis.run(version)
compile_impl.create_installer(makensis_exe, nsiFile)
if __name__ == '__main__':
build()
| apache-2.0 | 3,591,846,196,415,151,600 | 29.841216 | 82 | 0.565517 | false |
jingxiang-li/kaggle-yelp | model/level3_model_ext.py | 1 | 5661 | from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import f1_score
import argparse
from os import path
import os
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from utils import *
import pickle
np.random.seed(8089)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--yix', type=int, default=0)
return parser.parse_args()
# functions for hyperparameters optimization
class Score:
def __init__(self, X, y):
self.y = y
self.X = X
def get_score(self, params):
params['n_estimators'] = int(params['n_estimators'])
params['max_depth'] = int(params['max_depth'])
params['min_samples_split'] = int(params['min_samples_split'])
params['min_samples_leaf'] = int(params['min_samples_leaf'])
print('Training with params:')
print(params)
# cross validation here
scores = []
for train_ix, test_ix in makeKFold(5, self.y, 1):
X_train, y_train = self.X[train_ix, :], self.y[train_ix]
X_test, y_test = self.X[test_ix, :], self.y[test_ix]
weight = y_train.shape[0] / (2 * np.bincount(y_train))
sample_weight = np.array([weight[i] for i in y_train])
clf = ExtraTreesClassifier(**params)
cclf = CalibratedClassifierCV(base_estimator=clf,
method='isotonic',
cv=makeKFold(3, y_train, 1))
cclf.fit(X_train, y_train, sample_weight)
pred = cclf.predict(X_test)
scores.append(f1_score(y_true=y_test, y_pred=pred))
print(scores)
score = np.mean(scores)
print(score)
return {'loss': -score, 'status': STATUS_OK}
def optimize(trials, X, y, max_evals):
space = {
'n_estimators': hp.quniform('n_estimators', 200, 600, 50),
'criterion': hp.choice('criterion', ['gini', 'entropy']),
'max_depth': hp.quniform('max_depth', 1, 7, 1),
'min_samples_split': hp.quniform('min_samples_split', 1, 9, 2),
'min_samples_leaf': hp.quniform('min_samples_leaf', 1, 5, 1),
'bootstrap': True,
'oob_score': True,
'n_jobs': -1
}
s = Score(X, y)
best = fmin(s.get_score,
space,
algo=tpe.suggest,
trials=trials,
max_evals=max_evals
)
best['n_estimators'] = int(best['n_estimators'])
best['max_depth'] = int(best['max_depth'])
best['min_samples_split'] = int(best['min_samples_split'])
best['min_samples_leaf'] = int(best['min_samples_leaf'])
best['criterion'] = ['gini', 'entropy'][best['criterion']]
best['bootstrap'] = True
best['oob_score'] = True
best['n_jobs'] = -1
del s
return best
def out_fold_pred(params, X, y):
# cross validation here
preds = np.zeros((y.shape[0]))
for train_ix, test_ix in makeKFold(5, y, 1):
X_train, y_train = X[train_ix, :], y[train_ix]
X_test = X[test_ix, :]
weight = y_train.shape[0] / (2 * np.bincount(y_train))
sample_weight = np.array([weight[i] for i in y_train])
clf = ExtraTreesClassifier(**params)
cclf = CalibratedClassifierCV(base_estimator=clf,
method='isotonic',
cv=makeKFold(3, y_train, 1))
cclf.fit(X_train, y_train, sample_weight)
pred = cclf.predict_proba(X_test)[:, 1]
preds[test_ix] = pred
return preds
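# Added note: out_fold_pred() produces out-of-fold probabilities for the training
# set (the stacking features saved as outFold_ext.npy below), while get_model()
# refits on the full training data to generate the test-set predictions.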
def get_model(params, X, y):
clf = ExtraTreesClassifier(**params)
cclf = CalibratedClassifierCV(base_estimator=clf,
method='isotonic',
cv=makeKFold(3, y, 1))
weight = y.shape[0] / (2 * np.bincount(y))
sample_weight = np.array([weight[i] for i in y])
cclf.fit(X, y, sample_weight)
return cclf
args = parse_args()
data_dir = '../level3-feature/' + str(args.yix)
X_train = np.load(path.join(data_dir, 'X_train.npy'))
X_test = np.load(path.join(data_dir, 'X_test.npy'))
y_train = np.load(path.join(data_dir, 'y_train.npy'))
print(X_train.shape, X_test.shape, y_train.shape)
X_train_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_train_ext.npy')
X_test_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_test_ext.npy')
print(X_train_ext.shape, X_test_ext.shape)
X_train = np.hstack((X_train, X_train_ext))
X_test = np.hstack((X_test, X_test_ext))
print('Add Extra')
print(X_train.shape, X_test.shape, y_train.shape)
# Now we have X_train, X_test, y_train
trials = Trials()
params = optimize(trials, X_train, y_train, 50)
out_fold = out_fold_pred(params, X_train, y_train)
clf = get_model(params, X_train, y_train)
preds = clf.predict_proba(X_test)[:, 1]
save_dir = '../level3-model-final/' + str(args.yix)
print(save_dir)
if not path.exists(save_dir):
os.makedirs(save_dir)
# save model, parameter, outFold_pred, pred
with open(path.join(save_dir, 'model_ext.pkl'), 'wb') as f_model:
pickle.dump(clf.calibrated_classifiers_, f_model)
with open(path.join(save_dir, 'param_ext.pkl'), 'wb') as f_param:
pickle.dump(params, f_param)
np.save(path.join(save_dir, 'pred_ext.npy'), preds)
np.save(path.join(save_dir, 'outFold_ext.npy'), out_fold)
| mit | 6,005,950,304,981,379,000 | 33.309091 | 76 | 0.594241 | false |
GavinSchalliol/rwadar | pageparser.py | 1 | 1876 | #!/usr/bin/python
from pyquery import PyQuery as pq
from config import i, site, limit, incr
class Listing:
def __init__(self,d):
pass
def title(self,d):
titleObj = [title.text for title in d('title')]
if titleObj:
title = titleObj[0].encode('utf8')
return str(title)
def city(self,d):
cityObj = d('div').filter('.col-sm-4')
cityObj = cityObj.text()
cityObj = cityObj.split()
if cityObj:
city = cityObj[2].encode('utf8')
return str(city)
def plz(self,d):
cityObj = d('div').filter('.col-sm-4')
cityObj = cityObj.text()
cityObj = cityObj.split()
if cityObj:
plz = cityObj[1]
return str(plz)
def rent(self,d):
rentObj = d('h1').filter('.headline-key-facts')
rentObj = rentObj.text()
if rentObj:
rentObj = rentObj.split()
rent = rentObj[3].encode('utf8')
return str(rent)
def size(self,d):
rentObj = d('h1').filter('.headline-key-facts')
rentObj = rentObj.text()
if rentObj:
rentObj = rentObj.split()
size = rentObj[1].encode('utf8')
return str(size)
def categorizer(self,d):
if "Suchkriterien" in d.text():
# This is a Gesuch
return 2
if self.title(d) and self.city(d) and self.plz(d) and self.rent(d) and self.size(d):
return 1
else:
return 0
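# Added note on the return codes above: 2 = a "Gesuch" (wanted ad, detected via the
# "Suchkriterien" section), 1 = a complete offer with title/city/plz/rent/size,
# 0 = an incomplete or unparsable listing.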
def resultsString(d):
listing = Listing(d)
resultsString = listing.title(d) + ";" + listing.city(d) + ";" + listing.plz(d) + ";" + listing.rent(d) + ";" + listing.size(d)
return resultsString
if __name__ == '__main__':
file = open('results.txt', 'a')
while (i < limit):
page = ("http://" + site + "/" + str(i) + ".html")
i = i + 1
d = pq(url=page)
listing = Listing(d)
print page
if listing.categorizer(d) == 1:
file.write(resultsString(d))
| gpl-3.0 | -1,182,340,012,393,348,000 | 27.861538 | 128 | 0.567697 | false |
loongson-community/EFI-MIPS | ToolKit/cmds/python/Lib/pkgutil.py | 13 | 3100 | """Utilities to support packages."""
import os
import sys
def extend_path(path, name):
"""Extend a package's path.
Intended use is to place the following code in a package's __init__.py:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
This will add to the package's __path__ all subdirectories of
directories on sys.path named after the package. This is useful
if one wants to distribute different parts of a single logical
package as multiple directories.
It also looks for *.pkg files beginning where * matches the name
argument. This feature is similar to *.pth files (see site.py),
except that it doesn't special-case lines starting with 'import'.
A *.pkg file is trusted at face value: apart from checking for
duplicates, all entries found in a *.pkg file are added to the
path, regardless of whether they exist on the filesystem. (This
is a feature.)
If the input path is not a list (as is the case for frozen
packages) it is returned unchanged. The input path is not
modified; an extended copy is returned. Items are only appended
to the copy at the end.
It is assumed that sys.path is a sequence. Items of sys.path that
are not (unicode or 8-bit) strings referring to existing
directories are ignored. Unicode items of sys.path that cause
errors when used as filenames may cause this function to raise an
exception (in line with os.path.isdir() behavior).
"""
if not isinstance(path, list):
# This could happen e.g. when this is called from inside a
# frozen package. Return the path unchanged in that case.
return path
pname = os.path.join(*name.split('.')) # Reconstitute as relative path
# Just in case os.extsep != '.'
sname = os.extsep.join(name.split('.'))
sname_pkg = sname + os.extsep + "pkg"
init_py = "__init__" + os.extsep + "py"
path = path[:] # Start with a copy of the existing path
for dir in sys.path:
if not isinstance(dir, basestring) or not os.path.isdir(dir):
continue
subdir = os.path.join(dir, pname)
# XXX This may still add duplicate entries to path on
# case-insensitive filesystems
initfile = os.path.join(subdir, init_py)
if subdir not in path and os.path.isfile(initfile):
path.append(subdir)
# XXX Is this the right thing for subpackages like zope.app?
# It looks for a file named "zope.app.pkg"
pkgfile = os.path.join(dir, sname_pkg)
if os.path.isfile(pkgfile):
try:
f = open(pkgfile)
except IOError, msg:
sys.stderr.write("Can't open %s: %s\n" %
(pkgfile, msg))
else:
for line in f:
line = line.rstrip('\n')
if not line or line.startswith('#'):
continue
path.append(line) # Don't check for existence!
f.close()
return path
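# Illustrative example (added note): a file named "zope.app.pkg" located in a
# directory on sys.path and containing one directory path per line, e.g.
#   /opt/extras/zope_app
# causes that path to be appended to the package's __path__.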
| bsd-3-clause | 5,448,160,484,743,543,000 | 38.74359 | 75 | 0.619677 | false |
ampax/edx-platform-backup | common/lib/xmodule/xmodule/modulestore/mixed.py | 6 | 40248 | """
MixedModuleStore allows for aggregation between multiple modulestores.
In this way, courses can be served up both - say - XMLModuleStore or MongoModuleStore
"""
import logging
from contextlib import contextmanager
import itertools
import functools
from contracts import contract, new_contract
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, AssetKey
from opaque_keys.edx.locator import LibraryLocator
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.assetstore import AssetMetadata
from . import ModuleStoreWriteBase
from . import ModuleStoreEnum
from .exceptions import ItemNotFoundError, DuplicateCourseError
from .draft_and_published import ModuleStoreDraftAndPublished
from .split_migrator import SplitMigrator
new_contract('CourseKey', CourseKey)
new_contract('AssetKey', AssetKey)
new_contract('AssetMetadata', AssetMetadata)
new_contract('LibraryLocator', LibraryLocator)
new_contract('long', long)
log = logging.getLogger(__name__)
def strip_key(func):
"""
A decorator for stripping version and branch information from return values that are, or contain, UsageKeys or
CourseKeys.
Additionally, the decorated function is called with an optional 'field_decorator' parameter that can be used
to strip any location(-containing) fields, which are not directly returned by the function.
The behavior can be controlled by passing 'remove_version' and 'remove_branch' booleans to the decorated
function's kwargs.
"""
@functools.wraps(func)
def inner(*args, **kwargs):
"""
Supported kwargs:
remove_version - If True, calls 'version_agnostic' on all return values, including those in lists and dicts.
remove_branch - If True, calls 'for_branch(None)' on all return values, including those in lists and dicts.
Note: The 'field_decorator' parameter passed to the decorated function is a function that honors the
values of these kwargs.
"""
# remove version and branch, by default
rem_vers = kwargs.pop('remove_version', True)
rem_branch = kwargs.pop('remove_branch', True)
# helper function for stripping individual values
def strip_key_func(val):
"""
Strips the version and branch information according to the settings of rem_vers and rem_branch.
Recursively calls this function if the given value has a 'location' attribute.
"""
retval = val
if rem_vers and hasattr(retval, 'version_agnostic'):
retval = retval.version_agnostic()
if rem_branch and hasattr(retval, 'for_branch'):
retval = retval.for_branch(None)
if hasattr(retval, 'location'):
retval.location = strip_key_func(retval.location)
return retval
# function for stripping both, collection of, and individual, values
def strip_key_collection(field_value):
"""
Calls strip_key_func for each element in the given value.
"""
if rem_vers or rem_branch:
if isinstance(field_value, list):
field_value = [strip_key_func(fv) for fv in field_value]
elif isinstance(field_value, dict):
for key, val in field_value.iteritems():
field_value[key] = strip_key_func(val)
else:
field_value = strip_key_func(field_value)
return field_value
# call the decorated function
retval = func(field_decorator=strip_key_collection, *args, **kwargs)
# strip the return value
return strip_key_collection(retval)
return inner
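# Illustrative usage sketch (added note): a method decorated with @strip_key returns
# version/branch-agnostic keys by default, and callers may opt out, e.g.
#   course = store.get_course(course_key)  # stripped locators
#   course = store.get_course(course_key, remove_version=False, remove_branch=False)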
class MixedModuleStore(ModuleStoreDraftAndPublished, ModuleStoreWriteBase):
"""
ModuleStore knows how to route requests to the right persistence ms
"""
def __init__(
self,
contentstore,
mappings,
stores,
i18n_service=None,
fs_service=None,
user_service=None,
create_modulestore_instance=None,
**kwargs
):
"""
Initialize a MixedModuleStore. Here we look into our passed in kwargs which should be a
collection of other modulestore configuration information
"""
super(MixedModuleStore, self).__init__(contentstore, **kwargs)
if create_modulestore_instance is None:
raise ValueError('MixedModuleStore constructor must be passed a create_modulestore_instance function')
self.modulestores = []
self.mappings = {}
for course_id, store_name in mappings.iteritems():
try:
self.mappings[CourseKey.from_string(course_id)] = store_name
except InvalidKeyError:
try:
self.mappings[SlashSeparatedCourseKey.from_deprecated_string(course_id)] = store_name
except InvalidKeyError:
log.exception("Invalid MixedModuleStore configuration. Unable to parse course_id %r", course_id)
continue
for store_settings in stores:
key = store_settings['NAME']
is_xml = 'XMLModuleStore' in store_settings['ENGINE']
if is_xml:
# restrict xml to only load courses in mapping
store_settings['OPTIONS']['course_ids'] = [
course_key.to_deprecated_string()
for course_key, store_key in self.mappings.iteritems()
if store_key == key
]
store = create_modulestore_instance(
store_settings['ENGINE'],
self.contentstore,
store_settings.get('DOC_STORE_CONFIG', {}),
store_settings.get('OPTIONS', {}),
i18n_service=i18n_service,
fs_service=fs_service,
user_service=user_service,
)
# replace all named pointers to the store into actual pointers
for course_key, store_name in self.mappings.iteritems():
if store_name == key:
self.mappings[course_key] = store
self.modulestores.append(store)
def _clean_locator_for_mapping(self, locator):
"""
In order for mapping to work, the locator must be minimal--no version, no branch--
as we never store one version or one branch in one ms and another in another ms.
:param locator: the CourseKey
"""
if hasattr(locator, 'version_agnostic'):
locator = locator.version_agnostic()
if hasattr(locator, 'branch'):
locator = locator.replace(branch=None)
return locator
def _get_modulestore_for_courselike(self, locator=None):
"""
For a given locator, look in the mapping table and see if it has been pinned
to a particular modulestore
If locator is None, returns the first (ordered) store as the default
"""
if locator is not None:
locator = self._clean_locator_for_mapping(locator)
mapping = self.mappings.get(locator, None)
if mapping is not None:
return mapping
else:
if isinstance(locator, LibraryLocator):
has_locator = lambda store: hasattr(store, 'has_library') and store.has_library(locator)
else:
has_locator = lambda store: store.has_course(locator)
for store in self.modulestores:
if has_locator(store):
self.mappings[locator] = store
return store
# return the default store
return self.default_modulestore
def _get_modulestore_by_type(self, modulestore_type):
"""
This method should only really be used by tests and migration scripts when necessary.
Returns the module store as requested by type. The type can be a value from ModuleStoreEnum.Type.
"""
for store in self.modulestores:
if store.get_modulestore_type() == modulestore_type:
return store
return None
def fill_in_run(self, course_key):
"""
Some course_keys are used without runs. This function calls the corresponding
fill_in_run function on the appropriate modulestore.
"""
store = self._get_modulestore_for_courselike(course_key)
if not hasattr(store, 'fill_in_run'):
return course_key
return store.fill_in_run(course_key)
def has_item(self, usage_key, **kwargs):
"""
Does the course include the xblock referenced by the given usage_key?
"""
store = self._get_modulestore_for_courselike(usage_key.course_key)
return store.has_item(usage_key, **kwargs)
@strip_key
def get_item(self, usage_key, depth=0, **kwargs):
"""
see parent doc
"""
store = self._get_modulestore_for_courselike(usage_key.course_key)
return store.get_item(usage_key, depth, **kwargs)
@strip_key
def get_items(self, course_key, **kwargs):
"""
Returns:
list of XModuleDescriptor instances for the matching items within the course with
the given course_key
NOTE: don't use this to look for courses
as the course_key is required. Use get_courses.
Args:
course_key (CourseKey): the course identifier
kwargs:
settings (dict): fields to look for which have settings scope. Follows same syntax
and rules as kwargs below
content (dict): fields to look for which have content scope. Follows same syntax and
rules as kwargs below.
qualifiers (dict): what to look for within the course.
Common qualifiers are ``category`` or any field name. If the target field is a list,
then it searches for the given value in the list, not for list equivalence.
For substring matching, pass a regex object.
For some modulestores, ``name`` is another commonly provided key (Location based stores)
For some modulestores,
you can search by ``edited_by``, ``edited_on`` providing either a datetime for == (probably
useless) or a function accepting one arg to do inequality
"""
if not isinstance(course_key, CourseKey):
raise Exception("Must pass in a course_key when calling get_items()")
store = self._get_modulestore_for_courselike(course_key)
return store.get_items(course_key, **kwargs)
@strip_key
def get_courses(self, **kwargs):
'''
Returns a list containing the top level XModuleDescriptors of the courses in this modulestore.
'''
courses = {}
for store in self.modulestores:
# filter out ones which were fetched from earlier stores but locations may not be ==
for course in store.get_courses(**kwargs):
course_id = self._clean_locator_for_mapping(course.id)
if course_id not in courses:
# course is indeed unique. save it in result
courses[course_id] = course
return courses.values()
@strip_key
def get_libraries(self, **kwargs):
"""
Returns a list containing the top level XBlock of the libraries (LibraryRoot) in this modulestore.
"""
libraries = {}
for store in self.modulestores:
if not hasattr(store, 'get_libraries'):
continue
# filter out ones which were fetched from earlier stores but locations may not be ==
for library in store.get_libraries(**kwargs):
library_id = self._clean_locator_for_mapping(library.location)
if library_id not in libraries:
# library is indeed unique. save it in result
libraries[library_id] = library
return libraries.values()
def make_course_key(self, org, course, run):
"""
Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
that matches the supplied `org`, `course`, and `run`.
This key may represent a course that doesn't exist in this modulestore.
"""
# If there is a mapping that match this org/course/run, use that
for course_id, store in self.mappings.iteritems():
candidate_key = store.make_course_key(org, course, run)
if candidate_key == course_id:
return candidate_key
# Otherwise, return the key created by the default store
return self.default_modulestore.make_course_key(org, course, run)
@strip_key
def get_course(self, course_key, depth=0, **kwargs):
"""
returns the course module associated with the course_id. If no such course exists,
it returns None
:param course_key: must be a CourseKey
"""
assert isinstance(course_key, CourseKey)
store = self._get_modulestore_for_courselike(course_key)
try:
return store.get_course(course_key, depth=depth, **kwargs)
except ItemNotFoundError:
return None
@strip_key
@contract(library_key='LibraryLocator')
def get_library(self, library_key, depth=0, **kwargs):
"""
returns the library block associated with the given key. If no such library exists,
it returns None
:param library_key: must be a LibraryLocator
"""
try:
store = self._verify_modulestore_support(library_key, 'get_library')
return store.get_library(library_key, depth=depth, **kwargs)
except NotImplementedError:
log.exception("Modulestore configured for %s does not have get_library method", library_key)
return None
except ItemNotFoundError:
return None
@strip_key
def has_course(self, course_id, ignore_case=False, **kwargs):
"""
returns the course_id of the course if it was found, else None
Note: we return the course_id instead of a boolean here since the found course may have
a different id than the given course_id when ignore_case is True.
Args:
* course_id (CourseKey)
* ignore_case (bool): If True, do a case insensitive search. If
False, do a case sensitive search
"""
assert isinstance(course_id, CourseKey)
store = self._get_modulestore_for_courselike(course_id)
return store.has_course(course_id, ignore_case, **kwargs)
def delete_course(self, course_key, user_id):
"""
See xmodule.modulestore.__init__.ModuleStoreWrite.delete_course
"""
assert isinstance(course_key, CourseKey)
store = self._get_modulestore_for_courselike(course_key)
return store.delete_course(course_key, user_id)
@contract(asset_metadata='AssetMetadata', user_id='int|long', import_only=bool)
def save_asset_metadata(self, asset_metadata, user_id, import_only=False):
"""
Saves the asset metadata for a particular course's asset.
Args:
asset_metadata (AssetMetadata): data about the course asset data
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if info save was successful, else False
"""
store = self._get_modulestore_for_courselike(asset_metadata.asset_id.course_key)
return store.save_asset_metadata(asset_metadata, user_id, import_only)
@contract(asset_metadata_list='list(AssetMetadata)', user_id='int|long', import_only=bool)
def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False):
"""
Saves the asset metadata for each asset in a list of asset metadata.
Optimizes the saving of many assets.
Args:
asset_metadata_list (list(AssetMetadata)): list of data about several course assets
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if info save was successful, else False
"""
if len(asset_metadata_list) == 0:
return True
store = self._get_modulestore_for_courselike(asset_metadata_list[0].asset_id.course_key)
return store.save_asset_metadata_list(asset_metadata_list, user_id, import_only)
@strip_key
@contract(asset_key='AssetKey')
def find_asset_metadata(self, asset_key, **kwargs):
"""
Find the metadata for a particular course asset.
Args:
asset_key (AssetKey): locator containing original asset filename
Returns:
asset metadata (AssetMetadata) -or- None if not found
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.find_asset_metadata(asset_key, **kwargs)
@strip_key
@contract(course_key='CourseKey', asset_type='None | basestring', start=int, maxresults=int, sort='tuple|None')
def get_all_asset_metadata(self, course_key, asset_type, start=0, maxresults=-1, sort=None, **kwargs):
"""
Returns a list of static assets for a course.
By default all assets are returned, but start and maxresults can be provided to limit the query.
Args:
course_key (CourseKey): course identifier
asset_type (str): type of asset, such as 'asset', 'video', etc. If None, return assets of all types.
start (int): optional - start at this asset number
maxresults (int): optional - return at most this many, -1 means no limit
sort (array): optional - None means no sort
(sort_by (str), sort_order (str))
sort_by - one of 'uploadDate' or 'displayname'
sort_order - one of 'ascending' or 'descending'
Returns:
List of AssetMetadata objects.
"""
store = self._get_modulestore_for_courselike(course_key)
return store.get_all_asset_metadata(course_key, asset_type, start, maxresults, sort, **kwargs)
@contract(asset_key='AssetKey', user_id='int|long')
def delete_asset_metadata(self, asset_key, user_id):
"""
Deletes a single asset's metadata.
Arguments:
asset_key (AssetKey): locator containing original asset filename
user_id (int|long): user deleting the metadata
Returns:
Number of asset metadata entries deleted (0 or 1)
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.delete_asset_metadata(asset_key, user_id)
@contract(source_course_key='CourseKey', dest_course_key='CourseKey', user_id='int|long')
def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
"""
Copy all the course assets from source_course_key to dest_course_key.
Arguments:
source_course_key (CourseKey): identifier of course to copy from
dest_course_key (CourseKey): identifier of course to copy to
user_id (int|long): user copying the asset metadata
"""
source_store = self._get_modulestore_for_courselike(source_course_key)
dest_store = self._get_modulestore_for_courselike(dest_course_key)
if source_store != dest_store:
with self.bulk_operations(dest_course_key):
# Get all the asset metadata in the source course.
all_assets = source_store.get_all_asset_metadata(source_course_key, 'asset')
# Store it all in the dest course.
for asset in all_assets:
new_asset_key = dest_course_key.make_asset_key('asset', asset.asset_id.path)
copied_asset = AssetMetadata(new_asset_key)
copied_asset.from_storable(asset.to_storable())
dest_store.save_asset_metadata(copied_asset, user_id)
else:
# Courses in the same modulestore can be handled by the modulestore itself.
source_store.copy_all_asset_metadata(source_course_key, dest_course_key, user_id)
@contract(asset_key='AssetKey', attr=str, user_id='int|long')
def set_asset_metadata_attr(self, asset_key, attr, value, user_id):
"""
Add/set the given attr on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr (str): which attribute to set
value: the value to set it to (any type pymongo accepts such as datetime, number, string)
user_id: (int|long): user setting the attribute
Raises:
NotFoundError if no such item exists
AttributeError is attr is one of the build in attrs.
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.set_asset_metadata_attrs(asset_key, {attr: value}, user_id)
@contract(asset_key='AssetKey', attr_dict=dict, user_id='int|long')
def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id):
"""
Add/set the given dict of attrs on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr_dict (dict): attribute/value pairs to set
user_id: (int|long): user setting the attributes
Raises:
NotFoundError if no such item exists
AttributeError if attr is one of the built-in attrs.
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.set_asset_metadata_attrs(asset_key, attr_dict, user_id)
@strip_key
def get_parent_location(self, location, **kwargs):
"""
returns the parent locations for a given location
"""
store = self._get_modulestore_for_courselike(location.course_key)
return store.get_parent_location(location, **kwargs)
def get_block_original_usage(self, usage_key):
"""
If a block was inherited into another structure using copy_from_template,
this will return the original block usage locator from which the
copy was inherited.
"""
try:
store = self._verify_modulestore_support(usage_key.course_key, 'get_block_original_usage')
return store.get_block_original_usage(usage_key)
except NotImplementedError:
return None, None
def get_modulestore_type(self, course_id):
"""
Returns a type which identifies which modulestore is servicing the given course_id.
The return can be one of:
"xml" (for XML based courses),
"mongo" for old-style MongoDB backed courses,
"split" for new-style split MongoDB backed courses.
"""
return self._get_modulestore_for_courselike(course_id).get_modulestore_type()
@strip_key
def get_orphans(self, course_key, **kwargs):
"""
Get all of the xblocks in the given course which have no parents and are not of types which are
usually orphaned. NOTE: may include xblocks which still have references via xblocks which don't
use children to point to their dependents.
"""
store = self._get_modulestore_for_courselike(course_key)
return store.get_orphans(course_key, **kwargs)
def get_errored_courses(self):
"""
Return a dictionary of course_dir -> [(msg, exception_str)], for each
course_dir where course loading failed.
"""
errs = {}
for store in self.modulestores:
errs.update(store.get_errored_courses())
return errs
@strip_key
def create_course(self, org, course, run, user_id, **kwargs):
"""
Creates and returns the course.
Args:
org (str): the organization that owns the course
course (str): the name of the course
run (str): the name of the run
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
Returns: a CourseDescriptor
"""
# first make sure an existing course doesn't already exist in the mapping
course_key = self.make_course_key(org, course, run)
if course_key in self.mappings:
raise DuplicateCourseError(course_key, course_key)
# create the course
store = self._verify_modulestore_support(None, 'create_course')
course = store.create_course(org, course, run, user_id, **kwargs)
# add new course to the mapping
self.mappings[course_key] = store
return course
@strip_key
def create_library(self, org, library, user_id, fields, **kwargs):
"""
Creates and returns a new library.
Args:
org (str): the organization that owns the course
library (str): the code/number/name of the library
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization - e.g. display_name
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
Returns: a LibraryRoot
"""
# first make sure an existing course/lib doesn't already exist in the mapping
lib_key = LibraryLocator(org=org, library=library)
if lib_key in self.mappings:
raise DuplicateCourseError(lib_key, lib_key)
# create the library
store = self._verify_modulestore_support(None, 'create_library')
library = store.create_library(org, library, user_id, fields, **kwargs)
# add new library to the mapping
self.mappings[lib_key] = store
return library
@strip_key
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, **kwargs):
"""
See the superclass for the general documentation.
If cloning w/in a store, delegates to that store's clone_course which, in order to be self-
sufficient, should handle the asset copying (call the same method as this one does)
If cloning between stores,
* copy the assets
* migrate the courseware
"""
source_modulestore = self._get_modulestore_for_courselike(source_course_id)
# for a temporary period of time, we may want to hardcode dest_modulestore as split if there's a split
# to have only course re-runs go to split. This code, however, uses the config'd priority
dest_modulestore = self._get_modulestore_for_courselike(dest_course_id)
if source_modulestore == dest_modulestore:
return source_modulestore.clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
if dest_modulestore.get_modulestore_type() == ModuleStoreEnum.Type.split:
split_migrator = SplitMigrator(dest_modulestore, source_modulestore)
split_migrator.migrate_mongo_course(
source_course_id, user_id, dest_course_id.org, dest_course_id.course, dest_course_id.run, fields, **kwargs
)
# the super handles assets and any other necessities
super(MixedModuleStore, self).clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
else:
raise NotImplementedError("No code for cloning from {} to {}".format(
source_modulestore, dest_modulestore
))
@strip_key
def create_item(self, user_id, course_key, block_type, block_id=None, fields=None, **kwargs):
"""
Creates and saves a new item in a course.
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
course_key: A :class:`~opaque_keys.edx.CourseKey` identifying which course to create
this item in
block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
modulestore = self._verify_modulestore_support(course_key, 'create_item')
return modulestore.create_item(user_id, course_key, block_type, block_id=block_id, fields=fields, **kwargs)
@strip_key
def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, **kwargs):
"""
Creates and saves a new xblock that is a child of the specified block
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
parent_usage_key: a :class:`~opaque_key.edx.UsageKey` identifying the
block that this item should be parented under
block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
modulestore = self._verify_modulestore_support(parent_usage_key.course_key, 'create_child')
return modulestore.create_child(user_id, parent_usage_key, block_type, block_id=block_id, fields=fields, **kwargs)
@strip_key
def import_xblock(self, user_id, course_key, block_type, block_id, fields=None, runtime=None, **kwargs):
"""
See :py:meth `ModuleStoreDraftAndPublished.import_xblock`
Defer to the course's modulestore if it supports this method
"""
store = self._verify_modulestore_support(course_key, 'import_xblock')
return store.import_xblock(user_id, course_key, block_type, block_id, fields, runtime)
@strip_key
def copy_from_template(self, source_keys, dest_key, user_id, **kwargs):
"""
See :py:meth `SplitMongoModuleStore.copy_from_template`
"""
store = self._verify_modulestore_support(dest_key.course_key, 'copy_from_template')
return store.copy_from_template(source_keys, dest_key, user_id)
@strip_key
def update_item(self, xblock, user_id, allow_not_found=False, **kwargs):
"""
Update the xblock persisted to be the same as the given for all types of fields
(content, children, and metadata) attribute the change to the given user.
"""
store = self._verify_modulestore_support(xblock.location.course_key, 'update_item')
return store.update_item(xblock, user_id, allow_not_found, **kwargs)
@strip_key
def delete_item(self, location, user_id, **kwargs):
"""
Delete the given item from persistence. kwargs allow modulestore specific parameters.
"""
store = self._verify_modulestore_support(location.course_key, 'delete_item')
return store.delete_item(location, user_id=user_id, **kwargs)
def revert_to_published(self, location, user_id):
"""
Reverts an item to its last published version (recursively traversing all of its descendants).
If no published version exists, an InvalidVersionError is thrown.
If a published version exists but there is no draft version of this item or any of its descendants, this
method is a no-op.
:raises InvalidVersionError: if no published version exists for the location specified
"""
store = self._verify_modulestore_support(location.course_key, 'revert_to_published')
return store.revert_to_published(location, user_id)
def close_all_connections(self):
"""
Close all db connections
"""
for modulestore in self.modulestores:
modulestore.close_connections()
def _drop_database(self):
"""
A destructive operation to drop all databases and close all db connections.
Intended to be used by test code for cleanup.
"""
for modulestore in self.modulestores:
# drop database if the store supports it (read-only stores do not)
if hasattr(modulestore, '_drop_database'):
modulestore._drop_database() # pylint: disable=protected-access
@strip_key
def create_xblock(self, runtime, course_key, block_type, block_id=None, fields=None, **kwargs):
"""
Create the new xmodule but don't save it. Returns the new module.
Args:
runtime: :py:class `xblock.runtime` from another xblock in the same course. Providing this
significantly speeds up processing (inheritance and subsequent persistence)
course_key: :py:class `opaque_keys.CourseKey`
block_type: :py:class `string`: the string identifying the xblock type
block_id: the string uniquely identifying the block within the given course
fields: :py:class `dict` field_name, value pairs for initializing the xblock fields. Values
should be the pythonic types not the json serialized ones.
"""
store = self._verify_modulestore_support(course_key, 'create_xblock')
return store.create_xblock(runtime, course_key, block_type, block_id, fields or {}, **kwargs)
@strip_key
def get_courses_for_wiki(self, wiki_slug, **kwargs):
"""
Return the list of courses which use this wiki_slug
:param wiki_slug: the course wiki root slug
:return: list of course keys
"""
courses = []
for modulestore in self.modulestores:
courses.extend(modulestore.get_courses_for_wiki(wiki_slug, **kwargs))
return courses
def heartbeat(self):
"""
Delegate to each modulestore and package the results for the caller.
"""
# could be done in parallel threads if needed
return dict(
itertools.chain.from_iterable(
store.heartbeat().iteritems()
for store in self.modulestores
)
)
def has_published_version(self, xblock):
"""
Returns whether this xblock is draft, public, or private.
Returns:
PublishState.draft - content is in the process of being edited, but still has a previous
version deployed to LMS
PublishState.public - content is locked and deployed to LMS
PublishState.private - content is editable and not deployed to LMS
"""
course_id = xblock.scope_ids.usage_id.course_key
store = self._get_modulestore_for_courselike(course_id)
return store.has_published_version(xblock)
@strip_key
def publish(self, location, user_id, **kwargs):
"""
Save a current draft to the underlying modulestore
Returns the newly published item.
"""
store = self._verify_modulestore_support(location.course_key, 'publish')
return store.publish(location, user_id, **kwargs)
@strip_key
def unpublish(self, location, user_id, **kwargs):
"""
Save a current draft to the underlying modulestore
Returns the newly unpublished item.
"""
store = self._verify_modulestore_support(location.course_key, 'unpublish')
return store.unpublish(location, user_id, **kwargs)
def convert_to_draft(self, location, user_id):
"""
Create a copy of the source and mark its revision as draft.
Note: This method is to support the Mongo Modulestore and may be deprecated.
:param location: the location of the source (its revision must be None)
"""
store = self._verify_modulestore_support(location.course_key, 'convert_to_draft')
return store.convert_to_draft(location, user_id)
def has_changes(self, xblock):
"""
Checks if the given block has unpublished changes
:param xblock: the block to check
:return: True if the draft and published versions differ
"""
store = self._verify_modulestore_support(xblock.location.course_key, 'has_changes')
return store.has_changes(xblock)
def check_supports(self, course_key, method):
"""
Verifies that the modulestore for a particular course supports a feature.
Returns True/false based on this.
"""
try:
self._verify_modulestore_support(course_key, method)
return True
except NotImplementedError:
return False
def _verify_modulestore_support(self, course_key, method):
"""
Finds and returns the store that contains the course for the given location, and verifies
that the store supports the given method.
Raises NotImplementedError if the found store does not support the given method.
"""
store = self._get_modulestore_for_courselike(course_key)
if hasattr(store, method):
return store
else:
raise NotImplementedError(u"Cannot call {} on store {}".format(method, store))
@property
def default_modulestore(self):
"""
Return the default modulestore
"""
thread_local_default_store = getattr(self.thread_cache, 'default_store', None)
if thread_local_default_store:
# return the thread-local cache, if found
return thread_local_default_store
else:
# else return the default store
return self.modulestores[0]
@contextmanager
def default_store(self, store_type):
"""
A context manager for temporarily changing the default store in the Mixed modulestore to the given store type
"""
# find the store corresponding to the given type
store = next((store for store in self.modulestores if store.get_modulestore_type() == store_type), None)
if not store:
raise Exception(u"Cannot find store of type {}".format(store_type))
prev_thread_local_store = getattr(self.thread_cache, 'default_store', None)
try:
self.thread_cache.default_store = store
yield
finally:
self.thread_cache.default_store = prev_thread_local_store
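    # Usage sketch: store_type must match one of the configured stores'
    # get_modulestore_type() values (the concrete type constants live elsewhere
    # in edx-platform, so the name below is illustrative):
    #   with self.default_store(mongo_store_type):
    #       ...  # operations in this block resolve to that store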
@contextmanager
def branch_setting(self, branch_setting, course_id=None):
"""
        A context manager for temporarily setting the branch value for the given course's store
to the given branch_setting. If course_id is None, the default store is used.
"""
store = self._verify_modulestore_support(course_id, 'branch_setting')
with store.branch_setting(branch_setting, course_id):
yield
@contextmanager
def bulk_operations(self, course_id):
"""
A context manager for notifying the store of bulk operations.
If course_id is None, the default store is used.
"""
store = self._get_modulestore_for_courselike(course_id)
with store.bulk_operations(course_id):
yield
def ensure_indexes(self):
"""
        Ensure that all indexes needed by this modulestore are created, or raise
        an exception if unable to.
This method is intended for use by tests and administrative commands, and not
to be run during server startup.
"""
for store in self.modulestores:
store.ensure_indexes()
| agpl-3.0 | 7,524,367,871,797,235,000 | 42 | 122 | 0.628727 | false |
tectronics/admiral-jiscmrd | test/RDFDatabank/rdfextras/tools/QueryRunner.py | 2 | 8455 | # -*- coding: utf-8 -*-
from __future__ import generators
import rdflib
from rdflib.Graph import ConjunctiveGraph
from rdflib import plugin
from rdflib.store import Store, NO_STORE, VALID_STORE
from rdflib import Namespace
from rdflib import Literal
from rdflib import URIRef
import time
import os
from os.path import dirname
from Ft.Xml import Parse
from datetime import datetime
from CSVWriter import CSVWriter, WriteAllResults
from rdflib.sparql.sql.DatabaseStats import *
ResultsColumns = ['time','comment','dsName', 'dsSize', 'query', 'sparql', 'rc', 'totalQueryTime','mainQueryCount','mainQueryTime','rowPrepQueryCount','rowPrepQueryTime','sqlQueries', 'err'] #list[0].keys()
def Query(graph, queryName, sparql, dsProps={}, initNamespaces={}, clearCache=False):
graph.store.resetPerfLog(clearCache) # clear cache?
print " Query '%s': " % (queryName),
#print sparql
startTime = time.time()
try:
results = graph.query(sparql, initNs=initNamespaces, DEBUG=False)
rc = len(results)
err = ""
except Exception,ex:
results = []
rc = "Error"
err = repr(ex)
elapsed = time.time()-startTime
perfLog = graph.store.getPerfLog()
print rc, "in", elapsed, "ms total"
#print " PerfLog:", perfLog
# if resultPattern != "" and diplayResults:
# for row in results:
# print resultPattern % row
# print "-------------"
# add more info to query results
perfLog['time'] = "'" + str(datetime.now()) + "'" # make date field text in Excel
perfLog['query'] = queryName
perfLog['sparql'] = sparql.strip()
perfLog['rc'] = rc
perfLog['err'] = err
perfLog['totalQueryTime'] = elapsed
for k in dsProps.keys(): # add data set properties
perfLog[k] = dsProps[k]
return perfLog
def OpenGraph(storeType, configStr, graphUri, storeName='rdfstore'):
# Get the mysql plugin. You may have to install the python mysql libraries
store = plugin.get(storeType, Store)(storeName,debug=False,perfLog=True)
# Open previously created store, or create it if it doesn't exist yet
rt = store.open(configStr,create=False)
    assert rt != NO_STORE,"'%s' store '%s' not found using config string '%s'!" % (storeType, storeName, configStr)
    assert rt == VALID_STORE or rt is not None,"The underlying store is corrupted"
#There is a store, use it; use ConjunctiveGraph to see everything!
graph = ConjunctiveGraph(store, identifier = URIRef(graphUri))
return graph
def SetupGraph(storeType, configStr, graphUri, storeName, commonNS, owlFile=None):
graph = OpenGraph(storeType, configStr, graphUri, storeName=storeName)
dsProps = {'dsName':storeName, 'dsSize':len(graph.store)}
# search for schema in a specified owl file
if False:#owlFile != None:
ontGraph=Graph().parse(owlFile)
for litProp,resProp in ontGraph.query(
"""
SELECT ?literalProperty ?resourceProperty
WHERE {
{ ?literalProperty a owl:DatatypeProperty }
UNION
{ ?resourceProperty a ?propType
FILTER(
?propType = owl:ObjectProperty ||
?propType = owl:TransitiveProperty ||
?propType = owl:SymmetricProperty ||
?propType = owl:InverseFunctionalProperty ) }
}"""
,
initNs={u'owl':OWL_NS}):
if litProp:
graph.store.literal_properties.add(litProp)
if resProp:
#Need to account for OWL Full, where datatype properties
#can be IFPs
if (resProp,
RDF.type,
OWL_NS.DatatypeProperty) not in ontGraph:
graph.store.resource_properties.add(resProp)
return graph, dsProps #, commonNS
def RunRepository(w, fileName, repos, store, repeatTimes, clearCache, storeType, configString, commonNS, comment):
# get repository name
storeName = repos.attributes[(None,'name')].value
print " ---------------------------"
print " Repository: ", storeName
graph,dsProps = LoadGraph(fileName, repos, storeType, configString, commonNS)
dsProps['comment'] = comment
queries = store.xpath('./Queries/Query')
# repeat n times
for k in range(repeatTimes):
print " Repeat %s/%s..." %(k+1,repeatTimes)
# for each query
for query in queries:
queryName = query.attributes[(None,'name')].value
sparqlQuery = query.childNodes[0].nodeValue # text node
w.WriteEntry(Query(graph, queryName, sparqlQuery, dsProps, commonNS, clearCache))
def LoadGraph(fileName, repos, storeType, configString, commonNS):
storeName = repos.attributes[(None,'name')].value
graphUri = repos.attributes[(None, 'graphUri')].value
owlFile = None
if repos.attributes.has_key((None, 'owlFile')):
owlFile = repos.attributes[(None, 'owlFile')].value
if len(owlFile.strip()) > 0:
owlFile = os.path.join(dirname(fileName), owlFile)
else:
owlFile = None
# load graph
graph, dsProps = SetupGraph(storeType, configString, graphUri, storeName, commonNS, owlFile)
return graph, dsProps
def RunStore(w, fileName, store, repeatTimes, clearCache, comment):
# get store config
storeType = store.attributes[(None,'type')].value
configString = store.attributes[(None,'configString')] .value
commonNS = GetCommonNS(store)
print "================================================="
print "Processing %s Store '%s'" % (storeType, configString)
# for each repository
repositories = store.xpath('./Repositories/Repository')
for repos in repositories:
RunRepository(w, fileName, repos, store, repeatTimes, clearCache, storeType, configString, commonNS, comment)
def GetCommonNS(store):
# get common name space prefixes
commonNS = dict()
prefixes = store.xpath('./CommonPrefixes/Prefix')
for prefix in prefixes:
commonNS[prefix.attributes[(None, 'name')].value] = Namespace(prefix.attributes[(None, 'value')].value)
return commonNS
def GetQueryEntries(root):
queryEntries = []
stores = root.xpath('./Store')
for store in stores:
GetStoreQueryEntries(queryEntries, store)
return queryEntries
def GetStoreQueryEntries(queryEntries, store):
commonNS = GetCommonNS(store)
queries = store.xpath('./Queries/Query')
for query in queries:
queryName = query.attributes[(None, 'name')].value
sparqlQuery = query.childNodes[0].nodeValue # text node
queryEntries.append(dict(queryName=queryName,
queryString=sparqlQuery,
lineNum=queryName))
def RunQueryFile(fileName, comment, options):
doc = Parse(fileName)
#doc.xpath('string(ham//em[1])')
# read overall benchmark settings
root = doc.rootNode.childNodes[0]
repeatTimes = int(root.attributes[(None,'repeatTimes')].value)
clearCache = root.attributes[(None,'clearCache')].value == "True"
if options.runQueries:
# setup output
w = CSVWriter()
w.Open(ResultsColumns, fileName + '.out.txt')
# for each store
stores = root.xpath('./Store')
for store in stores:
RunStore(w, fileName, store, repeatTimes, clearCache, comment)
w.Close()
print "Done."
if __name__ == '__main__':
from optparse import OptionParser
usage = '''usage: %prog [options] queryFile [comment]'''
op = OptionParser(usage=usage)
#op.add_option('--buildOWL',action="store_true",default=True,
# help = 'Build OWL from components')
op.add_option('--noExecution',action='store_false',default=True, dest='runQueries', help='Execute the SPARQL queries in the XML file.')
op.add_option('--sql',action='store_true',default=False, dest='sql', help='Generate SQL from SPARQL.')
(options, args) = op.parse_args()
    RunQueryFile(args[0], args[1] if len(args) > 1 else '', options)
| mit | 7,355,744,225,455,303,000 | 35.246696 | 205 | 0.610171 | false |
dgjustice/ansible | lib/ansible/modules/notification/flowdock.py | 25 | 6132 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Matt Coddington <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: flowdock
version_added: "1.2"
author: "Matt Coddington (@mcodd)"
short_description: Send a message to a flowdock
description:
- Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat)
options:
token:
description:
- API token.
required: true
type:
description:
- Whether to post to 'inbox' or 'chat'
required: true
choices: [ "inbox", "chat" ]
msg:
description:
- Content of the message
required: true
tags:
description:
- tags of the message, separated by commas
required: false
external_user_name:
description:
- (chat only - required) Name of the "user" sending the message
required: false
from_address:
description:
- (inbox only - required) Email address of the message sender
required: false
source:
description:
- (inbox only - required) Human readable identifier of the application that uses the Flowdock API
required: false
subject:
description:
- (inbox only - required) Subject line of the message
required: false
from_name:
description:
- (inbox only) Name of the message sender
required: false
reply_to:
description:
- (inbox only) Email address for replies
required: false
project:
description:
- (inbox only) Human readable identifier for more detailed message categorization
required: false
link:
description:
- (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
requirements: [ ]
'''
EXAMPLES = '''
- flowdock:
type: inbox
token: AAAAAA
from_address: [email protected]
source: my cool app
msg: test from ansible
subject: test subject
- flowdock:
type: chat
token: AAAAAA
external_user_name: testuser
msg: test from ansible
tags: tag1,tag2,tag3
'''
import urllib
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True, no_log=True),
msg=dict(required=True),
type=dict(required=True, choices=["inbox","chat"]),
external_user_name=dict(required=False),
from_address=dict(required=False),
source=dict(required=False),
subject=dict(required=False),
from_name=dict(required=False),
reply_to=dict(required=False),
project=dict(required=False),
tags=dict(required=False),
link=dict(required=False),
validate_certs = dict(default='yes', type='bool'),
),
supports_check_mode=True
)
type = module.params["type"]
token = module.params["token"]
if type == 'inbox':
url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
else:
url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
params = {}
# required params
params['content'] = module.params["msg"]
# required params for the 'chat' type
if module.params['external_user_name']:
if type == 'inbox':
module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
else:
params['external_user_name'] = module.params["external_user_name"]
elif type == 'chat':
module.fail_json(msg="%s is required for the 'inbox' type" % item)
# required params for the 'inbox' type
for item in [ 'from_address', 'source', 'subject' ]:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
else:
params[item] = module.params[item]
elif type == 'inbox':
module.fail_json(msg="%s is required for the 'inbox' type" % item)
# optional params
if module.params["tags"]:
params['tags'] = module.params["tags"]
# optional params for the 'inbox' type
for item in [ 'from_name', 'reply_to', 'project', 'link' ]:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
else:
params[item] = module.params[item]
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=False)
# Send the data to Flowdock
data = urllib.urlencode(params)
response, info = fetch_url(module, url, data=data)
if info['status'] != 200:
module.fail_json(msg="unable to send msg: %s" % info['msg'])
module.exit_json(changed=True, msg=module.params["msg"])
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 | 6,330,704,003,700,405,000 | 29.81407 | 155 | 0.626549 | false |
jaredjennings/snowy | wsgi/snowy/snowy/notes/templates.py | 8 | 1171 | #
# Copyright (c) 2009 Brad Taylor <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# libxml2 doesn't munge encodings, so forcibly encode items to UTF-8
# http://mail.gnome.org/archives/xml/2004-February/msg00363.html
CONTENT_TEMPLATES = {
'0.1': """
<note-content version="0.1"
xmlns:link="http://beatniksoftware.com/tomboy/link"
xmlns:size="http://beatniksoftware.com/tomboy/size"
xmlns="http://beatniksoftware.com/tomboy">
%%%CONTENT%%%
</note-content>""".encode('UTF-8'),
}
DEFAULT_CONTENT_TEMPLATE = CONTENT_TEMPLATES['0.1']
| agpl-3.0 | 8,860,821,987,692,872,000 | 38.033333 | 79 | 0.742101 | false |
adammaikai/OmicsPipe2.0 | build/lib.linux-x86_64-2.7/omics_pipe/Variant_annotation.py | 2 | 2526 | #!/usr/bin/env python
#from sumatra.projects import load_project
#from sumatra.parameters import build_parameters
#from sumatra.decorators import capture
from ruffus import *
import sys
import os
import time
import datetime
import drmaa
from omics_pipe.utils import *
from omics_pipe.parameters.default_parameters import default_parameters
from omics_pipe.modules.intogen import intogen
from omics_pipe.modules.annovar import annovar
from omics_pipe.modules.filter_variants_DNA import filter_variants_DNA
from omics_pipe.modules.DNAseq_variant_report import DNAseq_variant_report
p = Bunch(default_parameters)
os.chdir(p.WORKING_DIR)
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d %H:%M")
print p
for step in p.STEPS:
vars()['inputList_' + step] = []
for sample in p.SAMPLE_LIST:
vars()['inputList_' + step].append([sample, "%s/%s_%s_completed.flag" % (p.FLAG_PATH, step, sample)])
print vars()['inputList_' + step]
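# Each inputList_<step> built above is a list of [sample, flag_path] pairs, e.g.
# (hypothetical values): [['sampleA', '/flags/annovar_sampleA_completed.flag'], ...]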
#Filter Variants
@parallel(inputList_filter_variants_DNA)
@check_if_uptodate(check_file_exists)
def run_filter_variants_DNA(sample, filter_variants_DNA_flag):
filter_variants_DNA(sample, filter_variants_DNA_flag)
return
#Annovar
@parallel(inputList_annovar)
@check_if_uptodate(check_file_exists)
def run_annovar(sample, annovar_flag):
annovar(sample, annovar_flag)
return
#Intogen
@parallel(inputList_intogen)
@check_if_uptodate(check_file_exists)
@follows(run_filter_variants_DNA)
def run_intogen(sample, intogen_flag):
intogen(sample, intogen_flag)
return
#DNAseq Report
@parallel(inputList_DNAseq_variant_report)
@check_if_uptodate(check_file_exists)
@follows(run_intogen, run_annovar)
def run_DNAseq_variant_report(sample, DNAseq_variant_report_flag):
DNAseq_variant_report(sample,DNAseq_variant_report_flag)
return
@parallel(inputList_last_function)
@check_if_uptodate(check_file_exists)
@follows(run_DNAseq_variant_report)
def last_function(sample, last_function_flag):
print "PIPELINE HAS FINISHED SUCCESSFULLY!!! YAY!"
pipeline_graph_output = p.FLAG_PATH + "/pipeline_" + sample + "_" + str(date) + ".pdf"
#pipeline_printout_graph (pipeline_graph_output,'pdf', step, no_key_legend=False)
stage = "last_function"
flag_file = "%s/%s_%s_completed.flag" % (p.FLAG_PATH, stage, sample)
open(flag_file, 'w').close()
return
if __name__ == '__main__':
pipeline_run(p.STEP, multiprocess = p.PIPE_MULTIPROCESS, verbose = p.PIPE_VERBOSE, gnu_make_maximal_rebuild_mode = p.PIPE_REBUILD)
| mit | -1,511,548,284,810,683,100 | 30.185185 | 134 | 0.732383 | false |
andresailer/DIRAC | Core/DISET/private/Transports/SSL/SocketInfo.py | 1 | 13505 | # $HeadURL$
__RCSID__ = "$Id$"
import time
import copy
import os.path
import GSI
from DIRAC.Core.Utilities.ReturnValues import S_ERROR, S_OK
from DIRAC.Core.Utilities.Network import checkHostsMatch
from DIRAC.Core.Utilities.LockRing import LockRing
from DIRAC.Core.Security import Locations
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.FrameworkSystem.Client.Logger import gLogger
DEFAULT_SSL_CIPHERS = "AES256-GCM-SHA384:AES256-SHA256:AES256-SHA:CAMELLIA256-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:HIGH:MEDIUM:RSA:!3DES:!RC4:!aNULL:!eNULL:!MD5:!SEED:!IDEA"
class SocketInfo:
__cachedCAsCRLs = False
__cachedCAsCRLsLastLoaded = 0
__cachedCAsCRLsLoadLock = LockRing().getLock()
def __init__( self, infoDict, sslContext = None ):
self.__retry = 0
self.infoDict = infoDict
if sslContext:
self.sslContext = sslContext
else:
if self.infoDict[ 'clientMode' ]:
if 'useCertificates' in self.infoDict and self.infoDict[ 'useCertificates' ]:
retVal = self.__generateContextWithCerts()
elif 'proxyString' in self.infoDict:
retVal = self.__generateContextWithProxyString()
else:
retVal = self.__generateContextWithProxy()
else:
retVal = self.__generateServerContext()
if not retVal[ 'OK' ]:
raise Exception( retVal[ 'Message' ] )
def __getValue( self, optName, default ):
if optName not in self.infoDict:
return default
return self.infoDict[ optName ]
def setLocalCredentialsLocation( self, credTuple ):
self.infoDict[ 'localCredentialsLocation' ] = credTuple
def getLocalCredentialsLocation( self ):
return self.infoDict[ 'localCredentialsLocation' ]
def gatherPeerCredentials( self ):
certList = self.sslSocket.get_peer_certificate_chain()
#Servers don't receive the whole chain, the last cert comes alone
if not self.infoDict[ 'clientMode' ]:
certList.insert( 0, self.sslSocket.get_peer_certificate() )
peerChain = X509Chain( certList = certList )
isProxyChain = peerChain.isProxy()['Value']
isLimitedProxyChain = peerChain.isLimitedProxy()['Value']
if isProxyChain:
if peerChain.isPUSP()['Value']:
identitySubject = peerChain.getCertInChain( -2 )['Value'].getSubjectNameObject()[ 'Value' ]
else:
identitySubject = peerChain.getIssuerCert()['Value'].getSubjectNameObject()[ 'Value' ]
else:
identitySubject = peerChain.getCertInChain( 0 )['Value'].getSubjectNameObject()[ 'Value' ]
credDict = { 'DN' : identitySubject.one_line(),
'CN' : identitySubject.commonName,
'x509Chain' : peerChain,
'isProxy' : isProxyChain,
'isLimitedProxy' : isLimitedProxyChain }
diracGroup = peerChain.getDIRACGroup()
if diracGroup[ 'OK' ] and diracGroup[ 'Value' ]:
credDict[ 'group' ] = diracGroup[ 'Value' ]
self.infoDict[ 'peerCredentials' ] = credDict
return credDict
def setSSLSocket( self, sslSocket ):
self.sslSocket = sslSocket
def getSSLSocket( self ):
return self.sslSocket
def getSSLContext( self ):
return self.sslContext
def clone( self ):
try:
return S_OK( SocketInfo( dict( self.infoDict ), self.sslContext ) )
except Exception as e:
return S_ERROR( str( e ) )
def verifyCallback( self, *args, **kwargs ):
#gLogger.debug( "verify Callback %s" % str( args ) )
if self.infoDict[ 'clientMode' ]:
return self._clientCallback( *args, **kwargs )
else:
return self._serverCallback( *args, **kwargs )
def __isSameHost( self, hostCN, hostConn ):
""" Guess if it is the same host or not
"""
hostCN_m = hostCN
if '/' in hostCN:
hostCN_m = hostCN.split( '/' )[1]
if hostCN_m == hostConn:
return True
result = checkHostsMatch( hostCN_m, hostConn )
if not result[ 'OK' ]:
return False
return result[ 'Value' ]
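  # e.g. a certificate CN of "host/somenode.example.org" (hypothetical name) is
  # treated as the same host as a connection made to "somenode.example.org"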
def _clientCallback( self, conn, cert, errnum, depth, ok ):
# This obviously has to be updated
if depth == 0 and ok == 1:
hostnameCN = cert.get_subject().commonName
#if hostnameCN in ( self.infoDict[ 'hostname' ], "host/%s" % self.infoDict[ 'hostname' ] ):
if self.__isSameHost( hostnameCN, self.infoDict['hostname'] ):
return 1
else:
gLogger.warn( "Server is not who it's supposed to be",
"Connecting to %s and it's %s" % ( self.infoDict[ 'hostname' ], hostnameCN ) )
return ok
return ok
def _serverCallback( self, conn, cert, errnum, depth, ok ):
return ok
def __getCAStore( self ):
SocketInfo.__cachedCAsCRLsLoadLock.acquire()
try:
if not SocketInfo.__cachedCAsCRLs or time.time() - SocketInfo.__cachedCAsCRLsLastLoaded > 900:
#Need to generate the CA Store
casDict = {}
crlsDict = {}
casPath = Locations.getCAsLocation()
if not casPath:
return S_ERROR( "No valid CAs location found" )
gLogger.debug( "CAs location is %s" % casPath )
casFound = 0
crlsFound = 0
SocketInfo.__caStore = GSI.crypto.X509Store()
for fileName in os.listdir( casPath ):
filePath = os.path.join( casPath, fileName )
if not os.path.isfile( filePath ):
continue
fObj = file( filePath, "rb" )
pemData = fObj.read()
fObj.close()
#Try to load CA Cert
try:
caCert = GSI.crypto.load_certificate( GSI.crypto.FILETYPE_PEM, pemData )
if caCert.has_expired():
continue
caID = ( caCert.get_subject().one_line(), caCert.get_issuer().one_line() )
caNotAfter = caCert.get_not_after()
if caID not in casDict:
casDict[ caID ] = ( caNotAfter, caCert )
casFound += 1
else:
if casDict[ caID ][0] < caNotAfter:
casDict[ caID ] = ( caNotAfter, caCert )
continue
except:
if fileName.find( ".0" ) == len( fileName ) - 2:
gLogger.exception( "LOADING %s" % filePath )
if 'IgnoreCRLs' not in self.infoDict or not self.infoDict[ 'IgnoreCRLs' ]:
#Try to load CRL
try:
crl = GSI.crypto.load_crl( GSI.crypto.FILETYPE_PEM, pemData )
if crl.has_expired():
continue
crlID = crl.get_issuer().one_line()
crlsDict[ crlID ] = crl
crlsFound += 1
continue
except Exception as e:
if fileName.find( ".r0" ) == len( fileName ) - 2:
gLogger.exception( "LOADING %s ,Exception: %s" % ( filePath , str(e) ) )
gLogger.debug( "Loaded %s CAs [%s CRLs]" % ( casFound, crlsFound ) )
SocketInfo.__cachedCAsCRLs = ( [ casDict[k][1] for k in casDict ],
[ crlsDict[k] for k in crlsDict ] )
SocketInfo.__cachedCAsCRLsLastLoaded = time.time()
except:
gLogger.exception( "Failed to init CA store" )
finally:
SocketInfo.__cachedCAsCRLsLoadLock.release()
#Generate CA Store
caStore = GSI.crypto.X509Store()
caList = SocketInfo.__cachedCAsCRLs[0]
for caCert in caList:
caStore.add_cert( caCert )
crlList = SocketInfo.__cachedCAsCRLs[1]
for crl in crlList:
caStore.add_crl( crl )
return S_OK( caStore )
def __createContext( self ):
clientContext = self.__getValue( 'clientMode', False )
# Initialize context
contextOptions = GSI.SSL.OP_ALL
if clientContext:
methodSuffix = "CLIENT_METHOD"
else:
methodSuffix = "SERVER_METHOD"
contextOptions |= GSI.SSL.OP_NO_SSLv2 | GSI.SSL.OP_NO_SSLv3
if 'sslMethod' in self.infoDict:
methodName = "%s_%s" % ( self.infoDict[ 'sslMethod' ], methodSuffix )
else:
methodName = "TLSv1_%s" % ( methodSuffix )
try:
method = getattr( GSI.SSL, methodName )
except:
return S_ERROR( "SSL method %s is not valid" % self.infoDict[ 'sslMethod' ] )
self.sslContext = GSI.SSL.Context( method )
self.sslContext.set_cipher_list( self.infoDict.get( 'sslCiphers', DEFAULT_SSL_CIPHERS ) )
if contextOptions:
self.sslContext.set_options( contextOptions )
#self.sslContext.set_read_ahead( 1 )
#Enable GSI?
gsiEnable = False
if not clientContext or self.__getValue( 'gsiEnable', False ):
gsiEnable = True
#DO CA Checks?
if not self.__getValue( 'skipCACheck', False ):
#self.sslContext.set_verify( SSL.VERIFY_PEER|SSL.VERIFY_FAIL_IF_NO_PEER_CERT, self.verifyCallback ) # Demand a certificate
self.sslContext.set_verify( GSI.SSL.VERIFY_PEER | GSI.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, None, gsiEnable ) # Demand a certificate
result = self.__getCAStore()
if not result[ 'OK' ]:
return result
caStore = result[ 'Value' ]
self.sslContext.set_cert_store( caStore )
else:
      self.sslContext.set_verify( GSI.SSL.VERIFY_NONE, None, gsiEnable ) # Do not demand a certificate
return S_OK()
def __generateContextWithCerts( self ):
certKeyTuple = Locations.getHostCertificateAndKeyLocation()
if not certKeyTuple:
return S_ERROR( "No valid certificate or key found" )
self.setLocalCredentialsLocation( certKeyTuple )
gLogger.debug( "Using certificate %s\nUsing key %s" % certKeyTuple )
retVal = self.__createContext()
if not retVal[ 'OK' ]:
return retVal
#Verify depth to 20 to ensure accepting proxies of proxies of proxies....
self.sslContext.set_verify_depth( 50 )
self.sslContext.use_certificate_chain_file( certKeyTuple[0] )
self.sslContext.use_privatekey_file( certKeyTuple[1] )
return S_OK()
def __generateContextWithProxy( self ):
if 'proxyLocation' in self.infoDict:
proxyPath = self.infoDict[ 'proxyLocation' ]
if not os.path.isfile( proxyPath ):
return S_ERROR( "Defined proxy is not a file" )
else:
proxyPath = Locations.getProxyLocation()
if not proxyPath:
return S_ERROR( "No valid proxy found" )
self.setLocalCredentialsLocation( ( proxyPath, proxyPath ) )
gLogger.debug( "Using proxy %s" % proxyPath )
retVal = self.__createContext()
if not retVal[ 'OK' ]:
return retVal
self.sslContext.use_certificate_chain_file( proxyPath )
self.sslContext.use_privatekey_file( proxyPath )
return S_OK()
def __generateContextWithProxyString( self ):
proxyString = self.infoDict[ 'proxyString' ]
self.setLocalCredentialsLocation( ( proxyString, proxyString ) )
gLogger.debug( "Using string proxy" )
retVal = self.__createContext()
if not retVal[ 'OK' ]:
return retVal
self.sslContext.use_certificate_chain_string( proxyString )
self.sslContext.use_privatekey_string( proxyString )
return S_OK()
def __generateServerContext( self ):
retVal = self.__generateContextWithCerts()
if not retVal[ 'OK' ]:
return retVal
self.sslContext.set_session_id( "DISETConnection%s" % str( time.time() ) )
#self.sslContext.get_cert_store().set_flags( GSI.crypto.X509_CRL_CHECK )
if 'SSLSessionTimeout' in self.infoDict:
timeout = int( self.infoDict['SSLSessionTimeout'] )
gLogger.debug( "Setting session timeout to %s" % timeout )
self.sslContext.set_session_timeout( timeout )
return S_OK()
def doClientHandshake( self ):
self.sslSocket.set_connect_state()
return self.__sslHandshake()
def doServerHandshake( self ):
self.sslSocket.set_accept_state()
return self.__sslHandshake()
#@gSynchro
def __sslHandshake( self ):
"""
Do the SSL Handshake
:return: S_ERROR / S_OK with dictionary of user credentials
"""
start = time.time()
timeout = self.infoDict[ 'timeout' ]
while True:
if timeout:
if time.time() - start > timeout:
return S_ERROR( "Handshake timeout exceeded" )
try:
self.sslSocket.do_handshake()
break
except GSI.SSL.WantReadError:
time.sleep( 0.001 )
except GSI.SSL.WantWriteError:
time.sleep( 0.001 )
except GSI.SSL.Error, v:
if self.__retry < 3:
self.__retry += 1
return self.__sslHandshake()
else:
# gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
gLogger.warn( "Error while handshaking", v )
return S_ERROR( "Error while handshaking" )
except Exception, v:
gLogger.warn( "Error while handshaking", v )
if self.__retry < 3:
self.__retry += 1
return self.__sslHandshake()
else:
# gLogger.warn( "Error while handshaking", "\n".join( [ stError[2] for stError in v.args[0] ] ) )
gLogger.warn( "Error while handshaking", v )
return S_ERROR( "Error while handshaking" )
credentialsDict = self.gatherPeerCredentials()
if self.infoDict[ 'clientMode' ]:
hostnameCN = credentialsDict[ 'CN' ]
#if hostnameCN.split("/")[-1] != self.infoDict[ 'hostname' ]:
if not self.__isSameHost( hostnameCN, self.infoDict[ 'hostname' ] ):
gLogger.warn( "Server is not who it's supposed to be",
"Connecting to %s and it's %s" % ( self.infoDict[ 'hostname' ], hostnameCN ) )
gLogger.debug( "", "Authenticated peer (%s)" % credentialsDict[ 'DN' ] )
return S_OK( credentialsDict )
| gpl-3.0 | -4,418,772,929,986,420,700 | 37.475783 | 183 | 0.628064 | false |
cyrillg/python-playground | first-dive/observer.py | 2 | 1155 | '''
Definition of observer classes
Observers gather the functions used to compute the current estimate of the
state
author: Cyrill Guillemot
email: [email protected]
website: http://serial-robotics.org
license: GNU GPL
'''
#!/usr/bin/env python
from lib import *
class IdealObs:
''' Definition of an ideal observer
Detail:
This controller's estimate matches the actual state perfectly
'''
def __init__(self, cart):
self.cart = cart
self.p = cart.p
self.L = cart.L
self.base_shape = cart.base_shape
self.shape = self.base_shape
def update_shape(self):
''' Update the drawing of the cart
'''
p = self.p.flatten()
M = self.L*array(self.base_shape)
M = transform_pattern(M, p[0], p[1], p[2])
self.shape = M
def update_est(self, sensor_readings, dt):
''' Provide the new estimate of the system state
Inputs:
- sensor_readings: current sensor readings
- dt: time passed since last estimation
'''
self.p = sensor_readings[0]
self.update_shape()
| gpl-3.0 | 2,911,166,167,924,669,400 | 23.574468 | 74 | 0.603463 | false |
Upande/MaMaSe | apps/utils/migrations/0012_aggregatedailyfeed_aggregatemonthlyfeed.py | 1 | 1349 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('utils', '0011_auto_20150922_0857'),
]
operations = [
migrations.CreateModel(
name='AggregateDailyFeed',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('data', jsonfield.fields.JSONField(null=True, blank=True)),
('lastupdate', models.DateTimeField(auto_now_add=True)),
('timestamp', models.DateTimeField()),
('channel', models.ForeignKey(related_name='daily_channels', to='utils.Channel')),
],
),
migrations.CreateModel(
name='AggregateMonthlyFeed',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('data', jsonfield.fields.JSONField(null=True, blank=True)),
('lastupdate', models.DateTimeField(auto_now_add=True)),
('timestamp', models.DateTimeField()),
('channel', models.ForeignKey(related_name='monthly_channels', to='utils.Channel')),
],
),
]
| apache-2.0 | 7,480,409,801,439,744,000 | 37.542857 | 114 | 0.576723 | false |
benjamin9999/python-stix | stix/incident/loss_estimation.py | 1 | 1999 | # Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import stix
import stix.bindings.incident as incident_binding
class LossEstimation(stix.Entity):
_namespace = "http://stix.mitre.org/Incident-1"
_binding = incident_binding
_binding_class = incident_binding.LossEstimationType
def __init__(self):
super(LossEstimation, self).__init__()
self.iso_currency_code = None
self.amount = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def iso_currency_code(self):
return self._iso_currency_code
@iso_currency_code.setter
def iso_currency_code(self, value):
self._iso_currency_code = value
def to_obj(self, return_obj=None, ns_info=None):
super(LossEstimation, self).to_obj(return_obj=return_obj, ns_info=ns_info)
obj = self._binding_class()
if self.amount:
obj.amount = self.amount
if self.iso_currency_code:
obj.iso_currency_code = self.iso_currency_code
return obj
@classmethod
def from_obj(cls, obj, return_obj=None):
if not obj:
return None
if not return_obj:
return_obj = cls()
return_obj.amount = obj.amount
return_obj.iso_currency_code = obj.iso_currency_code
return return_obj
def to_dict(self):
d = {}
if self.amount:
d['amount'] = self.amount
if self.iso_currency_code:
d['iso_currency_code'] = self.iso_currency_code
return d
@classmethod
def from_dict(cls, dict_, return_obj=None):
if not dict_:
return None
if not return_obj:
return_obj = cls()
return_obj.amount = dict_.get('amount')
return_obj.iso_currency_code = dict_.get('iso_currency_code')
return return_obj
| bsd-3-clause | 5,275,042,585,921,421,000 | 26.013514 | 82 | 0.601801 | false |
titouanc/lechbot | plugins/giphy.py | 1 | 1125 | from .helpers import public_api
from ircbot.plugin import BotPlugin
import random
import re
class Giphy(BotPlugin):
def __init__(self, giphy_key):
self.giphy_key = giphy_key
def clean_url(self, url):
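        # Strip trailing query parameters so only the bare gif URL remains, e.g.
        # "https://media.giphy.com/media/abc/giphy.gif?cid=123" (hypothetical URL)
        # -> "https://media.giphy.com/media/abc/giphy.gif"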
m = re.match(r'^(https://.+/giphy\.gif).*', url)
if m:
return m.group(1)
return url
def search_gif(self, query):
url = "http://api.giphy.com/v1/gifs/search?api_key={}&q={}"
q = query.replace(' ', '+')
r = yield from public_api(url.format(self.giphy_key, q))
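        # The Giphy search response is assumed to look roughly like:
        #   {"data": [{"images": {"original": {"url": "https://.../giphy.gif?..."}}}, ...]}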
chosen = random.choice(r['data'])
return self.clean_url(chosen['images']['original']['url'])
@BotPlugin.command(r'\!gif (#[\w\d_-]+) (.+)')
def gif(self, msg):
"""Cherche un gif et le poste sur un autre chan"""
gif = yield from self.search_gif(msg.args[1])
reply = "{}: {}".format(msg.user, gif)
self.bot.say(reply, target=msg.args[0])
@BotPlugin.command(r'\!gif (.+)')
def gif_here(self, msg):
"""Cherche un gif et le poste ici"""
gif = yield from self.search_gif(msg.args[0])
msg.reply(gif)
| unlicense | -4,878,654,805,742,700,000 | 31.142857 | 67 | 0.566222 | false |
mspark93/VTK | Filters/Core/Testing/Python/tubeComb.py | 17 | 1879 | #!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
# create planes
# Create the RenderWindow, Renderer
#
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer( ren )
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create pipeline
#
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName( vtkGetDataRoot() + '/Data/combxyz.bin' )
pl3d.SetQFileName( vtkGetDataRoot() + '/Data/combq.bin' )
pl3d.SetScalarFunctionNumber( 100 )
pl3d.SetVectorFunctionNumber( 202 )
pl3d.Update()
pl3d_output = pl3d.GetOutput().GetBlock(0)
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(pl3d_output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
seeds = vtk.vtkLineSource()
seeds.SetPoint1(15, -5, 32)
seeds.SetPoint2(15, 5, 32)
seeds.SetResolution(10)
integ = vtk.vtkRungeKutta4()
sl = vtk.vtkStreamLine()
sl.SetIntegrator(integ)
sl.SetInputData(pl3d_output)
sl.SetSourceConnection(seeds.GetOutputPort())
sl.SetMaximumPropagationTime(0.1)
sl.SetIntegrationStepLength(0.1)
sl.SetIntegrationDirectionToBackward()
sl.SetStepLength(0.001)
tube = vtk.vtkTubeFilter()
tube.SetInputConnection(sl.GetOutputPort())
tube.SetRadius(0.1)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tube.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
mmapper = vtk.vtkPolyDataMapper()
mmapper.SetInputConnection(seeds.GetOutputPort())
mactor = vtk.vtkActor()
mactor.SetMapper(mmapper)
ren.AddActor(mactor)
ren.AddActor(actor)
ren.AddActor(outlineActor)
cam=ren.GetActiveCamera()
cam.SetClippingRange( 3.95297, 50 )
cam.SetFocalPoint( 8.88908, 0.595038, 29.3342 )
cam.SetPosition( -12.3332, 31.7479, 41.2387 )
cam.SetViewUp( 0.060772, -0.319905, 0.945498 )
renWin.Render()
| bsd-3-clause | -5,095,630,595,092,316,000 | 23.723684 | 61 | 0.786056 | false |
mganeva/mantid | scripts/test/Muon/utilities/muon_workspace_wrapper_test.py | 1 | 15900 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import unittest
import six
import mantid.simpleapi as simpleapi
import mantid.api as api
from mantid.api import ITableWorkspace, WorkspaceGroup
from mantid.dataobjects import Workspace2D
from Muon.GUI.Common.ADSHandler.muon_workspace_wrapper import MuonWorkspaceWrapper, add_directory_structure
def create_simple_workspace(data_x, data_y):
alg = simpleapi.AlgorithmManager.create("CreateWorkspace")
alg.initialize()
alg.setAlwaysStoreInADS(False)
alg.setLogging(False)
alg.setProperty("dataX", data_x)
alg.setProperty("dataY", data_y)
alg.setProperty("OutputWorkspace", "__notUsed")
alg.execute()
return alg.getProperty("OutputWorkspace").value
def create_simple_table_workspace():
alg = simpleapi.AlgorithmManager.create("CreateEmptyTableWorkspace")
alg.initialize()
alg.setAlwaysStoreInADS(False)
alg.setLogging(False)
alg.setProperty("OutputWorkspace", "__notUsed")
alg.execute()
table = alg.getProperty("OutputWorkspace").value
table.addColumn("int", "col1", 0)
table.addColumn("int", "col2", 0)
[table.addRow([i + 1, 2 * i]) for i in range(4)]
return table
class MuonWorkspaceTest(unittest.TestCase):
"""
The MuonWorkspaceWrapper object is a key class in the muon interface. It is a wrapper around a normal
Mantid workspace, which maintains a handle to the workspace whilst allowing it to be in the ADS
or not.
This allows certain workspaces to be held in the interface, and displayed to the user as and
when they are necessary.
It has some extra functionality which allows the workspace to be put in the ADS inside a "folder
structure" using workspace groups. So for example if I give the name "dir1/dir2/name" then a
workspace called "name" will be placed inside a group called "dir2" which will istelf be placed
inside a group called "dir1".
This allows the complex array of different workspaces from the muon interface to be structured
in the ADS to improve the user experience.
"""
def setUp(self):
self.workspace = create_simple_workspace(data_x=[1, 2, 3, 4], data_y=[10, 10, 10, 10])
simpleapi.mtd.clear()
assert isinstance(self.workspace, Workspace2D)
assert simpleapi.mtd.size() == 0
def tearDown(self):
# clear the ADS
simpleapi.mtd.clear()
# ----------------------------------------------------------------------------------------------
# Test Initialization
# ----------------------------------------------------------------------------------------------
def test_that_cannot_initialize_without_supplying_a_workspace(self):
with self.assertRaises(TypeError):
MuonWorkspaceWrapper()
def test_that_can_initialize_with_Workspace2D_object(self):
MuonWorkspaceWrapper(workspace=self.workspace)
def test_that_can_initialize_with_TableWorkspace_object(self):
table_workspace = create_simple_table_workspace()
assert isinstance(table_workspace, ITableWorkspace)
MuonWorkspaceWrapper(workspace=table_workspace)
def test_that_cannot_initialize_with_WorkspaceGroup_object(self):
group_workspace = api.WorkspaceGroup()
assert isinstance(group_workspace, WorkspaceGroup)
with self.assertRaises(AttributeError):
MuonWorkspaceWrapper(workspace=group_workspace)
def test_that_cannot_initialize_with_non_workspace_objects(self):
with self.assertRaises(AttributeError):
MuonWorkspaceWrapper(workspace="string")
with self.assertRaises(AttributeError):
MuonWorkspaceWrapper(workspace=1234)
with self.assertRaises(AttributeError):
MuonWorkspaceWrapper(workspace=5.5)
def test_that_initialized_object_is_not_in_ADS_by_default(self):
workspace_handle = MuonWorkspaceWrapper(workspace=self.workspace)
self.assertEqual(workspace_handle.is_hidden, True)
self.assertEqual(simpleapi.mtd.size(), 0)
def test_that_initialized_object_starts_with_empty_string_for_name(self):
workspace_handle = MuonWorkspaceWrapper(workspace=self.workspace)
self.assertEqual(workspace_handle.name, "")
# ----------------------------------------------------------------------------------------------
# Test Show/Hide
# ----------------------------------------------------------------------------------------------
def test_that_cannot_modify_is_hidden_property(self):
# the ADS handling interface is restricted to the show() / hide() methods
workspace_handle = MuonWorkspaceWrapper(workspace=self.workspace)
with self.assertRaises(AttributeError):
workspace_handle.is_hidden = True
def test_that_showing_the_workspace_with_empty_string_for_name_raises_ValueError(self):
workspace_handle = MuonWorkspaceWrapper(workspace=self.workspace)
with self.assertRaises(ValueError):
workspace_handle.show("")
def test_that_showing_the_workspace_puts_it_in_ADS(self):
workspace_handle = MuonWorkspaceWrapper(workspace=self.workspace)
workspace_handle.show("test")
self.assertTrue(simpleapi.mtd.doesExist("test"))
ads_workspace = simpleapi.mtd["test"]
six.assertCountEqual(self, ads_workspace.readX(0), [1, 2, 3, 4])
six.assertCountEqual(self, ads_workspace.readY(0), [10, 10, 10, 10])
def test_that_hiding_the_workspace_removes_it_from_ADS(self):
workspace_handle = MuonWorkspaceWrapper(workspace=self.workspace)
workspace_handle.show("test")
workspace_handle.hide()
self.assertEqual(workspace_handle.is_hidden, True)
self.assertFalse(simpleapi.mtd.doesExist("test"))
def test_that_workspace_property_returns_workspace_when_not_in_ADS(self):
workspace_handle = MuonWorkspaceWrapper(workspace=self.workspace)
ws_property = workspace_handle.workspace
six.assertCountEqual(self, ws_property.readX(0), [1, 2, 3, 4])
six.assertCountEqual(self, ws_property.readY(0), [10, 10, 10, 10])
def test_that_workspace_property_returns_workspace_when_in_ADS(self):
workspace_handle = MuonWorkspaceWrapper(workspace=self.workspace)
workspace_handle.show("arbitrary_name")
ws_property = workspace_handle.workspace
six.assertCountEqual(self, ws_property.readX(0), [1, 2, 3, 4])
six.assertCountEqual(self, ws_property.readY(0), [10, 10, 10, 10])
def test_that_can_change_name_when_workspace_not_in_ADS(self):
workspace_handle = MuonWorkspaceWrapper(workspace=self.workspace)
workspace_handle.name = "new_name"
self.assertEqual(workspace_handle.name, "new_name")
def test_that_cannot_change_name_when_workspace_in_ADS(self):
workspace_handle = MuonWorkspaceWrapper(workspace=self.workspace)
workspace_handle.show("name1")
with self.assertRaises(ValueError):
workspace_handle.name = "new_name"
def test_that_hiding_workspace_more_than_once_has_no_effect(self):
workspace_handle = MuonWorkspaceWrapper(workspace=self.workspace)
workspace_handle.show("name1")
        workspace_handle.hide()
        # hiding a second time should be a no-op and must not raise
        workspace_handle.hide()
def test_that_if_workspace_deleted_from_ADS_then_hide_does_nothing(self):
workspace_handle = MuonWorkspaceWrapper(workspace=self.workspace)
workspace_handle.show("name1")
        simpleapi.mtd.clear()
        # the workspace was removed externally; hide() should be a silent no-op
        workspace_handle.hide()
def test_that_hiding_workspace_deletes_groups_which_are_left_empty(self):
# TODO
pass
def test_that_hiding_workspace_does_not_delete_groups_which_still_contain_workspaces(self):
# TODO
pass
# ----------------------------------------------------------------------------------------------
# Overwriting the workspace via the workspace property
# ----------------------------------------------------------------------------------------------
def test_that_setting_a_new_workspace_removes_the_previous_one_from_the_ADS(self):
workspace_handle = MuonWorkspaceWrapper(workspace=self.workspace)
workspace_handle.show("name1")
workspace2 = create_simple_workspace(data_x=[5, 6, 7, 8], data_y=[20, 20, 20, 20])
self.assertTrue(simpleapi.mtd.doesExist("name1"))
workspace_handle.workspace = workspace2
self.assertFalse(simpleapi.mtd.doesExist("name1"))
def test_that_setting_a_new_workspace_resets_the_name_to_empty_string(self):
workspace_handle = MuonWorkspaceWrapper(workspace=self.workspace)
workspace_handle.show("name1")
workspace2 = create_simple_workspace(data_x=[5, 6, 7, 8], data_y=[20, 20, 20, 20])
self.assertEqual(workspace_handle.name, "name1")
workspace_handle.workspace = workspace2
self.assertEqual(workspace_handle.name, "")
class MuonWorkspaceAddDirectoryTest(unittest.TestCase):
"""
Test the functionality surrounding adding "directory structures" to the ADS, in other words
adding nested structures of WorkspaceGroups to help structure the data.
"""
def setUp(self):
assert simpleapi.mtd.size() == 0
def tearDown(self):
# clear the ADS
simpleapi.mtd.clear()
def assert_group_workspace_exists(self, name):
self.assertTrue(simpleapi.mtd.doesExist(name))
self.assertEqual(type(simpleapi.mtd.retrieve(name)), WorkspaceGroup)
def assert_group1_is_inside_group2(self, group1_name, group2_name):
group2 = simpleapi.mtd.retrieve(group2_name)
self.assertIn(group1_name, group2.getNames())
def assert_workspace_in_group(self, workspace_name, group_name):
group = simpleapi.mtd.retrieve(group_name)
self.assertIn(workspace_name, group.getNames())
# ----------------------------------------------------------------------------------------------
# Test add_directory_structure function
# ----------------------------------------------------------------------------------------------
def test_that_passing_empty_list_has_no_effect(self):
add_directory_structure([])
self.assertEqual(simpleapi.mtd.size(), 0)
def test_that_passing_a_list_of_a_single_string_creates_an_empty_group_in_ADS(self):
add_directory_structure(["testGroup"])
self.assertEqual(simpleapi.mtd.size(), 1)
self.assertTrue(simpleapi.mtd.doesExist("testGroup"))
self.assertEqual(type(simpleapi.mtd.retrieve("testGroup")), WorkspaceGroup)
group = simpleapi.mtd.retrieve("testGroup")
self.assertEqual(group.getNumberOfEntries(), 0)
def test_that_passing_a_list_of_strings_creates_a_group_for_each_string(self):
add_directory_structure(["testGroup1", "testGroup2", "testGroup3"])
self.assertEqual(simpleapi.mtd.size(), 3)
self.assert_group_workspace_exists("testGroup1")
self.assert_group_workspace_exists("testGroup2")
self.assert_group_workspace_exists("testGroup3")
def test_raises_ValueError_if_duplicate_names_given(self):
# this is necessary due to the ADS requiring all names to be unique irrespectie of object
# type or nesting
with self.assertRaises(ValueError):
add_directory_structure(["testGroup1", "testGroup2", "testGroup2"])
def test_that_for_two_names_the_second_group_is_nested_inside_the_first(self):
add_directory_structure(["testGroup1", "testGroup2"])
self.assert_group_workspace_exists("testGroup1")
self.assert_group_workspace_exists("testGroup2")
self.assert_group1_is_inside_group2("testGroup2", "testGroup1")
def test_that_nested_groups_up_to_four_layers_are_possible(self):
add_directory_structure(["testGroup1", "testGroup2", "testGroup3", "testGroup4"])
self.assert_group1_is_inside_group2("testGroup2", "testGroup1")
self.assert_group1_is_inside_group2("testGroup3", "testGroup2")
self.assert_group1_is_inside_group2("testGroup4", "testGroup3")
def test_that_overwriting_previous_structure_with_a_permutation_works(self):
add_directory_structure(["testGroup1", "testGroup2", "testGroup3", "testGroup4"])
add_directory_structure(["testGroup4", "testGroup3", "testGroup2", "testGroup1"])
self.assert_group1_is_inside_group2("testGroup1", "testGroup2")
self.assert_group1_is_inside_group2("testGroup2", "testGroup3")
self.assert_group1_is_inside_group2("testGroup3", "testGroup4")
def test_that_if_workspace_already_exists_it_is_removed(self):
workspace = create_simple_workspace(data_x=[1, 2, 3, 4], data_y=[10, 10, 10, 10])
simpleapi.mtd.add("testGroup1", workspace)
add_directory_structure(["testGroup1", "testGroup2"])
self.assert_group_workspace_exists("testGroup1")
self.assert_group_workspace_exists("testGroup2")
self.assert_group1_is_inside_group2("testGroup2", "testGroup1")
# ----------------------------------------------------------------------------------------------
# Test directory structure functionality in MuonWorkspaceWrapper
# ----------------------------------------------------------------------------------------------
def test_that_if_workspace_exists_with_same_name_as_group_then_it_is_replaced(self):
workspace = create_simple_workspace(data_x=[1, 2, 3, 4], data_y=[10, 10, 10, 10])
simpleapi.mtd.add("group", workspace)
workspace_handle = MuonWorkspaceWrapper(workspace=workspace)
workspace_handle.show("group/ws1")
self.assert_group_workspace_exists("group")
def test_that_workspace_added_correctly_for_single_nested_structure(self):
workspace = create_simple_workspace(data_x=[1, 2, 3, 4], data_y=[10, 10, 10, 10])
workspace_handle = MuonWorkspaceWrapper(workspace=workspace)
workspace_handle.show("group1/ws1")
self.assert_group_workspace_exists("group1")
self.assert_workspace_in_group("ws1", "group1")
def test_that_workspace_added_correctly_for_doubly_nested_structure(self):
workspace = create_simple_workspace(data_x=[1, 2, 3, 4], data_y=[10, 10, 10, 10])
workspace_handle = MuonWorkspaceWrapper(workspace=workspace)
workspace_handle.show("group1/group2/ws1")
self.assert_group_workspace_exists("group1")
self.assert_group_workspace_exists("group2")
self.assert_group1_is_inside_group2("group2", "group1")
self.assert_workspace_in_group("ws1", "group2")
def test_that_workspaces_in_existing_folders_are_not_moved_by_directory_manipulation(self):
workspace1 = create_simple_workspace(data_x=[1, 2, 3, 4], data_y=[10, 10, 10, 10])
workspace2 = create_simple_workspace(data_x=[1, 2, 3, 4], data_y=[10, 10, 10, 10])
workspace_handle1 = MuonWorkspaceWrapper(workspace=workspace1)
workspace_handle2 = MuonWorkspaceWrapper(workspace=workspace2)
workspace_handle1.show("group1/group2/ws1")
workspace_handle2.show("group1/group2/group3/ws2")
self.assert_group_workspace_exists("group1")
self.assert_group_workspace_exists("group2")
self.assert_group_workspace_exists("group3")
self.assert_group1_is_inside_group2("group2", "group1")
self.assert_group1_is_inside_group2("group3", "group2")
self.assert_workspace_in_group("ws1", "group2")
self.assert_workspace_in_group("ws2", "group3")
if __name__ == '__main__':
unittest.main(buffer=False, verbosity=2)
| gpl-3.0 | 2,940,811,313,415,089,700 | 40.952507 | 107 | 0.651761 | false |
odoousers2014/odoo | addons/mail/ir_actions.py | 7 | 3449 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013 OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class actions_server(osv.Model):
""" Add email option in server actions. """
_name = 'ir.actions.server'
_inherit = ['ir.actions.server']
def _get_states(self, cr, uid, context=None):
res = super(actions_server, self)._get_states(cr, uid, context=context)
res.insert(0, ('email', 'Send Email'))
return res
_columns = {
'email_from': fields.related(
'template_id', 'email_from', type='char',
readonly=True, string='From'
),
'email_to': fields.related(
'template_id', 'email_to', type='char',
readonly=True, string='To (Emails)'
),
'partner_to': fields.related(
'template_id', 'partner_to', type='char',
readonly=True, string='To (Partners)'
),
'subject': fields.related(
'template_id', 'subject', type='char',
readonly=True, string='Subject'
),
'body_html': fields.related(
'template_id', 'body_html', type='text',
readonly=True, string='Body'
),
'template_id': fields.many2one(
'mail.template', 'Email Template', ondelete='set null',
domain="[('model_id', '=', model_id)]",
),
}
def on_change_template_id(self, cr, uid, ids, template_id, context=None):
""" Render the raw template in the server action fields. """
fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to']
if template_id:
template_values = self.pool.get('mail.template').read(cr, uid, [template_id], fields, context)[0]
values = dict((field, template_values[field]) for field in fields if template_values.get(field))
if not values.get('email_from'):
return {'warning': {'title': 'Incomplete template', 'message': 'Your template should define email_from'}, 'value': values}
else:
values = dict.fromkeys(fields, False)
return {'value': values}
def run_action_email(self, cr, uid, action, eval_context=None, context=None):
if not action.template_id or not context.get('active_id'):
return False
self.pool['mail.template'].send_mail(cr, uid, action.template_id.id, context.get('active_id'),
force_send=False, raise_exception=False, context=context)
return False
| agpl-3.0 | -1,191,183,614,548,943,600 | 42.1125 | 138 | 0.574659 | false |
intgr/django | tests/requests/tests.py | 22 | 37836 | import time
from datetime import datetime, timedelta
from http import cookies
from io import BytesIO
from itertools import chain
from urllib.parse import urlencode
from django.core.exceptions import SuspiciousOperation
from django.core.handlers.wsgi import LimitedStream, WSGIRequest
from django.http import (
HttpRequest, HttpResponse, RawPostDataException, UnreadablePostError,
)
from django.http.request import split_domain_port
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.client import FakePayload
from django.test.utils import freeze_time
from django.utils.http import cookie_date
from django.utils.timezone import utc
class RequestsTests(SimpleTestCase):
def test_httprequest(self):
request = HttpRequest()
self.assertEqual(list(request.GET.keys()), [])
self.assertEqual(list(request.POST.keys()), [])
self.assertEqual(list(request.COOKIES.keys()), [])
self.assertEqual(list(request.META.keys()), [])
# .GET and .POST should be QueryDicts
self.assertEqual(request.GET.urlencode(), '')
self.assertEqual(request.POST.urlencode(), '')
# and FILES should be MultiValueDict
self.assertEqual(request.FILES.getlist('foo'), [])
self.assertIsNone(request.content_type)
self.assertIsNone(request.content_params)
def test_httprequest_full_path(self):
request = HttpRequest()
request.path = request.path_info = '/;some/?awful/=path/foo:bar/'
request.META['QUERY_STRING'] = ';some=query&+query=string'
expected = '/%3Bsome/%3Fawful/%3Dpath/foo:bar/?;some=query&+query=string'
self.assertEqual(request.get_full_path(), expected)
def test_httprequest_full_path_with_query_string_and_fragment(self):
request = HttpRequest()
request.path = request.path_info = '/foo#bar'
request.META['QUERY_STRING'] = 'baz#quux'
self.assertEqual(request.get_full_path(), '/foo%23bar?baz#quux')
def test_httprequest_repr(self):
request = HttpRequest()
request.path = '/somepath/'
request.method = 'GET'
request.GET = {'get-key': 'get-value'}
request.POST = {'post-key': 'post-value'}
request.COOKIES = {'post-key': 'post-value'}
request.META = {'post-key': 'post-value'}
self.assertEqual(repr(request), "<HttpRequest: GET '/somepath/'>")
def test_httprequest_repr_invalid_method_and_path(self):
request = HttpRequest()
self.assertEqual(repr(request), "<HttpRequest>")
request = HttpRequest()
request.method = "GET"
self.assertEqual(repr(request), "<HttpRequest>")
request = HttpRequest()
request.path = ""
self.assertEqual(repr(request), "<HttpRequest>")
def test_wsgirequest(self):
request = WSGIRequest({
'PATH_INFO': 'bogus',
'REQUEST_METHOD': 'bogus',
'CONTENT_TYPE': 'text/html; charset=utf8',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(list(request.GET.keys()), [])
self.assertEqual(list(request.POST.keys()), [])
self.assertEqual(list(request.COOKIES.keys()), [])
self.assertEqual(
set(request.META.keys()),
{'PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME', 'CONTENT_TYPE', 'wsgi.input'}
)
self.assertEqual(request.META['PATH_INFO'], 'bogus')
self.assertEqual(request.META['REQUEST_METHOD'], 'bogus')
self.assertEqual(request.META['SCRIPT_NAME'], '')
self.assertEqual(request.content_type, 'text/html')
self.assertEqual(request.content_params, {'charset': 'utf8'})
def test_wsgirequest_with_script_name(self):
"""
The request's path is correctly assembled, regardless of whether or
not the SCRIPT_NAME has a trailing slash (#20169).
"""
# With trailing slash
request = WSGIRequest({
'PATH_INFO': '/somepath/',
'SCRIPT_NAME': '/PREFIX/',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(request.path, '/PREFIX/somepath/')
# Without trailing slash
request = WSGIRequest({
'PATH_INFO': '/somepath/',
'SCRIPT_NAME': '/PREFIX',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(request.path, '/PREFIX/somepath/')
def test_wsgirequest_script_url_double_slashes(self):
"""
WSGI squashes multiple successive slashes in PATH_INFO, WSGIRequest
should take that into account when populating request.path and
request.META['SCRIPT_NAME'] (#17133).
"""
request = WSGIRequest({
'SCRIPT_URL': '/mst/milestones//accounts/login//help',
'PATH_INFO': '/milestones/accounts/login/help',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(request.path, '/mst/milestones/accounts/login/help')
self.assertEqual(request.META['SCRIPT_NAME'], '/mst')
def test_wsgirequest_with_force_script_name(self):
"""
The FORCE_SCRIPT_NAME setting takes precedence over the request's
SCRIPT_NAME environment parameter (#20169).
"""
with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'):
request = WSGIRequest({
'PATH_INFO': '/somepath/',
'SCRIPT_NAME': '/PREFIX/',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
def test_wsgirequest_path_with_force_script_name_trailing_slash(self):
"""
The request's path is correctly assembled, regardless of whether or not
the FORCE_SCRIPT_NAME setting has a trailing slash (#20169).
"""
# With trailing slash
with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'):
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
# Without trailing slash
with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX'):
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
def test_wsgirequest_repr(self):
request = WSGIRequest({'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(repr(request), "<WSGIRequest: GET '/'>")
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
request.GET = {'get-key': 'get-value'}
request.POST = {'post-key': 'post-value'}
request.COOKIES = {'post-key': 'post-value'}
request.META = {'post-key': 'post-value'}
self.assertEqual(repr(request), "<WSGIRequest: GET '/somepath/'>")
def test_wsgirequest_path_info(self):
def wsgi_str(path_info, encoding='utf-8'):
path_info = path_info.encode(encoding) # Actual URL sent by the browser (bytestring)
path_info = path_info.decode('iso-8859-1') # Value in the WSGI environ dict (native string)
return path_info
# Regression for #19468
request = WSGIRequest({'PATH_INFO': wsgi_str("/سلام/"), 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, "/سلام/")
# The URL may be incorrectly encoded in a non-UTF-8 encoding (#26971)
request = WSGIRequest({
'PATH_INFO': wsgi_str("/café/", encoding='iso-8859-1'),
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
# Since it's impossible to decide the (wrong) encoding of the URL, it's
# left percent-encoded in the path.
self.assertEqual(request.path, "/caf%E9/")
def test_httprequest_location(self):
request = HttpRequest()
self.assertEqual(
request.build_absolute_uri(location="https://www.example.com/asdf"),
'https://www.example.com/asdf'
)
request.get_host = lambda: 'www.example.com'
request.path = ''
self.assertEqual(
request.build_absolute_uri(location="/path/with:colons"),
'http://www.example.com/path/with:colons'
)
def test_near_expiration(self):
"Cookie will expire when an near expiration time is provided"
response = HttpResponse()
        # There is a timing weakness in this test; the
# expected result for max-age requires that there be
# a very slight difference between the evaluated expiration
# time, and the time evaluated in set_cookie(). If this
# difference doesn't exist, the cookie time will be
# 1 second larger. To avoid the problem, put in a quick sleep,
# which guarantees that there will be a time difference.
expires = datetime.utcnow() + timedelta(seconds=10)
time.sleep(0.001)
response.set_cookie('datetime', expires=expires)
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['max-age'], 10)
def test_aware_expiration(self):
"Cookie accepts an aware datetime as expiration time"
response = HttpResponse()
expires = (datetime.utcnow() + timedelta(seconds=10)).replace(tzinfo=utc)
time.sleep(0.001)
response.set_cookie('datetime', expires=expires)
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['max-age'], 10)
def test_create_cookie_after_deleting_cookie(self):
"""
Setting a cookie after deletion should clear the expiry date.
"""
response = HttpResponse()
response.set_cookie('c', 'old-value')
self.assertEqual(response.cookies['c']['expires'], '')
response.delete_cookie('c')
self.assertEqual(response.cookies['c']['expires'], 'Thu, 01-Jan-1970 00:00:00 GMT')
response.set_cookie('c', 'new-value')
self.assertEqual(response.cookies['c']['expires'], '')
def test_far_expiration(self):
"Cookie will expire when an distant expiration time is provided"
response = HttpResponse()
response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6))
datetime_cookie = response.cookies['datetime']
self.assertIn(
datetime_cookie['expires'],
# assertIn accounts for slight time dependency (#23450)
('Sat, 01-Jan-2028 04:05:06 GMT', 'Sat, 01-Jan-2028 04:05:07 GMT')
)
def test_max_age_expiration(self):
"Cookie will expire if max_age is provided"
response = HttpResponse()
set_cookie_time = time.time()
with freeze_time(set_cookie_time):
response.set_cookie('max_age', max_age=10)
max_age_cookie = response.cookies['max_age']
self.assertEqual(max_age_cookie['max-age'], 10)
self.assertEqual(max_age_cookie['expires'], cookie_date(set_cookie_time + 10))
def test_httponly_cookie(self):
response = HttpResponse()
response.set_cookie('example', httponly=True)
example_cookie = response.cookies['example']
# A compat cookie may be in use -- check that it has worked
# both as an output string, and using the cookie attributes
self.assertIn('; %s' % cookies.Morsel._reserved['httponly'], str(example_cookie))
self.assertTrue(example_cookie['httponly'])
def test_unicode_cookie(self):
"Verify HttpResponse.set_cookie() works with unicode data."
response = HttpResponse()
cookie_value = '清風'
response.set_cookie('test', cookie_value)
self.assertEqual(cookie_value, response.cookies['test'].value)
def test_limited_stream(self):
# Read all of a limited stream
stream = LimitedStream(BytesIO(b'test'), 2)
self.assertEqual(stream.read(), b'te')
# Reading again returns nothing.
self.assertEqual(stream.read(), b'')
# Read a number of characters greater than the stream has to offer
stream = LimitedStream(BytesIO(b'test'), 2)
self.assertEqual(stream.read(5), b'te')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), b'')
# Read sequentially from a stream
stream = LimitedStream(BytesIO(b'12345678'), 8)
self.assertEqual(stream.read(5), b'12345')
self.assertEqual(stream.read(5), b'678')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), b'')
# Read lines from a stream
stream = LimitedStream(BytesIO(b'1234\n5678\nabcd\nefgh\nijkl'), 24)
# Read a full line, unconditionally
self.assertEqual(stream.readline(), b'1234\n')
# Read a number of characters less than a line
self.assertEqual(stream.readline(2), b'56')
# Read the rest of the partial line
self.assertEqual(stream.readline(), b'78\n')
# Read a full line, with a character limit greater than the line length
self.assertEqual(stream.readline(6), b'abcd\n')
# Read the next line, deliberately terminated at the line end
self.assertEqual(stream.readline(4), b'efgh')
# Read the next line... just the line end
self.assertEqual(stream.readline(), b'\n')
# Read everything else.
self.assertEqual(stream.readline(), b'ijkl')
# Regression for #15018
# If a stream contains a newline, but the provided length
# is less than the number of provided characters, the newline
# doesn't reset the available character count
stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9)
self.assertEqual(stream.readline(10), b'1234\n')
self.assertEqual(stream.readline(3), b'abc')
# Now expire the available characters
self.assertEqual(stream.readline(3), b'd')
# Reading again returns nothing.
self.assertEqual(stream.readline(2), b'')
# Same test, but with read, not readline.
stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9)
self.assertEqual(stream.read(6), b'1234\na')
self.assertEqual(stream.read(2), b'bc')
self.assertEqual(stream.read(2), b'd')
self.assertEqual(stream.read(2), b'')
self.assertEqual(stream.read(), b'')
def test_stream(self):
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.read(), b'name=value')
def test_read_after_value(self):
"""
Reading from request is allowed after accessing request contents as
POST or body.
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.POST, {'name': ['value']})
self.assertEqual(request.body, b'name=value')
self.assertEqual(request.read(), b'name=value')
def test_value_after_read(self):
"""
Construction of POST or body is not allowed after reading
from request.
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.read(2), b'na')
with self.assertRaises(RawPostDataException):
request.body
self.assertEqual(request.POST, {})
def test_non_ascii_POST(self):
payload = FakePayload(urlencode({'key': 'España'}))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'wsgi.input': payload,
})
self.assertEqual(request.POST, {'key': ['España']})
def test_alternate_charset_POST(self):
"""
Test a POST with non-utf-8 payload encoding.
"""
payload = FakePayload(urlencode({'key': 'España'.encode('latin-1')}))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': 'application/x-www-form-urlencoded; charset=iso-8859-1',
'wsgi.input': payload,
})
self.assertEqual(request.POST, {'key': ['España']})
def test_body_after_POST_multipart_form_data(self):
"""
Reading body after parsing multipart/form-data is not allowed
"""
# Because multipart is used for large amounts of data i.e. file uploads,
# we don't want the data held in memory twice, and we don't want to
# silence the error by setting body = '' either.
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
'']))
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.POST, {'name': ['value']})
with self.assertRaises(RawPostDataException):
request.body
def test_body_after_POST_multipart_related(self):
"""
Reading body after parsing multipart that isn't form-data is allowed
"""
# Ticket #9054
# There are cases in which the multipart data is related instead of
# being a binary upload, in which case it should still be accessible
# via body.
payload_data = b"\r\n".join([
b'--boundary',
b'Content-ID: id; name="name"',
b'',
b'value',
b'--boundary--'
b''])
payload = FakePayload(payload_data)
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/related; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.POST, {})
self.assertEqual(request.body, payload_data)
def test_POST_multipart_with_content_length_zero(self):
"""
Multipart POST requests with Content-Length >= 0 are valid and need to be handled.
"""
# According to:
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
# Every request.POST with Content-Length >= 0 is a valid request,
# this test ensures that we handle Content-Length == 0.
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
'']))
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': 0,
'wsgi.input': payload})
self.assertEqual(request.POST, {})
def test_POST_binary_only(self):
payload = b'\r\n\x01\x00\x00\x00ab\x00\x00\xcd\xcc,@'
environ = {'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/octet-stream',
'CONTENT_LENGTH': len(payload),
'wsgi.input': BytesIO(payload)}
request = WSGIRequest(environ)
self.assertEqual(request.POST, {})
self.assertEqual(request.FILES, {})
self.assertEqual(request.body, payload)
# Same test without specifying content-type
environ.update({'CONTENT_TYPE': '', 'wsgi.input': BytesIO(payload)})
request = WSGIRequest(environ)
self.assertEqual(request.POST, {})
self.assertEqual(request.FILES, {})
self.assertEqual(request.body, payload)
def test_read_by_lines(self):
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(list(request), [b'name=value'])
def test_POST_after_body_read(self):
"""
POST should be populated even if body is read first
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
request.body # evaluate
self.assertEqual(request.POST, {'name': ['value']})
def test_POST_after_body_read_and_stream_read(self):
"""
POST should be populated even if body is read first, and then
the stream is read second.
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
request.body # evaluate
self.assertEqual(request.read(1), b'n')
self.assertEqual(request.POST, {'name': ['value']})
def test_POST_after_body_read_and_stream_read_multipart(self):
"""
POST should be populated even if body is read first, and then
the stream is read second. Using multipart/form-data instead of urlencoded.
"""
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
'']))
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
request.body # evaluate
# Consume enough data to mess up the parsing:
self.assertEqual(request.read(13), b'--boundary\r\nC')
self.assertEqual(request.POST, {'name': ['value']})
    def test_POST_immutable_for_multipart(self):
"""
MultiPartParser.parse() leaves request.POST immutable.
"""
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--',
]))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
self.assertFalse(request.POST._mutable)
def test_POST_connection_error(self):
"""
If wsgi.input.read() raises an exception while trying to read() the
POST, the exception should be identifiable (not a generic IOError).
"""
class ExplodingBytesIO(BytesIO):
def read(self, len=0):
raise IOError("kaboom!")
payload = b'name=value'
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': ExplodingBytesIO(payload)})
with self.assertRaises(UnreadablePostError):
request.body
def test_set_encoding_clears_POST(self):
payload = FakePayload('name=Hello Günter')
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
self.assertEqual(request.POST, {'name': ['Hello Günter']})
request.encoding = 'iso-8859-16'
self.assertEqual(request.POST, {'name': ['Hello GĂŒnter']})
def test_set_encoding_clears_GET(self):
request = WSGIRequest({
'REQUEST_METHOD': 'GET',
'wsgi.input': '',
'QUERY_STRING': 'name=Hello%20G%C3%BCnter',
})
self.assertEqual(request.GET, {'name': ['Hello Günter']})
request.encoding = 'iso-8859-16'
self.assertEqual(request.GET, {'name': ['Hello G\u0102\u0152nter']})
def test_FILES_connection_error(self):
"""
If wsgi.input.read() raises an exception while trying to read() the
FILES, the exception should be identifiable (not a generic IOError).
"""
class ExplodingBytesIO(BytesIO):
def read(self, len=0):
raise IOError("kaboom!")
payload = b'x'
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=foo_',
'CONTENT_LENGTH': len(payload),
'wsgi.input': ExplodingBytesIO(payload)})
with self.assertRaises(UnreadablePostError):
request.FILES
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_get_raw_uri(self):
factory = RequestFactory(HTTP_HOST='evil.com')
request = factory.get('////absolute-uri')
self.assertEqual(request.get_raw_uri(), 'http://evil.com//absolute-uri')
request = factory.get('/?foo=bar')
self.assertEqual(request.get_raw_uri(), 'http://evil.com/?foo=bar')
request = factory.get('/path/with:colons')
self.assertEqual(request.get_raw_uri(), 'http://evil.com/path/with:colons')
class HostValidationTests(SimpleTestCase):
poisoned_hosts = [
'[email protected]',
'example.com:[email protected]',
'example.com:[email protected]:80',
'example.com:80/badpath',
'example.com: recovermypassword.com',
]
@override_settings(
USE_X_FORWARDED_HOST=False,
ALLOWED_HOSTS=[
'forward.com', 'example.com', 'internal.com', '12.34.56.78',
'[2001:19f0:feee::dead:beef:cafe]', 'xn--4ca9at.com',
'.multitenant.com', 'INSENSITIVE.com', '[::ffff:169.254.169.254]',
])
def test_http_get_host(self):
# Check if X_FORWARDED_HOST is provided.
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_HOST': 'forward.com',
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
# X_FORWARDED_HOST is ignored.
self.assertEqual(request.get_host(), 'example.com')
# Check if X_FORWARDED_HOST isn't provided.
request = HttpRequest()
request.META = {
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'example.com')
# Check if HTTP_HOST isn't provided.
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'internal.com')
# Check if HTTP_HOST isn't provided, and we're on a nonstandard port
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 8042,
}
self.assertEqual(request.get_host(), 'internal.com:8042')
legit_hosts = [
'example.com',
'example.com:80',
'12.34.56.78',
'12.34.56.78:443',
'[2001:19f0:feee::dead:beef:cafe]',
'[2001:19f0:feee::dead:beef:cafe]:8080',
'xn--4ca9at.com', # Punycode for öäü.com
'anything.multitenant.com',
'multitenant.com',
'insensitive.com',
'example.com.',
'example.com.:80',
'[::ffff:169.254.169.254]',
]
for host in legit_hosts:
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
# Poisoned host headers are rejected as suspicious
for host in chain(self.poisoned_hosts, ['other.com', 'example.com..']):
with self.assertRaises(SuspiciousOperation):
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
@override_settings(USE_X_FORWARDED_HOST=True, ALLOWED_HOSTS=['*'])
def test_http_get_host_with_x_forwarded_host(self):
# Check if X_FORWARDED_HOST is provided.
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_HOST': 'forward.com',
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
# X_FORWARDED_HOST is obeyed.
self.assertEqual(request.get_host(), 'forward.com')
# Check if X_FORWARDED_HOST isn't provided.
request = HttpRequest()
request.META = {
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'example.com')
# Check if HTTP_HOST isn't provided.
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'internal.com')
# Check if HTTP_HOST isn't provided, and we're on a nonstandard port
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 8042,
}
self.assertEqual(request.get_host(), 'internal.com:8042')
# Poisoned host headers are rejected as suspicious
legit_hosts = [
'example.com',
'example.com:80',
'12.34.56.78',
'12.34.56.78:443',
'[2001:19f0:feee::dead:beef:cafe]',
'[2001:19f0:feee::dead:beef:cafe]:8080',
'xn--4ca9at.com', # Punycode for öäü.com
]
for host in legit_hosts:
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
for host in self.poisoned_hosts:
with self.assertRaises(SuspiciousOperation):
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
@override_settings(USE_X_FORWARDED_PORT=False)
def test_get_port(self):
request = HttpRequest()
request.META = {
'SERVER_PORT': '8080',
'HTTP_X_FORWARDED_PORT': '80',
}
# Shouldn't use the X-Forwarded-Port header
self.assertEqual(request.get_port(), '8080')
request = HttpRequest()
request.META = {
'SERVER_PORT': '8080',
}
self.assertEqual(request.get_port(), '8080')
@override_settings(USE_X_FORWARDED_PORT=True)
def test_get_port_with_x_forwarded_port(self):
request = HttpRequest()
request.META = {
'SERVER_PORT': '8080',
'HTTP_X_FORWARDED_PORT': '80',
}
# Should use the X-Forwarded-Port header
self.assertEqual(request.get_port(), '80')
request = HttpRequest()
request.META = {
'SERVER_PORT': '8080',
}
self.assertEqual(request.get_port(), '8080')
@override_settings(DEBUG=True, ALLOWED_HOSTS=[])
def test_host_validation_in_debug_mode(self):
"""
If ALLOWED_HOSTS is empty and DEBUG is True, variants of localhost are
allowed.
"""
valid_hosts = ['localhost', '127.0.0.1', '[::1]']
for host in valid_hosts:
request = HttpRequest()
request.META = {'HTTP_HOST': host}
self.assertEqual(request.get_host(), host)
# Other hostnames raise a SuspiciousOperation.
with self.assertRaises(SuspiciousOperation):
request = HttpRequest()
request.META = {'HTTP_HOST': 'example.com'}
request.get_host()
@override_settings(ALLOWED_HOSTS=[])
def test_get_host_suggestion_of_allowed_host(self):
"""get_host() makes helpful suggestions if a valid-looking host is not in ALLOWED_HOSTS."""
msg_invalid_host = "Invalid HTTP_HOST header: %r."
msg_suggestion = msg_invalid_host + " You may need to add %r to ALLOWED_HOSTS."
msg_suggestion2 = msg_invalid_host + " The domain name provided is not valid according to RFC 1034/1035"
for host in [ # Valid-looking hosts
'example.com',
'12.34.56.78',
'[2001:19f0:feee::dead:beef:cafe]',
'xn--4ca9at.com', # Punycode for öäü.com
]:
request = HttpRequest()
request.META = {'HTTP_HOST': host}
with self.assertRaisesMessage(SuspiciousOperation, msg_suggestion % (host, host)):
request.get_host()
for domain, port in [ # Valid-looking hosts with a port number
('example.com', 80),
('12.34.56.78', 443),
('[2001:19f0:feee::dead:beef:cafe]', 8080),
]:
host = '%s:%s' % (domain, port)
request = HttpRequest()
request.META = {'HTTP_HOST': host}
with self.assertRaisesMessage(SuspiciousOperation, msg_suggestion % (host, domain)):
request.get_host()
for host in self.poisoned_hosts:
request = HttpRequest()
request.META = {'HTTP_HOST': host}
with self.assertRaisesMessage(SuspiciousOperation, msg_invalid_host % host):
request.get_host()
request = HttpRequest()
request.META = {'HTTP_HOST': "invalid_hostname.com"}
with self.assertRaisesMessage(SuspiciousOperation, msg_suggestion2 % "invalid_hostname.com"):
request.get_host()
def test_split_domain_port_removes_trailing_dot(self):
domain, port = split_domain_port('example.com.:8080')
self.assertEqual(domain, 'example.com')
self.assertEqual(port, '8080')
class BuildAbsoluteURITestCase(SimpleTestCase):
"""
Regression tests for ticket #18314.
"""
def setUp(self):
self.factory = RequestFactory()
def test_build_absolute_uri_no_location(self):
"""
``request.build_absolute_uri()`` returns the proper value when
the ``location`` argument is not provided, and ``request.path``
begins with //.
"""
# //// is needed to create a request with a path beginning with //
request = self.factory.get('////absolute-uri')
self.assertEqual(
request.build_absolute_uri(),
'http://testserver//absolute-uri'
)
def test_build_absolute_uri_absolute_location(self):
"""
``request.build_absolute_uri()`` returns the proper value when
an absolute URL ``location`` argument is provided, and ``request.path``
begins with //.
"""
# //// is needed to create a request with a path beginning with //
request = self.factory.get('////absolute-uri')
self.assertEqual(
request.build_absolute_uri(location='http://example.com/?foo=bar'),
'http://example.com/?foo=bar'
)
def test_build_absolute_uri_schema_relative_location(self):
"""
``request.build_absolute_uri()`` returns the proper value when
a schema-relative URL ``location`` argument is provided, and
``request.path`` begins with //.
"""
# //// is needed to create a request with a path beginning with //
request = self.factory.get('////absolute-uri')
self.assertEqual(
request.build_absolute_uri(location='//example.com/?foo=bar'),
'http://example.com/?foo=bar'
)
def test_build_absolute_uri_relative_location(self):
"""
``request.build_absolute_uri()`` returns the proper value when
a relative URL ``location`` argument is provided, and ``request.path``
begins with //.
"""
# //// is needed to create a request with a path beginning with //
request = self.factory.get('////absolute-uri')
self.assertEqual(
request.build_absolute_uri(location='/foo/bar/'),
'http://testserver/foo/bar/'
)
| bsd-3-clause | 6,678,246,708,988,646,000 | 39.95883 | 117 | 0.573919 | false |
KaceyLeavitt/Tetrahedra_Integration | Tetrahedra_Integration/convergence_plots.py | 1 | 4912 | import numpy as np
import math
import tetrahedron_method
import matplotlib.pyplot as plt
import scipy.integrate as integrate
"""Creates convergence plots for the total energy for grids in the first reciprocal unit
cell."""
def toy_energy1(k_point, number_of_bands):
"""A toy energy model used for testing.
Args:
k_point (:obj:'numpy.ndarray' of :obj:'numpy.float64'): the point to calculate the
energy density at.
number_of_bands (int): the number of eigenvalues to return. Unlike the real energy
density functions, only one value is generated regardless of the value of this
parameter.
Returns:
energy (float): the energy density at the k_point. Calculated using the Free
Electron Model with E = r ^ 2 where E is the energy density and r is the
distance from the origin.
"""
energy = np.dot(k_point, k_point)
return energy
def theoretical_E_Fermi1(V_G, valence_electrons):
E_Fermi = (3 * V_G * valence_electrons / (8 * np.pi)) ** (2 / 3)
return E_Fermi
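# Derivation note (reasoning added here, not from the original source): in the
# free-electron model used by toy_energy1, E(k) = |k|^2 and each k-state holds
# two electrons, so
#     valence_electrons = 2 * (4/3) * pi * k_F**3 / V_G
# gives k_F**3 = 3 * V_G * valence_electrons / (8 * pi), and therefore
# E_Fermi = k_F**2 = (3 * V_G * valence_electrons / (8 * pi)) ** (2 / 3),
# which is exactly the expression returned above.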
def theoretical_integral_result1(E_Fermi):
rho = np.sqrt(E_Fermi)
integral_result = np.pi / 10 * rho ** 5
return integral_result
def toy_energy2(k_point, number_of_bands):
energy = np.sqrt(np.dot(k_point, k_point))
return energy
def theoretical_E_Fermi2(V_G, valence_electrons):
E_Fermi = (3 * V_G * valence_electrons / (8 * np.pi)) ** (1 / 3)
return E_Fermi
def theoretical_integral_result2(E_Fermi):
rho = E_Fermi
integral_result = np.pi / 8 * rho ** 4
return integral_result
def toy_energy3(k_point, number_of_bands):
energy = np.dot(k_point, k_point) ** .25
return energy
def theoretical_E_Fermi3(V_G, valence_electrons):
E_Fermi = (3 * V_G * valence_electrons / (8 * np.pi)) ** (.5 / 3)
return E_Fermi
def theoretical_integral_result3(E_Fermi):
rho = E_Fermi ** 2
integral_result = np.pi / 7 * rho ** 3.5
return integral_result
def toy_energy4(k_point, number_of_bands):
if np.dot(k_point, k_point) < 1:
energy = math.exp(math.cos(2 * math.pi * np.dot(k_point, k_point) ** .5))
else:
energy = math.exp(math.cos(2 * math.pi * np.dot(k_point, k_point) ** .5))
return energy
def theoretical_E_Fermi4(V_G, valence_electrons):
E_Fermi = math.exp(math.cos((3 * math.pi ** 2 * V_G * valence_electrons
+ math.pi ** 3) ** (1 / 3)))
return E_Fermi
def theoretical_integral_result4(E_Fermi):
left_bound = math.acos(math.log(E_Fermi)) / (2 * math.pi)
right_bound = 1 - left_bound
integral_result = integrate.quad(lambda x:
math.exp(math.cos(2 * math.pi * x)), left_bound, right_bound)[0]
return integral_result
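# Note (hedged reading of the code above): left_bound and right_bound are the two
# points in [0, 1] where exp(cos(2*pi*x)) equals E_Fermi, so the quadrature
# integrates the band energy over the region where it lies at or below the Fermi level.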
r_lattice_vectors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
V_G = 1
valence_electrons = 2
theoretical_E_Fermi = theoretical_E_Fermi1(V_G, valence_electrons)
print(theoretical_E_Fermi)
for k in range(1):
number_of_intervals = int(25 + k ** 1.313)
number_of_grid_points = (number_of_intervals + 1) ** 3
grid_vecs = np.array([[(1 - .000001) / number_of_intervals, 0, 0],
[0, (1 - .000001) / number_of_intervals, 0],
[0, 0, (1 - .000001) / number_of_intervals]])
grid = []
for m in range(number_of_intervals + 1):
for n in range(number_of_intervals + 1):
for l in range(number_of_intervals + 1):
grid.append((grid_vecs[:,0] * m + grid_vecs[:,1] * n +
grid_vecs[:,2] * l).tolist())
offset = np.array([0, 0, 0])
apply_weight_correction = True
(calculated_E_Fermi, calculated_integral_result) = \
tetrahedron_method.integrate(r_lattice_vectors, grid_vecs, grid,
toy_energy1, valence_electrons, offset,
apply_weight_correction)
E_Fermi_error = abs(theoretical_E_Fermi - calculated_E_Fermi)
theoretical_integral_result = \
theoretical_integral_result1(theoretical_E_Fermi)
integral_error = abs(theoretical_integral_result -
calculated_integral_result)
print(theoretical_integral_result - calculated_integral_result)
theoretical_integral_result_for_calculated_E_Fermi = \
theoretical_integral_result1(calculated_E_Fermi)
integral_error_for_calculated_E_Fermi = \
abs(theoretical_integral_result_for_calculated_E_Fermi -
calculated_integral_result)
plt.yscale('log')
plt.xscale('log')
plt.scatter(number_of_grid_points, E_Fermi_error, color='red')
plt.scatter(number_of_grid_points, integral_error, color='blue')
plt.scatter(number_of_grid_points, integral_error_for_calculated_E_Fermi,
color='green')
#print(E_Fermi_error, integral_error, integral_error_for_calculated_E_Fermi)
plt.show()
| mit | -8,331,203,432,642,270,000 | 35.385185 | 91 | 0.621132 | false |
PennyDreadfulMTG/Penny-Dreadful-Tools | shared/fetch_tools.py | 1 | 5598 | import json
import os
import urllib.request
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from shared import perf
from shared.pd_exception import OperationalException
def fetch(url: str, character_encoding: Optional[str] = None, force: bool = False, retry: bool = False, session: Optional[requests.Session] = None) -> str:
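    """Return the body of `url` as text.

    `character_encoding`, if given, overrides the encoding used to decode the
    response, `force` sends a Cache-Control: no-cache header, and `retry`
    retries the request once on connection errors. Raises FetchException on
    connection errors and on 500/502/503 responses.
    """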
headers = {}
if force:
headers['Cache-Control'] = 'no-cache'
print('Fetching {url} ({cache})'.format(url=url, cache='no cache' if force else 'cache ok'))
try:
p = perf.start()
if session is not None:
response = session.get(url, headers=headers)
else:
response = requests.get(url, headers=headers)
perf.check(p, 'slow_fetch', (url, headers), 'fetch')
if character_encoding is not None:
response.encoding = character_encoding
if response.status_code in [500, 502, 503]:
raise FetchException(f'Server returned a {response.status_code} from {url}')
p = perf.start()
t = response.text
took = round(perf.took(p), 2)
if took > 1:
print('Getting text from response was very slow. Setting an explicit character_encoding may help.')
return t
except (urllib.error.HTTPError, requests.exceptions.ConnectionError, TimeoutError) as e: # type: ignore # urllib isn't fully stubbed
if retry:
return fetch(url, character_encoding, force, retry=False)
raise FetchException(e) from e
async def fetch_async(url: str) -> str:
print(f'Async fetching {url}')
try:
async with aiohttp.ClientSession() as aios:
response = await aios.get(url)
return await response.text()
except (urllib.error.HTTPError, requests.exceptions.ConnectionError) as e: # type: ignore # urllib isn't fully stubbed
raise FetchException(e) from e
def fetch_json(url: str, character_encoding: Optional[str] = None, session: Optional[requests.Session] = None) -> Any:
try:
blob = fetch(url, character_encoding, session=session)
if blob:
return json.loads(blob)
return None
except json.decoder.JSONDecodeError as e:
print('Failed to load JSON:\n{0}'.format(blob))
raise FetchException(e) from e
async def fetch_json_async(url: str) -> Any:
try:
blob = await fetch_async(url)
if blob:
return json.loads(blob)
return None
except json.decoder.JSONDecodeError:
print('Failed to load JSON:\n{0}'.format(blob))
raise
def post(url: str,
data: Optional[Dict[str, str]] = None,
json_data: Any = None,
) -> str:
print('POSTing to {url} with {data} / {json_data}'.format(url=url, data=data, json_data=json_data))
try:
response = requests.post(url, data=data, json=json_data)
return response.text
except requests.exceptions.ConnectionError as e:
raise FetchException(e) from e
def store(url: str, path: str) -> requests.Response:
print('Storing {url} in {path}'.format(url=url, path=path))
try:
response = requests.get(url, stream=True)
with open(path, 'wb') as fout:
for chunk in response.iter_content(1024):
fout.write(chunk)
return response
except urllib.error.HTTPError as e: # type: ignore
raise FetchException(e) from e
except requests.exceptions.ConnectionError as e: # type: ignore
raise FetchException(e) from e
async def store_async(url: str, path: str) -> aiohttp.ClientResponse:
print('Async storing {url} in {path}'.format(url=url, path=path))
try:
async with aiohttp.ClientSession() as aios:
response = await aios.get(url)
with open(path, 'wb') as fout:
while True:
chunk = await response.content.read(1024)
if not chunk:
break
fout.write(chunk)
return response
    except (urllib.error.HTTPError, aiohttp.ClientError) as e:  # type: ignore # urllib isn't fully stubbed
raise FetchException(e) from e
class FetchException(OperationalException):
pass
def acceptable_file(filepath: str) -> bool:
return os.path.isfile(filepath) and os.path.getsize(filepath) > 1000
def escape(str_input: str, skip_double_slash: bool = False) -> str:
# Expand 'AE' into two characters. This matches the legal list and
# WotC's naming scheme in Kaladesh, and is compatible with the
# image server and scryfall.
s = str_input
if skip_double_slash:
s = s.replace('//', '-split-')
s = urllib.parse.quote_plus(s.replace(u'Æ', 'AE')).lower() # type: ignore # urllib isn't fully stubbed
if skip_double_slash:
s = s.replace('-split-', '//')
return s
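# Illustrative examples (hand-traced from the code above):
#     escape('Æther Vial')                          -> 'aether+vial'
#     escape('Fire // Ice', skip_double_slash=True) -> 'fire+//+ice'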
# pylint: disable=R0913
def post_discord_webhook(webhook_id: Optional[str],
                         webhook_token: Optional[str],
                         message: Optional[str] = None,
                         username: Optional[str] = None,
                         avatar_url: Optional[str] = None,
                         embeds: Optional[List[Dict[str, Any]]] = None,
                         ) -> bool:
if webhook_id is None or webhook_token is None:
return False
url = 'https://discordapp.com/api/webhooks/{id}/{token}'.format(
id=webhook_id, token=webhook_token)
post(url, json_data={
'content': message,
'username': username,
'avatar_url': avatar_url,
'embeds': embeds,
})
return True
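# Minimal usage sketch: the URL, webhook id and token below are placeholders used
# only for illustration, not values from this project.
if __name__ == '__main__':
    example = fetch_json('https://example.com/api/cards.json')  # hypothetical endpoint
    if example is not None:
        post_discord_webhook('WEBHOOK_ID', 'WEBHOOK_TOKEN',  # placeholder credentials
                             message='Fetched {n} entries'.format(n=len(example)))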
| gpl-3.0 | 3,236,549,472,231,532,500 | 37.335616 | 155 | 0.611935 | false |
CarlosCondor/pelisalacarta-xbmc-plus | pelisalacarta/channels/pelisflv.py | 2 | 30568 | # -*- coding: iso-8859-1 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# pelisflv.net channel by Bandavi
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
import sys
import xbmc
import xbmcgui
import xbmcplugin
from xml.dom import minidom
from xml.dom import EMPTY_NAMESPACE
from core import scrapertools
from core import config
from core import logger
from platformcode.xbmc import xbmctools
from core.item import Item
from core import downloadtools
from servers import vk
from servers import servertools
__channel__ = "pelisflv"
__category__ = "F"
__type__ = "xbmc"
__title__ = "PelisFlv"
__language__ = "ES"
DEBUG = config.get_setting("debug")
ATOM_NS = 'http://www.w3.org/2005/Atom'
PLAYLIST_FILENAME_TEMP = "video_playlist.temp.pls"
FULL_FILENAME_PATH = os.path.join( config.get_setting("downloadpath"), PLAYLIST_FILENAME_TEMP )
# This allows running in emulated mode
try:
pluginhandle = int( sys.argv[ 1 ] )
except:
pluginhandle = ""
# Log the start of the channel
logger.info("[pelisflv.py] init")
def mainlist(params,url,category):
logger.info("[pelisflv.py] mainlist")
    # Add entries to the XBMC listing
xbmctools.addnewfolder( __channel__ , "listvideofeeds" , category , "Listar - Novedades" ,"http://www.blogger.com/feeds/3207505541212690627/posts/default?start-index=1&max-results=25","","")
xbmctools.addnewfolder( __channel__ , "listvideos" , category , "Listar - Estrenos","http://www.pelisflv.net/search/label/Estrenos","","")
xbmctools.addnewfolder( __channel__ , "ListadoSeries" , category , "Listar - Generos" ,"http://www.pelisflv.net/","","")
#xbmctools.addnewfolder( __channel__ , "ListadoSeries" , category , "Listar - Series" ,"http://www.pelisflv.net/","","")
xbmctools.addnewfolder( __channel__ , "listvideos" , category , "Listar - Animacion" ,"http://www.pelisflv.net/search/label/Animaci%C3%B3n","","")
xbmctools.addnewfolder( __channel__ , "listvideos" , category , "Listar - Videos no Megavideo (FLV)" ,"http://www.pelisflv.net/search/label/Flv","","")
xbmctools.addnewfolder( __channel__ , "listvideos" , category , "Listar - Videos en Megavideo" ,"http://www.pelisflv.net/search/label/Megavideo","","")
xbmctools.addnewfolder( __channel__ , "listvideos" , category , "Videos Audio Español" ,"http://www.pelisflv.net/search/label/Espa%C3%B1ol","","")
xbmctools.addnewfolder( __channel__ , "listvideos" , category , "Videos Audio Latino" ,"http://www.pelisflv.net/search/label/Latino","","")
xbmctools.addnewfolder( __channel__ , "listvideos" , category , "Videos Audio Original Sub Español" ,"http://www.pelisflv.net/search/label/Sub%20Espa%C3%B1ol","","")
xbmctools.addnewfolder( __channel__ , "search" , category , "Buscar","http://www.pelisflv.net/","","")
# Label (top-right)...
xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category=category )
# Disable sorting...
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
# End of directory...
xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
def search(params,url,category):
logger.info("[pelisflv.py] search")
keyboard = xbmc.Keyboard()
#keyboard.setDefault('')
keyboard.doModal()
if (keyboard.isConfirmed()):
tecleado = keyboard.getText()
if len(tecleado)>0:
#convert to HTML
tecleado = tecleado.replace(" ", "+")
searchUrl = "http://www.pelisflv.net/search?q="+tecleado
listvideos(params,searchUrl,category)
def searchresults(params,url,category):
logger.info("[pelisflv.py] SearchResult")
    # Download the page
data = scrapertools.cachePage(url)
#print data
    # Extract the entries (folders)
patronvideos = '<div class="poster">[^<]+<a href="([^"]+)"' # URL
    patronvideos += '><img src="([^"]+)" width=[^\/]+\/>' # THUMBNAIL
    patronvideos += '</a>[^<]+<[^>]+>[^<]+<[^>]+>[^<]+<a href="[^"]+">([^<]+)</a>' # TITLE
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for match in matches:
        # Attributes
scrapedurl = match[0]
scrapedtitle =match[2]
scrapedtitle = scrapedtitle.replace("–","-")
scrapedtitle = scrapedtitle.replace(" "," ")
scrapedthumbnail = match[1]
scrapedplot = ""
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        # Add to the XBMC listing
xbmctools.addnewfolder( __channel__ , "detail" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )
    # Properties
xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category=category )
xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
def ListadoCapitulosSeries(params,url,category):
logger.info("[pelisflv.py] ListadoCapitulosSeries")
title = urllib.unquote_plus( params.get("title") )
thumbnail = urllib.unquote_plus( params.get("thumbnail") )
    # Download the page
data = scrapertools.downloadpageGzip(url)
#logger.info(data)
    # Pattern for the entries
patron = "<div class='post-body entry-content'>(.*?)<div class='post-footer'>"
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
patron = '<a href="([^"]+)"[^>]+><[^>]+>(.*?)<'
matches = re.compile(patron,re.DOTALL).findall(matches[0])
scrapertools.printMatches(matches)
patron2 = '<iframe src="([^"]+)"'
    # Add the entries found
for match in matches:
        # Attributes
scrapedtitle = match[1]
data2 = scrapertools.downloadpageGzip(match[0])
matches2 = re.compile(patron2,re.DOTALL).findall(data2)
scrapertools.printMatches(matches2)
scrapedurl = matches2[0]
scrapedthumbnail = thumbnail
scrapedplot = ""
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        # Add to the XBMC listing
xbmctools.addnewfolder( __channel__ , "detail" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )
    # Set the title, disable sorting and close the directory
xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def ListadoSeries(params,url,category):
logger.info("[pelisflv.py] ListadoSeries")
title = urllib.unquote_plus( params.get("title") )
    # Download the page
data = scrapertools.cachePage(url)
#logger.info(data)
    # Pattern for the entries
if "Series" in title:
patron = "<center><form>(.*?)</form></center>"
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
patron = '<option value="([^"]+)" />(.*?)\n'
matches = re.compile(patron,re.DOTALL).findall(matches[0])
scrapertools.printMatches(matches)
elif "Generos" in title:
patron = "<h2>Generos</h2>[^<]+<[^>]+>[^<]+<ul>(.*?)</ul>"
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
patron = "<a dir='ltr' href='([^']+)'>(.*?)</a>"
matches = re.compile(patron,re.DOTALL).findall(matches[0])
scrapertools.printMatches(matches)
    # Add the entries found
for match in matches:
        # Attributes
scrapedtitle = match[1]
scrapedurl = match[0]
scrapedthumbnail = ""
scrapedplot = ""
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        # Add to the XBMC listing
xbmctools.addnewfolder( __channel__ , "listvideos" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )
    # Set the title, disable sorting and close the directory
xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def listvideos(params,url,category):
logger.info("[pelisflv.py] listvideos")
if url=="":
url = "http://www.pelisflv.net/"
    # Download the page
data = scrapertools.cachePage(url)
#logger.info(data)
    # Extract the entries (folders)
patronvideos = "<h3 class='post-title entry-title'>[^<]+<a href='([^']+)'" # URL
patronvideos += ">([^<]+)</a>.*?" # Titulo
patronvideos += '<img style="[^"]+" src="([^"]+).*?' # TUMBNAIL
patronvideos += 'border=[^>]+>.*?<span[^>]+>(.*?)</span></div>' # Argumento
#patronvideos += '</h1>[^<]+</div>.*?<div class=[^>]+>[^<]+'
#patronvideos += '</div>[^<]+<div class=[^>]+>.*?href="[^"]+"><img '
#patronvideos += 'style=.*?src="([^"]+)".*?alt=.*?bold.*?>(.*?)</div>' # IMAGEN , DESCRIPCION
#patronvideos += '.*?flashvars="file=(.*?flv)\&' # VIDEO FLV
matches = re.compile(patronvideos,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
scrapedtitle = ""
for match in matches:
        # Title
scrapedtitle = match[1]
# URL
scrapedurl = match[0]
# Thumbnail
scrapedthumbnail = match[2]
        # Plot
scrapedplot = match[3]
scrapedplot = re.sub("<[^>]+>"," ",scrapedplot)
scrapedplot = scrapedplot.replace('“','"')
scrapedplot = scrapedplot.replace('”','"')
scrapedplot = scrapedplot.replace('…','...')
scrapedplot = scrapedplot.replace(" ","")
        # Debugging
if (DEBUG):
logger.info("scrapedtitle="+scrapedtitle)
logger.info("scrapedurl="+scrapedurl)
logger.info("scrapedthumbnail="+scrapedthumbnail)
        # Add to the XBMC listing
xbmctools.addnewfolder( __channel__ , "detail" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )
    # Extract the next-page marker
patronvideos = "<div class='status-msg-hidden'>.+?<a href=\"([^\"]+)\""
matches = re.compile(patronvideos,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches)>0:
scrapedtitle = "Página siguiente"
scrapedurl = matches[0]
scrapedthumbnail = ""
scrapedplot = ""
xbmctools.addnewfolder( __channel__ , "listvideos" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )
# Label (top-right)...
xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
# Disable sorting...
xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
# End of directory...
xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def listvideofeeds(params,url,category):
logger.info("[pelisflv.py] listvideosfeeds")
data = None
thumbnail = ""
xmldata = urllib2.urlopen(url,data)
xmldoc = minidom.parse(xmldata)
xmldoc.normalize()
#print xmldoc.toxml().encode('utf-8')
xmldata.close()
c = 0
plot = ""
for entry in xmldoc.getElementsByTagNameNS(ATOM_NS, u'entry'):
#First title element in doc order within the entry is the title
entrytitle = entry.getElementsByTagNameNS(ATOM_NS, u'title')[0]
entrylink = entry.getElementsByTagNameNS(ATOM_NS, u'link')[2]
entrythumbnail = entry.getElementsByTagNameNS(ATOM_NS, u'content')[0]
etitletext = get_text_from_construct(entrytitle)
elinktext = entrylink.getAttributeNS(EMPTY_NAMESPACE, u'href')
ethumbnailtext = get_text_from_construct(entrythumbnail)
regexp = re.compile(r'src="([^"]+)"')
match = regexp.search(ethumbnailtext)
if match is not None:
thumbnail = match.group(1)
regexp = re.compile(r'bold;">([^<]+)<')
match = regexp.search(ethumbnailtext)
if match is not None:
plot = match.group(1)
print ethumbnailtext
        # Debugging
if (DEBUG):
logger.info("scrapedtitle="+etitletext)
logger.info("scrapedurl="+elinktext)
logger.info("scrapedthumbnail="+thumbnail)
#print etitletext, '(', elinktext, thumbnail,plot, ')'
xbmctools.addnewfolder( __channel__ , "detail" , category , etitletext, elinktext, thumbnail, plot )
c +=1
if c >= 25:
regexp = re.compile(r'start-index=([^\&]+)&')
match = regexp.search(url)
if match is not None:
start_index = int(match.group(1)) + 25
scrapedtitle = "Página siguiente"
scrapedurl = "http://www.blogger.com/feeds/3207505541212690627/posts/default?start-index="+str(start_index)+"&max-results=25"
scrapedthumbnail = ""
scrapedplot = ""
xbmctools.addnewfolder( __channel__ , "listvideofeeds" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )
    # Close the directory
xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def get_text_from_construct(element):
'''
Return the content of an Atom element declared with the
atomTextConstruct pattern. Handle both plain text and XHTML
forms. Return a UTF-8 encoded string.
'''
if element.getAttributeNS(EMPTY_NAMESPACE, u'type') == u'xhtml':
#Grab the XML serialization of each child
childtext = [ c.toxml('utf-8') for c in element.childNodes ]
#And stitch it together
content = ''.join(childtext).strip()
return content
else:
return element.firstChild.data.encode('utf-8')
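# For example: an Atom entry whose <content type="xhtml"> wraps an XHTML <div>
# yields the serialized markup of that div's children, while a plain-text
# construct simply yields the text of its first child node, UTF-8 encoded.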
def detail(params,url,category):
logger.info("[pelisflv.py] detail")
title = urllib.unquote_plus( params.get("title") )
thumbnail = urllib.unquote_plus( params.get("thumbnail") )
plot = urllib.unquote_plus( params.get("plot") )
accion = params.get("accion")
    # Download the page
datafull = scrapertools.cachePage(url)
#logger.info(data)
patron = "google_ad_section_start(.*?)google_ad_section_end -->"
matches = re.compile(patron,re.DOTALL).findall(datafull)
data2 = ""
if len(matches)>0:
data = matches[0]
else:
data = datafull
patron = '<iframe src="(http://pelisflv.net63.net/player/[^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
if len(matches)>0:
data = scrapertools.cachePage(matches[0])
patron = 'href="(http://gamezinepelisflv.webcindario.com/[^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(datafull)
if len(matches)>0:
data2 = scrapertools.cachePage(matches[0])
data = data + data2
ok = False
# ------------------------------------------------------------------------------------
    # Look for links to the videos
# ------------------------------------------------------------------------------------
listavideos = servertools.findvideos(data)
for video in listavideos:
videotitle = video[0]
url = video[1]
server = video[2]
xbmctools.addnewvideo( __channel__ , "play" , category , server , title.strip() + " - " + videotitle , url , thumbnail , plot )
    # Look for links on the Stagevu server - "the servertools.findvideos() module does not find them"
patronvideos = "(http://stagevu.com[^']+)'"
matches = re.compile(patronvideos,re.DOTALL).findall(data)
if len(matches)>0:
logger.info(" Servidor Stagevu")
for match in matches:
ok = True
scrapedurl = match.replace("&","&")
xbmctools.addnewvideo( __channel__ ,"play" , category , "Stagevu" , title+" - [Stagevu]", scrapedurl , thumbnail , plot )
    # Look for links on the Movshare server - "the servertools.findvideos() module does not find them"
patronvideos = "(http://www.movshare.net[^']+)'"
matches = re.compile(patronvideos,re.DOTALL).findall(data)
if len(matches)>0:
logger.info(" Servidor Movshare")
for match in matches:
ok = True
scrapedurl = match.replace("&","&")
xbmctools.addnewvideo( __channel__ ,"play" , category , "Movshare" , title+" - [Movshare]", scrapedurl , thumbnail , plot )
# ------------------------------------------------------------------------------------
    #--- Look for direct videos
patronvideos = 'file=(http\:\/\/[^\&]+)\&'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
print "link directos encontrados :%s" %matches
#print data
if len(matches)>0:
for match in matches:
subtitle = "[FLV-Directo]"
if ("xml" in match):
data2 = scrapertools.cachePage(match)
logger.info("data2="+data2)
patronvideos = '<track>.*?'
patronvideos += '<title>([^<]+)</title>[^<]+'
patronvideos += '<location>([^<]+)</location>(?:[^<]+'
patronvideos += '<meta rel="type">video</meta>[^<]+|[^<]+)'
patronvideos += '<meta rel="captions">([^<]+)</meta>[^<]+'
patronvideos += '</track>'
matches2 = re.compile(patronvideos,re.DOTALL).findall(data2)
scrapertools.printMatches(matches)
for match2 in matches2:
sub = ""
playWithSubt = "play"
                    if match2[2].endswith(".xml"): # Subtitles in xml format are incompatible with XBMC
sub = "[Subtitulo incompatible con xbmc]"
if ".mp4" in match2[1]:
subtitle = "[MP4-Directo]"
scrapedtitle = '%s - (%s) %s' %(title,match2[0],subtitle)
scrapedurl = match2[1].strip()
scrapedthumbnail = thumbnail
scrapedplot = plot
if match2[2].endswith(".srt"):
scrapedurl = scrapedurl + "|" + match2[2]
playWithSubt = "play2"
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
                    # Add to the XBMC listing
xbmctools.addnewvideo( __channel__ , playWithSubt , category , "Directo" , scrapedtitle, scrapedurl , scrapedthumbnail, scrapedplot )
ok = True
        else:
            scrapedurl = match
            print scrapedurl
            if match.endswith(".srt"):
                scrapedurl = scrapedurl + "|" + match
                xbmctools.addnewvideo( __channel__ ,"play2" , category , "Directo" , title + " (V.O.S) - "+subtitle, scrapedurl , thumbnail , plot )
                ok = True
            if match.endswith(".xml"):
                sub = "[Subtitulo incompatible con xbmc]"
                xbmctools.addnewvideo( __channel__ ,"play" , category , "Directo" , title + " (V.O) - %s %s" %(subtitle,sub), scrapedurl , thumbnail , plot )
                ok = True
#src="http://pelisflv.net63.net/player/videos.php?x=http://pelisflv.net63.net/player/xmls/The-Lord-Of-The-Ring.xml"
patronvideos = '(http\:\/\/[^\/]+\/[^\/]+\/[^\/]+\/[^\.]+\.xml)'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
#print data
if len(matches)>0:
playlistFile = open(FULL_FILENAME_PATH,"w")
playlistFile.write("[playlist]\n")
playlistFile.write("\n")
for match in matches:
subtitle = "[FLV-Directo]"
data2 = scrapertools.cachePage(match.replace(" ","%20"))
logger.info("data2="+data2)
patronvideos = '<track>.*?'
patronvideos += '<title>([^<]+)</title>.*?'
patronvideos += '<location>([^<]+)</location>(?:[^<]+'
patronvideos += '<meta rel="captions">([^<]+)</meta>[^<]+'
patronvideos += '|([^<]+))</track>'
matches2 = re.compile(patronvideos,re.DOTALL).findall(data2)
scrapertools.printMatches(matches)
c = 0
for match2 in matches2:
c +=1
sub = ""
playWithSubt = "play"
                if match2[2].endswith(".xml"): # Subtitles in xml format are incompatible with XBMC
sub = "[Subtitulo incompatible con xbmc]"
if match2[1].endswith(".mp4"):
subtitle = "[MP4-Directo]"
scrapedtitle = '%s - (%s) %s' %(title,match2[0],subtitle)
scrapedurl = match2[1].strip()
scrapedthumbnail = thumbnail
scrapedplot = plot
if match2[2].endswith(".srt"):
scrapedurl = scrapedurl + "|" + match2[2]
playWithSubt = "play2"
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
                # Add to the XBMC listing
xbmctools.addnewvideo( __channel__ , playWithSubt , category , "Directo" , scrapedtitle, scrapedurl , scrapedthumbnail, scrapedplot )
ok =True
playlistFile.write("File%d=%s\n" %(c,match2[1]))
playlistFile.write("Title%d=%s\n" %(c,match2[0]))
playlistFile.write("\n")
playlistFile.write("NumberOfEntries=%d\n" %c)
playlistFile.write("Version=2\n")
playlistFile.flush();
playlistFile.close()
if c>0:
xbmctools.addnewvideo( __channel__ , "play" , category , "Directo" , "Reproducir Todo a la vez...", FULL_FILENAME_PATH , scrapedthumbnail, scrapedplot )
    # Look for links on the Videoweed server - "the servertools.findvideos() module does not find them"
patronvideos = '(http\:\/\/[^\.]+\.videoweed.com\/[^"]+)"'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches)>0:
logger.info(" Servidor Videoweed")
for match in matches:
ok = True
scrapedurl = match.replace("&","&")
xbmctools.addnewvideo( __channel__ ,"play" , category , "Videoweed" , title+" - [Videoweed]", scrapedurl , thumbnail , plot )
    # Look for links on the Gigabyteupload server # http://cdn-2.gigabyteupload.com/files/207bb7b658d5068650ebabaca8ffc52d/vFuriadeTitanes_newg.es.avi
patronvideos = '(http\:\/\/[^\.]+\.gigabyteupload.com\/[^"]+)"'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches)>0:
logger.info(" Servidor Gigabyteupload")
for match in matches:
ok = True
xbmctools.addnewvideo( __channel__ ,"play" , category , "Gigabyteupload" , title+" - [Gigabyteupload]",match , thumbnail , plot )
## --------------------------------------------------------------------------------------##
# Look for video links on the vk.com server                                              #
## --------------------------------------------------------------------------------------##
'''
var video_host = '447.gt3.vkadre.ru';
var video_uid = '0';
var video_vtag = '2638f17ddd39-';
var video_no_flv = 0;
var video_max_hd = '0';
var video_title = 'newCine.NET+-+neWG.Es+%7C+Chicken+Little';
patronvideos = 'src="(http://[^\/]+\/video_ext.php[^"]+)"'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
if len(matches)>0:
ok = True
print " encontro VK.COM :%s" %matches[0]
videourl = vk.geturl(matches[0])
xbmctools.addnewvideo( __channel__ , "play" , category , "Directo" , title + " - "+"[VK]", videourl , thumbnail , plot )
'''
## --------------------------------------------------------------------------------------##
# Look for video links on the Dailymotion server                                         #
## --------------------------------------------------------------------------------------##
patronvideos = 'http://www.dailymotion.com/swf/video/([^"]+)"'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
playWithSubt = "play"
subtit = ""
if len(matches)>0:
daily = 'http://www.dailymotion.com/video/%s'%matches[0]
data2 = scrapertools.cachePage(daily)
# Look for the Spanish subtitles
subtitulo = re.compile('%22es%22%3A%22(.+?)%22').findall(data2)
if len(subtitulo)>0:
subtit = urllib.unquote(subtitulo[0])
subtit = subtit.replace("\/","/")
# Look for the link to the FLV-format video
Lowres=re.compile('%22sdURL%22%3A%22(.+?)%22').findall(data2)
if len(Lowres)>0:
videourl = urllib.unquote(Lowres[0])
videourl = videourl.replace("\/","/")
if len(subtit)>0:
videourl = videourl + "|" + subtit
playWithSubt = "play2"
subtitle = "[FLV-Directo-Dailymotion]"
xbmctools.addnewvideo( __channel__ , playWithSubt , category , "Directo" , title + " - "+subtitle, videourl , thumbnail , plot )
# Look for the link to the HQ (H264) format video
Highres=re.compile('%22hqURL%22%3A%22(.+?)%22').findall(data2)
if len(Highres)>0:
videourl = urllib.unquote(Highres[0])
videourl = videourl.replace("\/","/")
if len(subtit)>0:
videourl = videourl + "|" + subtit
playWithSubt = "play2"
subtitle = "[h264-Directo-Dailymotion-este video no es soportado en versiones antiguas o xbox plataforma]"
xbmctools.addnewvideo( __channel__ , playWithSubt , category , "Directo" , title + " - "+subtitle, videourl , thumbnail , plot )
if not ok:
patron = "SeriesPage"
matches = re.compile(patron,re.DOTALL).findall(datafull)
if len(matches)>0:
ListadoCapitulosSeries(params,url,category)
# Label (top-right)...
xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
# Disable sorting...
xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
# End of directory...
xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def play(params,url,category):
logger.info("[pelisflv.py] play")
title = unicode( xbmc.getInfoLabel( "ListItem.Title" ), "utf-8" )
thumbnail = urllib.unquote_plus( params.get("thumbnail") )
plot = unicode( xbmc.getInfoLabel( "ListItem.Plot" ), "utf-8" )
server = params["server"]
xbmctools.play_video(__channel__,server,url,category,title,thumbnail,plot)
def play2(params,url,category):
logger.info("[pelisflv.py] play2")
url1 = url
if "|" in url:
urlsplited = url.split("|")
url1 = urlsplited[0]
urlsubtit = urlsplited[1]
subt_ok = "0"
count = 0
while subt_ok == "0":
if count==0:
subt_ok = downloadstr(urlsubtit)
count += 1
print "subtitulo subt_ok = %s" % str(subt_ok)
if subt_ok is None: # si es None la descarga del subtitulo esta ok
config.set_setting("subtitulo", "true")
break
play(params,url1,category)
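# Note: the function below repairs titles whose UTF-8 accents were decoded as
# Latin-1 (mojibake such as "é" standing for "é"); the garbled byte pairs on
# the left of each replace() call are intentional and must not be "fixed".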
def acentos(title):
title = title.replace("ÃÂ", "")
title = title.replace("é","é")
title = title.replace("á","á")
title = title.replace("ó","ó")
title = title.replace("ú","ú")
title = title.replace("ÃÂ","í")
title = title.replace("ñ","ñ")
title = title.replace("ââ¬Â", "")
title = title.replace("ââ¬ÅÃÂ", "")
title = title.replace("ââ¬Å","")
title = title.replace("é","é")
title = title.replace("á","á")
title = title.replace("ó","ó")
title = title.replace("ú","ú")
title = title.replace("Ã","í")
title = title.replace("ñ","ñ")
title = title.replace("Ãâ","Ó")
return(title)
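# Note: the helper below truncates any previous subtitulo.srt in the data path,
# then downloads urlsub over it with downloadtools.downloadfile; it returns -1
# on a download error and otherwise whatever downloadfile returns (play2 treats
# None as success).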
def downloadstr(urlsub):
from core import downloadtools
fullpath = os.path.join( config.get_data_path() , 'subtitulo.srt' )
if os.path.exists(fullpath):
try:
subtitfile = open(fullpath,"w")
subtitfile.close()
except IOError:
logger.info("Error al limpiar el archivo subtitulo.srt "+fullpath)
raise
try:
ok = downloadtools.downloadfile(urlsub,fullpath)
except IOError:
logger.info("Error al descargar el subtitulo "+urlsub)
return -1
return ok
| gpl-3.0 | 3,176,220,575,973,220,000 | 42.114731 | 197 | 0.564741 | false |
strongh/GPy | GPy/inference/latent_function_inference/var_dtc.py | 4 | 10062 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from posterior import Posterior
from ...util.linalg import mdot, jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify
from ...util import diag
from ...core.parameterization.variational import VariationalPosterior
import numpy as np
from . import LatentFunctionInference
log_2_pi = np.log(2*np.pi)
import logging, itertools
logger = logging.getLogger('vardtc')
class VarDTC(LatentFunctionInference):
"""
An object for inference when the likelihood is Gaussian, but we want to do sparse inference.
The function self.inference returns a Posterior object, which summarizes
the posterior.
For efficiency, we sometimes work with the cholesky of Y*Y.T. To save repeatedly recomputing this, we cache it.
"""
const_jitter = 1e-6
def __init__(self, limit=1):
#self._YYTfactor_cache = caching.cache()
from ...util.caching import Cacher
self.limit = limit
self.get_trYYT = Cacher(self._get_trYYT, limit)
self.get_YYTfactor = Cacher(self._get_YYTfactor, limit)
def set_limit(self, limit):
self.get_trYYT.limit = limit
self.get_YYTfactor.limit = limit
def _get_trYYT(self, Y):
return np.einsum("ij,ij->", Y, Y)
# faster than, but same as:
# return np.sum(np.square(Y))
def __getstate__(self):
# has to be overridden, as Cacher objects cannot be pickled.
return self.limit
def __setstate__(self, state):
# has to be overridden, as Cacher objects cannot be pickled.
self.limit = state
from ...util.caching import Cacher
self.get_trYYT = Cacher(self._get_trYYT, self.limit)
self.get_YYTfactor = Cacher(self._get_YYTfactor, self.limit)
def _get_YYTfactor(self, Y):
"""
find a matrix L which satisfies LLT = YYT.
Note that L may have fewer columns than Y.
"""
N, D = Y.shape
if (N>=D):
return Y.view(np.ndarray)
else:
return jitchol(tdot(Y))
def get_VVTfactor(self, Y, prec):
return Y * prec # TODO chache this, and make it effective
def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None, Lm=None, dL_dKmm=None):
_, output_dim = Y.shape
uncertain_inputs = isinstance(X, VariationalPosterior)
#see whether we've got a different noise variance for each datum
beta = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), 1e-6)
# VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency!
#self.YYTfactor = self.get_YYTfactor(Y)
#VVT_factor = self.get_VVTfactor(self.YYTfactor, beta)
het_noise = beta.size > 1
if beta.ndim == 1:
beta = beta[:, None]
VVT_factor = beta*Y
#VVT_factor = beta*Y
trYYT = self.get_trYYT(Y)
# do the inference:
num_inducing = Z.shape[0]
num_data = Y.shape[0]
# kernel computations, using BGPLVM notation
Kmm = kern.K(Z).copy()
diag.add(Kmm, self.const_jitter)
if Lm is None:
Lm = jitchol(Kmm)
# The rather complex computations of A, and the psi stats
if uncertain_inputs:
psi0 = kern.psi0(Z, X)
psi1 = kern.psi1(Z, X)
if het_noise:
psi2_beta = np.sum([kern.psi2(Z,X[i:i+1,:]) * beta_i for i,beta_i in enumerate(beta)],0)
else:
psi2_beta = kern.psi2(Z,X) * beta
LmInv = dtrtri(Lm)
A = LmInv.dot(psi2_beta.dot(LmInv.T))
else:
psi0 = kern.Kdiag(X)
psi1 = kern.K(X, Z)
if het_noise:
tmp = psi1 * (np.sqrt(beta))
else:
tmp = psi1 * (np.sqrt(beta))
tmp, _ = dtrtrs(Lm, tmp.T, lower=1)
A = tdot(tmp) #print A.sum()
# factor B
B = np.eye(num_inducing) + A
LB = jitchol(B)
psi1Vf = np.dot(psi1.T, VVT_factor)
# back substitute C into psi1Vf
tmp, _ = dtrtrs(Lm, psi1Vf, lower=1, trans=0)
_LBi_Lmi_psi1Vf, _ = dtrtrs(LB, tmp, lower=1, trans=0)
tmp, _ = dtrtrs(LB, _LBi_Lmi_psi1Vf, lower=1, trans=1)
Cpsi1Vf, _ = dtrtrs(Lm, tmp, lower=1, trans=1)
# data fit and derivative of L w.r.t. Kmm
delit = tdot(_LBi_Lmi_psi1Vf)
data_fit = np.trace(delit)
DBi_plus_BiPBi = backsub_both_sides(LB, output_dim * np.eye(num_inducing) + delit)
if dL_dKmm is None:
delit = -0.5 * DBi_plus_BiPBi
delit += -0.5 * B * output_dim
delit += output_dim * np.eye(num_inducing)
# Compute dL_dKmm
dL_dKmm = backsub_both_sides(Lm, delit)
# derivatives of L w.r.t. psi
dL_dpsi0, dL_dpsi1, dL_dpsi2 = _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm,
VVT_factor, Cpsi1Vf, DBi_plus_BiPBi,
psi1, het_noise, uncertain_inputs)
# log marginal likelihood
log_marginal = _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise,
psi0, A, LB, trYYT, data_fit, Y)
#noise derivatives
dL_dR = _compute_dL_dR(likelihood,
het_noise, uncertain_inputs, LB,
_LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A,
psi0, psi1, beta,
data_fit, num_data, output_dim, trYYT, Y, VVT_factor)
dL_dthetaL = likelihood.exact_inference_gradients(dL_dR,Y_metadata)
#put the gradients in the right places
if uncertain_inputs:
grad_dict = {'dL_dKmm': dL_dKmm,
'dL_dpsi0':dL_dpsi0,
'dL_dpsi1':dL_dpsi1,
'dL_dpsi2':dL_dpsi2,
'dL_dthetaL':dL_dthetaL}
else:
grad_dict = {'dL_dKmm': dL_dKmm,
'dL_dKdiag':dL_dpsi0,
'dL_dKnm':dL_dpsi1,
'dL_dthetaL':dL_dthetaL}
#get sufficient things for posterior prediction
#TODO: do we really want to do this in the loop?
if VVT_factor.shape[1] == Y.shape[1]:
woodbury_vector = Cpsi1Vf # == Cpsi1V
else:
# VVT_factor does not span the output dimensions; recompute the woodbury vector explicitly
psi1V = np.dot(Y.T*beta, psi1).T
tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0)
tmp, _ = dpotrs(LB, tmp, lower=1)
woodbury_vector, _ = dtrtrs(Lm, tmp, lower=1, trans=1)
Bi, _ = dpotri(LB, lower=1)
symmetrify(Bi)
Bi = -dpotri(LB, lower=1)[0]
diag.add(Bi, 1)
woodbury_inv = backsub_both_sides(Lm, Bi)
#construct a posterior object
post = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector, K=Kmm, mean=None, cov=None, K_chol=Lm)
return post, log_marginal, grad_dict
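# Illustrative sketch (not part of the original module) of how this inference
# object is typically driven; the kernel and likelihood constructors are
# assumed from the public GPy API and may differ between versions:
#
#   import numpy as np
#   import GPy
#
#   X = np.random.randn(100, 1)
#   Y = np.sin(X) + 0.05 * np.random.randn(100, 1)
#   Z = X[::10].copy()                      # inducing inputs
#   kern = GPy.kern.RBF(input_dim=1)
#   lik = GPy.likelihoods.Gaussian()
#   post, log_marginal, grads = VarDTC().inference(kern, X, Z, lik, Y)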
def _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm, VVT_factor, Cpsi1Vf, DBi_plus_BiPBi, psi1, het_noise, uncertain_inputs):
dL_dpsi0 = -0.5 * output_dim * (beta* np.ones([num_data, 1])).flatten()
dL_dpsi1 = np.dot(VVT_factor, Cpsi1Vf.T)
dL_dpsi2_beta = 0.5 * backsub_both_sides(Lm, output_dim * np.eye(num_inducing) - DBi_plus_BiPBi)
if het_noise:
if uncertain_inputs:
dL_dpsi2 = beta[:, None] * dL_dpsi2_beta[None, :, :]
else:
dL_dpsi1 += 2.*np.dot(dL_dpsi2_beta, (psi1 * beta).T).T
dL_dpsi2 = None
else:
dL_dpsi2 = beta * dL_dpsi2_beta
if not uncertain_inputs:
# subsume back into psi1 (==Kmn)
dL_dpsi1 += 2.*np.dot(psi1, dL_dpsi2)
dL_dpsi2 = None
return dL_dpsi0, dL_dpsi1, dL_dpsi2
def _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A, psi0, psi1, beta, data_fit, num_data, output_dim, trYYT, Y, VVT_factr=None):
# the partial derivative vector for the likelihood
if likelihood.size == 0:
# save computation here.
dL_dR = None
elif het_noise:
if uncertain_inputs:
raise NotImplementedError, "heteroscedatic derivates with uncertain inputs not implemented"
else:
#from ...util.linalg import chol_inv
#LBi = chol_inv(LB)
LBi, _ = dtrtrs(LB,np.eye(LB.shape[0]))
Lmi_psi1, nil = dtrtrs(Lm, psi1.T, lower=1, trans=0)
_LBi_Lmi_psi1, _ = dtrtrs(LB, Lmi_psi1, lower=1, trans=0)
dL_dR = -0.5 * beta + 0.5 * VVT_factr**2
dL_dR += 0.5 * output_dim * (psi0 - np.sum(Lmi_psi1**2,0))[:,None] * beta**2
dL_dR += 0.5*np.sum(mdot(LBi.T,LBi,Lmi_psi1)*Lmi_psi1,0)[:,None]*beta**2
dL_dR += -np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T * Y * beta**2
dL_dR += 0.5*np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T**2 * beta**2
else:
# likelihood is not heteroscedastic
dL_dR = -0.5 * num_data * output_dim * beta + 0.5 * trYYT * beta ** 2
dL_dR += 0.5 * output_dim * (psi0.sum() * beta ** 2 - np.trace(A) * beta)
dL_dR += beta * (0.5 * np.sum(A * DBi_plus_BiPBi) - data_fit)
return dL_dR
def _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise, psi0, A, LB, trYYT, data_fit, Y):
#compute log marginal likelihood
if het_noise:
lik_1 = -0.5 * num_data * output_dim * np.log(2. * np.pi) + 0.5 * output_dim * np.sum(np.log(beta)) - 0.5 * np.sum(beta.ravel() * np.square(Y).sum(axis=-1))
lik_2 = -0.5 * output_dim * (np.sum(beta.flatten() * psi0) - np.trace(A))
else:
lik_1 = -0.5 * num_data * output_dim * (np.log(2. * np.pi) - np.log(beta)) - 0.5 * beta * trYYT
lik_2 = -0.5 * output_dim * (np.sum(beta * psi0) - np.trace(A))
lik_3 = -output_dim * (np.sum(np.log(np.diag(LB))))
lik_4 = 0.5 * data_fit
log_marginal = lik_1 + lik_2 + lik_3 + lik_4
return log_marginal
| bsd-3-clause | 7,008,197,342,620,847,000 | 39.409639 | 180 | 0.569271 | false |
procangroup/edx-platform | openedx/core/djangoapps/schedules/management/commands/tests/test_send_course_update.py | 4 | 4366 | """
Tests for send_course_update management command.
"""
# pylint: disable=no-member
import ddt
from mock import patch, _is_started
from unittest import skipUnless
from django.conf import settings
from edx_ace.utils.date import serialize
from openedx.core.djangoapps.schedules import resolvers, tasks
from openedx.core.djangoapps.schedules.config import COURSE_UPDATE_WAFFLE_FLAG
from openedx.core.djangoapps.schedules.management.commands import send_course_update as nudge
from openedx.core.djangoapps.schedules.management.commands.tests.send_email_base import (
ScheduleSendEmailTestMixin,
ExperienceTest
)
from openedx.core.djangoapps.schedules.management.commands.tests.upsell_base import ScheduleUpsellTestMixin
from openedx.core.djangoapps.schedules.models import ScheduleExperience
from openedx.core.djangolib.testing.utils import skip_unless_lms
from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag
from student.tests.factories import CourseEnrollmentFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@ddt.ddt
@skip_unless_lms
@skipUnless(
'openedx.core.djangoapps.schedules.apps.SchedulesConfig' in settings.INSTALLED_APPS,
"Can't test schedules if the app isn't installed",
)
class TestSendCourseUpdate(ScheduleUpsellTestMixin, ScheduleSendEmailTestMixin, ModuleStoreTestCase):
__test__ = True
# pylint: disable=protected-access
resolver = resolvers.CourseUpdateResolver
task = tasks.ScheduleCourseUpdate
deliver_task = tasks._course_update_schedule_send
command = nudge.Command
deliver_config = 'deliver_course_update'
enqueue_config = 'enqueue_course_update'
expected_offsets = range(-7, -77, -7)
experience_type = ScheduleExperience.EXPERIENCES.course_updates
queries_deadline_for_each_course = True
def setUp(self):
super(TestSendCourseUpdate, self).setUp()
self.highlights_patcher = patch('openedx.core.djangoapps.schedules.resolvers.get_week_highlights')
mock_highlights = self.highlights_patcher.start()
mock_highlights.return_value = ['Highlight {}'.format(num + 1) for num in range(3)]
self.addCleanup(self.stop_highlights_patcher)
def stop_highlights_patcher(self):
"""
Stops the patcher for the get_week_highlights method
if the patch is still in progress.
"""
if _is_started(self.highlights_patcher):
self.highlights_patcher.stop()
@ddt.data(
ExperienceTest(experience=ScheduleExperience.EXPERIENCES.default, offset=expected_offsets[0], email_sent=False),
ExperienceTest(experience=ScheduleExperience.EXPERIENCES.course_updates, offset=expected_offsets[0], email_sent=True),
ExperienceTest(experience=None, offset=expected_offsets[0], email_sent=False),
)
def test_schedule_in_different_experience(self, test_config):
self._check_if_email_sent_for_experience(test_config)
@override_waffle_flag(COURSE_UPDATE_WAFFLE_FLAG, True)
@patch('openedx.core.djangoapps.schedules.signals.get_current_site')
def test_with_course_data(self, mock_get_current_site):
self.highlights_patcher.stop()
mock_get_current_site.return_value = self.site_config.site
course = CourseFactory(highlights_enabled_for_messaging=True, self_paced=True)
with self.store.bulk_operations(course.id):
ItemFactory.create(parent=course, category='chapter', highlights=[u'highlights'])
enrollment = CourseEnrollmentFactory(course_id=course.id, user=self.user, mode=u'audit')
self.assertEqual(enrollment.schedule.get_experience_type(), ScheduleExperience.EXPERIENCES.course_updates)
_, offset, target_day, _ = self._get_dates(offset=self.expected_offsets[0])
enrollment.schedule.start = target_day
enrollment.schedule.save()
with patch.object(tasks, 'ace') as mock_ace:
self.task().apply(kwargs=dict( # pylint: disable=no-value-for-parameter
site_id=self.site_config.site.id,
target_day_str=serialize(target_day),
day_offset=offset,
bin_num=self._calculate_bin_for_user(enrollment.user),
))
self.assertTrue(mock_ace.send.called)
| agpl-3.0 | -7,804,825,046,453,626,000 | 44.010309 | 126 | 0.734769 | false |
kayhayen/Nuitka | nuitka/build/inline_copy/tqdm/tqdm/cli.py | 3 | 10509 | """
Module version for monitoring CLI pipes (`... | python -m tqdm | ...`).
"""
import logging
import re
import sys
from ast import literal_eval as numeric
from .std import TqdmKeyError, TqdmTypeError, tqdm
from .version import __version__
__all__ = ["main"]
log = logging.getLogger(__name__)
def cast(val, typ):
log.debug((val, typ))
if " or " in typ:
for t in typ.split(" or "):
try:
return cast(val, t)
except TqdmTypeError:
pass
raise TqdmTypeError(val + ' : ' + typ)
# sys.stderr.write('\ndebug | `val:type`: `' + val + ':' + typ + '`.\n')
if typ == 'bool':
if (val == 'True') or (val == ''):
return True
elif val == 'False':
return False
else:
raise TqdmTypeError(val + ' : ' + typ)
try:
return eval(typ + '("' + val + '")')
except Exception:
if typ == 'chr':
return chr(ord(eval('"' + val + '"'))).encode()
else:
raise TqdmTypeError(val + ' : ' + typ)
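# Illustrative behaviour of cast() (values chosen for this sketch):
#   cast("100", "int") -> 100
#   cast("1.5", "int or float") -> 1.5 (the "int" attempt fails, "float" succeeds)
#   cast("True", "bool") -> True
#   cast("x", "chr") -> the single character "x" encoded to bytes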
def posix_pipe(fin, fout, delim=b'\\n', buf_size=256,
callback=lambda float: None, callback_len=True):
"""
Params
------
fin : binary file with `read(buf_size : int)` method
fout : binary file with `write` (and optionally `flush`) methods.
callback : function(float), e.g.: `tqdm.update`
callback_len : If (default: True) do `callback(len(buffer))`.
Otherwise, do `callback(data) for data in buffer.split(delim)`.
"""
fp_write = fout.write
if not delim:
while True:
tmp = fin.read(buf_size)
# flush at EOF
if not tmp:
getattr(fout, 'flush', lambda: None)()
return
fp_write(tmp)
callback(len(tmp))
# return
buf = b''
# n = 0
while True:
tmp = fin.read(buf_size)
# flush at EOF
if not tmp:
if buf:
fp_write(buf)
if callback_len:
# n += 1 + buf.count(delim)
callback(1 + buf.count(delim))
else:
for i in buf.split(delim):
callback(i)
getattr(fout, 'flush', lambda: None)()
return # n
while True:
try:
i = tmp.index(delim)
except ValueError:
buf += tmp
break
else:
fp_write(buf + tmp[:i + len(delim)])
# n += 1
callback(1 if callback_len else (buf + tmp[:i]))
buf = b''
tmp = tmp[i + len(delim):]
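# Illustrative direct use of posix_pipe() (file names are made up): copy a
# binary stream while counting newline-delimited records with a tqdm bar.
#
#   with open("in.log", "rb") as fin, open("out.log", "wb") as fout, \
#           tqdm(unit="lines") as t:
#       posix_pipe(fin, fout, delim=b"\n", callback=t.update)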
# ((opt, type), ... )
RE_OPTS = re.compile(r'\n {8}(\S+)\s{2,}:\s*([^,]+)')
# better split method assuming no positional args
RE_SHLEX = re.compile(r'\s*(?<!\S)--?([^\s=]+)(\s+|=|$)')
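# e.g. RE_SHLEX.split("tqdm --total 100 --desc=x")
# -> ["tqdm", "total", " ", "100", "desc", "=", "x"]
# so option names land at argv[1::3] and their values at argv[3::3] in main().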
# TODO: add custom support for some of the following?
UNSUPPORTED_OPTS = ('iterable', 'gui', 'out', 'file')
# The 8 leading spaces are required for consistency
CLI_EXTRA_DOC = r"""
Extra CLI Options
-----------------
name : type, optional
TODO: find out why this is needed.
delim : chr, optional
Delimiting character [default: '\n']. Use '\0' for null.
N.B.: on Windows systems, Python converts '\n' to '\r\n'.
buf_size : int, optional
String buffer size in bytes [default: 256]
used when `delim` is specified.
bytes : bool, optional
If true, will count bytes, ignore `delim`, and default
`unit_scale` to True, `unit_divisor` to 1024, and `unit` to 'B'.
tee : bool, optional
If true, passes `stdin` to both `stderr` and `stdout`.
update : bool, optional
If true, will treat input as newly elapsed iterations,
i.e. numbers to pass to `update()`. Note that this is slow
(~2e5 it/s) since every input must be decoded as a number.
update_to : bool, optional
If true, will treat input as total elapsed iterations,
i.e. numbers to assign to `self.n`. Note that this is slow
(~2e5 it/s) since every input must be decoded as a number.
null : bool, optional
If true, will discard input (no stdout).
manpath : str, optional
Directory in which to install tqdm man pages.
comppath : str, optional
Directory in which to place tqdm completion.
log : str, optional
CRITICAL|FATAL|ERROR|WARN(ING)|[default: 'INFO']|DEBUG|NOTSET.
"""
def main(fp=sys.stderr, argv=None):
"""
Parameters (internal use only)
------------------------------
fp : file-like object for tqdm
argv : list (default: sys.argv[1:])
"""
if argv is None:
argv = sys.argv[1:]
try:
log_idx = argv.index('--log')
except ValueError:
for i in argv:
if i.startswith('--log='):
logLevel = i[len('--log='):]
break
else:
logLevel = 'INFO'
else:
# argv.pop(log_idx)
# logLevel = argv.pop(log_idx)
logLevel = argv[log_idx + 1]
logging.basicConfig(level=getattr(logging, logLevel),
format="%(levelname)s:%(module)s:%(lineno)d:%(message)s")
d = tqdm.__init__.__doc__ + CLI_EXTRA_DOC
opt_types = dict(RE_OPTS.findall(d))
# opt_types['delim'] = 'chr'
for o in UNSUPPORTED_OPTS:
opt_types.pop(o)
log.debug(sorted(opt_types.items()))
# d = RE_OPTS.sub(r' --\1=<\1> : \2', d)
split = RE_OPTS.split(d)
opt_types_desc = zip(split[1::3], split[2::3], split[3::3])
d = ''.join(('\n --{0} : {2}{3}' if otd[1] == 'bool' else
'\n --{0}=<{1}> : {2}{3}').format(
otd[0].replace('_', '-'), otd[0], *otd[1:])
for otd in opt_types_desc if otd[0] not in UNSUPPORTED_OPTS)
d = """Usage:
tqdm [--help | options]
Options:
-h, --help Print this help and exit.
-v, --version Print version and exit.
""" + d.strip('\n') + '\n'
# opts = docopt(d, version=__version__)
if any(v in argv for v in ('-v', '--version')):
sys.stdout.write(__version__ + '\n')
sys.exit(0)
elif any(v in argv for v in ('-h', '--help')):
sys.stdout.write(d + '\n')
sys.exit(0)
argv = RE_SHLEX.split(' '.join(["tqdm"] + argv))
opts = dict(zip(argv[1::3], argv[3::3]))
log.debug(opts)
opts.pop('log', True)
tqdm_args = {'file': fp}
try:
for (o, v) in opts.items():
o = o.replace('-', '_')
try:
tqdm_args[o] = cast(v, opt_types[o])
except KeyError as e:
raise TqdmKeyError(str(e))
log.debug('args:' + str(tqdm_args))
delim_per_char = tqdm_args.pop('bytes', False)
update = tqdm_args.pop('update', False)
update_to = tqdm_args.pop('update_to', False)
if sum((delim_per_char, update, update_to)) > 1:
raise TqdmKeyError("Can only have one of --bytes --update --update_to")
except Exception:
fp.write('\nError:\nUsage:\n tqdm [--help | options]\n')
for i in sys.stdin:
sys.stdout.write(i)
raise
else:
buf_size = tqdm_args.pop('buf_size', 256)
delim = tqdm_args.pop('delim', b'\\n')
tee = tqdm_args.pop('tee', False)
manpath = tqdm_args.pop('manpath', None)
comppath = tqdm_args.pop('comppath', None)
if tqdm_args.pop('null', False):
class stdout(object):
@staticmethod
def write(_):
pass
else:
stdout = sys.stdout
stdout = getattr(stdout, 'buffer', stdout)
stdin = getattr(sys.stdin, 'buffer', sys.stdin)
if manpath or comppath:
from os import path
from shutil import copyfile
from pkg_resources import Requirement, resource_filename
def cp(src, dst):
"""copies from src path to dst"""
copyfile(src, dst)
log.info("written:" + dst)
if manpath is not None:
cp(resource_filename(Requirement.parse('tqdm'), 'tqdm/tqdm.1'),
path.join(manpath, 'tqdm.1'))
if comppath is not None:
cp(resource_filename(Requirement.parse('tqdm'), 'tqdm/completion.sh'),
path.join(comppath, 'tqdm_completion.sh'))
sys.exit(0)
if tee:
stdout_write = stdout.write
fp_write = getattr(fp, 'buffer', fp).write
class stdout(object): # pylint: disable=function-redefined
@staticmethod
def write(x):
with tqdm.external_write_mode(file=fp):
fp_write(x)
stdout_write(x)
if delim_per_char:
tqdm_args.setdefault('unit', 'B')
tqdm_args.setdefault('unit_scale', True)
tqdm_args.setdefault('unit_divisor', 1024)
log.debug(tqdm_args)
with tqdm(**tqdm_args) as t:
posix_pipe(stdin, stdout, '', buf_size, t.update)
elif delim == b'\\n':
log.debug(tqdm_args)
if update or update_to:
with tqdm(**tqdm_args) as t:
if update:
def callback(i):
t.update(numeric(i.decode()))
else: # update_to
def callback(i):
t.update(numeric(i.decode()) - t.n)
for i in stdin:
stdout.write(i)
callback(i)
else:
for i in tqdm(stdin, **tqdm_args):
stdout.write(i)
else:
log.debug(tqdm_args)
with tqdm(**tqdm_args) as t:
callback_len = False
if update:
def callback(i):
t.update(numeric(i.decode()))
elif update_to:
def callback(i):
t.update(numeric(i.decode()) - t.n)
else:
callback = t.update
callback_len = True
posix_pipe(stdin, stdout, delim, buf_size, callback, callback_len)
| apache-2.0 | 5,075,268,966,244,176,000 | 33.12013 | 86 | 0.491293 | false |
edisona/androguard | androguard/core/bytecodes/apk.py | 36 | 58119 | # This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from androguard.core import bytecode
from androguard.core import androconf
from androguard.core.bytecodes.dvm_permissions import DVM_PERMISSIONS
import StringIO
from struct import pack, unpack
from xml.sax.saxutils import escape
from zlib import crc32
import re
from xml.dom import minidom
# 0: chilkat
# 1: default python zipfile module
# 2: patch zipfile module
ZIPMODULE = 1
import sys
if sys.hexversion < 0x2070000 :
try :
import chilkat
ZIPMODULE = 0
# UNLOCK : change it with your valid key !
try :
CHILKAT_KEY = open("key.txt", "rb").read()
except Exception :
CHILKAT_KEY = "testme"
except ImportError :
ZIPMODULE = 1
else :
ZIPMODULE = 1
################################################### CHILKAT ZIP FORMAT #####################################################
class ChilkatZip :
def __init__(self, raw) :
self.files = []
self.zip = chilkat.CkZip()
self.zip.UnlockComponent( CHILKAT_KEY )
self.zip.OpenFromMemory( raw, len(raw) )
filename = chilkat.CkString()
e = self.zip.FirstEntry()
while e != None :
e.get_FileName(filename)
self.files.append( filename.getString() )
e = e.NextEntry()
def delete(self, patterns) :
el = []
filename = chilkat.CkString()
e = self.zip.FirstEntry()
while e != None :
e.get_FileName(filename)
if re.match(patterns, filename.getString()) != None :
el.append( e )
e = e.NextEntry()
for i in el :
self.zip.DeleteEntry( i )
def remplace_file(self, filename, buff) :
entry = self.zip.GetEntryByName(filename)
if entry != None :
obj = chilkat.CkByteData()
obj.append2( buff, len(buff) )
return entry.ReplaceData( obj )
return False
def write(self) :
obj = chilkat.CkByteData()
self.zip.WriteToMemory( obj )
return obj.getBytes()
def namelist(self) :
return self.files
def read(self, elem) :
e = self.zip.GetEntryByName( elem )
s = chilkat.CkByteData()
e.Inflate( s )
return s.getBytes()
def sign_apk(filename, keystore, storepass):
from subprocess import Popen, PIPE, STDOUT
compile = Popen([androconf.CONF["PATH_JARSIGNER"],
"-sigalg",
"MD5withRSA",
"-digestalg",
"SHA1",
"-storepass",
storepass,
"-keystore",
keystore,
filename,
"alias_name"],
stdout=PIPE, stderr=STDOUT)
stdout, stderr = compile.communicate()
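# Illustrative call (the apk path, keystore and password are assumptions, and
# androconf.CONF["PATH_JARSIGNER"] must point at a working jarsigner binary):
#
#   sign_apk("/tmp/app-unsigned.apk", "/home/user/debug.keystore", "android")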
######################################################## APK FORMAT ########################################################
class APK:
"""
This class provides access to all elements in an APK file
:param filename: specify the path of the file, or raw data
:param raw: specify if the filename is a path or raw data (optional)
:param mode: specify the mode to open the file (optional)
:param magic_file: specify the magic file (optional)
:param zipmodule: specify the type of zip module to use (0:chilkat, 1:zipfile, 2:patch zipfile)
:type filename: string
:type raw: boolean
:type mode: string
:type magic_file: string
:type zipmodule: int
:Example:
APK("myfile.apk")
APK(open("myfile.apk", "rb").read(), raw=True)
"""
def __init__(self, filename, raw=False, mode="r", magic_file=None, zipmodule=ZIPMODULE):
self.filename = filename
self.xml = {}
self.axml = {}
self.arsc = {}
self.package = ""
self.androidversion = {}
self.permissions = []
self.valid_apk = False
self.files = {}
self.files_crc32 = {}
self.magic_file = magic_file
if raw == True:
self.__raw = filename
else:
fd = open(filename, "rb")
self.__raw = fd.read()
fd.close()
self.zipmodule = zipmodule
if zipmodule == 0:
self.zip = ChilkatZip(self.__raw)
elif zipmodule == 2:
from androguard.patch import zipfile
self.zip = zipfile.ZipFile(StringIO.StringIO(self.__raw), mode=mode)
else:
import zipfile
self.zip = zipfile.ZipFile(StringIO.StringIO(self.__raw), mode=mode)
for i in self.zip.namelist():
if i == "AndroidManifest.xml":
self.axml[i] = AXMLPrinter(self.zip.read(i))
try:
self.xml[i] = minidom.parseString(self.axml[i].get_buff())
except:
self.xml[i] = None
if self.xml[i] != None:
self.package = self.xml[i].documentElement.getAttribute("package")
self.androidversion["Code"] = self.xml[i].documentElement.getAttribute("android:versionCode")
self.androidversion["Name"] = self.xml[i].documentElement.getAttribute("android:versionName")
for item in self.xml[i].getElementsByTagName('uses-permission'):
self.permissions.append(str(item.getAttribute("android:name")))
self.valid_apk = True
self.get_files_types()
def get_AndroidManifest(self):
"""
Return the Android Manifest XML file
:rtype: xml object
"""
return self.xml["AndroidManifest.xml"]
def is_valid_APK(self):
"""
Return true if the APK is valid, false otherwise
:rtype: boolean
"""
return self.valid_apk
def get_filename(self):
"""
Return the filename of the APK
:rtype: string
"""
return self.filename
def get_package(self):
"""
Return the name of the package
:rtype: string
"""
return self.package
def get_androidversion_code(self):
"""
Return the android version code
:rtype: string
"""
return self.androidversion["Code"]
def get_androidversion_name(self):
"""
Return the android version name
:rtype: string
"""
return self.androidversion["Name"]
def get_files(self):
"""
Return the files inside the APK
:rtype: a list of strings
"""
return self.zip.namelist()
def get_files_types(self):
"""
Return the files inside the APK with their associated types (by using python-magic)
:rtype: a dictionary
"""
try:
import magic
except ImportError:
# no lib magic !
for i in self.get_files():
buffer = self.zip.read(i)
self.files_crc32[i] = crc32(buffer)
self.files[i] = "Unknown"
return self.files
if self.files != {}:
return self.files
builtin_magic = 0
try:
getattr(magic, "MagicException")
except AttributeError:
builtin_magic = 1
if builtin_magic:
ms = magic.open(magic.MAGIC_NONE)
ms.load()
for i in self.get_files():
buffer = self.zip.read(i)
self.files[i] = ms.buffer(buffer)
self.files[i] = self._patch_magic(buffer, self.files[i])
self.files_crc32[i] = crc32(buffer)
else:
m = magic.Magic(magic_file=self.magic_file)
for i in self.get_files():
buffer = self.zip.read(i)
self.files[i] = m.from_buffer(buffer)
self.files[i] = self._patch_magic(buffer, self.files[i])
self.files_crc32[i] = crc32(buffer)
return self.files
def _patch_magic(self, buffer, orig):
if ("Zip" in orig) or ("DBase" in orig):
val = androconf.is_android_raw(buffer)
if val == "APK":
if androconf.is_valid_android_raw(buffer):
return "Android application package file"
elif val == "AXML":
return "Android's binary XML"
return orig
def get_files_crc32(self):
if self.files_crc32 == {}:
self.get_files_types()
return self.files_crc32
def get_files_information(self):
"""
Return the files inside the APK with their associated types and crc32
:rtype: string, string, int
"""
if self.files == {}:
self.get_files_types()
for i in self.get_files():
try:
yield i, self.files[i], self.files_crc32[i]
except KeyError:
yield i, "", ""
def get_raw(self):
"""
Return raw bytes of the APK
:rtype: string
"""
return self.__raw
def get_file(self, filename):
"""
Return the raw data of the specified filename
:rtype: string
"""
try:
return self.zip.read(filename)
except KeyError:
return ""
def get_dex(self):
"""
Return the raw data of the classes dex file
:rtype: string
"""
return self.get_file("classes.dex")
def get_elements(self, tag_name, attribute):
"""
Return elements in xml files which match the tag name and the specific attribute
:param tag_name: a string which specify the tag name
:param attribute: a string which specify the attribute
"""
l = []
for i in self.xml :
for item in self.xml[i].getElementsByTagName(tag_name) :
value = item.getAttribute(attribute)
value = self.format_value( value )
l.append( str( value ) )
return l
def format_value(self, value) :
if len(value) > 0 :
if value[0] == "." :
value = self.package + value
else :
v_dot = value.find(".")
if v_dot == 0 :
value = self.package + "." + value
elif v_dot == -1 :
value = self.package + "." + value
return value
def get_element(self, tag_name, attribute):
"""
Return the element in the xml files which matches the tag name and the specific attribute
:param tag_name: specify the tag name
:type tag_name: string
:param attribute: specify the attribute
:type attribute: string
:rtype: string
"""
for i in self.xml :
for item in self.xml[i].getElementsByTagName(tag_name) :
value = item.getAttribute(attribute)
if len(value) > 0 :
return value
return None
def get_main_activity(self) :
"""
Return the name of the main activity
:rtype: string
"""
x = set()
y = set()
for i in self.xml:
for item in self.xml[i].getElementsByTagName("activity") :
for sitem in item.getElementsByTagName( "action" ) :
val = sitem.getAttribute( "android:name" )
if val == "android.intent.action.MAIN" :
x.add( item.getAttribute( "android:name" ) )
for sitem in item.getElementsByTagName( "category" ) :
val = sitem.getAttribute( "android:name" )
if val == "android.intent.category.LAUNCHER" :
y.add( item.getAttribute( "android:name" ) )
z = x.intersection(y)
if len(z) > 0 :
return self.format_value(z.pop())
return None
def get_activities(self):
"""
Return the android:name attribute of all activities
:rtype: a list of string
"""
return self.get_elements("activity", "android:name")
def get_services(self):
"""
Return the android:name attribute of all services
:rtype: a list of string
"""
return self.get_elements("service", "android:name")
def get_receivers(self) :
"""
Return the android:name attribute of all receivers
:rtype: a list of string
"""
return self.get_elements("receiver", "android:name")
def get_providers(self):
"""
Return the android:name attribute of all providers
:rtype: a list of string
"""
return self.get_elements("provider", "android:name")
def get_intent_filters(self, category, name):
d = {}
d["action"] = []
d["category"] = []
for i in self.xml:
for item in self.xml[i].getElementsByTagName(category):
if self.format_value(item.getAttribute("android:name")) == name:
for sitem in item.getElementsByTagName("intent-filter"):
for ssitem in sitem.getElementsByTagName("action"):
if ssitem.getAttribute("android:name") not in d["action"]:
d["action"].append(ssitem.getAttribute("android:name"))
for ssitem in sitem.getElementsByTagName("category"):
if ssitem.getAttribute("android:name") not in d["category"]:
d["category"].append(ssitem.getAttribute("android:name"))
if not d["action"]:
del d["action"]
if not d["category"]:
del d["category"]
return d
def get_permissions(self):
"""
Return permissions
:rtype: list of string
"""
return self.permissions
def get_details_permissions(self):
"""
Return permissions with details
:rtype: list of string
"""
l = {}
for i in self.permissions :
perm = i
pos = i.rfind(".")
if pos != -1 :
perm = i[pos+1:]
try :
l[ i ] = DVM_PERMISSIONS["MANIFEST_PERMISSION"][ perm ]
except KeyError :
l[ i ] = [ "normal", "Unknown permission from android reference", "Unknown permission from android reference" ]
return l
def get_max_sdk_version(self):
"""
Return the android:maxSdkVersion attribute
:rtype: string
"""
return self.get_element("uses-sdk", "android:maxSdkVersion")
def get_min_sdk_version(self):
"""
Return the android:minSdkVersion attribute
:rtype: string
"""
return self.get_element("uses-sdk", "android:minSdkVersion")
def get_target_sdk_version(self) :
"""
Return the android:targetSdkVersion attribute
:rtype: string
"""
return self.get_element( "uses-sdk", "android:targetSdkVersion" )
def get_libraries(self) :
"""
Return the android:name attributes for libraries
:rtype: list
"""
return self.get_elements( "uses-library", "android:name" )
def get_certificate(self, filename):
"""
Return a certificate object by giving the name in the apk file
"""
import chilkat
cert = chilkat.CkCert()
f = self.get_file(filename)
success = cert.LoadFromBinary2(f, len(f))
return success, cert
def new_zip(self, filename, deleted_files=None, new_files={}) :
"""
Create a new zip file
:param filename: the output filename of the zip
:param deleted_files: a regex pattern to remove specific file
:param new_files: a dictionary of new files
:type filename: string
:type deleted_files: None or a string
:type new_files: a dictionary (key:filename, value:content of the file)
"""
if self.zipmodule == 2:
from androguard.patch import zipfile
zout = zipfile.ZipFile(filename, 'w')
else:
import zipfile
zout = zipfile.ZipFile(filename, 'w')
for item in self.zip.infolist():
if deleted_files != None:
if re.match(deleted_files, item.filename) == None:
if item.filename in new_files:
zout.writestr(item, new_files[item.filename])
else:
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
zout.close()
def get_android_manifest_axml(self):
"""
Return the :class:`AXMLPrinter` object which corresponds to the AndroidManifest.xml file
:rtype: :class:`AXMLPrinter`
"""
try:
return self.axml["AndroidManifest.xml"]
except KeyError:
return None
def get_android_manifest_xml(self):
"""
Return the xml object which corresponds to the AndroidManifest.xml file
:rtype: object
"""
try:
return self.xml["AndroidManifest.xml"]
except KeyError:
return None
def get_android_resources(self):
"""
Return the :class:`ARSCParser` object which corresponds to the resources.arsc file
:rtype: :class:`ARSCParser`
"""
try:
return self.arsc["resources.arsc"]
except KeyError:
try:
self.arsc["resources.arsc"] = ARSCParser(self.zip.read("resources.arsc"))
return self.arsc["resources.arsc"]
except KeyError:
return None
def get_signature_name(self):
signature_expr = re.compile("^(META-INF/)(.*)(\.RSA)$")
for i in self.get_files():
if signature_expr.search(i):
return i
return None
def get_signature(self):
signature_expr = re.compile("^(META-INF/)(.*)(\.RSA)$")
for i in self.get_files():
if signature_expr.search(i):
return self.get_file(i)
return None
def show(self):
self.get_files_types()
print "FILES: "
for i in self.get_files():
try:
print "\t", i, self.files[i], "%x" % self.files_crc32[i]
except KeyError:
print "\t", i, "%x" % self.files_crc32[i]
print "PERMISSIONS: "
details_permissions = self.get_details_permissions()
for i in details_permissions:
print "\t", i, details_permissions[i]
print "MAIN ACTIVITY: ", self.get_main_activity()
print "ACTIVITIES: "
activities = self.get_activities()
for i in activities:
filters = self.get_intent_filters("activity", i)
print "\t", i, filters or ""
print "SERVICES: "
services = self.get_services()
for i in services:
filters = self.get_intent_filters("service", i)
print "\t", i, filters or ""
print "RECEIVERS: "
receivers = self.get_receivers()
for i in receivers:
filters = self.get_intent_filters("receiver", i)
print "\t", i, filters or ""
print "PROVIDERS: ", self.get_providers()
def show_Certificate(cert):
print "Issuer: C=%s, CN=%s, DN=%s, E=%s, L=%s, O=%s, OU=%s, S=%s" % (cert.issuerC(), cert.issuerCN(), cert.issuerDN(), cert.issuerE(), cert.issuerL(), cert.issuerO(), cert.issuerOU(), cert.issuerS())
print "Subject: C=%s, CN=%s, DN=%s, E=%s, L=%s, O=%s, OU=%s, S=%s" % (cert.subjectC(), cert.subjectCN(), cert.subjectDN(), cert.subjectE(), cert.subjectL(), cert.subjectO(), cert.subjectOU(), cert.subjectS())
######################################################## AXML FORMAT ########################################################
# Translated from http://code.google.com/p/android4me/source/browse/src/android/content/res/AXmlResourceParser.java
UTF8_FLAG = 0x00000100
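# Parses a string pool chunk of the binary XML/resource format: the chunk
# header, the per-string and per-style offset tables, then the raw string data;
# UTF8_FLAG in the flags word selects UTF-8 instead of UTF-16 decoding.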
class StringBlock:
def __init__(self, buff):
self.start = buff.get_idx()
self._cache = {}
self.header = unpack('<h', buff.read(2))[0]
self.header_size = unpack('<h', buff.read(2))[0]
self.chunkSize = unpack('<i', buff.read(4))[0]
self.stringCount = unpack('<i', buff.read(4))[0]
self.styleOffsetCount = unpack('<i', buff.read(4))[0]
self.flags = unpack('<i', buff.read(4))[0]
self.m_isUTF8 = ((self.flags & UTF8_FLAG) != 0)
self.stringsOffset = unpack('<i', buff.read(4))[0]
self.stylesOffset = unpack('<i', buff.read(4))[0]
self.m_stringOffsets = []
self.m_styleOffsets = []
self.m_strings = []
self.m_styles = []
for i in range(0, self.stringCount):
self.m_stringOffsets.append(unpack('<i', buff.read(4))[0])
for i in range(0, self.styleOffsetCount):
self.m_styleOffsets.append(unpack('<i', buff.read(4))[0])
size = self.chunkSize - self.stringsOffset
if self.stylesOffset != 0:
size = self.stylesOffset - self.stringsOffset
# FIXME
if (size % 4) != 0:
androconf.warning("ooo")
for i in range(0, size):
self.m_strings.append(unpack('=b', buff.read(1))[0])
if self.stylesOffset != 0:
size = self.chunkSize - self.stylesOffset
# FIXME
if (size % 4) != 0:
androconf.warning("ooo")
for i in range(0, size / 4):
self.m_styles.append(unpack('<i', buff.read(4))[0])
def getString(self, idx):
if idx in self._cache:
return self._cache[idx]
if idx < 0 or not self.m_stringOffsets or idx >= len(self.m_stringOffsets):
return ""
offset = self.m_stringOffsets[idx]
if not self.m_isUTF8:
length = self.getShort2(self.m_strings, offset)
offset += 2
self._cache[idx] = self.decode(self.m_strings, offset, length)
else:
offset += self.getVarint(self.m_strings, offset)[1]
varint = self.getVarint(self.m_strings, offset)
offset += varint[1]
length = varint[0]
self._cache[idx] = self.decode2(self.m_strings, offset, length)
return self._cache[idx]
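# NOTE: debugging stub - it only prints the raw style offset/data for idx;
# styles are not otherwise decoded by this parser.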
def getStyle(self, idx):
print idx
print idx in self.m_styleOffsets, self.m_styleOffsets[idx]
print self.m_styles[0]
def decode(self, array, offset, length):
length = length * 2
length = length + length % 2
data = ""
for i in range(0, length):
t_data = pack("=b", self.m_strings[offset + i])
data += unicode(t_data, errors='ignore')
if data[-2:] == "\x00\x00":
break
end_zero = data.find("\x00\x00")
if end_zero != -1:
data = data[:end_zero]
return data.decode("utf-16", 'replace')
def decode2(self, array, offset, length):
data = ""
for i in range(0, length):
t_data = pack("=b", self.m_strings[offset + i])
data += unicode(t_data, errors='ignore')
return data.decode("utf-8", 'replace')
def getVarint(self, array, offset):
val = array[offset]
more = (val & 0x80) != 0
val &= 0x7f
if not more:
return val, 1
return val << 8 | array[offset + 1] & 0xff, 2
def getShort(self, array, offset):
value = array[offset / 4]
if ((offset % 4) / 2) == 0:
return value & 0xFFFF
else:
return value >> 16
def getShort2(self, array, offset):
return (array[offset + 1] & 0xff) << 8 | array[offset] & 0xff
def show(self):
print "StringBlock", hex(self.start), hex(self.header), hex(self.header_size), hex(self.chunkSize), hex(self.stringsOffset), self.m_stringOffsets
for i in range(0, len(self.m_stringOffsets)):
print i, repr(self.getString(i))
ATTRIBUTE_IX_NAMESPACE_URI = 0
ATTRIBUTE_IX_NAME = 1
ATTRIBUTE_IX_VALUE_STRING = 2
ATTRIBUTE_IX_VALUE_TYPE = 3
ATTRIBUTE_IX_VALUE_DATA = 4
ATTRIBUTE_LENGHT = 5
CHUNK_AXML_FILE = 0x00080003
CHUNK_RESOURCEIDS = 0x00080180
CHUNK_XML_FIRST = 0x00100100
CHUNK_XML_START_NAMESPACE = 0x00100100
CHUNK_XML_END_NAMESPACE = 0x00100101
CHUNK_XML_START_TAG = 0x00100102
CHUNK_XML_END_TAG = 0x00100103
CHUNK_XML_TEXT = 0x00100104
CHUNK_XML_LAST = 0x00100104
START_DOCUMENT = 0
END_DOCUMENT = 1
START_TAG = 2
END_TAG = 3
TEXT = 4
class AXMLParser:
def __init__(self, raw_buff):
self.reset()
self.valid_axml = True
self.buff = bytecode.BuffHandle(raw_buff)
axml_file = unpack('<L', self.buff.read(4))[0]
if axml_file == CHUNK_AXML_FILE:
self.buff.read(4)
self.sb = StringBlock(self.buff)
self.m_resourceIDs = []
self.m_prefixuri = {}
self.m_uriprefix = {}
self.m_prefixuriL = []
self.visited_ns = []
else:
self.valid_axml = False
androconf.warning("Not a valid xml file")
def is_valid(self):
return self.valid_axml
def reset(self):
self.m_event = -1
self.m_lineNumber = -1
self.m_name = -1
self.m_namespaceUri = -1
self.m_attributes = []
self.m_idAttribute = -1
self.m_classAttribute = -1
self.m_styleAttribute = -1
def next(self):
self.doNext()
return self.m_event
def doNext(self):
if self.m_event == END_DOCUMENT:
return
event = self.m_event
self.reset()
while True:
chunkType = -1
# Fake END_DOCUMENT event.
if event == END_TAG:
pass
# START_DOCUMENT
if event == START_DOCUMENT:
chunkType = CHUNK_XML_START_TAG
else:
if self.buff.end():
self.m_event = END_DOCUMENT
break
chunkType = unpack('<L', self.buff.read(4))[0]
if chunkType == CHUNK_RESOURCEIDS:
chunkSize = unpack('<L', self.buff.read(4))[0]
# FIXME
if chunkSize < 8 or chunkSize % 4 != 0:
androconf.warning("Invalid chunk size")
for i in range(0, chunkSize / 4 - 2):
self.m_resourceIDs.append(unpack('<L', self.buff.read(4))[0])
continue
# FIXME
if chunkType < CHUNK_XML_FIRST or chunkType > CHUNK_XML_LAST:
androconf.warning("invalid chunk type")
# Fake START_DOCUMENT event.
if chunkType == CHUNK_XML_START_TAG and event == -1:
self.m_event = START_DOCUMENT
break
self.buff.read(4) # /*chunkSize*/
lineNumber = unpack('<L', self.buff.read(4))[0]
self.buff.read(4) # 0xFFFFFFFF
if chunkType == CHUNK_XML_START_NAMESPACE or chunkType == CHUNK_XML_END_NAMESPACE:
if chunkType == CHUNK_XML_START_NAMESPACE:
prefix = unpack('<L', self.buff.read(4))[0]
uri = unpack('<L', self.buff.read(4))[0]
self.m_prefixuri[prefix] = uri
self.m_uriprefix[uri] = prefix
self.m_prefixuriL.append((prefix, uri))
self.ns = uri
else:
self.ns = -1
self.buff.read(4)
self.buff.read(4)
(prefix, uri) = self.m_prefixuriL.pop()
#del self.m_prefixuri[ prefix ]
#del self.m_uriprefix[ uri ]
continue
self.m_lineNumber = lineNumber
if chunkType == CHUNK_XML_START_TAG:
self.m_namespaceUri = unpack('<L', self.buff.read(4))[0]
self.m_name = unpack('<L', self.buff.read(4))[0]
# FIXME
self.buff.read(4) # flags
attributeCount = unpack('<L', self.buff.read(4))[0]
self.m_idAttribute = (attributeCount >> 16) - 1
attributeCount = attributeCount & 0xFFFF
self.m_classAttribute = unpack('<L', self.buff.read(4))[0]
self.m_styleAttribute = (self.m_classAttribute >> 16) - 1
self.m_classAttribute = (self.m_classAttribute & 0xFFFF) - 1
for i in range(0, attributeCount * ATTRIBUTE_LENGHT):
self.m_attributes.append(unpack('<L', self.buff.read(4))[0])
for i in range(ATTRIBUTE_IX_VALUE_TYPE, len(self.m_attributes), ATTRIBUTE_LENGHT):
self.m_attributes[i] = self.m_attributes[i] >> 24
self.m_event = START_TAG
break
if chunkType == CHUNK_XML_END_TAG:
self.m_namespaceUri = unpack('<L', self.buff.read(4))[0]
self.m_name = unpack('<L', self.buff.read(4))[0]
self.m_event = END_TAG
break
if chunkType == CHUNK_XML_TEXT:
self.m_name = unpack('<L', self.buff.read(4))[0]
# FIXME
self.buff.read(4)
self.buff.read(4)
self.m_event = TEXT
break
def getPrefixByUri(self, uri):
try:
return self.m_uriprefix[uri]
except KeyError:
return -1
def getPrefix(self):
try:
return self.sb.getString(self.m_uriprefix[self.m_namespaceUri])
except KeyError:
return u''
def getName(self):
if self.m_name == -1 or (self.m_event != START_TAG and self.m_event != END_TAG) :
return u''
return self.sb.getString(self.m_name)
def getText(self) :
if self.m_name == -1 or self.m_event != TEXT :
return u''
return self.sb.getString(self.m_name)
def getNamespacePrefix(self, pos):
prefix = self.m_prefixuriL[pos][0]
return self.sb.getString(prefix)
def getNamespaceUri(self, pos):
uri = self.m_prefixuriL[pos][1]
return self.sb.getString(uri)
def getXMLNS(self):
buff = ""
for i in self.m_uriprefix:
if i not in self.visited_ns:
buff += "xmlns:%s=\"%s\"\n" % (self.sb.getString(self.m_uriprefix[i]), self.sb.getString(self.m_prefixuri[self.m_uriprefix[i]]))
self.visited_ns.append(i)
return buff
def getNamespaceCount(self, pos) :
pass
def getAttributeOffset(self, index):
# FIXME
if self.m_event != START_TAG:
androconf.warning("Current event is not START_TAG.")
offset = index * 5
# FIXME
if offset >= len(self.m_attributes):
androconf.warning("Invalid attribute index")
return offset
def getAttributeCount(self):
if self.m_event != START_TAG:
return -1
return len(self.m_attributes) / ATTRIBUTE_LENGHT
def getAttributePrefix(self, index):
offset = self.getAttributeOffset(index)
uri = self.m_attributes[offset + ATTRIBUTE_IX_NAMESPACE_URI]
prefix = self.getPrefixByUri(uri)
if prefix == -1:
return ""
return self.sb.getString(prefix)
def getAttributeName(self, index) :
offset = self.getAttributeOffset(index)
name = self.m_attributes[offset+ATTRIBUTE_IX_NAME]
if name == -1 :
return ""
return self.sb.getString( name )
def getAttributeValueType(self, index) :
offset = self.getAttributeOffset(index)
return self.m_attributes[offset+ATTRIBUTE_IX_VALUE_TYPE]
def getAttributeValueData(self, index) :
offset = self.getAttributeOffset(index)
return self.m_attributes[offset+ATTRIBUTE_IX_VALUE_DATA]
def getAttributeValue(self, index) :
offset = self.getAttributeOffset(index)
valueType = self.m_attributes[offset+ATTRIBUTE_IX_VALUE_TYPE]
if valueType == TYPE_STRING :
valueString = self.m_attributes[offset+ATTRIBUTE_IX_VALUE_STRING]
return self.sb.getString( valueString )
# WIP
return ""
#int valueData=m_attributes[offset+ATTRIBUTE_IX_VALUE_DATA];
#return TypedValue.coerceToString(valueType,valueData);
TYPE_ATTRIBUTE = 2
TYPE_DIMENSION = 5
TYPE_FIRST_COLOR_INT = 28
TYPE_FIRST_INT = 16
TYPE_FLOAT = 4
TYPE_FRACTION = 6
TYPE_INT_BOOLEAN = 18
TYPE_INT_COLOR_ARGB4 = 30
TYPE_INT_COLOR_ARGB8 = 28
TYPE_INT_COLOR_RGB4 = 31
TYPE_INT_COLOR_RGB8 = 29
TYPE_INT_DEC = 16
TYPE_INT_HEX = 17
TYPE_LAST_COLOR_INT = 31
TYPE_LAST_INT = 31
TYPE_NULL = 0
TYPE_REFERENCE = 1
TYPE_STRING = 3
RADIX_MULTS = [ 0.00390625, 3.051758E-005, 1.192093E-007, 4.656613E-010 ]
DIMENSION_UNITS = [ "px","dip","sp","pt","in","mm" ]
FRACTION_UNITS = [ "%", "%p" ]
COMPLEX_UNIT_MASK = 15
def complexToFloat(xcomplex):
return (float)(xcomplex & 0xFFFFFF00) * RADIX_MULTS[(xcomplex >> 4) & 3]
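# Worked example (illustrative): a dimension attribute stored as 0x00000101 has
# mantissa 0x100 (256), radix index 0 (multiplier 0.00390625) and unit index 1,
# so complexToFloat(0x101) == 1.0 and the attribute prints as "1.000000dip".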
class AXMLPrinter:
def __init__(self, raw_buff):
self.axml = AXMLParser(raw_buff)
self.xmlns = False
self.buff = u''
while True and self.axml.is_valid():
_type = self.axml.next()
# print "tagtype = ", _type
if _type == START_DOCUMENT:
self.buff += u'<?xml version="1.0" encoding="utf-8"?>\n'
elif _type == START_TAG:
self.buff += u'<' + self.getPrefix(self.axml.getPrefix()) + self.axml.getName() + u'\n'
self.buff += self.axml.getXMLNS()
for i in range(0, self.axml.getAttributeCount()):
self.buff += "%s%s=\"%s\"\n" % (self.getPrefix(
self.axml.getAttributePrefix(i)), self.axml.getAttributeName(i), self._escape(self.getAttributeValue(i)))
self.buff += u'>\n'
elif _type == END_TAG:
self.buff += "</%s%s>\n" % (self.getPrefix(self.axml.getPrefix()), self.axml.getName())
elif _type == TEXT:
self.buff += "%s\n" % self.axml.getText()
elif _type == END_DOCUMENT:
break
# pleed patch
def _escape(self, s):
s = s.replace("&", "&")
s = s.replace('"', """)
s = s.replace("'", "'")
s = s.replace("<", "<")
s = s.replace(">", ">")
return escape(s)
def get_buff(self):
return self.buff.encode('utf-8')
def get_xml(self):
return minidom.parseString(self.get_buff()).toprettyxml(encoding="utf-8")
def get_xml_obj(self):
return minidom.parseString(self.get_buff())
def getPrefix(self, prefix):
if prefix == None or len(prefix) == 0:
return u''
return prefix + u':'
def getAttributeValue(self, index):
_type = self.axml.getAttributeValueType(index)
_data = self.axml.getAttributeValueData(index)
if _type == TYPE_STRING:
return self.axml.getAttributeValue(index)
elif _type == TYPE_ATTRIBUTE:
return "?%s%08X" % (self.getPackage(_data), _data)
elif _type == TYPE_REFERENCE:
return "@%s%08X" % (self.getPackage(_data), _data)
elif _type == TYPE_FLOAT:
return "%f" % unpack("=f", pack("=L", _data))[0]
elif _type == TYPE_INT_HEX:
return "0x%08X" % _data
elif _type == TYPE_INT_BOOLEAN:
if _data == 0:
return "false"
return "true"
elif _type == TYPE_DIMENSION:
return "%f%s" % (complexToFloat(_data), DIMENSION_UNITS[_data & COMPLEX_UNIT_MASK])
elif _type == TYPE_FRACTION:
return "%f%s" % (complexToFloat(_data) * 100, FRACTION_UNITS[_data & COMPLEX_UNIT_MASK])
elif _type >= TYPE_FIRST_COLOR_INT and _type <= TYPE_LAST_COLOR_INT:
return "#%08X" % _data
elif _type >= TYPE_FIRST_INT and _type <= TYPE_LAST_INT:
return "%d" % androconf.long2int(_data)
return "<0x%X, type 0x%02X>" % (_data, _type)
def getPackage(self, id):
if id >> 24 == 1:
return "android:"
return ""
RES_NULL_TYPE = 0x0000
RES_STRING_POOL_TYPE = 0x0001
RES_TABLE_TYPE = 0x0002
RES_XML_TYPE = 0x0003
# Chunk types in RES_XML_TYPE
RES_XML_FIRST_CHUNK_TYPE = 0x0100
RES_XML_START_NAMESPACE_TYPE= 0x0100
RES_XML_END_NAMESPACE_TYPE = 0x0101
RES_XML_START_ELEMENT_TYPE = 0x0102
RES_XML_END_ELEMENT_TYPE = 0x0103
RES_XML_CDATA_TYPE = 0x0104
RES_XML_LAST_CHUNK_TYPE = 0x017f
# This contains a uint32_t array mapping strings in the string
# pool back to resource identifiers. It is optional.
RES_XML_RESOURCE_MAP_TYPE = 0x0180
# Chunk types in RES_TABLE_TYPE
RES_TABLE_PACKAGE_TYPE = 0x0200
RES_TABLE_TYPE_TYPE = 0x0201
RES_TABLE_TYPE_SPEC_TYPE = 0x0202
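# Parser for the binary resource table (resources.arsc): it reads the table
# header and global string pool, then for each package its type/key string
# pools followed by the type-spec and type chunks and the resource entries
# they contain.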
class ARSCParser:
def __init__(self, raw_buff):
self.analyzed = False
self.buff = bytecode.BuffHandle(raw_buff)
#print "SIZE", hex(self.buff.size())
self.header = ARSCHeader(self.buff)
self.packageCount = unpack('<i', self.buff.read(4))[0]
#print hex(self.packageCount)
self.stringpool_main = StringBlock(self.buff)
self.next_header = ARSCHeader(self.buff)
self.packages = {}
self.values = {}
for i in range(0, self.packageCount):
current_package = ARSCResTablePackage(self.buff)
package_name = current_package.get_name()
self.packages[package_name] = []
mTableStrings = StringBlock(self.buff)
mKeyStrings = StringBlock(self.buff)
#self.stringpool_main.show()
#self.mTableStrings.show()
#self.mKeyStrings.show()
self.packages[package_name].append(current_package)
self.packages[package_name].append(mTableStrings)
self.packages[package_name].append(mKeyStrings)
pc = PackageContext(current_package, self.stringpool_main, mTableStrings, mKeyStrings)
current = self.buff.get_idx()
while not self.buff.end():
header = ARSCHeader(self.buff)
self.packages[package_name].append(header)
if header.type == RES_TABLE_TYPE_SPEC_TYPE:
self.packages[package_name].append(ARSCResTypeSpec(self.buff, pc))
elif header.type == RES_TABLE_TYPE_TYPE:
a_res_type = ARSCResType(self.buff, pc)
self.packages[package_name].append(a_res_type)
entries = []
for i in range(0, a_res_type.entryCount):
current_package.mResId = current_package.mResId & 0xffff0000 | i
entries.append((unpack('<i', self.buff.read(4))[0], current_package.mResId))
self.packages[package_name].append(entries)
for entry, res_id in entries:
if self.buff.end():
break
if entry != -1:
ate = ARSCResTableEntry(self.buff, res_id, pc)
self.packages[package_name].append(ate)
elif header.type == RES_TABLE_PACKAGE_TYPE:
break
else:
androconf.warning("unknown type")
break
current += header.size
self.buff.set_idx(current)
def _analyse(self):
if self.analyzed:
return
self.analyzed = True
for package_name in self.packages:
self.values[package_name] = {}
nb = 3
for header in self.packages[package_name][nb:]:
if isinstance(header, ARSCHeader):
if header.type == RES_TABLE_TYPE_TYPE:
a_res_type = self.packages[package_name][nb + 1]
if a_res_type.config.get_language() not in self.values[package_name]:
self.values[package_name][a_res_type.config.get_language()] = {}
self.values[package_name][a_res_type.config.get_language()]["public"] = []
c_value = self.values[package_name][a_res_type.config.get_language()]
entries = self.packages[package_name][nb + 2]
nb_i = 0
for entry, res_id in entries:
if entry != -1:
ate = self.packages[package_name][nb + 3 + nb_i]
#print ate.is_public(), a_res_type.get_type(), ate.get_value(), hex(ate.mResId)
if ate.get_index() != -1:
c_value["public"].append((a_res_type.get_type(), ate.get_value(), ate.mResId))
if a_res_type.get_type() not in c_value:
c_value[a_res_type.get_type()] = []
if a_res_type.get_type() == "string":
c_value["string"].append(self.get_resource_string(ate))
elif a_res_type.get_type() == "id":
if not ate.is_complex():
c_value["id"].append(self.get_resource_id(ate))
elif a_res_type.get_type() == "bool":
if not ate.is_complex():
c_value["bool"].append(self.get_resource_bool(ate))
elif a_res_type.get_type() == "integer":
c_value["integer"].append(self.get_resource_integer(ate))
elif a_res_type.get_type() == "color":
c_value["color"].append(self.get_resource_color(ate))
elif a_res_type.get_type() == "dimen":
c_value["dimen"].append(self.get_resource_dimen(ate))
#elif a_res_type.get_type() == "style":
# c_value["style"].append(self.get_resource_style(ate))
nb_i += 1
nb += 1
def get_resource_string(self, ate):
return [ate.get_value(), ate.get_key_data()]
def get_resource_id(self, ate):
x = [ate.get_value()]
if ate.key.get_data() == 0:
x.append("false")
elif ate.key.get_data() == 1:
x.append("true")
return x
def get_resource_bool(self, ate):
x = [ate.get_value()]
if ate.key.get_data() == 0:
x.append("false")
elif ate.key.get_data() == -1:
x.append("true")
return x
def get_resource_integer(self, ate):
return [ate.get_value(), ate.key.get_data()]
def get_resource_color(self, ate):
entry_data = ate.key.get_data()
return [ate.get_value(), "#%02x%02x%02x%02x" % (((entry_data >> 24) & 0xFF), ((entry_data >> 16) & 0xFF), ((entry_data >> 8) & 0xFF), (entry_data & 0xFF))]
def get_resource_dimen(self, ate):
try:
return [ate.get_value(), "%s%s" % (complexToFloat(ate.key.get_data()), DIMENSION_UNITS[ate.key.get_data() & COMPLEX_UNIT_MASK])]
except Exception, why:
androconf.warning(why.__str__())
return [ate.get_value(), ate.key.get_data()]
# FIXME
def get_resource_style(self, ate):
return ["", ""]
def get_packages_names(self):
return self.packages.keys()
def get_locales(self, package_name):
self._analyse()
return self.values[package_name].keys()
def get_types(self, package_name, locale):
self._analyse()
return self.values[package_name][locale].keys()
def get_public_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["public"]:
buff += '<public type="%s" name="%s" id="0x%08x" />\n' % (i[0], i[1], i[2])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_string_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["string"]:
buff += '<string name="%s">%s</string>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_strings_resources(self):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += "<packages>\n"
for package_name in self.get_packages_names():
buff += "<package name=\"%s\">\n" % package_name
for locale in self.get_locales(package_name):
buff += "<locale value=%s>\n" % repr(locale)
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["string"]:
buff += '<string name="%s">%s</string>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
buff += '</locale>\n'
buff += "</package>\n"
buff += "</packages>\n"
return buff.encode('utf-8')
def get_id_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["id"]:
if len(i) == 1:
buff += '<item type="id" name="%s"/>\n' % (i[0])
else:
buff += '<item type="id" name="%s">%s</item>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_bool_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["bool"]:
buff += '<bool name="%s">%s</bool>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_integer_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["integer"]:
buff += '<integer name="%s">%s</integer>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_color_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["color"]:
buff += '<color name="%s">%s</color>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_dimen_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["dimen"]:
buff += '<dimen name="%s">%s</dimen>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_id(self, package_name, rid, locale='\x00\x00'):
self._analyse()
try:
for i in self.values[package_name][locale]["public"]:
if i[2] == rid:
return i
except KeyError:
return None
def get_string(self, package_name, name, locale='\x00\x00'):
self._analyse()
try:
for i in self.values[package_name][locale]["string"]:
if i[0] == name:
return i
except KeyError:
return None
def get_items(self, package_name):
self._analyse()
return self.packages[package_name]
class PackageContext:
def __init__(self, current_package, stringpool_main, mTableStrings, mKeyStrings):
self.stringpool_main = stringpool_main
self.mTableStrings = mTableStrings
self.mKeyStrings = mKeyStrings
self.current_package = current_package
def get_mResId(self):
return self.current_package.mResId
def set_mResId(self, mResId):
self.current_package.mResId = mResId
class ARSCHeader:
def __init__(self, buff):
self.start = buff.get_idx()
self.type = unpack('<h', buff.read(2))[0]
self.header_size = unpack('<h', buff.read(2))[0]
self.size = unpack('<i', buff.read(4))[0]
#print "ARSCHeader", hex(self.start), hex(self.type), hex(self.header_size), hex(self.size)
class ARSCResTablePackage:
def __init__(self, buff):
self.start = buff.get_idx()
self.id = unpack('<i', buff.read(4))[0]
self.name = buff.readNullString(256)
self.typeStrings = unpack('<i', buff.read(4))[0]
self.lastPublicType = unpack('<i', buff.read(4))[0]
self.keyStrings = unpack('<i', buff.read(4))[0]
self.lastPublicKey = unpack('<i', buff.read(4))[0]
self.mResId = self.id << 24
#print "ARSCResTablePackage", hex(self.start), hex(self.id), hex(self.mResId), repr(self.name.decode("utf-16", errors='replace')), hex(self.typeStrings), hex(self.lastPublicType), hex(self.keyStrings), hex(self.lastPublicKey)
def get_name(self):
name = self.name.decode("utf-16", 'replace')
name = name[:name.find("\x00")]
return name
class ARSCResTypeSpec:
def __init__(self, buff, parent=None):
self.start = buff.get_idx()
self.parent = parent
self.id = unpack('<b', buff.read(1))[0]
self.res0 = unpack('<b', buff.read(1))[0]
self.res1 = unpack('<h', buff.read(2))[0]
self.entryCount = unpack('<i', buff.read(4))[0]
#print "ARSCResTypeSpec", hex(self.start), hex(self.id), hex(self.res0), hex(self.res1), hex(self.entryCount), "table:" + self.parent.mTableStrings.getString(self.id - 1)
self.typespec_entries = []
for i in range(0, self.entryCount):
self.typespec_entries.append(unpack('<i', buff.read(4))[0])
class ARSCResType:
def __init__(self, buff, parent=None):
self.start = buff.get_idx()
self.parent = parent
self.id = unpack('<b', buff.read(1))[0]
self.res0 = unpack('<b', buff.read(1))[0]
self.res1 = unpack('<h', buff.read(2))[0]
self.entryCount = unpack('<i', buff.read(4))[0]
self.entriesStart = unpack('<i', buff.read(4))[0]
self.mResId = (0xff000000 & self.parent.get_mResId()) | self.id << 16
self.parent.set_mResId(self.mResId)
#print "ARSCResType", hex(self.start), hex(self.id), hex(self.res0), hex(self.res1), hex(self.entryCount), hex(self.entriesStart), hex(self.mResId), "table:" + self.parent.mTableStrings.getString(self.id - 1)
self.config = ARSCResTableConfig(buff)
def get_type(self):
return self.parent.mTableStrings.getString(self.id - 1)
class ARSCResTableConfig:
def __init__(self, buff):
self.start = buff.get_idx()
self.size = unpack('<i', buff.read(4))[0]
self.imsi = unpack('<i', buff.read(4))[0]
self.locale = unpack('<i', buff.read(4))[0]
self.screenType = unpack('<i', buff.read(4))[0]
self.input = unpack('<i', buff.read(4))[0]
self.screenSize = unpack('<i', buff.read(4))[0]
self.version = unpack('<i', buff.read(4))[0]
self.screenConfig = 0
self.screenSizeDp = 0
if self.size >= 32:
self.screenConfig = unpack('<i', buff.read(4))[0]
if self.size >= 36:
self.screenSizeDp = unpack('<i', buff.read(4))[0]
self.exceedingSize = self.size - 36
if self.exceedingSize > 0:
androconf.warning("too much bytes !")
self.padding = buff.read(self.exceedingSize)
#print "ARSCResTableConfig", hex(self.start), hex(self.size), hex(self.imsi), hex(self.locale), repr(self.get_language()), repr(self.get_country()), hex(self.screenType), hex(self.input), hex(self.screenSize), hex(self.version), hex(self.screenConfig), hex(self.screenSizeDp)
def get_language(self):
x = self.locale & 0x0000ffff
return chr(x & 0x00ff) + chr((x & 0xff00) >> 8)
def get_country(self):
x = (self.locale & 0xffff0000) >> 16
return chr(x & 0x00ff) + chr((x & 0xff00) >> 8)
class ARSCResTableEntry:
def __init__(self, buff, mResId, parent=None):
self.start = buff.get_idx()
self.mResId = mResId
self.parent = parent
self.size = unpack('<h', buff.read(2))[0]
self.flags = unpack('<h', buff.read(2))[0]
self.index = unpack('<i', buff.read(4))[0]
#print "ARSCResTableEntry", hex(self.start), hex(self.mResId), hex(self.size), hex(self.flags), hex(self.index), self.is_complex()#, hex(self.mResId)
if self.flags & 1:
self.item = ARSCComplex(buff, parent)
else:
self.key = ARSCResStringPoolRef(buff, self.parent)
def get_index(self):
return self.index
def get_value(self):
return self.parent.mKeyStrings.getString(self.index)
def get_key_data(self):
return self.key.get_data_value()
def is_public(self):
return self.flags == 0 or self.flags == 2
def is_complex(self):
return (self.flags & 1) == 1
class ARSCComplex:
def __init__(self, buff, parent=None):
self.start = buff.get_idx()
self.parent = parent
self.id_parent = unpack('<i', buff.read(4))[0]
self.count = unpack('<i', buff.read(4))[0]
self.items = []
for i in range(0, self.count):
self.items.append((unpack('<i', buff.read(4))[0], ARSCResStringPoolRef(buff, self.parent)))
#print "ARSCComplex", hex(self.start), self.id_parent, self.count, repr(self.parent.mKeyStrings.getString(self.id_parent))
class ARSCResStringPoolRef:
def __init__(self, buff, parent=None):
self.start = buff.get_idx()
self.parent = parent
self.skip_bytes = buff.read(3)
self.data_type = unpack('<b', buff.read(1))[0]
self.data = unpack('<i', buff.read(4))[0]
#print "ARSCResStringPoolRef", hex(self.start), hex(self.data_type), hex(self.data)#, "key:" + self.parent.mKeyStrings.getString(self.index), self.parent.stringpool_main.getString(self.data)
def get_data_value(self):
return self.parent.stringpool_main.getString(self.data)
def get_data(self):
return self.data
def get_data_type(self):
return self.data_type
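# Example usage (the file name is hypothetical):
#   arscobj = ARSCParser(open("resources.arsc", "rb").read())
#   print get_arsc_info(arscobj)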
def get_arsc_info(arscobj):
buff = ""
for package in arscobj.get_packages_names():
buff += package + ":\n"
for locale in arscobj.get_locales(package):
buff += "\t" + repr(locale) + ":\n"
for ttype in arscobj.get_types(package, locale):
buff += "\t\t" + ttype + ":\n"
try:
tmp_buff = getattr(arscobj, "get_" + ttype + "_resources")(package, locale).decode("utf-8", 'replace').split("\n")
for i in tmp_buff:
buff += "\t\t\t" + i + "\n"
except AttributeError:
pass
return buff
| apache-2.0 | 988,063,409,939,832,800 | 31.306281 | 283 | 0.528192 | false |
sacsant/avocado-misc-tests | io/net/infiniband/mckey.py | 4 | 7803 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2016 IBM
# Author: Narasimhan V <[email protected]>
# Author: Manvanthara B Puttashankar <[email protected]>
"""
Mckey - RDMA CM multicast setup and simple data transfer test.
"""
import time
import netifaces
from netifaces import AF_INET
from avocado import Test
from avocado.utils.software_manager import SoftwareManager
from avocado.utils import process, distro
from avocado.utils.network.interfaces import NetworkInterface
from avocado.utils.network.hosts import LocalHost, RemoteHost
from avocado.utils.ssh import Session
class Mckey(Test):
"""
mckey Test.
"""
def setUp(self):
"""
Setup and install dependencies for the test.
"""
self.test_name = "mckey"
self.basic = self.params.get("basic_option", default="None")
self.ext = self.params.get("ext_option", default="None")
self.flag = self.params.get("ext_flag", default="0")
if self.basic == "None" and self.ext == "None":
self.cancel("No option given")
if self.flag == "1" and self.ext != "None":
self.option = self.ext
else:
self.option = self.basic
if process.system("ibstat", shell=True, ignore_status=True) != 0:
self.cancel("MOFED is not installed. Skipping")
pkgs = []
detected_distro = distro.detect()
if detected_distro.name == "Ubuntu":
pkgs.extend(["openssh-client", "iputils-ping"])
elif detected_distro.name == "SuSE":
pkgs.extend(["openssh", "iputils"])
else:
pkgs.extend(["openssh-clients", "iputils"])
smm = SoftwareManager()
for pkg in pkgs:
if not smm.check_installed(pkg) and not smm.install(pkg):
self.cancel("Not able to install %s" % pkg)
interfaces = netifaces.interfaces()
self.iface = self.params.get("interface", default="")
self.peer_ip = self.params.get("peer_ip", default="")
self.peer_user = self.params.get("peer_user", default="root")
self.peer_password = self.params.get("peer_password", '*',
default="None")
self.ipaddr = self.params.get("host_ip", default="")
self.netmask = self.params.get("netmask", default="")
local = LocalHost()
if self.iface[0:2] == 'ib':
self.networkinterface = NetworkInterface(self.iface, local,
if_type='Infiniband')
try:
self.networkinterface.add_ipaddr(self.ipaddr, self.netmask)
self.networkinterface.save(self.ipaddr, self.netmask)
except Exception:
self.networkinterface.save(self.ipaddr, self.netmask)
else:
self.networkinterface = NetworkInterface(self.iface, local)
try:
self.networkinterface.add_ipaddr(self.ipaddr, self.netmask)
self.networkinterface.save(self.ipaddr, self.netmask)
except Exception:
self.networkinterface.save(self.ipaddr, self.netmask)
self.networkinterface.bring_up()
self.session = Session(self.peer_ip, user=self.peer_user,
password=self.peer_password)
if self.iface not in interfaces:
self.cancel("%s interface is not available" % self.iface)
if self.peer_ip == "":
self.cancel("%s peer machine is not available" % self.peer_ip)
self.timeout = "2m"
self.local_ip = netifaces.ifaddresses(self.iface)[AF_INET][0]['addr']
self.ip_val = self.local_ip.split(".")[-1]
self.mtu = self.params.get("mtu", default=1500)
self.remotehost = RemoteHost(self.peer_ip, self.peer_user,
password=self.peer_password)
self.peer_interface = self.remotehost.get_interface_by_ipaddr(self.peer_ip).name
self.peer_networkinterface = NetworkInterface(self.peer_interface,
self.remotehost)
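        # The option strings from the test parameters use PEERIP, LOCALIP and
        # IPVAL placeholders; after the split, the first element is passed to
        # the server side (peer) and the second to the local client.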
self.option = self.option.replace("PEERIP", self.peer_ip)
self.option = self.option.replace("LOCALIP", self.local_ip)
self.option = self.option.replace("IPVAL", self.ip_val)
self.option_list = self.option.split(",")
if detected_distro.name == "Ubuntu":
cmd = "service ufw stop"
# FIXME: "redhat" as the distro name for RHEL is deprecated
# on Avocado versions >= 50.0. This is a temporary compatibility
# enabler for older runners, but should be removed soon
elif detected_distro.name in ['rhel', 'fedora', 'redhat']:
cmd = "systemctl stop firewalld"
elif detected_distro.name == "SuSE":
if detected_distro.version == 15:
cmd = "systemctl stop firewalld"
else:
cmd = "rcSuSEfirewall2 stop"
elif detected_distro.name == "centos":
cmd = "service iptables stop"
else:
self.cancel("Distro not supported")
if process.system(cmd, ignore_status=True, shell=True) != 0:
self.cancel("Unable to disable firewall")
output = self.session.cmd(cmd)
if not output.exit_status == 0:
self.cancel("Unable to disable firewall on peer")
def test(self):
"""
Test mckey
"""
if self.peer_networkinterface.set_mtu(self.mtu) is not None:
self.fail("Failed to set mtu in peer")
if self.networkinterface.set_mtu(self.mtu) is not None:
self.fail("Failed to set mtu in host")
self.log.info(self.test_name)
logs = "> /tmp/ib_log 2>&1 &"
if self.flag == "0":
self.option_list[0] = "%s -p 0x0002" % self.option_list[0]
self.option_list[1] = "%s -p 0x0002" % self.option_list[1]
self.option = "%s -p 0x0002" % self.option_list
cmd = " timeout %s %s %s %s" % (self.timeout, self.test_name,
self.option_list[0], logs)
output = self.session.cmd(cmd)
if not output.exit_status == 0:
self.fail("SSH connection (or) Server command failed")
time.sleep(5)
self.log.info("Client data - %s(%s)" %
(self.test_name, self.option_list[1]))
cmd = "timeout %s %s %s" \
% (self.timeout, self.test_name, self.option_list[1])
if process.system(cmd, shell=True, ignore_status=True) != 0:
self.fail("Client command failed")
time.sleep(5)
self.log.info("Server data - %s(%s)" %
(self.test_name, self.option_list[0]))
cmd = " timeout %s cat /tmp/ib_log && rm -rf /tmp/ib_log" \
% (self.timeout)
output = self.session.cmd(cmd)
if not output.exit_status == 0:
self.fail("Server output retrieval failed")
def tearDown(self):
"""
unset ip
"""
if self.networkinterface.set_mtu('1500') is not None:
self.fail("Failed to set mtu in host")
if self.peer_networkinterface.set_mtu('1500') is not None:
self.fail("Failed to set mtu in peer")
self.remotehost.remote_session.quit()
| gpl-2.0 | 1,454,047,511,195,583,700 | 43.335227 | 88 | 0.587979 | false |
caoilteguiry/dev_music | dev_music.py | 1 | 4261 | #!/usr/bin/env python
"""dev_music
Use the /dev/audio device file to play musical notes on UNIX systems.
Created by Caoilte Guiry
License: BSD License
Sample Usage:
$ python dev_music.py -i input_file.txt
Input File Format:
<note>, <duration>
<note>, <duration>
...
where <note> is a musical note in the range [A-G#] (flat notes, e.g. Bb, are
currently not supported), and duration is the length you wish to play that
note.
Example Input File:
A, 0.2
B, 0.2
C, 0.1
B, 0.1
C, 0.1
A, 0.4
D, 0.4
A, 0.2
B, 0.2
C, 0.1
B, 0.1
C, 0.1
A, 0.4
D, 0.4
TODOs:
* Allow specification of note delimiter in the input file (currently assumed as
being the newline character).
* Allow specification of note/duration delimiter (currently assumed as being
comma).
* In fact, use a proper input file format (perhaps try to find something
standardised)
* Add --tempo option
* Add aliases for flat notes (e.g. Bb->A#)
* Set upper limit on duration?
* Implement REPL?
* Document how this works a bit better.
"""
from __future__ import with_statement
import os
import sys
from optparse import OptionParser
__author__ = "Caoilte Guiry"
__copyright__ = "Copyright (c) 2011 Caoilte Guiry."
__version__ = "0.0.1"
__license__ = "BSD License"
# The NOTES dict represents the notes and their associated number of
# characters per line, e.g. for a D#, you would require a sequence of:
# cccccccccccc
# cccccccccccc
# ...
# where c is an arbitrary character.
NOTES = {
"E":23,
"F":22,
"F#":21,
"G":20,
"G#":19,
"A":18,
"A#":17,
"B":16,
"C":15,
"C#":14,
"D":13,
"D#":12,
}
# time playing a character consumes (imperical and very approximate)
CHAR_DURATION = 0.000125
class DevMusicError(Exception):
"""Parent exception type for dev_music."""
pass
class InvalidNoteError(DevMusicError):
"""An invalid note was specified."""
def __init__(self, note):
self.note = note
self.value = "Invalid Note '%s'" % note
def __str__(self):
return repr(self.value)
class InvalidDurationError(DevMusicError):
"""An invalid note duration was specified."""
def __init__(self, duration):
self.duration = duration
self.value = "Invalid Duration '%s'" % duration
def __str__(self):
return repr(self.value)
def get_text_stream(note, duration):
"""Return a text stream for a specified note and duration."""
# normalise inputs
note = note.upper()
try:
duration = float(duration)
except ValueError:
raise InvalidDurationError(duration)
try:
chars_per_line = NOTES[note]
except KeyError:
raise InvalidNoteError(note)
lines_required = int(duration/(CHAR_DURATION*chars_per_line))
# 'c' is an arbitrary character
return ("c"*chars_per_line+"\n")*lines_required
def main():
"""Parse args/options, read input file and write streams to /dev/audio."""
# First, verify we have a /dev/audio
if not os.path.exists("/dev/audio"):
print "Sorry, your OS does not have a /dev/audio device file"
sys.exit(1)
parser = OptionParser()
parser.add_option("-i", "--input-file", dest="input_file",
help="The name of the input file",
metavar="<input_file>", default=None)
options, args = parser.parse_args()
if not options.input_file:
print "You must specify an input file"
parser.print_usage()
sys.exit(1)
try:
with open(options.input_file) as input_fh:
try:
with open("/dev/audio", "w") as devaudio_fh:
for line in input_fh.readlines():
note, duration = line.rstrip().split(",")
data = get_text_stream(note, duration)
devaudio_fh.write(data)
except IOError:
print "Failed to open /dev/audio for writing"
sys.exit(1)
except DevMusicError, error_msg:
print error_msg
sys.exit(1)
except IOError:
print "Error: Could not open input file '%s' for reading." % \
(options.input_file)
sys.exit(1)
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause | -3,578,082,631,517,333,500 | 23.488506 | 79 | 0.609481 | false |
tlangerak/Multi-Agent-Systems | build/lib.win-amd64-2.7/spade/Platform.py | 3 | 9628 | # -*- coding: utf-8 -*-
from spade.AMS import AmsAgentDescription
from spade.DF import DfAgentDescription, Service
from spade.Agent import PlatformAgent, require_login
import spade.Envelope
import spade.AID
import spade.Behaviour
import spade.ACLMessage
import spade.BasicFipaDateTime
#from spade.wui import require_login
from os.path import abspath
class PlatformRestart(Exception):
    def __str__(self):
        return ""  # __str__ must return a string, not None
class SpadePlatform(PlatformAgent):
class RouteBehaviour(spade.Behaviour.Behaviour):
#This behavior routes messages between agents.
#Also uses MTPs when different protocols are required (HTTP, ...)
def __init__(self):
spade.Behaviour.Behaviour.__init__(self)
def _process(self):
msg = self._receive(True)
if (msg is not None):
self.myAgent.DEBUG("SPADE Platform Received a message: " + str(msg), 'info')
if msg.getSender() == self.myAgent.getAID():
                    # Prevent message loops: ignore messages sent by the platform itself
self.myAgent.DEBUG("ACC LOOP HOLE", "warn")
return
#prepare to send the message to each one of the receivers separately
to_list = msg.getReceivers()
d = {}
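                # Group receivers by their first transport address so each
                # destination URI gets the message only once.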
for to in to_list:
if (self.myAgent.getAID().getName() != to.getName()):
if not to.getAddresses()[0] in d:
d[to.getAddresses()[0]] = list()
d[to.getAddresses()[0]].append(to)
for k, v in d.items():
newmsg = msg
newmsg.to = v
try:
protocol, receiver_URI = k.split("://")
except:
self.myAgent.DEBUG("Malformed Agent Address URI: " + str(k), "error")
break
# Check if one of our MTPs handles this protocol
#switch(protocol)
if protocol in self.myAgent.mtps.keys():
self.myAgent.DEBUG("Message through protocol " + str(protocol))
payload = newmsg
envelope = spade.Envelope.Envelope()
envelope.setFrom(newmsg.getSender())
for i in newmsg.getReceivers():
envelope.addTo(i)
envelope.setAclRepresentation(newmsg.getAclRepresentation())
envelope.setPayloadLength(len(str(payload)))
envelope.setPayloadEncoding("US-ASCII")
envelope.setDate(spade.BasicFipaDateTime.BasicFipaDateTime())
self.myAgent.mtps[protocol].send(envelope, payload)
else:
# Default case: it's an XMPP message
self.myAgent.DEBUG("Message through protocol XMPP", 'info')
platform = self.myAgent.getSpadePlatformJID().split(".", 1)[1]
if not platform in receiver_URI:
# Outside platform
self.myAgent.DEBUG("Message for another platform", 'info')
self.myAgent.send(newmsg, "jabber")
else:
# THIS platform
self.myAgent.DEBUG("Message for current platform", 'info')
for recv in v:
#self.myAgent._sendTo(newmsg, recv.getName(), "jabber")
self.myAgent.send(newmsg, "jabber")
"""
if k[7:] != self.myAgent.getSpadePlatformJID():
self.myAgent._sendTo(newmsg, k[7:])
else:
for recv in v:
self.myAgent._sendTo(newmsg, recv.getName())
                # Forward the msg to every recipient
                # We should also check the protocol and use a gateway when necessary.
#print "Message to", to.getName(), "... Posting!"
"""
else:
pass
##self.myAgent.DEBUG("ACC::dying... this shouldn't happen", 'err')
def __init__(self, node, password, server, port, config=None):
PlatformAgent.__init__(self, node, password, server, port, config=config, debug=[])
self.mtps = {}
def _setup(self):
self.setDefaultBehaviour(self.RouteBehaviour())
self.wui.registerController("index", self.index)
self.wui.registerController("agents", self.agents)
self.wui.registerController("services", self.services)
self.wui.registerController("roster", self.get_roster)
self.wui.setPort(8008)
self.wui.start()
import mtps
# Load MTPs
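        # Each configured MTP is imported from the mtps package by name and is
        # expected to expose an INSTANCE(name, config, agent) factory returning
        # the transport object.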
for name, _mtp in self.config.acc.mtp.items():
try:
mod = "mtps."+name
mod = __import__(mod, globals(), locals(),[name])
self.mtps[_mtp['protocol']] = mod.INSTANCE(name, self.config, self)
except Exception, e:
self.DEBUG("EXCEPTION IMPORTING MTPS: " + str(e), 'err', 'acc')
def takeDown(self):
for k, _mtp in self.mtps.items():
try:
_mtp.stop()
del self.mtps[k]
except:
pass
def setXMPPServer(self, server):
self.server = server
#Controllers
def index(self):
import sys
import time
servername = self.getDomain()
platform = self.getName()
version = str(sys.version)
the_time = str(time.ctime())
doc_path = abspath('.')
return "webadmin_indigo.pyra", dict(name=platform, servername=servername, platform=platform, version=version, time=the_time, doc_path=doc_path)
@require_login
def agents(self):
import sys
import time
#so = self.session
servername = self.getDomain()
platform = self.getName()
version = str(sys.version)
the_time = str(time.ctime())
search = self.searchAgent(AmsAgentDescription())
"""for agent in search:
if not agent.has_key("fipa:state"):
agent["fipa:state"] = ""
"""
# Build AWUIs dict
awuis = {}
if search:
aw = ""
for agent in search:
if agent.getAID():
aw = "#"
for addr in agent.getAID().getAddresses():
if "awui://" in addr:
aw = addr.replace("awui://", "http://")
break
awuis[agent.getAID().getName()] = aw
self.DEBUG("AWUIs: " + str(awuis))
return "agents.pyra", dict(name=platform, servername=servername, platform=platform, version=version, time=the_time, agents=search, awuis=awuis)
@require_login
def services(self):
import sys
import time
servername = self.getDomain()
platform = self.getName()
version = str(sys.version)
the_time = str(time.ctime())
try:
search = self.searchService(DfAgentDescription())
except Exception, e:
print "Exception: " + str(e)
servs = {}
for dad in search:
for service in dad.getServices():
if service.getType() not in servs.keys():
servs[service.getType()] = []
new_dad = dad
new_dad.services = [service]
s = Service(dad=new_dad)
servs[service.getType()].append(s)
self.DEBUG("Services: " + str(servs))
return "services.pyra", dict(name=platform, servername=servername, platform=platform, version=version, time=the_time, services=servs)
@require_login
def get_roster(self):
import sys
import time
import copy
servername = self.getDomain()
platform = self.getName()
version = str(sys.version)
the_time = str(time.ctime())
roster = copy.copy(self.server.DB.db)
for server, v in roster.items():
try:
del v["__ir__"]
except: pass
for r in v.values():
try:
del r["roster"]["__ir__"]
except: pass
return "rosterdb.pyra", dict(name=platform, servername=servername, platform=platform, version=version, time=the_time, roster=roster)
def getMembers(self, aname):
msg = spade.ACLMessage.ACLMessage()
msg.setOntology("spade:x:organization")
template = spade.Behaviour.ACLTemplate()
template.setConversationId(msg.getConversationId())
t = spade.Behaviour.MessageTemplate(template)
b = self.GetMembersBehav()
b.msg = msg
b.aname = aname
self.addBehaviour(b, t)
b.join()
return b.result
class GetMembersBehav(spade.Behaviour.OneShotBehaviour):
def _process(self):
self.result = []
self.msg.addReceiver(spade.AID.aid(self.aname, addresses=["xmpp://" + self.aname]))
self.msg.setContent("MEMBERS")
self.myAgent.send(self.msg)
rep = None
rep = self._receive(True, 20)
if rep:
print "The members list arrived"
self.result = rep.getContent().split(",")
| lgpl-2.1 | 1,134,033,479,792,324,200 | 39.116667 | 151 | 0.521811 | false |
semonte/intellij-community | python/testData/highlighting/awaitInDictPy36.py | 23 | 1744 | async def f11(x):
y = {await<error descr="expression expected">:</error> 10 for await<error descr="expression expected"> </error>in []} # fail
await x
def f12(x):
y = {await: 10 for await in []}
return x
async def f21(x):
y = {mapper(await<error descr="expression expected">)</error>: 10 for await<error descr="expression expected"> </error>in []} # fail
await x
def f22(x):
y = {mapper(await): 10 for await in []}
return x
async def f31(x):
await<error descr="expression expected"> </error>= [] # fail
y = {i: 10 for i in await<error descr="expression expected">}</error> # fail
await x
def f32(x):
await = []
y = {i: 10 for i in await}
return x
async def f41(x):
y = {await z: 10 for z in []}
await x
async def f42(x):
y = {mapper(await z): 10 for z in []}
await x
async def f43(x):
y = {z: 10 for <error descr="can't assign to await expression">await z</error> in []} # fail
await x
async def f44(x):
y = {z: 10 for z in await x}
await x
async def f51():
await<error descr="expression expected"> </error>= 5 # fail
return {await<error descr="expression expected">:</error> 10} # fail
def f52():
await = 5
return {await: 10}
async def f61():
await<error descr="expression expected"> </error>= 5 # fail
return {"a": 10, await<error descr="expression expected">:</error> 10, "b": 10} # fail
def f62():
await = 5
return {"a": 10, await: 10, "b": 10}
async def f71(x):
return {await x: 10}
async def f72(x):
return {"a": 10, await x: 10, "b": 10}
async def f81(x):
{fun: await fun() for fun in funcs if await smth}
{fun: await fun() async for fun in funcs if await smth} | apache-2.0 | 6,558,491,934,449,130,000 | 20.280488 | 137 | 0.59289 | false |
kalregi/What-sWrong_SVG | SpanLayout.py | 1 | 9311 | #!/usr/bin/env python3
# -*- coding: utf-8, vim: expandtab:ts=4 -*-
from AbstractEdgeLayout import AbstractEdgeLayout
from utils.Counter import Counter
from utils.HashMultiMapArrayList import HashMultiMapArrayList
from SVGWriter import *
"""
 * A SpanLayout lays out edges as rectangular blocks under or above the tokens that the edge covers. The label is
* written into these blocks. If there are multiple edge types then all spans of the same type appear in the same
* contiguous vertical area.
*
* @author Sebastian Riedel
"""
class SpanLayout(AbstractEdgeLayout):
"""
    * Should the graph be drawn upside down (reverted).
"""
@property
def revert(self):
return self._revert
@revert.setter
def revert(self, value):
self._revert = value
"""
* Should we draw separation lines between the areas for different span types.
"""
@property
def separationLines(self):
return self._separationLines
@separationLines.setter
def separationLines(self, value):
self._separationLines = value
"""
* The order/vertical layer in which the area of a certain type should be drawn.
"""
@property
def orders(self):
return self._orders
@orders.setter
def orders(self, value):
self._orders = value
"""
* How much space should at least be between the label of a span and the right and left edges of the span.
"""
@property
def totalTextMargin(self):
return self._totalTextMargin
@totalTextMargin.setter
def totalTextMargin(self, value):
self._totalTextMargin = value
"""
* Creates a new SpanLayout.
"""
def __init__(self):
super().__init__()
self._baseline = 1
self._revert = True
self._separationLines = True
self._orders = {}
self._totalTextMargin = 6
"""
* Sets the order/vertical layer in which the area of a certain type should be drawn.
*
* @param type the type we want to change the order for.
* @param order the order/vertical layer in which the area of the given type should be drawn.
"""
def setTypeOrder(self, type, order):
self._orders[type] = order
"""
* Returns the order/vertical layer in which the area of a certain type should be drawn.
*
* @param type the type we want to get the order for.
* @return the order/vertical layer in which the area of the given type should be drawn.
"""
def getOrder(self, type):
if type in self._orders:
order = self._orders[type]
else:
order = None
return order
"""
* Should we draw separation lines between the areas for different span types.
*
* @return true iff separation lines should be drawn.
"""
def isSeparationLines(self):
return self._separationLines
"""
* Should we draw separation lines between the areas for different span types.
*
* @param separationLines true iff separation lines should be drawn.
"""
# See the setter above...
"""
* For each token that has a self-loop we need the token to be wide enough. This method calculates the needed token
* width for a given set of edges. That is, for all self-loops in the set of edges we calculate how wide the
* corresponding token need to be.
*
* @param edges the set of edges that can contain self-loops.
* @param g2d the graphics object needed to find out the actual width of text.
* @return A mapping from tokens with self-loops to pixel widths.
"""
def estimateRequiredTokenWidths(self, edges, scene):
result = {}
for edge in edges:
if edge.From == edge.To:
labelwith = Text(scene, (0, 0), edge.label, 12, scene.color).getWidth() # Original fontsize is 8
if edge.From in result:
                    width = max(labelwith, result[edge.From])  # keep the widest label seen so far for this token
else:
width = labelwith
result[edge.From] = width + self._totalTextMargin
return result
"""
* Lays out the edges as spans (blocks) under or above the tokens they contain.
*
* @param edges the edges to layout.
* @param bounds the bounds of the tokens the spans connect.
* @param g2d the graphics object to draw on.
* @return the dimensions of the drawn graph.
"""
def layoutEdges(self, edges, bounds, scene):
if len(self.visible) > 0:
edges = set(edges)
edges &= self._visible # Intersection
# find out height of each edge
self._shapes.clear()
depth = Counter()
offset = Counter()
dominates = HashMultiMapArrayList()
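        # An edge dominates another when it has to be drawn above it: its type
        # has a higher order, or the orders are equal and it covers, semi-covers,
        # exactly covers (with a lexicographic tie-break) or overlaps the other
        # edge. The nesting depth derived from this relation determines the
        # vertical level of each span.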
for over in edges:
for under in edges:
orderOver = self.getOrder(over.getTypePrefix())
orderUnder = self.getOrder(under.getTypePrefix())
if orderOver > orderUnder or orderOver == orderUnder and (
over.covers(under) or over.coversSemi(under) or
over.coversExactly(under) and
over.lexicographicOrder(under) > 0 or
over.overlaps(under) and over.getMinIndex() < under.getMinIndex()):
dominates.add(over, under)
for edge in edges:
self.calculateDepth(dominates, depth, edge)
# calculate maxHeight and maxWidth
maxDepth = depth.getMaximum()
if len(edges) > 0:
maxHeight = (maxDepth + 1) * self._heightPerLevel + 3
else:
maxHeight = 1
# in case there are no edges that cover other edges (depth == 0) we need
# to increase the height slightly because loops on the same token
# have height of 1.5 levels
# build map from vertex to incoming/outgoing edges
        vertex2edges = HashMultiMapArrayList()  # XXX: not a LinkedList!
for edge in edges:
vertex2edges.add(edge.From, edge)
vertex2edges.add(edge.To, edge)
# assign starting and end points of edges by sorting the edges per vertex
maxWidth = 0
# draw each edge
for edge in edges:
# set Color and remember old color
old = scene.color
scene.color = self.getColor(edge.type)
# prepare label (will be needed for spacing)
labelwith = Text(scene, (0, 0), edge.label, 12, scene.color).getWidth() * 0
# draw lines
if self._revert:
spanLevel = maxDepth - depth[edge]
else:
spanLevel = depth[edge]
height = self._baseline + maxHeight - (spanLevel + 1) * self._heightPerLevel + offset[edge]
            # scene.setStroke(self.getStroke(edge)) # TODO: this is wrong
buffer = 2
fromBounds = bounds[edge.From]
toBounds = bounds[edge.To]
minX = min(fromBounds.From, toBounds.From)
maxX = max(fromBounds.To, toBounds.To)
if maxX > maxWidth:
maxWidth = maxX + 1
if maxX - minX < labelwith + self._totalTextMargin:
middle = minX + (maxX - minX) // 2
textWidth = labelwith + self._totalTextMargin
minX = middle - textWidth // 2
maxX = middle + textWidth // 2
# connection
if self.curve:
scene.add(Rectangle(scene, (minX, height-buffer), maxX-minX, self._heightPerLevel - 2 * buffer,
(255, 255, 255), (0, 0, 0), 1))
else:
scene.add(Rectangle(scene, (minX, height-buffer), maxX-minX, self._heightPerLevel - 2 * buffer,
(255, 255, 255), (0, 0, 0), 1))
# write label in the middle under
labelx = minX + (maxX - minX) // 2 - labelwith // 2
labely = height + self._heightPerLevel // 2
scene.add(Text(scene, (labelx, labely), edge.getLabelWithNote(), 12, scene.color))
scene.color = old
self._shapes[(minX, height-buffer, maxX-minX, self._heightPerLevel - 2 * buffer)] = edge
# int maxWidth = 0;
for bound in bounds.values():
if bound.To > maxWidth:
maxWidth = bound.To
if self._separationLines:
# find largest depth for each prefix type
minDepths = {}
for edge in edges:
edgeDepth = depth[edge]
typeDepth = minDepths.get(edge.getTypePrefix())
if typeDepth is None or typeDepth > edgeDepth:
typeDepth = edgeDepth
minDepths[edge.getTypePrefix()] = typeDepth
height = self._baseline - 1
for d in minDepths.values():
if not self._revert:
height += (maxDepth - d) * self._heightPerLevel
else:
height += d * self._heightPerLevel
scene.color = (211, 211, 211) # Color.LIGHT_GRAY
scene.add(Line(scene, (0, height), (maxWidth, height), color=scene.color))
return maxWidth+scene.offsetx, maxHeight+scene.offsety
| gpl-3.0 | 4,679,376,453,182,341,000 | 34.403042 | 119 | 0.583181 | false |
MrTheodor/bakery | src/start_backmapping.py | 1 | 22692 | #!/usr/bin/env python
"""
Copyright (C) 2015-2016 Jakub Krajniak <[email protected]>
This file is distributed under free software licence:
you can redistribute it and/or modify it under the terms of the
GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import espressopp # NOQA
import math # NOQA
from mpi4py import MPI
import random
import os
import time
import files_io
import tools_sim as tools
import gromacs_topology
import tools_backmapping
import tools as general_tools
from app_args import _args_backmapping as _args
# Boltzmann constant in GROMACS units, kJ/(mol K)
kb = 0.0083144621
# Storage options
simulation_author = os.environ.get('USER', 'xxx')
simulation_email = '[email protected]'
# Mostly you do not need to modify lines below.
def main(args): # NOQA
h5md_group = 'atoms'
time0 = time.time()
lj_cutoff = args.lj_cutoff
cg_cutoff = args.cg_cutoff
max_cutoff = max([args.lj_cutoff, args.cg_cutoff])
print('Welcome in bakery!\n')
print('Reading hybrid topology and coordinate file')
generate_exclusions = False #args.exclusion_list is None or not os.path.exists(args.exclusion_list)
input_conf = gromacs_topology.read(args.top, doRegularExcl=generate_exclusions)
input_gro_conf = files_io.GROFile(args.conf)
input_gro_conf.read()
if not generate_exclusions:
exclusion_file = open(args.exclusion_list, 'r')
exclusions = [map(int, x.split()) for x in exclusion_file.readlines()]
print('Read exclusion list from {} (total: {})'.format(args.exclusion_list, len(exclusions)))
input_conf = input_conf._replace(exclusions=exclusions)
else:
exclusion_list_file = 'exclusion_{}.list'.format(args.top.split('.')[0])
with open(exclusion_list_file, 'w') as fel:
for p in input_conf.exclusions:
fel.write('{} {}\n'.format(*p))
print('Save exclusion list: {} ({})'.format(exclusion_list_file, len(input_conf.exclusions)))
box = input_gro_conf.box
print('\nSetting up simulation...')
# Tune simulation parameter according to arguments
integrator_step = args.int_step
if args.trj_collect > 0:
integrator_step = min([integrator_step, args.trj_collect])
k_eq_step = int(args.eq / integrator_step)
long_step = int(args.long / integrator_step)
dynamic_res_time = 0
if args.alpha > 0.0:
dynamic_res_time = int(int(1.0 / args.alpha) / integrator_step) + 2
if args.nonuniform_lambda:
dynamic_res_time += int(10000/integrator_step)
print('Running nonuniform lambda, extended running time by {} steps'.format(10000))
if args.skin:
skin = args.skin
else:
skin = 0.16
rng_seed = args.rng_seed
if args.rng_seed == -1:
rng_seed = random.randint(1, 10000)
args.rng_seed = rng_seed
random.seed(rng_seed)
_args().save_to_file('{}_{}_params.out'.format(args.output_prefix, rng_seed), args)
print('Skin: {}'.format(skin))
print('RNG Seed: {}'.format(rng_seed))
print('Time step: {}'.format(args.dt))
print('LJ cutoff: {}'.format(lj_cutoff))
print('CG cutoff: {}'.format(cg_cutoff))
print('Boltzmann constant = {}'.format(kb))
system = espressopp.System()
system.rng = espressopp.esutil.RNG(rng_seed)
part_prop, all_particles, adress_tuple = tools.genParticleList(
input_conf, input_gro_conf, adress=True, use_charge=True)
print('Reads {} particles with properties {}'.format(len(all_particles), part_prop))
if input_conf.charges:
print('Total charge: {}'.format(sum(input_conf.charges)))
# Make output from AT particles.
at_gro_conf = files_io.GROFile.copy(input_gro_conf, [x for p in adress_tuple for x in p[1:]], renumber=True)
gro_whole = files_io.GROFile.copy(input_gro_conf, [x for p in adress_tuple for x in p], renumber=True)
# Generate initial velocities, only for CG particles, AT particles will get the CG particle velocity.
particle_list = []
index_adrat = part_prop.index('adrat')
print('Generating velocities from Maxwell-Boltzmann distribution for T={}'.format(
args.temperature))
part_prop.append('v')
cg_particles = [x for x in all_particles if x.adrat == 0]
vx, vy, vz = espressopp.tools.velocities.gaussian(
args.temperature,
len(cg_particles),
[x.mass for x in cg_particles],
kb=kb)
cg_id = 0
last_vel = (0.0, 0.0, 0.0)
last_lambda = 0.0
last_res_id = -1
index_lambda = part_prop.index('lambda_adr')
for p in all_particles:
t = list(p)
if p.adrat == 0: # this is CG particle
last_vel = (vx[cg_id], vy[cg_id], vz[cg_id])
cg_id += 1
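        # With --nonuniform_lambda every residue starts from its own random
        # negative lambda offset, so molecules reach full atomistic resolution
        # at different times during the backmapping.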
if args.nonuniform_lambda and p.res_id != last_res_id:
last_res_id = p.res_id
last_lambda = -1.0 * random.uniform(0.0, 10000 * args.alpha)
if args.nonuniform_lambda:
t[index_lambda] = last_lambda
del t[index_adrat]
t.append(espressopp.Real3D(last_vel))
particle_list.append(t)
del part_prop[index_adrat]
print('Running with box {}'.format(box))
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
system.skin = skin
if args.node_grid:
nodeGrid = map(int, args.node_grid.split(','))
else:
nodeGrid = espressopp.tools.decomp.nodeGrid(MPI.COMM_WORLD.size)
print('Number of nodes {}, node-grid: {}'.format(
MPI.COMM_WORLD.size, nodeGrid))
if args.cell_grid:
cellGrid = map(int, args.cell_grid.split(','))
else:
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, max_cutoff, skin)
print('Cell grid: {}'.format(cellGrid))
hook_after_init = lambda *_, **__: True
hook_setup_interactions = lambda *_, **__: True
if args.hooks and os.path.exists(args.hooks):
print('Found {}'.format(args.hooks))
l = {}
execfile(args.hooks, globals(), l)
hook_after_init = l.get('hook_after_init', hook_after_init)
hook_setup_interactions = l.get('hook_setup_interactions', hook_setup_interactions)
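    # A hooks file is plain Python run through execfile(); a minimal sketch
    # (the callback names are fixed by this script, the bodies are hypothetical):
    #   def hook_after_init(system, particle_list, adress_tuple, input_conf, part_prop):
    #       pass
    #   def hook_setup_interactions(system, input_conf, verletlistAT, verletlistCG):
    #       pass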
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
hook_after_init(system, particle_list, adress_tuple, input_conf, part_prop)
system.storage.addParticles(map(tuple, particle_list), *part_prop)
system.storage.decompose()
vs_list = espressopp.FixedVSList(system.storage)
vs_list.addTuples(adress_tuple)
at_particle_group = espressopp.ParticleGroup(system.storage)
cg_particle_group = espressopp.ParticleGroup(system.storage)
particle_groups = {'at': at_particle_group, 'cg': cg_particle_group}
at_particle_ids = set()
cg_particle_ids = set()
for a in adress_tuple:
cg_particle_ids.add(a[0])
cg_particle_group.add(a[0])
for at in a[1:]:
at_particle_group.add(at)
at_particle_ids.add(at)
integrator = espressopp.integrator.VelocityVerletHybrid(system, vs_list)
integrator.dt = args.dt
system.integrator = integrator
system.storage.decompose()
print('Prepared:')
print('Bonds: {}'.format(sum(len(x) for x in input_conf.bondtypes.values())))
print('Angles: {}'.format(sum(len(x) for x in input_conf.angletypes.values())))
print('Dihedrals: {}'.format(sum(len(x) for x in input_conf.dihedraltypes.values())))
print('Pairs: {}'.format(sum(len(x) for x in input_conf.pairtypes.values())))
print('CG particles: {}'.format(len(cg_particle_ids)))
print('AT particles: {}'.format(len(at_particle_ids)))
print('Setting dynamic resolution')
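    # DynamicResolution drives the lambda of every virtual-site tuple from
    # args.initial_resolution towards 1.0 at a rate of args.alpha per time
    # step; it stays inactive during the CG equilibration and is switched on
    # right before the backmapping phase.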
dynamic_res = espressopp.integrator.DynamicResolution(
system,
vs_list,
args.alpha)
integrator.addExtension(dynamic_res)
dynamic_res.active = False
dynamic_res.resolution = args.initial_resolution
if args.table_groups is None:
table_groups = []
else:
table_groups = args.table_groups.split(',')
print('Using table groups: {}'.format(table_groups))
# Define interactions.
verletlistAT, verletlistCG = tools_backmapping.setupSinglePhase(
system, args, input_conf, at_particle_ids, cg_particle_ids, table_groups=table_groups)
hook_setup_interactions(system, input_conf, verletlistAT, verletlistCG)
print('Number of interactions: {}'.format(system.getNumberOfInteractions()))
# Define the thermostat
temperature = args.temperature * kb
print('Temperature: {} ({}), gamma: {}'.format(args.temperature, temperature, args.thermostat_gamma))
print('Thermostat: {}'.format(args.thermostat))
if args.thermostat == 'lv':
if args.thermostat_whole:
print('Enable thermostat on all particles, not only atomistic')
thermostat = espressopp.integrator.LangevinThermostat(system)
elif args.thermostat_cg:
print('Enable thermostat only on CG particles')
thermostat = espressopp.integrator.LangevinThermostatOnGroup(system, cg_particle_group)
else:
thermostat = espressopp.integrator.LangevinThermostatOnGroup(system, at_particle_group)
thermostat.temperature = temperature
thermostat.gamma = args.thermostat_gamma
elif args.thermostat == 'vr':
thermostat = espressopp.integrator.StochasticVelocityRescaling(system)
thermostat.temperature = temperature
thermostat.coupling = args.thermostat_gamma
integrator.addExtension(thermostat)
print("Added tuples, decomposing now ...")
output_file = 'trjout.h5'
h5file = '{}_{}_{}_{}'.format(
args.output_prefix,
rng_seed, args.alpha,
output_file)
print('Trajectory saved to: {}'.format(h5file))
traj_file = espressopp.io.DumpH5MD(
system, h5file,
group_name=h5md_group,
static_box=True,
author=simulation_author,
email=simulation_email,
store_lambda=True,
store_species=True,
store_force=args.store_force,
store_state=args.store_state,
is_single_prec=True,
chunk_size=256)
traj_file.set_parameters({
'temperature': temperature,
'thermostat': args.thermostat,
'skin': skin,
'rng_seed': rng_seed,
'lj_cutoff': lj_cutoff,
'cg_cutoff': cg_cutoff,
'vl_cutoff': max_cutoff,
'integrator_step': integrator_step,
'dt': args.dt
})
ext_analysis, system_analysis = tools.setSystemAnalysis(
system, integrator, args, args.energy_collect, '_first', dynamic_res, particle_groups)
system_analysis.dump()
k_trj_collect = int(math.ceil(float(args.trj_collect) / integrator_step))
k_trj_flush = 25 if 25 < 10*k_trj_collect else 10*k_trj_collect
if k_trj_collect == 0:
k_trj_flush = 0
print('Dynamic resolution, rate={}'.format(args.alpha))
print('CG equilibration for {}'.format(k_eq_step * integrator_step))
print('Collect trajectory every {} step'.format(k_trj_collect * integrator_step))
print('Flush trajectory every {} step'.format(k_trj_flush * integrator_step))
print('Collect energy every {} step'.format(args.energy_collect))
print('Atomistic long run for {}'.format(long_step * integrator_step))
system.storage.decompose()
if args.gro_collect > 0:
gro_collect_filename = '{}confout_dump_{}_{}.gro'.format(
args.output_prefix, args.alpha, rng_seed)
dump_conf_gro = espressopp.io.DumpGRO(system, integrator, filename=gro_collect_filename, append=True)
ext_dump_conf_gro = espressopp.integrator.ExtAnalyze(
dump_conf_gro, args.gro_collect)
integrator.addExtension(ext_dump_conf_gro)
print('Store .gro file {}'.format(gro_collect_filename))
if args.remove_com > 0:
print('Removes total velocity of the system every {} steps'.format(args.remove_com))
total_velocity = espressopp.analysis.CMVelocity(system)
ext_remove_com = espressopp.integrator.ExtAnalyze(total_velocity, args.remove_com)
integrator.addExtension(ext_remove_com)
gro_whole.update_positions(system)
gro_whole.write(
'{}confout_full_{}_{}_before.gro'.format(args.output_prefix, args.alpha, args.rng_seed), force=True)
print('Number of particles: {}'.format(len(particle_list)))
time_sim0 = time.time()
############# SIMULATION: EQUILIBRATION PHASE #####################
system_analysis.dump()
global_int_step = 0
for k in range(k_eq_step):
system_analysis.info()
if k_trj_collect > 0 and k % k_trj_collect == 0:
traj_file.dump(global_int_step * integrator_step, global_int_step * integrator_step * args.dt)
if k_trj_flush > 0 and k % k_trj_flush == 0:
traj_file.flush() # Write HDF5 to disk.
integrator.run(integrator_step)
global_int_step += 1
time_cg = time.time() - time_sim0
system_analysis.dump()
######### Now run backmapping. #######################
time_sim0 = time.time()
has_capforce = False
if args.cap_force and not args.cap_force_lj:
has_capforce = True
print('Define maximum cap-force during the backmapping (max: {})'.format(args.cap_force))
cap_force = espressopp.integrator.CapForce(system, args.cap_force)
print('Activating dynamic resolution changer')
dynamic_res.active = True
print('Change time-step to {}'.format(args.dt_dyn))
integrator.dt = args.dt_dyn
if has_capforce:
thermostat.disconnect()
integrator.addExtension(cap_force)
thermostat.connect()
print('End of CG simulation. Start dynamic resolution, dt={}'.format(
args.dt_dyn))
two_phase = args.two_phase or args.second_phase_em
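    # Two-phase backmapping: phase one runs with only bonded terms plus the CG
    # non-bonded term while lambda grows; phase two re-enables the atomistic
    # non-bonded interactions and ramps lambda up again (optionally with a
    # different alpha2 rate or an energy-minimisation pass).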
if two_phase:
ext_analysis.disconnect()
verletlistCG.disconnect()
verletlistAT.disconnect()
verletlistCG = tools_backmapping.setupFirstPhase(
system, args, input_conf, at_particle_ids, cg_particle_ids)
ext_analysis2, system_analysis2 = tools.setSystemAnalysis(
system,
integrator,
args,
args.energy_collect_bck,
'_one',
dynamic_res,
particle_groups)
# Run first phase, only bonded terms and non-bonded CG term is enabled.
for k in range(dynamic_res_time):
if k_trj_collect > 0 and k % k_trj_collect == 0:
traj_file.dump(global_int_step * integrator_step, global_int_step * integrator_step * args.dt)
if k_trj_flush > 0 and k % k_trj_flush == 0:
traj_file.flush() # Write HDF5 to disk.
system_analysis2.info()
integrator.run(integrator_step)
global_int_step += 1
confout_aa = '{}confout_aa_{}_{}_phase_one.gro'.format(args.output_prefix, args.alpha, rng_seed)
at_gro_conf.update_positions(system)
at_gro_conf.write(confout_aa, force=True)
gro_whole.update_positions(system)
gro_whole.write(
'{}confout_full_{}_{}_phase_one.gro'.format(args.output_prefix, args.alpha, args.rng_seed), force=True)
print('Atomistic configuration write to: {}'.format(confout_aa))
########## SECOND PHASE ################
# Change interactions.
print('Second phase, switch on non-bonded interactions, time-step: {}'.format(args.dt_dyn))
verletlistCG.disconnect()
verletlistAT, verletlistCG = tools_backmapping.setupSecondPhase(
system, args, input_conf, at_particle_ids, cg_particle_ids)
# Reset dynamic res, start again.
if args.alpha2 is not None:
print('Change dynamic resolution alpha: {}'.format(args.alpha2))
dynamic_res.rate = args.alpha2
dynamic_res_time = int(int(1.0 / args.alpha2) / integrator_step) + 1 if args.alpha2 > 0.0 else 0
dynamic_res.active = True
dynamic_res.resolution = args.initial_resolution
# Reset system analysis.
ext_analysis2.disconnect()
ext_analysis3, system_analysis3 = tools.setSystemAnalysis(
system, integrator, args, args.energy_collect_bck, '_two', dynamic_res, particle_groups)
if args.second_phase_em:
minimize_energy = espressopp.integrator.MinimizeEnergy(system, 0.0001, 10.0, 0.001 * input_conf.Lx)
while not minimize_energy.run(100, True):
pass
else:
# Simulation
for k in range(dynamic_res_time):
if k_trj_collect > 0 and k % k_trj_collect == 0:
traj_file.dump(global_int_step * integrator_step, global_int_step * integrator_step * args.dt)
if k_trj_flush > 0 and k % k_trj_flush == 0:
traj_file.flush() # Write HDF5 to disk.
system_analysis3.info()
integrator.run(integrator_step)
global_int_step += 1
else:
# Single phase backmapping
ext_analysis.interval = args.energy_collect_bck
print('Running a single-phase backmapping.')
for k in range(dynamic_res_time):
if k_trj_collect > 0 and k % k_trj_collect == 0:
traj_file.dump(global_int_step * integrator_step, global_int_step * integrator_step * args.dt)
if k_trj_flush > 0 and k % k_trj_flush == 0 and k > 0:
traj_file.flush() # Write HDF5 to disk.
system_analysis.info()
integrator.run(integrator_step)
global_int_step += 1
# After backmapping, switch off dynamic resolution
print('Disconnect dynamic_res')
dynamic_res.active = False
time_bck = time.time() - time_sim0
gro_whole.update_positions(system)
gro_whole.write(
'{}confout_full_{}_{}_phase_two.gro'.format(args.output_prefix, args.alpha, rng_seed), force=True)
confout_aa = '{}confout_aa_{}_{}_phase_two.gro'.format(args.output_prefix, args.alpha, rng_seed)
at_gro_conf.update_positions(system)
at_gro_conf.write(confout_aa, force=True)
print('Atomistic configuration write to: {}'.format(confout_aa))
############ Now run normal AT simulation.############
print('End of dynamic resolution, change energy measuring accuracy to {}'.format(
args.energy_collect))
print('Set back time-step to: {}'.format(args.dt))
time_sim0 = time.time()
ext_analysis.interval = args.energy_collect
if two_phase:
ext_analysis3.interval = args.energy_collect
else:
ext_analysis.interval = args.energy_collect
if has_capforce:
if args.cap_force_ramp is None:
cap_force.disconnect()
print('Cap-force switched off')
else:
cap_force.ramp = args.cap_force_ramp
print('Cap-force switched gradually, decrease of {}'.format(cap_force.ramp))
integrator.dt = args.dt
print('Running for {} steps'.format(long_step * integrator_step))
for k in range(long_step):
if k_trj_collect > 0 and k % k_trj_collect == 0:
traj_file.dump(global_int_step * integrator_step, global_int_step * integrator_step * args.dt)
if k_trj_flush > 0 and k % k_trj_flush == 0:
traj_file.flush() # Write HDF5 to disk.
if two_phase:
system_analysis3.info()
else:
system_analysis.info()
integrator.run(integrator_step)
global_int_step += 1
if args.em > 0:
if has_capforce:
cap_force.disconnect()
        print('Running basic energy minimization')
if two_phase:
system_analysis3.info()
else:
system_analysis.info()
minimize_energy = espressopp.integrator.MinimizeEnergy(system, args.em_gamma, args.em_ftol, args.em_max_d * input_gro_conf.box[0], True)
minimize_energy.run(args.em, True)
print('Energy information:')
if two_phase:
system_analysis3.info()
else:
system_analysis.info()
time_at = time.time() - time_sim0
## Save benchmark data
if os.path.exists('{}benchmark.dat'.format(args.output_prefix)):
benchmark_file = open('{}benchmark.dat'.format(args.output_prefix), 'a')
else:
benchmark_file = open('{}benchmark.dat'.format(args.output_prefix), 'w')
benchmark_file.write('N_at\tN_cg\tCPUs\talpha\ttime_cg\ttime_bck\ttime_at\n')
benchmark_file.write('{Nat}\t{Ncg}\t{CPUs}\t{alpha}\t{time_cg}\t{time_bck}\t{time_at}\n'.format(
Nat=len(at_particle_ids), Ncg=len(cg_particle_ids), CPUs=MPI.COMM_WORLD.size, alpha=args.alpha,
time_cg=time_cg,time_bck=time_bck,time_at=time_at))
benchmark_file.close()
## End save benchmark data
gro_whole.update_positions(system)
gro_whole.write(
'{}confout_final_full_{}_{}.gro'.format(args.output_prefix, args.alpha, rng_seed), force=True)
confout_aa = '{}confout_final_aa_{}_{}.gro'.format(args.output_prefix, args.alpha, rng_seed)
at_gro_conf.update_positions(system)
at_gro_conf.write(confout_aa, force=True)
print('Final atomistic configuration written to: {}'.format(confout_aa))
print('Final hybrid configuration written to: {}'.format(
'{}confout_final_full_{}_{}.gro'.format(args.output_prefix, args.alpha, rng_seed)))
# Write atomistic topology
hyb_top = files_io.GROMACSTopologyFile(args.top)
hyb_top.read()
at_topology = general_tools.get_atomistic_topology(
hyb_top,
virtual_atomtypes=[
v['atnum'] for v in input_conf.atomtypeparams.values() if v['particletype'] == 'V'])
topol_aa = '{}topol_final_aa_{}_{}.top'.format(args.output_prefix, args.alpha, rng_seed)
at_topology.write(topol_aa)
print('Final AA topology: {}'.format(topol_aa))
traj_file.close()
print('Finished!')
print('Total time: {}'.format(time.time() - time0))
espressopp.tools.analyse.final_info(system, integrator, verletlistAT, time0, time.time())
if __name__ == '__main__':
args = _args().parse_args()
if args.debug:
import ipdb
with ipdb.launch_ipdb_on_exception():
main(args)
else:
main(args)
| gpl-3.0 | 9,107,173,588,670,538,000 | 38.880492 | 144 | 0.635687 | false |
okuta/chainer | chainer/optimizers/corrected_momentum_sgd.py | 5 | 4381 | import chainer
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import optimizer
from chainer import types
if types.TYPE_CHECKING:
import typing_extensions as tpe
class CorrectedMomentumSGDHyperparameter(tpe.Protocol):
"""Protocol class for hyperparameter of corrected momentum SGD.
This is only for PEP 544 compliant static type checkers.
"""
lr = None # type: float
momentum = None # type: float
_default_hyperparam = optimizer.Hyperparameter() # type: CorrectedMomentumSGDHyperparameter # NOQA
_default_hyperparam.lr = 0.01
_default_hyperparam.momentum = 0.9
class CorrectedMomentumSGDRule(optimizer.UpdateRule):
"""Update rule for the corrected momentum SGD.
See :class:`~chainer.optimizers.CorrectedMomentumSGD` for the default
values of the hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
lr (float): Learning rate.
momentum (float): Exponential decay rate of the first order moment.
"""
def __init__(self, parent_hyperparam=None, lr=None, momentum=None):
super(CorrectedMomentumSGDRule, self).__init__(
parent_hyperparam or _default_hyperparam)
if lr is not None:
self.hyperparam.lr = lr
if momentum is not None:
self.hyperparam.momentum = momentum
def init_state(self, param):
with chainer.using_device(param.device):
self.state['v'] = param.device.xp.zeros_like(param.data)
# For iDeep
if isinstance(param.data, intel64.mdarray):
self.state['v'] = intel64.ideep.array(
self.state['v'], itype=intel64.ideep.wgt_array)
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
v = self.state['v']
if isinstance(v, intel64.mdarray):
v.inplace_axpby(self.hyperparam.momentum,
-1, grad)
param.data += self.hyperparam.lr * v
else:
v *= self.hyperparam.momentum
v -= grad
param.data += self.hyperparam.lr * v
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
cuda.elementwise(
'T grad, T lr, T momentum',
'T param, T v',
'''v = momentum * v - grad;
param += lr * v;''',
'momentum_sgd')(
grad, self.hyperparam.lr, self.hyperparam.momentum,
param.data, self.state['v'])
class CorrectedMomentumSGD(optimizer.GradientMethod):
"""Momentum SGD optimizer.
This implements momentum correction discussed in the third section of
`Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour
<https://arxiv.org/abs/1706.02677>`_.
:class:`~chainer.optimizers.MomentumSGD` implements the equation (10) of
the paper. This optimizer implements the equation (9).
To get better understanding between the two methods,
we show the equivalence between the equation (9) and modification of
the equation (10) that takes momentum correction into account.
First, we set :math:`v_{t} = \\eta_{t} u_t`.
We substitute this relation to the equation (10).
.. math::
v_{t+1} &= m\\frac{\\eta_{t+1}}{\\eta_{t}}v_t + \\eta_{t+1}g_t \\\\
&= m\\frac{\\eta_{t+1}}{\\eta_{t}}\\eta_{t}u_t +
\\eta_{t+1}g_t \\\\
&= \\eta_{t+1}(m u_t + g_t) \\\\
From this result, we derive :math:`u_{t+1} = m u_t + g_t`, which is how
update tensors are calculated by
:class:`~chainer.optimizers.CorrectedMomentumSGD`. Thus, the equivalence
is shown.
Args:
lr (float): Learning rate.
momentum (float): Exponential decay rate of the first order moment.
"""
def __init__(self, lr=_default_hyperparam.lr,
momentum=_default_hyperparam.momentum):
super(CorrectedMomentumSGD, self).__init__()
self.hyperparam.lr = lr
self.hyperparam.momentum = momentum
lr = optimizer.HyperparameterProxy('lr')
momentum = optimizer.HyperparameterProxy('momentum')
def create_update_rule(self):
return CorrectedMomentumSGDRule(self.hyperparam)
| mit | -2,446,195,163,499,081,700 | 32.7 | 99 | 0.618808 | false |
florentchandelier/keras | keras/layers/convolutional.py | 23 | 11778 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from .. import activations, initializations, regularizers, constraints
from ..utils.theano_utils import shared_zeros, on_gpu
from ..layers.core import Layer
if on_gpu():
from theano.sandbox.cuda import dnn
class Convolution1D(Layer):
def __init__(self, input_dim, nb_filter, filter_length,
init='uniform', activation='linear', weights=None,
border_mode='valid', subsample_length=1,
W_regularizer=None, b_regularizer=None, activity_regularizer=None,
W_constraint=None, b_constraint=None):
if border_mode not in {'valid', 'full', 'same'}:
raise Exception('Invalid border mode for Convolution1D:', border_mode)
super(Convolution1D, self).__init__()
self.nb_filter = nb_filter
self.input_dim = input_dim
self.filter_length = filter_length
self.subsample_length = subsample_length
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.subsample = (1, subsample_length)
self.border_mode = border_mode
self.input = T.tensor3()
self.W_shape = (nb_filter, input_dim, filter_length, 1)
self.W = self.init(self.W_shape)
self.b = shared_zeros((nb_filter,))
self.params = [self.W, self.b]
self.regularizers = []
self.W_regularizer = regularizers.get(W_regularizer)
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
if self.activity_regularizer:
self.activity_regularizer.set_layer(self)
self.regularizers.append(self.activity_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.constraints = [self.W_constraint, self.b_constraint]
if weights is not None:
self.set_weights(weights)
def get_output(self, train):
X = self.get_input(train)
X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3)
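# Added note: the 1D input of shape (batch, steps, input_dim) is lifted to a 4D
# tensor of shape (batch, input_dim, steps, 1) so that the 1D convolution can be
# expressed with Theano's 2D conv2d below; the output is reshaped back afterwards.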
border_mode = self.border_mode
if border_mode == 'same':
border_mode = 'full'
conv_out = T.nnet.conv.conv2d(X, self.W, border_mode=border_mode, subsample=self.subsample)
if self.border_mode == 'same':
shift_x = (self.filter_length - 1) // 2
conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, :]
output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
output = T.reshape(output, (output.shape[0], output.shape[1], output.shape[2])).dimshuffle(0, 2, 1)
return output
def get_config(self):
return {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"nb_filter": self.nb_filter,
"filter_length": self.filter_length,
"init": self.init.__name__,
"activation": self.activation.__name__,
"border_mode": self.border_mode,
"subsample_length": self.subsample_length,
"W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
"b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None,
"activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
"W_constraint": self.W_constraint.get_config() if self.W_constraint else None,
"b_constraint": self.b_constraint.get_config() if self.b_constraint else None}
class Convolution2D(Layer):
def __init__(self, nb_filter, stack_size, nb_row, nb_col,
init='glorot_uniform', activation='linear', weights=None,
border_mode='valid', subsample=(1, 1),
W_regularizer=None, b_regularizer=None, activity_regularizer=None,
W_constraint=None, b_constraint=None):
if border_mode not in {'valid', 'full', 'same'}:
raise Exception('Invalid border mode for Convolution2D:', border_mode)
super(Convolution2D, self).__init__()
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.subsample = subsample
self.border_mode = border_mode
self.nb_filter = nb_filter
self.stack_size = stack_size
self.nb_row = nb_row
self.nb_col = nb_col
self.input = T.tensor4()
self.W_shape = (nb_filter, stack_size, nb_row, nb_col)
self.W = self.init(self.W_shape)
self.b = shared_zeros((nb_filter,))
self.params = [self.W, self.b]
self.regularizers = []
self.W_regularizer = regularizers.get(W_regularizer)
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
if self.activity_regularizer:
self.activity_regularizer.set_layer(self)
self.regularizers.append(self.activity_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.constraints = [self.W_constraint, self.b_constraint]
if weights is not None:
self.set_weights(weights)
def get_output(self, train):
X = self.get_input(train)
border_mode = self.border_mode
if on_gpu() and dnn.dnn_available():
if border_mode == 'same':
assert(self.subsample == (1, 1))
pad_x = (self.nb_row - self.subsample[0]) // 2
pad_y = (self.nb_col - self.subsample[1]) // 2
conv_out = dnn.dnn_conv(img=X,
kerns=self.W,
border_mode=(pad_x, pad_y))
else:
conv_out = dnn.dnn_conv(img=X,
kerns=self.W,
border_mode=border_mode,
subsample=self.subsample)
else:
if border_mode == 'same':
border_mode = 'full'
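# Added note: without cuDNN, 'same' padding is emulated by running a 'full'
# convolution and then cropping the result back to the input spatial size
# (see the shift_x/shift_y slicing a few lines below).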
conv_out = T.nnet.conv.conv2d(X, self.W,
border_mode=border_mode,
subsample=self.subsample)
if self.border_mode == 'same':
shift_x = (self.nb_row - 1) // 2
shift_y = (self.nb_col - 1) // 2
conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, shift_y:X.shape[3] + shift_y]
return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
def get_config(self):
return {"name": self.__class__.__name__,
"nb_filter": self.nb_filter,
"stack_size": self.stack_size,
"nb_row": self.nb_row,
"nb_col": self.nb_col,
"init": self.init.__name__,
"activation": self.activation.__name__,
"border_mode": self.border_mode,
"subsample": self.subsample,
"W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
"b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None,
"activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
"W_constraint": self.W_constraint.get_config() if self.W_constraint else None,
"b_constraint": self.b_constraint.get_config() if self.b_constraint else None}
class MaxPooling1D(Layer):
def __init__(self, pool_length=2, stride=None, ignore_border=True):
super(MaxPooling1D, self).__init__()
self.pool_length = pool_length
self.stride = stride
if self.stride:
self.st = (self.stride, 1)
else:
self.st = None
self.input = T.tensor3()
self.poolsize = (pool_length, 1)
self.ignore_border = ignore_border
def get_output(self, train):
X = self.get_input(train)
X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3)
output = downsample.max_pool_2d(X, ds=self.poolsize, st=self.st, ignore_border=self.ignore_border)
output = output.dimshuffle(0, 2, 1, 3)
return T.reshape(output, (output.shape[0], output.shape[1], output.shape[2]))
def get_config(self):
return {"name": self.__class__.__name__,
"stride": self.stride,
"pool_length": self.pool_length,
"ignore_border": self.ignore_border}
class MaxPooling2D(Layer):
def __init__(self, poolsize=(2, 2), stride=None, ignore_border=True):
super(MaxPooling2D, self).__init__()
self.input = T.tensor4()
self.poolsize = poolsize
self.stride = stride
self.ignore_border = ignore_border
def get_output(self, train):
X = self.get_input(train)
output = downsample.max_pool_2d(X, ds=self.poolsize, st=self.stride, ignore_border=self.ignore_border)
return output
def get_config(self):
return {"name": self.__class__.__name__,
"poolsize": self.poolsize,
"ignore_border": self.ignore_border,
"stride": self.stride}
class UpSample1D(Layer):
def __init__(self, length=2):
super(UpSample1D, self).__init__()
self.length = length
self.input = T.tensor3()
def get_output(self, train):
X = self.get_input(train)
output = theano.tensor.extra_ops.repeat(X, self.length, axis=1)
return output
def get_config(self):
return {"name": self.__class__.__name__,
"length": self.length}
class UpSample2D(Layer):
def __init__(self, size=(2, 2)):
super(UpSample2D, self).__init__()
self.input = T.tensor4()
self.size = size
def get_output(self, train):
X = self.get_input(train)
Y = theano.tensor.extra_ops.repeat(X, self.size[0], axis=2)
output = theano.tensor.extra_ops.repeat(Y, self.size[1], axis=3)
return output
def get_config(self):
return {"name": self.__class__.__name__,
"size": self.size}
class ZeroPadding2D(Layer):
def __init__(self, pad=(1, 1)):
super(ZeroPadding2D, self).__init__()
self.pad = pad
self.input = T.tensor4()
def get_output(self, train):
X = self.get_input(train)
pad = self.pad
in_shape = X.shape
out_shape = (in_shape[0], in_shape[1], in_shape[2] + 2 * pad[0], in_shape[3] + 2 * pad[1])
out = T.zeros(out_shape)
indices = (slice(None), slice(None), slice(pad[0], in_shape[2] + pad[0]), slice(pad[1], in_shape[3] + pad[1]))
return T.set_subtensor(out[indices], X)
def get_config(self):
return {"name": self.__class__.__name__,
"pad": self.pad}
| mit | -2,243,043,435,512,501,200 | 38.656566 | 118 | 0.57081 | false |
mola/qgis | python/plugins/fTools/tools/doGeometry.py | 1 | 33173 | # -*- coding: utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from ui_frmGeometry import Ui_Dialog
import ftools_utils
import math
from itertools import izip
import voronoi
from sets import Set
class GeometryDialog(QDialog, Ui_Dialog):
def __init__(self, iface, function):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
self.myFunction = function
self.buttonOk = self.buttonBox_2.button( QDialogButtonBox.Ok )
QObject.connect(self.toolOut, SIGNAL("clicked()"), self.outFile)
if self.myFunction == 1:
QObject.connect(self.inShape, SIGNAL("currentIndexChanged(QString)"), self.update)
self.manageGui()
self.success = False
self.cancel_close = self.buttonBox_2.button( QDialogButtonBox.Close )
self.progressBar.setValue(0)
def update(self):
self.cmbField.clear()
inputLayer = unicode(self.inShape.currentText())
if inputLayer != "":
changedLayer = ftools_utils.getVectorLayerByName(inputLayer)
changedField = ftools_utils.getFieldList(changedLayer)
for i in changedField:
self.cmbField.addItem(unicode(changedField[i].name()))
self.cmbField.addItem("--- " + self.tr( "Merge all" ) + " ---")
def accept(self):
if self.inShape.currentText() == "":
QMessageBox.information(self, self.tr("Geometry"), self.tr( "Please specify input vector layer" ) )
elif self.outShape.text() == "":
QMessageBox.information(self, self.tr("Geometry"), self.tr( "Please specify output shapefile" ) )
elif self.lineEdit.isVisible() and self.lineEdit.value() < 0.00:
QMessageBox.information(self, self.tr("Geometry"), self.tr( "Please specify valid tolerance value" ) )
elif self.cmbField.isVisible() and self.cmbField.currentText() == "":
QMessageBox.information(self, self.tr("Geometry"), self.tr( "Please specify valid UID field" ) )
else:
self.outShape.clear()
self.geometry( self.inShape.currentText(), self.lineEdit.value(), self.cmbField.currentText() )
def outFile(self):
self.outShape.clear()
(self.shapefileName, self.encoding) = ftools_utils.saveDialog(self)
if self.shapefileName is None or self.encoding is None:
return
self.outShape.setText(QString(self.shapefileName))
def manageGui(self):
if self.myFunction == 1: # Singleparts to multipart
self.setWindowTitle( self.tr( "Singleparts to multipart" ) )
self.lineEdit.setVisible(False)
self.label.setVisible(False)
self.label_2.setText( self.tr( "Output shapefile" ) )
self.cmbField.setVisible(True)
self.field_label.setVisible(True)
elif self.myFunction == 2: # Multipart to singleparts
self.setWindowTitle( self.tr( "Multipart to singleparts" ) )
self.lineEdit.setVisible(False)
self.label.setVisible(False)
self.label_2.setText(self.tr( "Output shapefile" ) )
self.cmbField.setVisible(False)
self.field_label.setVisible(False)
elif self.myFunction == 3: # Extract nodes
self.setWindowTitle( self.tr( "Extract nodes" ) )
self.lineEdit.setVisible(False)
self.label.setVisible(False)
self.cmbField.setVisible(False)
self.field_label.setVisible(False)
elif self.myFunction == 4: # Polygons to lines
self.setWindowTitle( self.tr( "Polygons to lines" ) )
self.label_2.setText( self.tr( "Output shapefile" ) )
self.label_3.setText( self.tr( "Input polygon vector layer" ) )
self.label.setVisible(False)
self.lineEdit.setVisible(False)
self.cmbField.setVisible(False)
self.field_label.setVisible(False)
elif self.myFunction == 5: # Export/Add geometry columns
self.setWindowTitle( self.tr( "Export/Add geometry columns" ) )
self.label_2.setText( self.tr( "Output shapefile" ) )
self.label_3.setText( self.tr( "Input vector layer" ) )
self.label.setVisible(False)
self.lineEdit.setVisible(False)
self.cmbField.setVisible(False)
self.field_label.setVisible(False)
elif self.myFunction == 7: # Polygon centroids
self.setWindowTitle( self.tr( "Polygon centroids" ) )
self.label_2.setText( self.tr( "Output point shapefile" ) )
self.label_3.setText( self.tr( "Input polygon vector layer" ) )
self.label.setVisible( False )
self.lineEdit.setVisible( False )
self.cmbField.setVisible( False )
self.field_label.setVisible( False )
else:
if self.myFunction == 8: # Delaunay triangulation
self.setWindowTitle( self.tr( "Delaunay triangulation" ) )
self.label_3.setText( self.tr( "Input point vector layer" ) )
self.label.setVisible( False )
self.lineEdit.setVisible( False )
elif self.myFunction == 10: # Voronoi Polygons
self.setWindowTitle( self.tr( "Voronoi polygon" ) )
self.label_3.setText( self.tr( "Input point vector layer" ) )
self.label.setText( self.tr( "Buffer region" ) )
self.lineEdit.setSuffix(" %")
self.lineEdit.setRange(0, 100)
self.lineEdit.setSingleStep(5)
self.lineEdit.setValue(0)
else: # Polygon from layer extent
self.setWindowTitle( self.tr( "Polygon from layer extent" ) )
self.label_3.setText( self.tr( "Input layer" ) )
self.label.setVisible( False )
self.lineEdit.setVisible( False )
self.label_2.setText( self.tr( "Output polygon shapefile" ) )
self.cmbField.setVisible( False )
self.field_label.setVisible( False )
self.resize( 381, 100 )
myList = []
self.inShape.clear()
if self.myFunction == 3 or self.myFunction == 6:
myList = ftools_utils.getLayerNames( [ QGis.Polygon, QGis.Line ] )
elif self.myFunction == 4 or self.myFunction == 7:
myList = ftools_utils.getLayerNames( [ QGis.Polygon ] )
elif self.myFunction == 8 or self.myFunction == 10:
myList = ftools_utils.getLayerNames( [ QGis.Point ] )
elif self.myFunction == 9:
myList = ftools_utils.getLayerNames( "all" )
else:
myList = ftools_utils.getLayerNames( [ QGis.Point, QGis.Line, QGis.Polygon ] )
self.inShape.addItems( myList )
return
#1: Singleparts to multipart
#2: Multipart to singleparts
#3: Extract nodes
#4: Polygons to lines
#5: Export/Add geometry columns
#6: Simplify geometries (disabled)
#7: Polygon centroids
#8: Delaunay triangulation
#9: Polygon from layer extent
#10:Voronoi polygons
def geometry( self, myLayer, myParam, myField ):
if self.myFunction == 9:
vlayer = ftools_utils.getMapLayerByName( myLayer )
else:
vlayer = ftools_utils.getVectorLayerByName( myLayer )
error = False
check = QFile( self.shapefileName )
if check.exists():
if not QgsVectorFileWriter.deleteShapeFile( self.shapefileName ):
QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Unable to delete existing shapefile." ) )
return
self.buttonOk.setEnabled( False )
self.testThread = geometryThread( self.iface.mainWindow(), self, self.myFunction, vlayer, myParam,
myField, self.shapefileName, self.encoding )
QObject.connect( self.testThread, SIGNAL( "runFinished(PyQt_PyObject)" ), self.runFinishedFromThread )
QObject.connect( self.testThread, SIGNAL( "runStatus(PyQt_PyObject)" ), self.runStatusFromThread )
QObject.connect( self.testThread, SIGNAL( "runRange(PyQt_PyObject)" ), self.runRangeFromThread )
self.cancel_close.setText( self.tr("Cancel") )
QObject.connect( self.cancel_close, SIGNAL( "clicked()" ), self.cancelThread )
self.testThread.start()
def cancelThread( self ):
self.testThread.stop()
self.buttonOk.setEnabled( True )
def runFinishedFromThread( self, success ):
self.testThread.stop()
self.buttonOk.setEnabled( True )
extra = ""
if success == "math_error":
QMessageBox.warning( self, self.tr("Geometry"), self.tr("Error processing specified tolerance!\nPlease choose larger tolerance...") )
if not QgsVectorFileWriter.deleteShapeFile( self.shapefileName ):
QMessageBox.warning( self, self.tr("Geometry"), self.tr( "Unable to delete incomplete shapefile." ) )
elif success == "attr_error":
QMessageBox.warning( self, self.tr("Geometry"), self.tr("At least two features must have same attribute value!\nPlease choose another field...") )
if not QgsVectorFileWriter.deleteShapeFile( self.shapefileName ):
QMessageBox.warning( self, self.tr("Geometry"), self.tr( "Unable to delete incomplete shapefile." ) )
else:
if success == "valid_error":
extra = self.tr("One or more features in the output layer may have invalid "
+ "geometry, please check using the check validity tool\n")
success = True
self.cancel_close.setText( "Close" )
QObject.disconnect( self.cancel_close, SIGNAL( "clicked()" ), self.cancelThread )
if success:
addToTOC = QMessageBox.question( self, self.tr("Geometry"),
self.tr( "Created output shapefile:\n%1\n%2\n\nWould you like to add the new layer to the TOC?" ).arg( unicode( self.shapefileName ) ).arg( extra ),
QMessageBox.Yes, QMessageBox.No, QMessageBox.NoButton )
if addToTOC == QMessageBox.Yes:
if not ftools_utils.addShapeToCanvas( unicode( self.shapefileName ) ):
QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Error loading output shapefile:\n%1" ).arg( unicode( self.shapefileName ) ))
else:
QMessageBox.warning( self, self.tr("Geometry"), self.tr( "Error writing output shapefile." ) )
def runStatusFromThread( self, status ):
self.progressBar.setValue( status )
def runRangeFromThread( self, range_vals ):
self.progressBar.setRange( range_vals[ 0 ], range_vals[ 1 ] )
class geometryThread( QThread ):
def __init__( self, parentThread, parentObject, function, vlayer, myParam, myField, myName, myEncoding ):
QThread.__init__( self, parentThread )
self.parent = parentObject
self.running = False
self.myFunction = function
self.vlayer = vlayer
self.myParam = myParam
self.myField = myField
self.myName = myName
self.myEncoding = myEncoding
def run( self ):
self.running = True
if self.myFunction == 1: # Singleparts to multipart
success = self.single_to_multi()
elif self.myFunction == 2: # Multipart to singleparts
success = self.multi_to_single()
elif self.myFunction == 3: # Extract nodes
success = self.extract_nodes()
elif self.myFunction == 4: # Polygons to lines
success = self.polygons_to_lines()
elif self.myFunction == 5: # Export/Add geometry columns
success = self.export_geometry_info()
# note that 6 used to be associated with simplify_geometry
elif self.myFunction == 7: # Polygon centroids
success = self.polygon_centroids()
elif self.myFunction == 8: # Delaunay triangulation
success = self.delaunay_triangulation()
elif self.myFunction == 9: # Polygon from layer extent
success = self.layer_extent()
elif self.myFunction == 10: # Voronoi Polygons
success = self.voronoi_polygons()
self.emit( SIGNAL( "runFinished(PyQt_PyObject)" ), success )
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
def stop(self):
self.running = False
def single_to_multi( self ):
vprovider = self.vlayer.dataProvider()
allAttrs = vprovider.attributeIndexes()
vprovider.select( allAttrs )
fields = vprovider.fields()
allValid = True
geomType = self.singleToMultiGeom(vprovider.geometryType())
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, geomType, vprovider.crs() )
inFeat = QgsFeature()
outFeat = QgsFeature()
inGeom = QgsGeometry()
outGeom = QgsGeometry()
index = vprovider.fieldNameIndex( self.myField )
if not index == -1:
unique = ftools_utils.getUniqueValues( vprovider, int( index ) )
else:
unique = [QVariant(QString())]
nFeat = vprovider.featureCount() * len( unique )
nElement = 0
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
merge_all = self.myField == QString("--- " + self.tr( "Merge all" ) + " ---")
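# Added note: this compares against the sentinel "--- Merge all ---" item that
# GeometryDialog.update() appends to the field combo box above.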
if not len( unique ) == self.vlayer.featureCount() \
or merge_all:
for i in unique:
vprovider.rewind()
multi_feature= []
first = True
vprovider.select(allAttrs)
while vprovider.nextFeature( inFeat ):
atMap = inFeat.attributeMap()
if not merge_all:
idVar = atMap[ index ]
else:
idVar = QVariant(QString())
if idVar.toString().trimmed() == i.toString().trimmed() \
or merge_all:
if first:
atts = atMap
first = False
inGeom = QgsGeometry( inFeat.geometry() )
vType = inGeom.type()
feature_list = self.extractAsMulti( inGeom )
multi_feature.extend( feature_list )
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
outFeat.setAttributeMap( atts )
outGeom = QgsGeometry( self.convertGeometry(multi_feature, vType) )
if not outGeom.isGeosValid():
allValid = "valid_error"
outFeat.setGeometry(outGeom)
writer.addFeature(outFeat)
del writer
else:
return "attr_error"
return allValid
def multi_to_single( self ):
vprovider = self.vlayer.dataProvider()
allAttrs = vprovider.attributeIndexes()
vprovider.select( allAttrs )
fields = vprovider.fields()
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, vprovider.geometryType(), vprovider.crs() )
inFeat = QgsFeature()
outFeat = QgsFeature()
inGeom = QgsGeometry()
outGeom = QgsGeometry()
nFeat = vprovider.featureCount()
nElement = 0
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
while vprovider.nextFeature( inFeat ):
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
inGeom = inFeat.geometry()
atMap = inFeat.attributeMap()
featList = self.extractAsSingle( inGeom )
outFeat.setAttributeMap( atMap )
for i in featList:
outFeat.setGeometry( i )
writer.addFeature( outFeat )
del writer
return True
def extract_nodes( self ):
vprovider = self.vlayer.dataProvider()
allAttrs = vprovider.attributeIndexes()
vprovider.select( allAttrs )
fields = vprovider.fields()
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, QGis.WKBPoint, vprovider.crs() )
inFeat = QgsFeature()
outFeat = QgsFeature()
inGeom = QgsGeometry()
outGeom = QgsGeometry()
nFeat = vprovider.featureCount()
nElement = 0
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
while vprovider.nextFeature( inFeat ):
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
inGeom = inFeat.geometry()
atMap = inFeat.attributeMap()
pointList = ftools_utils.extractPoints( inGeom )
outFeat.setAttributeMap( atMap )
for i in pointList:
outFeat.setGeometry( outGeom.fromPoint( i ) )
writer.addFeature( outFeat )
del writer
return True
def polygons_to_lines( self ):
vprovider = self.vlayer.dataProvider()
allAttrs = vprovider.attributeIndexes()
vprovider.select( allAttrs )
fields = vprovider.fields()
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, QGis.WKBLineString, vprovider.crs() )
inFeat = QgsFeature()
outFeat = QgsFeature()
inGeom = QgsGeometry()
outGeom = QgsGeometry()
nFeat = vprovider.featureCount()
nElement = 0
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
while vprovider.nextFeature(inFeat):
multi = False
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
inGeom = inFeat.geometry()
if inGeom.isMultipart():
multi = True
atMap = inFeat.attributeMap()
lineList = self.extractAsLine( inGeom )
outFeat.setAttributeMap( atMap )
for h in lineList:
outFeat.setGeometry( outGeom.fromPolyline( h ) )
writer.addFeature( outFeat )
del writer
return True
def export_geometry_info( self ):
vprovider = self.vlayer.dataProvider()
allAttrs = vprovider.attributeIndexes()
vprovider.select( allAttrs )
( fields, index1, index2 ) = self.checkGeometryFields( self.vlayer )
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, vprovider.geometryType(), vprovider.crs() )
inFeat = QgsFeature()
outFeat = QgsFeature()
inGeom = QgsGeometry()
nFeat = vprovider.featureCount()
nElement = 0
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
while vprovider.nextFeature(inFeat):
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
nElement += 1
inGeom = inFeat.geometry()
( attr1, attr2 ) = self.simpleMeasure( inGeom )
outFeat.setGeometry( inGeom )
atMap = inFeat.attributeMap()
outFeat.setAttributeMap( atMap )
outFeat.addAttribute( index1, QVariant( attr1 ) )
outFeat.addAttribute( index2, QVariant( attr2 ) )
writer.addFeature( outFeat )
del writer
return True
def polygon_centroids( self ):
vprovider = self.vlayer.dataProvider()
allAttrs = vprovider.attributeIndexes()
vprovider.select( allAttrs )
fields = vprovider.fields()
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, QGis.WKBPoint, vprovider.crs() )
inFeat = QgsFeature()
outFeat = QgsFeature()
nFeat = vprovider.featureCount()
nElement = 0
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
while vprovider.nextFeature( inFeat ):
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
inGeom = inFeat.geometry()
atMap = inFeat.attributeMap()
outGeom = QgsGeometry(inGeom.centroid())
if outGeom is None:
return "math_error"
outFeat.setAttributeMap( atMap )
outFeat.setGeometry( outGeom )
writer.addFeature( outFeat )
del writer
return True
def delaunay_triangulation( self ):
import voronoi
from sets import Set
vprovider = self.vlayer.dataProvider()
allAttrs = vprovider.attributeIndexes()
vprovider.select( allAttrs )
fields = {
0 : QgsField( "POINTA", QVariant.Double ),
1 : QgsField( "POINTB", QVariant.Double ),
2 : QgsField( "POINTC", QVariant.Double ) }
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, QGis.WKBPolygon, vprovider.crs() )
inFeat = QgsFeature()
c = voronoi.Context()
pts = []
ptDict = {}
ptNdx = -1
while vprovider.nextFeature(inFeat):
geom = QgsGeometry(inFeat.geometry())
point = geom.asPoint()
x = point.x()
y = point.y()
pts.append((x, y))
ptNdx +=1
ptDict[ptNdx] = inFeat.id()
if len(pts) < 3:
return False
uniqueSet = Set(item for item in pts)
ids = [pts.index(item) for item in uniqueSet]
sl = voronoi.SiteList([voronoi.Site(*i) for i in uniqueSet])
c.triangulate = True
voronoi.voronoi(sl, c)
triangles = c.triangles
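# Added note: each entry of c.triangles is assumed to be a triple of site indices
# into the deduplicated point list, which is how it is consumed in the loop below.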
feat = QgsFeature()
nFeat = len( triangles )
nElement = 0
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
for triangle in triangles:
indicies = list(triangle)
indicies.append(indicies[0])
polygon = []
step = 0
for index in indicies:
vprovider.featureAtId(ptDict[ids[index]], inFeat, True, allAttrs)
geom = QgsGeometry(inFeat.geometry())
point = QgsPoint(geom.asPoint())
polygon.append(point)
if step <= 3: feat.addAttribute(step, QVariant(ids[index]))
step += 1
geometry = QgsGeometry().fromPolygon([polygon])
feat.setGeometry(geometry)
writer.addFeature(feat)
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
del writer
return True
def voronoi_polygons( self ):
vprovider = self.vlayer.dataProvider()
allAttrs = vprovider.attributeIndexes()
vprovider.select( allAttrs )
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
vprovider.fields(), QGis.WKBPolygon, vprovider.crs() )
inFeat = QgsFeature()
outFeat = QgsFeature()
extent = self.vlayer.extent()
extraX = extent.height()*(self.myParam/100.00)
extraY = extent.width()*(self.myParam/100.00)
height = extent.height()
width = extent.width()
c = voronoi.Context()
pts = []
ptDict = {}
ptNdx = -1
while vprovider.nextFeature(inFeat):
geom = QgsGeometry(inFeat.geometry())
point = geom.asPoint()
x = point.x()-extent.xMinimum()
y = point.y()-extent.yMinimum()
pts.append((x, y))
ptNdx +=1
ptDict[ptNdx] = inFeat.id()
self.vlayer = None
if len(pts) < 3:
return False
uniqueSet = Set(item for item in pts)
ids = [pts.index(item) for item in uniqueSet]
sl = voronoi.SiteList([voronoi.Site(i[0], i[1], sitenum=j) for j, i in enumerate(uniqueSet)])
voronoi.voronoi(sl, c)
inFeat = QgsFeature()
nFeat = len(c.polygons)
nElement = 0
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
for site, edges in c.polygons.iteritems():
vprovider.featureAtId(ptDict[ids[site]], inFeat, True, allAttrs)
lines = self.clip_voronoi(edges, c, width, height, extent, extraX, extraY)
geom = QgsGeometry.fromMultiPoint(lines)
geom = QgsGeometry(geom.convexHull())
outFeat.setGeometry(geom)
outFeat.setAttributeMap(inFeat.attributeMap())
writer.addFeature(outFeat)
nElement += 1
self.emit(SIGNAL("runStatus(PyQt_PyObject)" ), nElement)
del writer
return True
def clip_voronoi(self, edges, c, width, height, extent, exX, exY):
""" Clip voronoi function based on code written for Inkscape
Copyright (C) 2010 Alvin Penner, [email protected]
"""
def clip_line(x1, y1, x2, y2, w, h, x, y):
if x1 < 0-x and x2 < 0-x:
return [0, 0, 0, 0]
if x1 > w+x and x2 > w+x:
return [0, 0, 0, 0]
if x1 < 0-x:
y1 = (y1*x2 - y2*x1)/(x2 - x1)
x1 = 0-x
if x2 < 0-x:
y2 = (y1*x2 - y2*x1)/(x2 - x1)
x2 = 0-x
if x1 > w+x:
y1 = y1 + (w+x - x1)*(y2 - y1)/(x2 - x1)
x1 = w+x
if x2 > w+x:
y2 = y1 + (w+x - x1)*(y2 - y1)/(x2 - x1)
x2 = w+x
if y1 < 0-y and y2 < 0-y:
return [0, 0, 0, 0]
if y1 > h+y and y2 > h+y:
return [0, 0, 0, 0]
if x1 == x2 and y1 == y2:
return [0, 0, 0, 0]
if y1 < 0-y:
x1 = (x1*y2 - x2*y1)/(y2 - y1)
y1 = 0-y
if y2 < 0-y:
x2 = (x1*y2 - x2*y1)/(y2 - y1)
y2 = 0-y
if y1 > h+y:
x1 = x1 + (h+y - y1)*(x2 - x1)/(y2 - y1)
y1 = h+y
if y2 > h+y:
x2 = x1 + (h+y - y1)*(x2 - x1)/(y2 - y1)
y2 = h+y
return [x1, y1, x2, y2]
lines = []
hasXMin = False
hasYMin = False
hasXMax = False
hasYMax = False
for edge in edges:
if edge[1] >= 0 and edge[2] >= 0: # two vertices
[x1, y1, x2, y2] = clip_line(c.vertices[edge[1]][0], c.vertices[edge[1]][1], c.vertices[edge[2]][0], c.vertices[edge[2]][1], width, height, exX, exY)
elif edge[1] >= 0: # only one vertex
if c.lines[edge[0]][1] == 0: # vertical line
xtemp = c.lines[edge[0]][2]/c.lines[edge[0]][0]
if c.vertices[edge[1]][1] > (height+exY)/2:
ytemp = height+exY
else:
ytemp = 0-exX
else:
xtemp = width+exX
ytemp = (c.lines[edge[0]][2] - (width+exX)*c.lines[edge[0]][0])/c.lines[edge[0]][1]
[x1, y1, x2, y2] = clip_line(c.vertices[edge[1]][0], c.vertices[edge[1]][1], xtemp, ytemp, width, height, exX, exY)
elif edge[2] >= 0: # only one vertex
if c.lines[edge[0]][1] == 0: # vertical line
xtemp = c.lines[edge[0]][2]/c.lines[edge[0]][0]
if c.vertices[edge[2]][1] > (height+exY)/2:
ytemp = height+exY
else:
ytemp = 0.0-exY
else:
xtemp = 0.0-exX
ytemp = c.lines[edge[0]][2]/c.lines[edge[0]][1]
[x1, y1, x2, y2] = clip_line(xtemp, ytemp, c.vertices[edge[2]][0], c.vertices[edge[2]][1], width, height, exX, exY)
if x1 or x2 or y1 or y2:
lines.append(QgsPoint(x1+extent.xMinimum(),y1+extent.yMinimum()))
lines.append(QgsPoint(x2+extent.xMinimum(),y2+extent.yMinimum()))
if 0-exX in (x1, x2):
hasXMin = True
if 0-exY in (y1, y2):
hasYMin = True
if height+exY in (y1, y2):
hasYMax = True
if width+exX in (x1, x2):
hasXMax = True
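# Added note: when the clipped cell touches two sides of the buffered extent, the
# matching corner point is appended below so that the convex hull built in
# voronoi_polygons() also covers that corner of the extent.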
if hasXMin:
if hasYMax:
lines.append(QgsPoint(extent.xMinimum()-exX, height+extent.yMinimum()+exY))
if hasYMin:
lines.append(QgsPoint(extent.xMinimum()-exX, extent.yMinimum()-exY))
if hasXMax:
if hasYMax:
lines.append(QgsPoint(width+extent.xMinimum()+exX, height+extent.yMinimum()+exY))
if hasYMin:
lines.append(QgsPoint(width+extent.xMinimum()+exX, extent.yMinimum()-exY))
return lines
def layer_extent( self ):
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, 0 ) )
fields = {
0 : QgsField( "MINX", QVariant.Double ),
1 : QgsField( "MINY", QVariant.Double ),
2 : QgsField( "MAXX", QVariant.Double ),
3 : QgsField( "MAXY", QVariant.Double ),
4 : QgsField( "CNTX", QVariant.Double ),
5 : QgsField( "CNTY", QVariant.Double ),
6 : QgsField( "AREA", QVariant.Double ),
7 : QgsField( "PERIM", QVariant.Double ),
8 : QgsField( "HEIGHT", QVariant.Double ),
9 : QgsField( "WIDTH", QVariant.Double ) }
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, QGis.WKBPolygon, self.vlayer.srs() )
rect = self.vlayer.extent()
minx = rect.xMinimum()
miny = rect.yMinimum()
maxx = rect.xMaximum()
maxy = rect.yMaximum()
height = rect.height()
width = rect.width()
cntx = minx + ( width / 2.0 )
cnty = miny + ( height / 2.0 )
area = width * height
perim = ( 2 * width ) + (2 * height )
rect = [
QgsPoint( minx, miny ),
QgsPoint( minx, maxy ),
QgsPoint( maxx, maxy ),
QgsPoint( maxx, miny ),
QgsPoint( minx, miny ) ]
geometry = QgsGeometry().fromPolygon( [ rect ] )
feat = QgsFeature()
feat.setGeometry( geometry )
feat.setAttributeMap( {
0 : QVariant( minx ),
1 : QVariant( miny ),
2 : QVariant( maxx ),
3 : QVariant( maxy ),
4 : QVariant( cntx ),
5 : QVariant( cnty ),
6 : QVariant( area ),
7 : QVariant( perim ),
8 : QVariant( height ),
9 : QVariant( width ) } )
writer.addFeature( feat )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, 100 ) )
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
del writer
return True
def simpleMeasure( self, inGeom ):
if inGeom.wkbType() in (QGis.WKBPoint, QGis.WKBPoint25D):
pt = QgsPoint()
pt = inGeom.asPoint()
attr1 = pt.x()
attr2 = pt.y()
else:
measure = QgsDistanceArea()
attr1 = measure.measure(inGeom)
if inGeom.type() == QGis.Polygon:
attr2 = self.perimMeasure( inGeom, measure )
else:
attr2 = attr1
return ( attr1, attr2 )
def perimMeasure( self, inGeom, measure ):
value = 0.00
if inGeom.isMultipart():
poly = inGeom.asMultiPolygon()
for k in poly:
for j in k:
value = value + measure.measureLine( j )
else:
poly = inGeom.asPolygon()
for k in poly:
value = value + measure.measureLine( k )
return value
def checkForField( self, L, e ):
e = QString( e ).toLower()
fieldRange = range( 0,len( L ) )
for item in fieldRange:
if L[ item ].toLower() == e:
return True, item
return False, len( L )
def checkGeometryFields( self, vlayer ):
vprovider = vlayer.dataProvider()
nameList = []
fieldList = vprovider.fields()
geomType = vlayer.geometryType()
for i in fieldList.keys():
nameList.append( fieldList[ i ].name().toLower() )
if geomType == QGis.Polygon:
plp = "Poly"
( found, index1 ) = self.checkForField( nameList, "AREA" )
if not found:
field = QgsField( "AREA", QVariant.Double, "double", 10, 6, self.tr("Polygon area") )
index1 = len( fieldList.keys() )
fieldList[ index1 ] = field
( found, index2 ) = self.checkForField( nameList, "PERIMETER" )
if not found:
field = QgsField( "PERIMETER", QVariant.Double, "double", 10, 6, self.tr("Polygon perimeter") )
index2 = len( fieldList.keys() )
fieldList[ index2 ] = field
elif geomType == QGis.Line:
plp = "Line"
(found, index1) = self.checkForField(nameList, "LENGTH")
if not found:
field = QgsField("LENGTH", QVariant.Double, "double", 10, 6, self.tr("Line length") )
index1 = len(fieldList.keys())
fieldList[index1] = field
index2 = index1
else:
plp = "Point"
(found, index1) = self.checkForField(nameList, "XCOORD")
if not found:
field = QgsField("XCOORD", QVariant.Double, "double", 10, 6, self.tr("Point x coordinate") )
index1 = len(fieldList.keys())
fieldList[index1] = field
(found, index2) = self.checkForField(nameList, "YCOORD")
if not found:
field = QgsField("YCOORD", QVariant.Double, "double", 10, 6, self.tr("Point y coordinate") )
index2 = len(fieldList.keys())
fieldList[index2] = field
return (fieldList, index1, index2)
def extractAsLine( self, geom ):
multi_geom = QgsGeometry()
temp_geom = []
if geom.type() == 2:
if geom.isMultipart():
multi_geom = geom.asMultiPolygon()
for i in multi_geom:
temp_geom.extend(i)
else:
multi_geom = geom.asPolygon()
temp_geom = multi_geom
return temp_geom
else:
return []
def singleToMultiGeom(self, wkbType):
try:
if wkbType in (QGis.WKBPoint, QGis.WKBMultiPoint,
QGis.WKBPoint25D, QGis.WKBMultiPoint25D):
return QGis.WKBMultiPoint
elif wkbType in (QGis.WKBLineString, QGis.WKBMultiLineString,
QGis.WKBMultiLineString25D, QGis.WKBLineString25D):
return QGis.WKBMultiLineString
elif wkbType in (QGis.WKBPolygon, QGis.WKBMultiPolygon,
QGis.WKBMultiPolygon25D, QGis.WKBPolygon25D):
return QGis.WKBMultiPolygon
else:
return QGis.WKBUnknown
except Exception, err:
print str(err)
def extractAsSingle( self, geom ):
multi_geom = QgsGeometry()
temp_geom = []
if geom.type() == 0:
if geom.isMultipart():
multi_geom = geom.asMultiPoint()
for i in multi_geom:
temp_geom.append( QgsGeometry().fromPoint ( i ) )
else:
temp_geom.append( geom )
elif geom.type() == 1:
if geom.isMultipart():
multi_geom = geom.asMultiPolyline()
for i in multi_geom:
temp_geom.append( QgsGeometry().fromPolyline( i ) )
else:
temp_geom.append( geom )
elif geom.type() == 2:
if geom.isMultipart():
multi_geom = geom.asMultiPolygon()
for i in multi_geom:
temp_geom.append( QgsGeometry().fromPolygon( i ) )
else:
temp_geom.append( geom )
return temp_geom
def extractAsMulti( self, geom ):
temp_geom = []
if geom.type() == 0:
if geom.isMultipart():
return geom.asMultiPoint()
else:
return [ geom.asPoint() ]
elif geom.type() == 1:
if geom.isMultipart():
return geom.asMultiPolyline()
else:
return [ geom.asPolyline() ]
else:
if geom.isMultipart():
return geom.asMultiPolygon()
else:
return [ geom.asPolygon() ]
def convertGeometry( self, geom_list, vType ):
if vType == 0:
return QgsGeometry().fromMultiPoint(geom_list)
elif vType == 1:
return QgsGeometry().fromMultiPolyline(geom_list)
else:
return QgsGeometry().fromMultiPolygon(geom_list)
| gpl-2.0 | 3,456,899,687,871,464,000 | 37.261822 | 159 | 0.624122 | false |
nnugumanov/yandex-tank | yandextank/plugins/Telegraf/decoder.py | 5 | 2675 | """Known metrics decoder"""
import logging
logger = logging.getLogger(__name__)
class MetricsDecoder(object):
def __init__(self):
"""
translates telegraf metric names into common Monitoring metric names
translates `uncommon` names to `custom:%s`s
"""
self.known_metrics = {
'mem_used': 'Memory_used',
'mem_free': 'Memory_free',
'mem_buffered': 'Memory_buff',
'mem_cached': 'Memory_cached',
'kernel_context_switches': 'System_csw',
'kernel_interrupts': 'System_int',
'kernel_processes_forked': 'System_forks',
'processes_total': 'System_numproc',
'processes_total_threads': 'System_numthreads',
'system_load1': 'System_la1',
'system_load5': 'System_la5',
'system_load15': 'System_la15',
'nstat_TcpRetransSegs': 'Net_retransmit',
# those guys became inactive due to net interface names and disk ids
# we don't need unknown id data here
# 'net_packets_recv': 'Net_rx',
# 'net_packets_sent': 'Net_tx',
# 'net_bytes_recv': 'Net_recv',
# 'net_bytes_sent': 'Net_send',
# 'diskio_read_bytes': 'Disk_read',
# 'diskio_write_bytes': 'Disk_write',
# ----------
# remove this crunch after front refactoring
# 'cpu-cpu-total_usage_user': 'CPU_user',
# 'cpu-cpu-total_usage_system': 'CPU_system',
# 'cpu-cpu-total_usage_idle': 'CPU_idle',
# 'cpu-cpu-total_usage_iowait': 'CPU_iowait',
# 'cpu-cpu-total_usage_irq': 'CPU_irq',
# 'cpu-cpu-total_usage_nice': 'CPU_nice',
# 'cpu-cpu-total_usage_softirq': 'CPU_softirq',
# 'cpu-cpu-total_usage_steal': 'CPU_steal',
# 'cpu-cpu-total_usage_guest': 'CPU_guest'
}
self.diff_metrics = {
'cpu': [],
'mem': [],
'net': ['packets_recv', 'packets_sent', 'bytes_recv', 'bytes_sent'],
'nstat': ['TcpRetransSegs'],
'net_response': [],
'kernel': ['context_switches', 'interrupts', 'processes_forked', 'vmstat_pgfault', 'vmstat_pgmajfault'],
'diskio': [
'read_bytes', 'write_bytes', 'io_time', 'read_time', 'reads',
'write_time', 'writes'
],
'custom': []
}
def find_common_names(self, key):
if key in self.known_metrics:
return self.known_metrics[key]
else:
return 'custom:{}'.format(key)
decoder = MetricsDecoder()
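# Added usage example (illustrative): decoder.find_common_names('mem_used') returns
# 'Memory_used', while an unknown key such as 'my_metric' maps to 'custom:my_metric'.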
| lgpl-2.1 | -3,474,733,972,796,811,000 | 36.676056 | 116 | 0.515514 | false |
emoronayuso/beeton | asterisk-bee/asteriskbee/api_dialplan/views.py | 1 | 24397 | # Create your views here.
#encoding:utf-8
from django.shortcuts import render_to_response
#For using the HttpResponseRedirect function (page redirects)
from django.http import HttpResponseRedirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
#from django.conf import settings
#Include the model so we can create the forms
from models import *
#######from models import linea
import sets
#Include the ficheros_audio and moh models
from asteriskbee.api_admin_audio.models import ficheros_audio
from asteriskbee.api_musica_espera.models import moh
#Python DEBUG tools####
import pdb
###USAGE: pdb.set_trace()
###(pdb) <press n for next>
########module for running shell commands####
### COMMAND TO RUN -> echo '[nombre_contexto]' >> /etc/asterisk/extensions.conf
### and --> echo 'switch => Realtime/@' >> /etc/asterisk/extensions.conf
### -x implies -r
import subprocess
import os
#Function that shows the list of dialplan contexts
@login_required(login_url='/login')
def lista_contextos(request):
mapa_contextos = {}
##Get the list of contexts from the dialplan lines
mapa = linea.objects.values('context').distinct()
#Create a dictionary to manage the contexts
for contexto in mapa:
mapa_contextos[contexto['context']] = contexto['context']
return render_to_response('dialplan/dialplan.html',{'mapa_contextos' : mapa_contextos },context_instance=RequestContext(request))
@login_required(login_url='/login')
def add_contexto(request):
if request.method == 'POST':
contexto_form = contextoForm(request.POST)
##Check that the fields are not null and match the defined types
if contexto_form.is_valid():
#Save ALL the data in the database and redirect
# contexto_form.save()
##Before adding the lines to the dialplan configuration file
##/etc/asterisk/extensions.conf:
## [nombre_del_contexto]
## switch => Realtime/@
##We should check here that no two contexts exist with the same name
### STILL PENDING #####
###Add the lines to the dialplan to tell Asterisk to
###use the Realtime database configuration
##Create the file handler in append mode 'a'
fichero_extensions = open('/etc/asterisk/beeton_extensions.conf', 'a')
contexto = request.POST.get('context')
##Write to the file
fichero_extensions.write("["+contexto+"]\nswitch => Realtime/@\n")
# Close the file
fichero_extensions.close()
##Now we only need to create a first line for the context, for example for extension 1 using the wait application
l = linea(context=contexto,exten=1,priority=1,appdata='esperar.py')
l.save()
api = aplicaciones.objects.get(script='esperar.py')
param = parametros(id_linea=l,id_api=api,param1=1)
param.save()
return HttpResponseRedirect('/admin/dialplan/')
else:
contexto_form = contextoForm
lista = {}
return render_to_response('dialplan/crear_contexto.html',{'lista_contextos' : lista, 'formulario_contexto' : contexto_form },context_instance=RequestContext(request))
###Function that displays the lines of the selected context and allows each one to be modified
@login_required(login_url='/login')
def mod_contexto(request):
#List of possible errors
errores = []
##First, list the lines of the selected context
contexto_sele = request.GET.get('context')
#Information about the options that affect all the extensions defined in the context
opciones_contexto = []
lista_api_op_cont = aplicacion_opcion_contexto.objects.filter(contexto=contexto_sele)
for api in lista_api_op_cont:
par = api_opcion_contexto_param.objects.get(id_api_opc_contexto=api.id)
opciones_contexto.append(api.id_api.texto_opcion_contexto+par.parametro)
##Query all the lines that have the selected context
lineas_sele = linea.objects.filter(context=contexto_sele)
linea_sele = {}
linea_form = {}
aplicacion_form = {}
aplicacion_nom_param_form = {}
aplicaciones_des_param_form = {}
aplicacion_acti_temp_param_form = {}
add_linea = {}
lista_api = aplicaciones.objects.all()
lista_contextos = linea.objects.values('context').distinct()
parametros_form = {}
datos_api = {}
########Change this if more parameters for the applications are added in the future##########
num_param = 10#############################################################################
###########################################################################################
id_api = {}
nombre_api = {}
lista_audios = ficheros_audio.objects.all()
mapa_audio = {}
for audio in lista_audios:
ls1 = str(audio.fichero_audio).split('/')
ls2 = ls1[2].split('.')
opc = ls2[0]
mapa_audio[str(audio.nombre)]= opc
lista_opciones = mapa_audio.keys()
lista_valor_ast = mapa_audio.values()
mohs = moh.objects.all()
lista_moh = {}
for m in mohs:
lista_moh[str(m.name)] = str(m.name)
lista_moh = lista_moh.values()
#pdb.set_trace()
################################If MODIFY has been selected for some line#########################################
if request.GET.get('id_linea'):
#Get the id of the selected line
id_linea_sele = request.GET.get('id_linea')
linea_sele = linea.objects.filter(id=id_linea_sele)
##Show the form to modify the line
##If the 'modify line' option of the selected line is clicked
if request.method == 'POST':
##Collect the data from the linea and parametros forms
##INSTANTIATING THEM FIRST, since otherwise a new line would be created. IMPORTANT!!!!
datos_linea = linea.objects.get(pk=id_linea_sele)
linea_form = lineaContextoForm(request.POST, instance=datos_linea)
####DO THE SAME WITH THE PARAMETERS FORM
datos_param = parametros.objects.get(id_linea=id_linea_sele)
parametros_form = parametrosLineaContextoForm(request.POST,instance=datos_param)
#linea_form = lineaContextoForm(request.POST)
#parametros_form = parametrosLineaContextoForm(request.POST)
##Check that the fields are not null and match the defined types
if linea_form.is_valid() and parametros_form.is_valid():
##NOTE: THERE IS NO NEED TO COLLECT ALL THE DATA TO MODIFY THE LINE###
exten = linea_form.cleaned_data['exten']
##########################################################
##################ALL THE VALUES OF ALL THE PARAMETERS##########
param1 = parametros_form.cleaned_data['param1']
param2 = parametros_form.cleaned_data['param2']
param3 = parametros_form.cleaned_data['param3']
param4 = parametros_form.cleaned_data['param4']
param5 = parametros_form.cleaned_data['param5']
param6 = parametros_form.cleaned_data['param6']
param7 = parametros_form.cleaned_data['param7']
param8 = parametros_form.cleaned_data['param8']
param9 = parametros_form.cleaned_data['param9']
param10 = parametros_form.cleaned_data['param10']
####################################################################
#Save ALL the data in the database and redirect
linea_form.save()
parametros_form.save()
return HttpResponseRedirect('/admin/dialplan/mod_contexto/?context='+contexto_sele)
else:
###Show the forms with the data to modify a line
datos_linea = linea.objects.get(pk=request.GET.get('id_linea'))
##This form only shows the extension number of the line
linea_form = lineaContextoForm(instance=datos_linea)
##Show the list of available applications
# lista_api = aplicaciones.objects.all()
datos_param = parametros.objects.get(id_linea=id_linea_sele)
##Get the list of parameters from the selected line
parametros_form = parametrosLineaContextoForm(instance=datos_param)
###From the parametros model, get the application and its form
#datos_api = aplicaciones.objects.get(datos_param.id_api)
datos_api = datos_param.id_api
num_param = datos_api.num_para
aplicacion_form = aplicacionContextoForm(instance=datos_api)
####Get the forms with the names, the descriptions and whether or not the application has a template for each parameter
aplicacion_nom_param_form = aplicacionContextoParamForm(instance=datos_api)
aplicaciones_des_param_form = aplicacionContextoDesParamForm(instance=datos_api)
aplicacion_acti_temp_param_form = aplicacionBitActTemplateParamForm(instance=datos_api)
####################################################END MODIFY LINE###########################################
###################################################ADD NEW LINE##############################################
##If adding a new line to the context has been selected
if request.GET.get('add_linea'):
##This line is used to select the div of the form for adding a new line in the template
add_linea = 1
linea_form = lineaContextoForm
##Show the list of available applications
lista_api = aplicaciones.objects.all()
lista_contextos = linea.objects.values('context').distinct()
###Get the data of the default selected application RESPONDER_LLAMADAS with id 1
datos_api = aplicaciones.objects.get(id=1)
id_api = datos_api.id
nombre_api = datos_api.nombre
#Get the number of parameters of the default selected application
num_param = datos_api.num_para
#Generate the form with the application data for the default selected api
aplicacion_form = aplicacionContextoForm(instance=datos_api)
#Get the parameters form of the default selected application
parametros_form = parametrosLineaContextoForm()
####Get the forms with the names and the descriptions of the application
aplicacion_nom_param_form = aplicacionContextoParamForm(instance=datos_api)
aplicaciones_des_param_form = aplicacionContextoDesParamForm(instance=datos_api)
aplicacion_acti_temp_param_form = aplicacionBitActTemplateParamForm(instance=datos_api)
##If adding a line has been selected and data has been sent via POST to save the data in the DB
if request.method == 'POST':
#pdb.set_trace()
##Collect the form data
linea_form = lineaContextoForm(request.POST)
#Copy the returned form into another one in order to set values for the context, the script and the line priority, since the QueryDict instance of request.POST is immutable
values_post = request.POST.copy()
values_post.__setitem__('context',request.GET.get('context'))
values_post.__setitem__('appdata',request.POST.get('script'))
###Here we would need a condition to know which kind of functionality it is: opcion_contexto or not
##Get the selected api
api_sele = aplicaciones.objects.get(script=request.POST.get('script'))
#####################################################Context option is included#############################
if api_sele.es_opcion_contexto == 1:
##This is where the lines should be added to the dialplan file
#pdb.set_trace()
##Check that this context does not already have the option
if aplicacion_opcion_contexto.objects.filter(contexto=contexto_sele,id_api=api_sele.id).count() == 0 :
##Save in the database the inclusion of the option for that context
api_opc_context = aplicacion_opcion_contexto(id_api=api_sele, contexto=contexto_sele)
api_opc_context.save()
api_opc_context = aplicacion_opcion_contexto.objects.get(id_api=api_sele, contexto=contexto_sele)
#And now save the parameters associated with the new context option if they are valid
parametros_form = parametrosLineaContextoForm(request.POST)
if parametros_form.is_valid() :
##If the parameters are valid
for num in range(1,(api_sele.num_para)+1):
# parametros_form.cleaned_data['paramX']
#pdb.set_trace()
api_op_cont_par = api_opcion_contexto_param(id_api_opc_contexto=api_opc_context, parametro=parametros_form.cleaned_data['param'+str(num)])
api_op_cont_par.save()
# Finally, add the line to the /etc/beeton_extensions.conf file
fichero_extensions_r = open('/etc/asterisk/beeton_extensions.conf', 'r')
fichero_extensions_w = open('/etc/asterisk/beeton_extensions.conf.B', 'w')
for lin in fichero_extensions_r.readlines():
if lin == "["+contexto_sele+"]\n":
fichero_extensions_w.write(lin)
fichero_extensions_w.write(api_sele.lineas_extensions_conf)
else:
fichero_extensions_w.write(lin)
fichero_extensions_r.close()
fichero_extensions_w.close()
fichero_extensions_r = open('/etc/asterisk/beeton_extensions.conf.B', 'r')
fichero_extensions_w = open('/etc/asterisk/beeton_extensions.conf', 'w')
for lin in fichero_extensions_r.readlines():
fichero_extensions_w.write(lin)
fichero_extensions_r.close()
fichero_extensions_w.close()
#Delete the beeton_extensions.conf.B file
os.system('rm /etc/asterisk/beeton_extensions.conf.B')
##And reload the Asterisk configuration files
os.system('asterisk -x reload')
else:
##The context already has this option
errores.append('El contexto '+contexto_sele+' ya tiene la opcion: '+api_sele.nombre)
return HttpResponseRedirect('/admin/dialplan/mod_contexto/?context='+contexto_sele)
########################################################################################
########Everything below runs when the API is not an opcion_contexto
num_lineas = linea.objects.filter(context=request.GET.get('context'),exten=request.POST.get('exten')).count()
#pdb.set_trace()
##If lines with this extension already exist in this context
if num_lineas > 0:
##Slice the queryset and keep the line with the highest priority
linea_prioridad_max = linea.objects.filter(context=request.GET.get('context'),exten=request.POST.get('exten')).order_by('-priority')[0]
##Set the new priority in the POST data to that value plus 1
values_post.__setitem__('priority',linea_prioridad_max.priority+1)
else:
##If no lines exist for this extension in the context, assign priority 1
values_post.__setitem__('priority',1)
##Create a new line form with the updated data
linea_form = lineaContextoForm(values_post)
parametros_form = parametrosLineaContextoForm(request.POST)
##Collect the application form data to get the script name and to assign the id_api to the new row of the parametros table
api_form = aplicacionContextoForm(request.POST)
##Check that the fields are not null and match the defined types; this must be done before accessing the form data through cleaned_data
if linea_form.is_valid() and parametros_form.is_valid():
#pdb.set_trace()
##Get the selected API by script name to obtain the id_api for the parametros model
api_sele = aplicaciones.objects.get(script=request.POST.get('script'))
##Before creating the new parameters row we have to create the new line
linea_form.save()
##Get the line we just created, the one with the highest priority
linea_nueva = linea.objects.filter(context=request.GET.get('context'),exten=request.POST.get('exten')).order_by('-priority')[0]
##########################################################
##################ALL THE VALUES OF ALL THE PARAMETERS##########
para1 = parametros_form.cleaned_data['param1']
para2 = parametros_form.cleaned_data['param2']
para3 = parametros_form.cleaned_data['param3']
para4 = parametros_form.cleaned_data['param4']
para5 = parametros_form.cleaned_data['param5']
para6 = parametros_form.cleaned_data['param6']
para7 = parametros_form.cleaned_data['param7']
para8 = parametros_form.cleaned_data['param8']
para9 = parametros_form.cleaned_data['param9']
para10 = parametros_form.cleaned_data['param10']
####################################################################
#Save ALL the data in the database and redirect
nuevos_parametros = parametros(id_api=api_sele,id_linea=linea_nueva,param1=para1,param2=para2,param3=para3,param4=para4,param5=para5,param6=para6,param7=para7,param8=para8,param9=para9,param10=para10)
nuevos_parametros.save()
return HttpResponseRedirect('/admin/dialplan/mod_contexto/?context='+contexto_sele+'&add_linea=1')
###################################################################################################################
return render_to_response('dialplan/mod_contexto.html',{'opciones_contexto': opciones_contexto,'errores':errores, 'lista_moh':lista_moh,'lista_valores_audio': lista_valor_ast,'lista_opciones_audio' : lista_opciones, 'mapa_audios' : mapa_audio ,'id_api': id_api,'lista_contextos':lista_contextos, 'nombre_api': nombre_api, 'aplicacion_des_param_form':aplicaciones_des_param_form,'aplicacion_nom_param_form':aplicacion_nom_param_form,'aplicacion_temp_param_form': aplicacion_acti_temp_param_form,'num_param':num_param,'param_form' :parametros_form, 'lista_api': lista_api, 'add_linea': add_linea ,'aplicacion_form': aplicacion_form, 'linea_form': linea_form,'linea': linea_sele,'lista_lineas' : lineas_sele, 'contexto' : contexto_sele },context_instance=RequestContext(request))
@login_required(login_url='/login')
def carga_datos_nueva_linea(request):
linea_form = {}
lista_api = {}
datos_api = {}
num_param = {}
aplicacion_form = {}
parametros_form = {}
aplicacion_nom_param_form = {}
aplicaciones_des_param_form = {}
aplicacion_acti_temp_param_form = {}
lista_contextos = linea.objects.values('context').distinct()
lista_audios = ficheros_audio.objects.all()
mapa_audio = {}
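# Map each audio file's display name to its Asterisk basename
# (directory and extension stripped from the stored path).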
for audio in lista_audios:
ls1 = str(audio.fichero_audio).split('/')
ls2 = ls1[1].split('.')
opc = ls2[0]
mapa_audio[str(audio.nombre)]= opc
lista_opciones = mapa_audio.keys()
lista_valor_ast = mapa_audio.values()
mohs = moh.objects.all()
lista_moh = {}
for m in mohs:
lista_moh[str(m.name)] = str(m.name)
lista_moh = lista_moh.values()
nombre_api = {}
if request.GET.get('add_linea') and request.GET.get('id_api'):
id_api = request.GET.get('id_api')
##This line selects the form div used to add a new line in the template
add_linea = 1
linea_form = lineaContextoForm
##Show the list of available applications
lista_api = aplicaciones.objects.all()
datos_api = aplicaciones.objects.get(id=id_api)
#Get the number of parameters of the default selected application
num_param = datos_api.num_para
#Build the form with the data of the default selected API application
aplicacion_form = aplicacionContextoForm(instance=datos_api)
#Get the parameter form of the default selected application
parametros_form = parametrosLineaContextoForm()
####Get the forms with the application's parameter names and descriptions
aplicacion_nom_param_form = aplicacionContextoParamForm(instance=datos_api)
aplicaciones_des_param_form = aplicacionContextoDesParamForm(instance=datos_api)
aplicacion_acti_temp_param_form = aplicacionBitActTemplateParamForm(instance=datos_api)
nombre_api = datos_api.nombre
return render_to_response('dialplan/add_linea.html', {'lista_moh':lista_moh,'lista_valores_audio': lista_valor_ast,'lista_opciones_audio' : lista_opciones,'mapa_audios' : mapa_audio ,'add_linea': add_linea ,'id_api': id_api,'nombre_api': nombre_api,'lista_contextos':lista_contextos, 'linea_form': linea_form, 'lista_api': lista_api, 'num_param': num_param, 'aplicacion_form': aplicacion_form,'param_form' :parametros_form, 'aplicacion_des_param_form':aplicaciones_des_param_form,'aplicacion_temp_param_form': aplicacion_acti_temp_param_form,'aplicacion_nom_param_form':aplicacion_nom_param_form }, context_instance=RequestContext(request) )
##Function that toggles the commented flag in the database
@login_required(login_url='/login')
def mod_linea_comentada(request):
contexto = request.GET.get('context')
extension = request.GET.get('exten')
prioridad = request.GET.get('priority')
##update() returns the number of modified rows in the table
# l1 = linea.objects.filter(context=contexto,exten=extension,priority=prioridad).update(commented=1)
##Fetch the line and check the value of commented
l = linea.objects.get(context=contexto,exten=extension,priority=prioridad)
if(l.commented == 1):
linea.objects.filter(context=contexto,exten=extension,priority=prioridad).update(commented=0)
else:
linea.objects.filter(context=contexto,exten=extension,priority=prioridad).update(commented=1)
##Return the render_to_response result of the mod_contexto function
return mod_contexto(request)
##Function that deletes a line
@login_required(login_url='/login')
def borra_linea(request):
contexto = request.GET.get('context')
extension = request.GET.get('exten')
prioridad = request.GET.get('priority')
lista_api = aplicaciones.objects.all()
lista_lineas = linea.objects.filter(context=contexto)
num = linea.objects.filter(context=contexto,exten=extension,priority=prioridad).count()
if(num >= 1):
l = linea.objects.get(context=contexto,exten=extension,priority=prioridad)
l.delete()
return render_to_response('dialplan/lista_lineas.html',{ 'lista_api': lista_api, 'lista_lineas': lista_lineas },context_instance=RequestContext(request))
@login_required(login_url='/login')
def cambia_linea(request):
id_linea_sele = request.GET.get('id_linea_sele')
#get the selected line
linea_sele = linea.objects.get(id=id_linea_sele)
##Build a list with the ids, in order, of the lines of the given context
li = list(linea.objects.filter(context=request.GET.get('context')).values_list('id',flat=True))
#get the position of the selected line's id within the id list
# pos_linea_sele = li.index(id_linea_sele) failed with a type mismatch: the id from the
# GET parameter is a string while the list holds integers
###############To work around this: ####################
pos_linea_sele = 0
pos_linea_cam = 0
for elem in li:
if int(elem) == int(id_linea_sele):
break
pos_linea_sele = pos_linea_sele + 1
########################################################
if request.GET.get('bajar')=='1' and (pos_linea_sele)<len(li):
pos_linea_cam = pos_linea_sele+1
else:
pos_linea_cam = pos_linea_sele
if request.GET.get('subir')=='1' and (pos_linea_sele)>0:
pos_linea_cam = pos_linea_sele-1
elif request.GET.get('bajar')=='1' and (pos_linea_sele+1)<len(li):
pos_linea_cam = pos_linea_sele+1
else:
pos_linea_cam = pos_linea_sele
#pdb.set_trace()
linea_cam = linea.objects.get(id=li[pos_linea_cam])
##Swap the priority of the two lines if they share the same extension
if linea_sele.exten == linea_cam.exten:
aux = linea_sele.priority
linea_sele.priority = linea_cam.priority
linea_cam.priority = aux
linea_sele.save()
linea_cam.save()
contexto = request.GET.get('context')
lista_api = aplicaciones.objects.all()
lista_lineas = linea.objects.filter(context=contexto)
return render_to_response('dialplan/lista_lineas.html',{ 'lista_api': lista_api, 'lista_lineas': lista_lineas },context_instance=RequestContext(request))
| gpl-3.0 | 2,901,558,179,897,302,500 | 38.097756 | 784 | 0.683773 | false |
wangxianliang/facenet | src/facenet_train_classifier.py | 1 | 24175 | """Training a face recognizer with TensorFlow based on the FaceNet paper
FaceNet: A Unified Embedding for Face Recognition and Clustering: http://arxiv.org/abs/1503.03832
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import sys
import random
import tensorflow as tf
import numpy as np
import importlib
import argparse
import facenet
import lfw
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
import h5py
def main(args):
network = importlib.import_module(args.model_def)
subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
if not os.path.isdir(log_dir): # Create the log directory if it doesn't exist
os.makedirs(log_dir)
model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
if not os.path.isdir(model_dir): # Create the model directory if it doesn't exist
os.makedirs(model_dir)
# Store some git revision info in a text file in the log directory
src_path,_ = os.path.split(os.path.realpath(__file__))
facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))
np.random.seed(seed=args.seed)
random.seed(args.seed)
train_set = facenet.get_dataset(args.data_dir)
if args.filter_filename:
train_set = filter_dataset(train_set, args.filter_filename,
args.filter_percentile, args.filter_min_nrof_images_per_class)
nrof_classes = len(train_set)
print('Model directory: %s' % model_dir)
print('Log directory: %s' % log_dir)
pretrained_model = None
if args.pretrained_model:
pretrained_model = os.path.expanduser(args.pretrained_model)
print('Pre-trained model: %s' % pretrained_model)
if args.lfw_dir:
print('LFW directory: %s' % args.lfw_dir)
# Read the file containing the pairs used for testing
pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
# Get the paths for the corresponding images
lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
with tf.Graph().as_default():
tf.set_random_seed(args.seed)
global_step = tf.Variable(0, trainable=False)
# Get a list of image paths and their labels
image_list, label_list = facenet.get_image_paths_and_labels(train_set)
assert len(image_list)>0, 'The dataset should not be empty'
# Create a queue that produces indices into the image_list and label_list
labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
range_size = array_ops.shape(labels)[0]
index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
shuffle=True, seed=None, capacity=32)
index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
labels_placeholder = tf.placeholder(tf.int64, shape=(None,1), name='labels')
input_queue = data_flow_ops.FIFOQueue(capacity=100000,
dtypes=[tf.string, tf.int64],
shapes=[(1,), (1,)],
shared_name=None, name=None)
enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder], name='enqueue_op')
nrof_preprocess_threads = 4
images_and_labels = []
for _ in range(nrof_preprocess_threads):
filenames, label = input_queue.dequeue()
images = []
for filename in tf.unstack(filenames):
file_contents = tf.read_file(filename)
image = tf.image.decode_png(file_contents)
if args.random_rotate:
image = tf.py_func(facenet.random_rotate_image, [image], tf.uint8)
if args.random_crop:
image = tf.random_crop(image, [args.image_size, args.image_size, 3])
else:
image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
if args.random_flip:
image = tf.image.random_flip_left_right(image)
#pylint: disable=no-member
image.set_shape((args.image_size, args.image_size, 3))
images.append(tf.image.per_image_standardization(image))
images_and_labels.append([images, label])
image_batch, label_batch = tf.train.batch_join(
images_and_labels, batch_size=batch_size_placeholder,
shapes=[(args.image_size, args.image_size, 3), ()], enqueue_many=True,
capacity=4 * nrof_preprocess_threads * args.batch_size,
allow_smaller_final_batch=True)
image_batch = tf.identity(image_batch, 'image_batch')
image_batch = tf.identity(image_batch, 'input')
label_batch = tf.identity(label_batch, 'label_batch')
print('Total number of classes: %d' % nrof_classes)
print('Total number of examples: %d' % len(image_list))
print('Building training graph')
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# force in-place updates of mean and variance estimates
'updates_collections': None,
# Moving averages ends up in the trainable variables collection
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
}
# Build the inference graph
prelogits, _ = network.inference(image_batch, args.keep_probability,
phase_train=phase_train_placeholder, weight_decay=args.weight_decay)
bottleneck = slim.fully_connected(prelogits, args.embedding_size, activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(args.weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params,
scope='Bottleneck', reuse=False)
logits = slim.fully_connected(bottleneck, len(train_set), activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
weights_regularizer=slim.l2_regularizer(args.weight_decay),
scope='Logits', reuse=False)
embeddings = tf.nn.l2_normalize(bottleneck, 1, 1e-10, name='embeddings')
# Add center loss
if args.center_loss_factor>0.0:
prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)
learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
# Calculate the average cross entropy loss across the batch
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=label_batch, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# Calculate the total losses
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')
# Build a Graph that trains the model with one batch of examples and updates the model parameters
train_op = facenet.train(total_loss, global_step, args.optimizer,
learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
# Create a saver
saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
# Start running operations on the Graph.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
tf.train.start_queue_runners(sess=sess)
with sess.as_default():
if pretrained_model:
print('Restoring pretrained model: %s' % pretrained_model)
saver.restore(sess, pretrained_model)
# Training and validation loop
print('Running training')
epoch = 0
while epoch < args.max_nrof_epochs:
step = sess.run(global_step, feed_dict=None)
epoch = step // args.epoch_size
# Train for one epoch
train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, global_step,
total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file)
# Save variables and the metagraph if it doesn't exist already
save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)
# Evaluate on LFW
if args.lfw_dir:
evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder,
embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer)
return model_dir
def find_threshold(var, percentile):
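# Build an empirical CDF of the values and interpolate the value at the
# requested percentile; used to choose a distance-to-center cutoff.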
hist, bin_edges = np.histogram(var, 100)
cdf = np.float32(np.cumsum(hist)) / np.sum(hist)
bin_centers = (bin_edges[:-1]+bin_edges[1:])/2
#plt.plot(bin_centers, cdf)
threshold = np.interp(percentile*0.01, cdf, bin_centers)
return threshold
def filter_dataset(dataset, data_filename, percentile, min_nrof_images_per_class):
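# Drop the images farthest from their class center (above the given percentile)
# and then remove any class left with too few images.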
with h5py.File(data_filename,'r') as f:
distance_to_center = np.array(f.get('distance_to_center'))
label_list = np.array(f.get('label_list'))
image_list = np.array(f.get('image_list'))
distance_to_center_threshold = find_threshold(distance_to_center, percentile)
indices = np.where(distance_to_center>=distance_to_center_threshold)[0]
filtered_dataset = dataset
removelist = []
for i in indices:
label = label_list[i]
image = image_list[i]
if image in filtered_dataset[label].image_paths:
filtered_dataset[label].image_paths.remove(image)
if len(filtered_dataset[label].image_paths)<min_nrof_images_per_class:
removelist.append(label)
ix = sorted(list(set(removelist)), reverse=True)
for i in ix:
del(filtered_dataset[i])
return filtered_dataset
def train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, global_step,
loss, train_op, summary_op, summary_writer, regularization_losses, learning_rate_schedule_file):
batch_number = 0
if args.learning_rate>0.0:
lr = args.learning_rate
else:
lr = facenet.get_learning_rate_from_file(learning_rate_schedule_file, epoch)
index_epoch = sess.run(index_dequeue_op)
label_epoch = np.array(label_list)[index_epoch]
image_epoch = np.array(image_list)[index_epoch]
# Enqueue one epoch of image paths and labels
labels_array = np.expand_dims(np.array(label_epoch),1)
image_paths_array = np.expand_dims(np.array(image_epoch),1)
sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})
# Training loop
train_time = 0
while batch_number < args.epoch_size:
start_time = time.time()
feed_dict = {learning_rate_placeholder: lr, phase_train_placeholder:True, batch_size_placeholder:args.batch_size}
if (batch_number % 100 == 0):
err, _, step, reg_loss, summary_str = sess.run([loss, train_op, global_step, regularization_losses, summary_op], feed_dict=feed_dict)
summary_writer.add_summary(summary_str, global_step=step)
else:
err, _, step, reg_loss = sess.run([loss, train_op, global_step, regularization_losses], feed_dict=feed_dict)
duration = time.time() - start_time
print('Epoch: [%d][%d/%d]\tTime %.3f\tLoss %2.3f\tRegLoss %2.3f' %
(epoch, batch_number+1, args.epoch_size, duration, err, np.sum(reg_loss)))
batch_number += 1
train_time += duration
# Add validation loss and accuracy to summary
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='time/total', simple_value=train_time)
summary_writer.add_summary(summary, step)
return step
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder,
embeddings, labels, image_paths, actual_issame, batch_size, nrof_folds, log_dir, step, summary_writer):
start_time = time.time()
# Run forward pass to calculate embeddings
print('Running forward pass on LFW images')
# Enqueue one epoch of image paths and labels
labels_array = np.expand_dims(np.arange(0,len(image_paths)),1)
image_paths_array = np.expand_dims(np.array(image_paths),1)
sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})
embedding_size = embeddings.get_shape()[1]
nrof_images = len(actual_issame)*2
assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
nrof_batches = nrof_images // batch_size
emb_array = np.zeros((nrof_images, embedding_size))
lab_array = np.zeros((nrof_images,))
for _ in range(nrof_batches):
feed_dict = {phase_train_placeholder:False, batch_size_placeholder:batch_size}
emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
lab_array[lab] = lab
emb_array[lab] = emb
assert np.array_equal(lab_array, np.arange(nrof_images))==True, 'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
_, _, accuracy, val, val_std, far = lfw.evaluate(emb_array, actual_issame, nrof_folds=nrof_folds)
print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
lfw_time = time.time() - start_time
# Add validation loss and accuracy to summary
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
summary.value.add(tag='lfw/val_rate', simple_value=val)
summary.value.add(tag='time/lfw', simple_value=lfw_time)
summary_writer.add_summary(summary, step)
with open(os.path.join(log_dir,'lfw_result.txt'),'at') as f:
f.write('%d\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val))
def save_variables_and_metagraph(sess, saver, summary_writer, model_dir, model_name, step):
# Save the model checkpoint
print('Saving variables')
start_time = time.time()
checkpoint_path = os.path.join(model_dir, 'model-%s.ckpt' % model_name)
saver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)
save_time_variables = time.time() - start_time
print('Variables saved in %.2f seconds' % save_time_variables)
metagraph_filename = os.path.join(model_dir, 'model-%s.meta' % model_name)
save_time_metagraph = 0
if not os.path.exists(metagraph_filename):
print('Saving metagraph')
start_time = time.time()
saver.export_meta_graph(metagraph_filename)
save_time_metagraph = time.time() - start_time
print('Metagraph saved in %.2f seconds' % save_time_metagraph)
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='time/save_variables', simple_value=save_time_variables)
summary.value.add(tag='time/save_metagraph', simple_value=save_time_metagraph)
summary_writer.add_summary(summary, step)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--logs_base_dir', type=str,
help='Directory where to write event logs.', default='~/logs/facenet')
parser.add_argument('--models_base_dir', type=str,
help='Directory where to write trained models and checkpoints.', default='~/models/facenet')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
parser.add_argument('--pretrained_model', type=str,
help='Load a pretrained model before training starts.')
parser.add_argument('--data_dir', type=str,
help='Path to the data directory containing aligned face patches. Multiple directories are separated with colon.',
default='~/datasets/facescrub/fs_aligned:~/datasets/casia/casia-webface-aligned')
parser.add_argument('--model_def', type=str,
help='Model definition. Points to a module containing the definition of the inference graph.', default='models.nn4')
parser.add_argument('--max_nrof_epochs', type=int,
help='Number of epochs to run.', default=500)
parser.add_argument('--batch_size', type=int,
help='Number of images to process in a batch.', default=90)
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=96)
parser.add_argument('--epoch_size', type=int,
help='Number of batches per epoch.', default=1000)
parser.add_argument('--embedding_size', type=int,
help='Dimensionality of the embedding.', default=128)
parser.add_argument('--random_crop',
help='Performs random cropping of training images. If false, the center image_size pixels from the training images are used. ' +
'If the size of the images in the data directory is equal to image_size no cropping is performed', action='store_true')
parser.add_argument('--random_flip',
help='Performs random horizontal flipping of training images.', action='store_true')
parser.add_argument('--random_rotate',
help='Performs random rotations of training images.', action='store_true')
parser.add_argument('--keep_probability', type=float,
help='Keep probability of dropout for the fully connected layer(s).', default=1.0)
parser.add_argument('--weight_decay', type=float,
help='L2 weight regularization.', default=0.0)
parser.add_argument('--decov_loss_factor', type=float,
help='DeCov loss factor.', default=0.0)
parser.add_argument('--center_loss_factor', type=float,
help='Center loss factor.', default=0.0)
parser.add_argument('--center_loss_alfa', type=float,
help='Center update rate for center loss.', default=0.95)
parser.add_argument('--optimizer', type=str, choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'],
help='The optimization algorithm to use', default='ADAGRAD')
parser.add_argument('--learning_rate', type=float,
help='Initial learning rate. If set to a negative value a learning rate ' +
'schedule can be specified in the file "learning_rate_schedule.txt"', default=0.1)
parser.add_argument('--learning_rate_decay_epochs', type=int,
help='Number of epochs between learning rate decay.', default=100)
parser.add_argument('--learning_rate_decay_factor', type=float,
help='Learning rate decay factor.', default=1.0)
parser.add_argument('--moving_average_decay', type=float,
help='Exponential decay for tracking of training parameters.', default=0.9999)
parser.add_argument('--seed', type=int,
help='Random seed.', default=666)
parser.add_argument('--nrof_preprocess_threads', type=int,
help='Number of preprocessing (data loading and augmentation) threads.', default=4)
parser.add_argument('--log_histograms',
help='Enables logging of weight/bias histograms in tensorboard.', action='store_true')
parser.add_argument('--learning_rate_schedule_file', type=str,
help='File containing the learning rate schedule that is used when learning_rate is set to to -1.', default='../data/learning_rate_schedule.txt')
parser.add_argument('--filter_filename', type=str,
help='File containing image data used for dataset filtering', default='')
parser.add_argument('--filter_percentile', type=float,
help='Keep only the percentile images closed to its class center', default=100.0)
parser.add_argument('--filter_min_nrof_images_per_class', type=int,
help='Keep only the classes with this number of examples or more', default=0)
# Parameters for validation on LFW
parser.add_argument('--lfw_pairs', type=str,
help='The file containing the pairs to use for validation.', default='../data/pairs.txt')
parser.add_argument('--lfw_file_ext', type=str,
help='The file extension for the LFW dataset.', default='png', choices=['jpg', 'png'])
parser.add_argument('--lfw_dir', type=str,
help='Path to the data directory containing aligned face patches.', default='')
parser.add_argument('--lfw_batch_size', type=int,
help='Number of images to process in a batch in the LFW test set.', default=100)
parser.add_argument('--lfw_nrof_folds', type=int,
help='Number of folds to use for cross validation. Mainly used for testing.', default=10)
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| mit | -2,985,093,791,566,041,600 | 51.554348 | 167 | 0.662834 | false |
jean/sentry | tests/sentry/tasks/test_commits.py | 1 | 2569 | from __future__ import absolute_import
from mock import patch
from sentry.models import Commit, Deploy, Release, ReleaseHeadCommit, Repository
from sentry.tasks.commits import fetch_commits
from sentry.testutils import TestCase
class FetchCommits(TestCase):
def test_simple(self):
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name='baz')
repo = Repository.objects.create(
name='example',
provider='dummy',
organization_id=org.id,
)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
commit = Commit.objects.create(
organization_id=org.id,
repository_id=repo.id,
key='a' * 40,
)
ReleaseHeadCommit.objects.create(
organization_id=org.id,
repository_id=repo.id,
release=release,
commit=commit,
)
refs = [{
'repository': repo.name,
'commit': 'b' * 40,
}]
release2 = Release.objects.create(
organization_id=org.id,
version='12345678',
)
deploy = Deploy.objects.create(
organization_id=org.id,
release=release2,
environment_id=5,
)
with self.tasks():
with patch.object(Deploy, 'notify_if_ready') as mock_notify_if_ready:
fetch_commits(
release_id=release2.id,
user_id=self.user.id,
refs=refs,
previous_release_id=release.id,
)
commit_list = list(
Commit.objects.filter(
releasecommit__release=release2,
).order_by('releasecommit__order')
)
# see DummyRepositoryProvider.compare_commits
assert len(commit_list) == 3
assert commit_list[0].repository_id == repo.id
assert commit_list[0].organization_id == org.id
assert commit_list[0].key == '62de626b7c7cfb8e77efb4273b1a3df4123e6216'
assert commit_list[1].repository_id == repo.id
assert commit_list[1].organization_id == org.id
assert commit_list[1].key == '58de626b7c7cfb8e77efb4273b1a3df4123e6345'
assert commit_list[2].repository_id == repo.id
assert commit_list[2].organization_id == org.id
assert commit_list[2].key == 'b' * 40
mock_notify_if_ready.assert_called_with(deploy.id, fetch_complete=True)
| bsd-3-clause | -8,042,140,651,912,441,000 | 30.716049 | 81 | 0.57065 | false |
nolanliou/tensorflow | tensorflow/python/training/session_run_hook.py | 9 | 10620 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A SessionRunHook extends `session.run()` calls for the `MonitoredSession`.
SessionRunHooks are useful to track training, report progress, request early
stopping and more. SessionRunHooks use the observer pattern and notify at the
following points:
- when a session starts being used
- before a call to the `session.run()`
- after a call to the `session.run()`
- when the session closed
A SessionRunHook encapsulates a piece of reusable/composable computation that
can piggyback a call to `MonitoredSession.run()`. A hook can add any
ops-or-tensor/feeds to the run call, and when the run call finishes with success
gets the outputs it requested. Hooks are allowed to add ops to the graph in
`hook.begin()`. The graph is finalized after the `begin()` method is called.
There are a few pre-defined hooks:
- StopAtStepHook: Request stop based on global_step
- CheckpointSaverHook: saves checkpoint
- LoggingTensorHook: outputs one or more tensor values to log
- NanTensorHook: Request stop if given `Tensor` contains Nans.
- SummarySaverHook: saves summaries to a summary writer
For more specific needs, you can create custom hooks:
class ExampleHook(SessionRunHook):
def begin(self):
# You can add ops to the graph here.
print('Starting the session.')
self.your_tensor = ...
def after_create_session(self, session, coord):
# When this is called, the graph is finalized and
# ops can no longer be added to the graph.
print('Session created.')
def before_run(self, run_context):
print('Before calling session.run().')
return SessionRunArgs(self.your_tensor)
def after_run(self, run_context, run_values):
print('Done running one step. The value of my tensor: %s',
run_values.results)
if you-need-to-stop-loop:
run_context.request_stop()
def end(self, session):
print('Done with the session.')
To understand how hooks interact with calls to `MonitoredSession.run()`,
look at following code:
with MonitoredTrainingSession(hooks=your_hooks, ...) as sess:
while not sess.should_stop():
sess.run(your_fetches)
Above user code leads to following execution:
call hooks.begin()
sess = tf.Session()
call hooks.after_create_session()
while not stop is requested:
call hooks.before_run()
try:
results = sess.run(merged_fetches, feed_dict=merged_feeds)
except (errors.OutOfRangeError, StopIteration):
break
call hooks.after_run()
call hooks.end()
sess.close()
Note that if sess.run() raises OutOfRangeError or StopIteration then
hooks.after_run() will not be called but hooks.end() will still be called.
If sess.run() raises any other exception then neither hooks.after_run() nor
hooks.end() will be called.
@@SessionRunHook
@@SessionRunArgs
@@SessionRunContext
@@SessionRunValues
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.SessionRunHook")
class SessionRunHook(object):
"""Hook to extend calls to MonitoredSession.run()."""
def begin(self):
"""Called once before using the session.
When called, the default graph is the one that will be launched in the
session. The hook can modify the graph by adding new operations to it.
After the `begin()` call the graph will be finalized and the other callbacks
can not modify the graph anymore. Second call of `begin()` on the same
graph, should not change the graph.
"""
pass
def after_create_session(self, session, coord): # pylint: disable=unused-argument
"""Called when new TensorFlow session is created.
This is called to signal the hooks that a new session has been created. This
has two essential differences with the situation in which `begin` is called:
* When this is called, the graph is finalized and ops can no longer be added
to the graph.
* This method will also be called as a result of recovering a wrapped
session, not only at the beginning of the overall session.
Args:
session: A TensorFlow Session that has been created.
coord: A Coordinator object which keeps track of all threads.
"""
pass
def before_run(self, run_context): # pylint: disable=unused-argument
"""Called before each call to run().
You can return from this call a `SessionRunArgs` object indicating ops or
tensors to add to the upcoming `run()` call. These ops/tensors will be run
together with the ops/tensors originally passed to the original run() call.
The run args you return can also contain feeds to be added to the run()
call.
The `run_context` argument is a `SessionRunContext` that provides
information about the upcoming `run()` call: the originally requested
op/tensors, the TensorFlow Session.
At this point graph is finalized and you can not add ops.
Args:
run_context: A `SessionRunContext` object.
Returns:
None or a `SessionRunArgs` object.
"""
return None
def after_run(self,
run_context, # pylint: disable=unused-argument
run_values): # pylint: disable=unused-argument
"""Called after each call to run().
The `run_values` argument contains results of requested ops/tensors by
`before_run()`.
The `run_context` argument is the same one send to `before_run` call.
`run_context.request_stop()` can be called to stop the iteration.
If `session.run()` raises any exceptions then `after_run()` is not called.
Args:
run_context: A `SessionRunContext` object.
run_values: A SessionRunValues object.
"""
pass
def end(self, session): # pylint: disable=unused-argument
"""Called at the end of session.
The `session` argument can be used in case the hook wants to run final ops,
such as saving a last checkpoint.
If `session.run()` raises exception other than OutOfRangeError or
StopIteration then `end()` is not called.
Note the difference between `end()` and `after_run()` behavior when
`session.run()` raises OutOfRangeError or StopIteration. In that case
`end()` is called but `after_run()` is not called.
Args:
session: A TensorFlow Session that will be soon closed.
"""
pass
@tf_export("train.SessionRunArgs")
class SessionRunArgs(
collections.namedtuple("SessionRunArgs",
["fetches", "feed_dict", "options"])):
"""Represents arguments to be added to a `Session.run()` call.
Args:
fetches: Exactly like the 'fetches' argument to Session.Run().
Can be a single tensor or op, a list of 'fetches' or a dictionary
of fetches. For example:
fetches = global_step_tensor
fetches = [train_op, summary_op, global_step_tensor]
fetches = {'step': global_step_tensor, 'summ': summary_op}
Note that this can recurse as expected:
fetches = {'step': global_step_tensor,
'ops': [train_op, check_nan_op]}
feed_dict: Exactly like the `feed_dict` argument to `Session.Run()`
options: Exactly like the `options` argument to `Session.run()`, i.e., a
config_pb2.RunOptions proto.
"""
def __new__(cls, fetches, feed_dict=None, options=None):
return super(SessionRunArgs, cls).__new__(cls, fetches, feed_dict, options)
@tf_export("train.SessionRunContext")
class SessionRunContext(object):
"""Provides information about the `session.run()` call being made.
Provides information about original request to `Session.Run()` function.
SessionRunHook objects can stop the loop by calling `request_stop()` of
`run_context`. In the future we may use this object to add more information
about run without changing the Hook API.
"""
def __init__(self, original_args, session):
"""Initializes SessionRunContext."""
self._original_args = original_args
self._session = session
self._stop_requested = False
@property
def original_args(self):
"""A `SessionRunArgs` object holding the original arguments of `run()`.
If user called `MonitoredSession.run(fetches=a, feed_dict=b)`, then this
field is equal to SessionRunArgs(a, b).
Returns:
A `SessionRunArgs` object
"""
return self._original_args
@property
def session(self):
"""A TensorFlow session object which will execute the `run`."""
return self._session
@property
def stop_requested(self):
"""Returns whether a stop is requested or not.
If true, `MonitoredSession` stops iterations.
Returns:
A `bool`
"""
return self._stop_requested
def request_stop(self):
"""Sets stop requested field.
Hooks can use this function to request stop of iterations.
`MonitoredSession` checks whether this is called or not.
"""
self._stop_requested = True
@tf_export("train.SessionRunValues")
class SessionRunValues(
collections.namedtuple("SessionRunValues",
["results", "options", "run_metadata"])):
"""Contains the results of `Session.run()`.
In the future we may use this object to add more information about result of
run without changing the Hook API.
Args:
results: The return values from `Session.run()` corresponding to the fetches
attribute returned in the RunArgs. Note that this has the same shape as
the RunArgs fetches. For example:
fetches = global_step_tensor
=> results = nparray(int)
fetches = [train_op, summary_op, global_step_tensor]
=> results = [None, nparray(string), nparray(int)]
fetches = {'step': global_step_tensor, 'summ': summary_op}
=> results = {'step': nparray(int), 'summ': nparray(string)}
options: `RunOptions` from the `Session.run()` call.
run_metadata: `RunMetadata` from the `Session.run()` call.
"""
| apache-2.0 | 1,037,282,924,206,702,000 | 35.369863 | 84 | 0.690866 | false |
stefanw/froide | froide/problem/models.py | 1 | 1770 | from django.db import models
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from froide.foirequest.models import FoiMessage
USER_PROBLEM_CHOICES = [
('message_not_delivered', _('Your message was not delivered.')),
('attachment_broken', _('The attachments don\'t seem to work.')),
('redaction_needed', _('You need more redaction.')),
('foi_help_needed', _('You need help to understand or reply to this message.')),
('other', _('Something else...')),
]
AUTO_PROBLEM_CHOICES = [
('bounce_publicbody', _('You received a bounce mail from the public body.')),
]
PROBLEM_CHOICES = AUTO_PROBLEM_CHOICES + USER_PROBLEM_CHOICES
class ProblemReport(models.Model):
message = models.ForeignKey(
FoiMessage, on_delete=models.CASCADE
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True, on_delete=models.SET_NULL,
blank=True
)
kind = models.CharField(
max_length=50, choices=PROBLEM_CHOICES
)
timestamp = models.DateTimeField(default=timezone.now)
auto_submitted = models.BooleanField(default=False)
resolved = models.BooleanField(default=False)
description = models.TextField(blank=True)
resolution = models.TextField(blank=True)
resolution_timestamp = models.DateTimeField(null=True, blank=True)
class Meta:
ordering = ('-timestamp',)
verbose_name = _('problem report')
verbose_name_plural = _('problem reports')
def __str__(self):
return self.kind
def get_absolute_url(self):
return self.message.get_absolute_short_url()
def get_absolute_domain_url(self):
return self.message.get_absolute_domain_short_url()
| mit | -7,492,476,918,604,720,000 | 31.181818 | 84 | 0.679096 | false |
rahul003/mxnet | python/mxnet/contrib/io.py | 12 | 3431 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Contrib data iterators for common data formats."""
from __future__ import absolute_import
from ..io import DataIter, DataDesc
from .. import ndarray as nd
class DataLoaderIter(DataIter):
"""Returns an iterator for ``mx.gluon.data.Dataloader`` so gluon dataloader
can be used in symbolic module.
Parameters
----------
loader : mxnet.gluon.data.Dataloader
Gluon dataloader instance
data_name : str, optional
The data name.
label_name : str, optional
The label name.
dtype : str, optional
The dtype specifier, can be float32 or float16
Example usage:
----------
>>> import mxnet as mx
>>> from mxnet.gluon.data.vision import MNIST
>>> from mxnet.gluon.data import DataLoader
>>> train_dataset = MNIST(train=True)
>>> train_data = mx.gluon.data.DataLoader(train_dataset, 32, shuffle=True, num_workers=4)
>>> dataiter = mx.io.DataloaderIter(train_data)
>>> for batch in dataiter:
... batch.data[0].shape
...
(32L, 28L, 28L, 1L)
"""
def __init__(self, loader, data_name='data', label_name='softmax_label', dtype='float32'):
super(DataLoaderIter, self).__init__()
self._loader = loader
self._iter = iter(self._loader)
data, label = next(self._iter)
self.batch_size = data.shape[0]
self.dtype = dtype
self.provide_data = [DataDesc(data_name, data.shape, dtype)]
self.provide_label = [DataDesc(label_name, label.shape, dtype)]
self._current_batch = None
self.reset()
def reset(self):
self._iter = iter(self._loader)
def iter_next(self):
try:
self._current_batch = next(self._iter)
except StopIteration:
self._current_batch = None
return self._current_batch is not None
def getdata(self):
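# Pad the final, smaller batch up to batch_size; getpad() tells the caller
# how many trailing entries are padding and should be ignored.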
if self.getpad():
dshape = self._current_batch[0].shape
ret = nd.empty(shape=([self.batch_size] + list(dshape[1:])))
ret[:dshape[0]] = self._current_batch[0].astype(self.dtype)
return [ret]
return [self._current_batch[0].astype(self.dtype)]
def getlabel(self):
if self.getpad():
lshape = self._current_batch[1].shape
ret = nd.empty(shape=([self.batch_size] + list(lshape[1:])))
ret[:lshape[0]] = self._current_batch[1].astype(self.dtype)
return [ret]
return [self._current_batch[1].astype(self.dtype)]
def getpad(self):
return self.batch_size - self._current_batch[0].shape[0]
def getindex(self):
return None
| apache-2.0 | 2,660,313,115,486,738,000 | 35.115789 | 94 | 0.639464 | false |
rhyolight/nupic.research | projects/speech_commands/noise_examples.py | 1 | 2567 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import print_function
import os
from torchvision import transforms
from htmresearch.frameworks.pytorch.speech_commands_dataset import (
SpeechCommandsDataset, BackgroundNoiseDataset
)
from htmresearch.frameworks.pytorch.audio_transforms import *
import librosa
def save_examples(noise = 0.0, maxVal = 1.0, dataDir="data"):
"""
Generate sample noise files for listening and debugging.
:param noise: noise value for the addNoise transform
:param maxVal: maxVal for the addNoise transform
:param dataDir: root dir containing speech_commands directory
"""
testDataDir = os.path.join(dataDir, "speech_commands", "test")
outDir = os.path.join(dataDir, "noise_examples")
if not os.path.exists(outDir):
os.mkdir(outDir)
# Create noise dataset with noise transform
noiseTransform = transforms.Compose([
LoadAudio(),
FixAudioLength(),
AddNoise(noise, maxVal=maxVal),
])
noiseDataset = SpeechCommandsDataset(
testDataDir, noiseTransform, silence_percentage=0,
)
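# Write out every 100th test utterance as a wav file so the effect of the
# noise transform can be heard directly.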
for i in range(0, 2552, 100):
d = noiseDataset[i]
fname = os.path.join(outDir, noiseDataset.classes[d["target"]] + "_" +
str(i) + "_"
+ str(int(noise*100)) + "_"
+ str(int(maxVal*100))
+ ".wav"
)
print(d["path"], fname)
librosa.output.write_wav(fname, d['samples'], d["sample_rate"])
if __name__ == '__main__':
for maxVal in [1.0, 0.5, 0.25]:
for noise in [0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.5]:
save_examples(noise, maxVal)
| gpl-3.0 | 999,586,541,086,768,900 | 35.671429 | 74 | 0.641215 | false |
eayunstack/python-neutronclient | neutronclient/neutron/v2_0/quota.py | 1 | 8850 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import abc
import argparse
from cliff import lister
from cliff import show
from oslo_serialization import jsonutils
import six
from neutronclient._i18n import _
from neutronclient.common import exceptions
from neutronclient.common import utils
from neutronclient.neutron import v2_0 as neutronV20
def get_tenant_id(args, client):
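# Prefer an explicitly supplied tenant ID (positional argument or --tenant-id);
# otherwise fall back to the tenant that owns the current credentials.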
return (args.pos_tenant_id or args.tenant_id or
client.get_quotas_tenant()['tenant']['tenant_id'])
class DeleteQuota(neutronV20.NeutronCommand):
"""Delete defined quotas of a given tenant."""
resource = 'quota'
def get_parser(self, prog_name):
parser = super(DeleteQuota, self).get_parser(prog_name)
parser.add_argument(
'--tenant-id', metavar='tenant-id',
help=_('The owner tenant ID.'))
parser.add_argument(
'--tenant_id',
help=argparse.SUPPRESS)
parser.add_argument(
'pos_tenant_id',
help=argparse.SUPPRESS, nargs='?')
return parser
def take_action(self, parsed_args):
neutron_client = self.get_client()
tenant_id = get_tenant_id(parsed_args, neutron_client)
obj_deleter = getattr(neutron_client,
"delete_%s" % self.resource)
obj_deleter(tenant_id)
print((_('Deleted %(resource)s: %(tenant_id)s')
% {'tenant_id': tenant_id,
'resource': self.resource}),
file=self.app.stdout)
return
class ListQuota(neutronV20.NeutronCommand, lister.Lister):
"""List quotas of all tenants who have non-default quota values."""
resource = 'quota'
def get_parser(self, prog_name):
parser = super(ListQuota, self).get_parser(prog_name)
return parser
def take_action(self, parsed_args):
neutron_client = self.get_client()
search_opts = {}
self.log.debug('search options: %s', search_opts)
obj_lister = getattr(neutron_client,
"list_%ss" % self.resource)
data = obj_lister(**search_opts)
info = []
collection = self.resource + "s"
if collection in data:
info = data[collection]
_columns = len(info) > 0 and sorted(info[0].keys()) or []
return (_columns, (utils.get_item_properties(s, _columns)
for s in info))
class ShowQuotaBase(neutronV20.NeutronCommand, show.ShowOne):
"""Base class to show quotas of a given tenant."""
resource = "quota"
@abc.abstractmethod
def retrieve_data(self, tenant_id, neutron_client):
"""Retrieve data using neutron client for the given tenant."""
def get_parser(self, prog_name):
parser = super(ShowQuotaBase, self).get_parser(prog_name)
parser.add_argument(
'--tenant-id', metavar='tenant-id',
help=_('The owner tenant ID.'))
parser.add_argument(
'--tenant_id',
help=argparse.SUPPRESS)
# allow people to do neutron quota-show <tenant-id>.
# we use a different name for this because the default will
# override whatever is in the named arg otherwise.
parser.add_argument(
'pos_tenant_id',
help=argparse.SUPPRESS, nargs='?')
return parser
def take_action(self, parsed_args):
neutron_client = self.get_client()
tenant_id = get_tenant_id(parsed_args, neutron_client)
data = self.retrieve_data(tenant_id, neutron_client)
if self.resource in data:
return zip(*sorted(six.iteritems(data[self.resource])))
return
class ShowQuota(ShowQuotaBase):
"""Show quotas for a given tenant."""
def retrieve_data(self, tenant_id, neutron_client):
return neutron_client.show_quota(tenant_id)
class ShowQuotaDefault(ShowQuotaBase):
"""Show default quotas for a given tenant."""
def retrieve_data(self, tenant_id, neutron_client):
return neutron_client.show_quota_default(tenant_id)
class UpdateQuota(neutronV20.NeutronCommand, show.ShowOne):
"""Define tenant's quotas not to use defaults."""
resource = 'quota'
def get_parser(self, prog_name):
parser = super(UpdateQuota, self).get_parser(prog_name)
parser.add_argument(
'--tenant-id', metavar='tenant-id',
help=_('The owner tenant ID.'))
parser.add_argument(
'--tenant_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--network', metavar='networks',
help=_('The limit of networks.'))
parser.add_argument(
'--subnet', metavar='subnets',
help=_('The limit of subnets.'))
parser.add_argument(
'--port', metavar='ports',
help=_('The limit of ports.'))
parser.add_argument(
'--router', metavar='routers',
help=_('The limit of routers.'))
parser.add_argument(
'--floatingip', metavar='floatingips',
help=_('The limit of floating IPs.'))
parser.add_argument(
'--security-group', metavar='security_groups',
help=_('The limit of security groups.'))
parser.add_argument(
'--security-group-rule', metavar='security_group_rules',
help=_('The limit of security groups rules.'))
parser.add_argument(
'--vip', metavar='vips',
help=_('The limit of vips.'))
parser.add_argument(
'--pool', metavar='pools',
help=_('The limit of pools.'))
parser.add_argument(
'--member', metavar='members',
help=_('The limit of pool members.'))
parser.add_argument(
'--health-monitor', metavar='health_monitors',
help=_('The limit of health monitors.'))
parser.add_argument(
'pos_tenant_id',
help=argparse.SUPPRESS, nargs='?')
return parser
def _validate_int(self, name, value):
try:
return_value = int(value)
except Exception:
message = (_('Quota limit for %(name)s must be an integer') %
{'name': name})
raise exceptions.NeutronClientException(message=message)
return return_value
def args2body(self, parsed_args):
quota = {}
for resource in ('network', 'subnet', 'port', 'router', 'floatingip',
'security_group', 'security_group_rule',
'vip', 'pool', 'member', 'health_monitor'):
if getattr(parsed_args, resource):
quota[resource] = self._validate_int(
resource,
getattr(parsed_args, resource))
return {self.resource: quota}
def take_action(self, parsed_args):
neutron_client = self.get_client()
_extra_values = neutronV20.parse_args_to_dict(self.values_specs)
neutronV20._merge_args(self, parsed_args, _extra_values,
self.values_specs)
body = self.args2body(parsed_args)
if self.resource in body:
body[self.resource].update(_extra_values)
else:
body[self.resource] = _extra_values
obj_updator = getattr(neutron_client,
"update_%s" % self.resource)
tenant_id = get_tenant_id(parsed_args, neutron_client)
data = obj_updator(tenant_id, body)
if self.resource in data:
for k, v in six.iteritems(data[self.resource]):
if isinstance(v, list):
value = ""
for _item in v:
if value:
value += "\n"
if isinstance(_item, dict):
value += jsonutils.dumps(_item)
else:
value += str(_item)
data[self.resource][k] = value
elif v is None:
data[self.resource][k] = ''
return zip(*sorted(six.iteritems(data[self.resource])))
else:
return
| apache-2.0 | -786,006,942,532,715,600 | 35.270492 | 78 | 0.576723 | false |
0x1306e6d/Baekjoon | baekjoon/2250.py | 1 | 1524 | """
2250 : Tree Height and Width (트리의 높이와 너비)
URL : https://www.acmicpc.net/problem/2250
Input :
19
1 2 3
2 4 5
3 6 7
4 8 -1
5 9 10
6 11 12
7 13 -1
8 -1 -1
9 14 15
10 -1 -1
11 16 -1
12 -1 -1
13 17 -1
14 -1 -1
15 18 -1
16 -1 -1
17 -1 19
18 -1 -1
19 -1 -1
Output :
3 18
"""
tree = {}
tree2 = {}
def put(root=1, depth=1, width=0):
ndescendants = 1
left = tree[root]['left']
if left is not None:
ndescendants += put(left, depth + 1, width)
width += ndescendants
global tree2
if depth not in tree2:
tree2[depth] = []
tree2[depth].append(width)
right = tree[root]['right']
if right is not None:
ndescendants += put(right, depth + 1, width)
return ndescendants
n = int(input())
root = set(range(1, n + 1))
for nn in range(1, n + 1):
i, left, right = map(int, input().split())
tree[i] = {
'left': left if left != -1 else None,
'right': right if right != -1 else None,
}
if left != -1:
root.remove(left)
if right != -1:
root.remove(right)
root = root.pop()
put(root)
mdepth = 0
mwidth = 0
for depth in sorted(tree2):
width = 1
if len(tree2[depth]) >= 2:
width = max(tree2[depth]) - min(tree2[depth]) + 1
if width > mwidth:
mdepth = depth
mwidth = width
print("{} {}".format(mdepth, mwidth))
| gpl-2.0 | -6,250,782,842,945,087,000 | 17.617284 | 57 | 0.484085 | false |
4dn-dcic/fourfront | src/encoded/upgrade/static_section.py | 2 | 1699 | from snovault import (
upgrade_step,
)
@upgrade_step('static_section', '1', '2')
def static_section_1_2(value, system):
# Rename/move sections
if value['name'] == "help#introduction":
value['name'] = "help.user-guide.data-organization.introduction"
if value['name'] == "help#carousel-place-holder":
value['name'] = "help.user-guide.data-organization.carousel-place-holder"
if value['name'] == "help#introduction2":
value['name'] = "help.user-guide.data-organization.introduction2"
if value['name'] == "help.account-creation#account_creation":
value['name'] = "help.user-guide.account-creation.account_creation"
if value['name'] == "help.getting-started#getting_started":
value['name'] = "help.user-guide.getting-started.getting_started"
if value['name'] == "help.biosample#metadata":
value['name'] = "help.submitter-guide.biosample-metadata.metadata"
if value['name'] == "help.spreadsheet#excel_submission":
value['name'] = "help.submitter-guide.spreadsheet.excel_submission"
if value['name'] == "help.spreadsheet#schema_info":
value['name'] = "help.submitter-guide.spreadsheet.schema_info"
if value['name'] == "help.rest-api#rest_api_submission":
value['name'] = "help.submitter-guide.rest-api.rest_api_submission"
if value['name'] == "help.data-processing-pipelines":
value['name'] = "help.analysis.cwl-docker.data-processing-pipelines"
if value['name'] == "help.spreadsheet#schema_info":
value['name'] = "help.submitter-guide.spreadsheet.schema_info"
if "#" in value['name']:
value['name'] = value['name'].replace('#', '.', 1)
| mit | 9,120,920,084,271,159,000 | 35.148936 | 81 | 0.650383 | false |
paulproteus/django | tests/regressiontests/modeladmin/tests.py | 8 | 50199 | from __future__ import absolute_import, unicode_literals
from datetime import date
from django import forms
from django.conf import settings
from django.contrib.admin.options import (ModelAdmin, TabularInline,
InlineModelAdmin, HORIZONTAL, VERTICAL)
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.validation import validate
from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect
from django.contrib.admin import (SimpleListFilter,
BooleanFieldListFilter)
from django.core.exceptions import ImproperlyConfigured
from django.forms.models import BaseModelFormSet
from django.forms.widgets import Select
from django.test import TestCase
from django.test.utils import override_settings, str_prefix
from django.utils import unittest
from .models import Band, Concert, ValidationTestModel, ValidationTestInlineModel
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
class ModelAdminTests(TestCase):
def setUp(self):
self.band = Band.objects.create(
name='The Doors',
bio='',
sign_date=date(1965, 1, 1),
)
self.site = AdminSite()
# form/fields/fieldsets interaction ##############################
def test_default_fields(self):
ma = ModelAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['name', 'bio', 'sign_date'])
def test_default_fieldsets(self):
# fieldsets_add and fieldsets_change should return a special data structure that
# is used in the templates. They should generate the "right thing" whether we
# have specified a custom form, the fields argument, or nothing at all.
#
# Here's the default case. There are no custom form_add/form_change methods,
# no fields argument, and no fieldsets argument.
ma = ModelAdmin(Band, self.site)
self.assertEqual(ma.get_fieldsets(request),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
def test_field_arguments(self):
        # If we specify the fields argument, fieldsets_add and fieldsets_change should
        # just stick the fields into a fieldsets structure and return it.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual( ma.get_fieldsets(request),
[(None, {'fields': ['name']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name']})])
def test_field_arguments_restricted_on_form(self):
        # If we specify fields or fieldsets, it should restrict the fields on the Form
        # class to the fields specified. This may cause errors to be raised in the db
        # layer if required model fields aren't in fields/fieldsets, but that's
        # preferable to ghost errors where you have a field in your Form class that
        # isn't being displayed because you forgot to add it to fields/fieldsets.
# Using `fields`.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(), ['name'])
self.assertEqual(ma.get_form(request, self.band).base_fields.keys(),
['name'])
# Using `fieldsets`.
class BandAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name']})]
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(), ['name'])
self.assertEqual(ma.get_form(request, self.band).base_fields.keys(),
['name'])
# Using `exclude`.
class BandAdmin(ModelAdmin):
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['name', 'sign_date'])
# You can also pass a tuple to `exclude`.
class BandAdmin(ModelAdmin):
exclude = ('bio',)
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['name', 'sign_date'])
# Using `fields` and `exclude`.
class BandAdmin(ModelAdmin):
fields = ['name', 'bio']
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['name'])
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `ModelAdmin.readonly_fields` and when no
`ModelAdmin.exclude` is defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
readonly_fields = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['sign_date',])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
readonly_fields = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(ma.get_formsets(request))[0]().forms[0].fields.keys(),
['main_band', 'opening_band', 'id', 'DELETE',])
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is overridden if
`ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
exclude = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['bio', 'sign_date',])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(ma.get_formsets(request))[0]().forms[0].fields.keys(),
['main_band', 'opening_band', 'day', 'id', 'DELETE',])
def test_custom_form_validation(self):
# If we specify a form, it should use it allowing custom validation to work
# properly. This won't, however, break any of the admin widgets or media.
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class Meta:
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['name', 'bio', 'sign_date', 'delete'])
self.assertEqual(
type(ma.get_form(request).base_fields['sign_date'].widget),
AdminDateWidget)
def test_form_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `ModelAdmin.get_form()`
overrides all other declarations. Refs #8999.
"""
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['name']
class BandAdmin(ModelAdmin):
exclude = ['sign_date',]
form = AdminBandForm
def get_form(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['bio']
return super(BandAdmin, self).get_form(request, obj, **kwargs)
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['name', 'sign_date',])
def test_formset_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `InlineModelAdmin.get_formset()`
overrides all other declarations. Refs #8999.
"""
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_formset(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['opening_band']
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(ma.get_formsets(request))[0]().forms[0].fields.keys(),
['main_band', 'day', 'transport', 'id', 'DELETE',])
def test_queryset_override(self):
        # If we need to override the queryset of a ModelChoiceField in our custom form,
        # make sure that RelatedFieldWidgetWrapper doesn't mess that up.
band2 = Band(name='The Beatles', bio='', sign_date=date(1962, 1, 1))
band2.save()
class ConcertAdmin(ModelAdmin):
pass
ma = ConcertAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(str(form["main_band"]),
'<select name="main_band" id="id_main_band">\n'
'<option value="" selected="selected">---------</option>\n'
'<option value="%d">The Beatles</option>\n'
'<option value="%d">The Doors</option>\n'
'</select>' % (band2.id, self.band.id))
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
def __init__(self, *args, **kwargs):
super(AdminConcertForm, self).__init__(*args, **kwargs)
self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(str(form["main_band"]),
'<select name="main_band" id="id_main_band">\n'
'<option value="" selected="selected">---------</option>\n'
'<option value="%d">The Doors</option>\n'
'</select>' % self.band.id)
def test_regression_for_ticket_15820(self):
"""
Ensure that `obj` is passed from `InlineModelAdmin.get_fieldsets()` to
`InlineModelAdmin.get_formset()`.
"""
class CustomConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['day']
class ConcertInline(TabularInline):
model = Concert
fk_name = 'main_band'
def get_formset(self, request, obj=None, **kwargs):
if obj:
kwargs['form'] = CustomConcertForm
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
concert = Concert.objects.create(main_band=self.band, opening_band=self.band, day=1)
ma = BandAdmin(Band, self.site)
inline_instances = ma.get_inline_instances(request)
fieldsets = list(inline_instances[0].get_fieldsets(request))
self.assertEqual(fieldsets[0][1]['fields'], ['main_band', 'opening_band', 'day', 'transport'])
fieldsets = list(inline_instances[0].get_fieldsets(request, inline_instances[0].model))
self.assertEqual(fieldsets[0][1]['fields'], ['day'])
# radio_fields behavior ###########################################
def test_default_foreign_key_widget(self):
# First, without any radio_fields specified, the widgets for ForeignKey
# and fields with choices specified ought to be a basic Select widget.
# ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
# they need to be handled properly when type checking. For Select fields, all of
# the choices lists have a first entry of dashes.
cma = ModelAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
Select)
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(
type(cmafa.base_fields['opening_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['day'].widget), Select)
self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
[('', '---------'), (1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget),
Select)
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
def test_foreign_key_as_radio_field(self):
# Now specify all the fields as radio_fields. Widgets should now be
# RadioSelect, and the choices list should have a first entry of 'None' if
# blank=True for the model field. Finally, the widget should have the
# 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
class ConcertAdmin(ModelAdmin):
radio_fields = {
'main_band': HORIZONTAL,
'opening_band': VERTICAL,
'day': VERTICAL,
'transport': HORIZONTAL,
}
cma = ConcertAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['main_band'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['main_band'].widget.choices),
[(self.band.id, 'The Doors')])
self.assertEqual(
type(cmafa.base_fields['opening_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', 'None'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['day'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['day'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
[(1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['transport'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['transport'].widget.choices),
[('', 'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ('transport',)
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['main_band', 'opening_band', 'day'])
class AdminConcertForm(forms.ModelForm):
extra = forms.CharField()
class Meta:
model = Concert
fields = ['extra', 'transport']
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['extra', 'transport'])
class ConcertInline(TabularInline):
form = AdminConcertForm
model = Concert
fk_name = 'main_band'
can_delete = True
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(ma.get_formsets(request))[0]().forms[0].fields.keys(),
['extra', 'transport', 'id', 'DELETE', 'main_band'])
class ValidationTests(unittest.TestCase):
def test_validation_only_runs_in_debug(self):
# Ensure validation only runs when DEBUG = True
try:
settings.DEBUG = True
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
site = AdminSite()
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.",
site.register,
ValidationTestModel,
ValidationTestModelAdmin,
)
finally:
settings.DEBUG = False
site = AdminSite()
site.register(ValidationTestModel, ValidationTestModelAdmin)
def test_raw_id_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('non_existent_field',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('name',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields\[0\]', 'name' must be either a ForeignKey or ManyToManyField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('users',)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_fieldsets_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ({},)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ((),)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]' does not have exactly two elements.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", ()),)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]\[1\]' must be a dictionary.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {}),)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'fields' key is required in ValidationTestModelAdmin.fieldsets\[0\]\[1\] field options dict.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {"fields": ("non_existent_field",)}),)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {"fields": ("name",)}),)
validate(ValidationTestModelAdmin, ValidationTestModel)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {"fields": ("name",)}),)
fields = ["name",]
self.assertRaisesRegexp(
ImproperlyConfigured,
"Both fieldsets and fields are specified in ValidationTestModelAdmin.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name', 'name']})]
self.assertRaisesRegexp(
ImproperlyConfigured,
"There are duplicate field\(s\) in ValidationTestModelAdmin.fieldsets",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fields = ["name", "name"]
self.assertRaisesRegexp(
ImproperlyConfigured,
"There are duplicate field\(s\) in ValidationTestModelAdmin.fields",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_form_validation(self):
class FakeForm(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
form = FakeForm
self.assertRaisesRegexp(
ImproperlyConfigured,
"ValidationTestModelAdmin.form does not inherit from BaseModelForm.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('non_existent_field',)
}),
)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'BandAdmin.fieldsets\[0\]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.",
validate,
BandAdmin,
Band,
)
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
validate(BandAdmin, Band)
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('non_existent_field',)
}),
)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'BandAdmin.fieldsets\[0]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.",
validate,
BandAdmin,
Band,
)
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class Meta:
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('name', 'bio', 'sign_date', 'delete')
}),
)
validate(BandAdmin, Band)
def test_filter_vertical_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_vertical' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("non_existent_field",)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_vertical' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("name",)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_vertical\[0\]' must be a ManyToManyField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("users",)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_filter_horizontal_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_horizontal' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("non_existent_field",)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_horizontal' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("name",)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_horizontal\[0\]' must be a ManyToManyField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("users",)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_radio_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = ()
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields' must be a dictionary.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"non_existent_field": None}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"name": None}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields\['name'\]' is neither an instance of ForeignKey nor does have choices set.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": None}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields\['state'\]' is neither admin.HORIZONTAL nor admin.VERTICAL.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": VERTICAL}
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_prepopulated_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = ()
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields' must be a dictionary.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"non_existent_field": None}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ("non_existent_field",)}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields\['slug'\]\[0\]' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"users": ("name",)}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields\['users'\]' is either a DateTimeField, ForeignKey or ManyToManyField. This isn't allowed.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_display_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('non_existent_field',)
self.assertRaisesRegexp(
ImproperlyConfigured,
str_prefix("ValidationTestModelAdmin.list_display\[0\], %(_)s'non_existent_field' is not a callable or an attribute of 'ValidationTestModelAdmin' or found in the model 'ValidationTestModel'."),
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('users',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display\[0\]', 'users' is a ManyToManyField which is not supported.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_display_links_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display_links' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('non_existent_field',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display_links\[0\]' refers to 'non_existent_field' which is not defined in 'list_display'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('name',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display_links\[0\]' refers to 'name' which is not defined in 'list_display'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
list_display_links = ('name', 'decade_published_in', 'a_method', a_callable)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_filter_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('non_existent_field',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]' refers to 'non_existent_field' which does not refer to a Field.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class RandomClass(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (RandomClass,)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]' is 'RandomClass' which is not a descendant of ListFilter.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', RandomClass),)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]\[1\]' is 'RandomClass' which is not of type FieldListFilter.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
def get_query_set(self, cl, qs):
return qs
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', AwesomeFilter),)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]\[1\]' is 'AwesomeFilter' which is not of type FieldListFilter.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (BooleanFieldListFilter,)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]' is 'BooleanFieldListFilter' which is of type FieldListFilter but is not associated with a field name.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
# Valid declarations below -----------
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('is_active', AwesomeFilter, ('is_active', BooleanFieldListFilter), 'no')
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_per_page_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 'hello'
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_per_page' should be a integer.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 100
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_max_show_all_allowed_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 'hello'
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_max_show_all' should be an integer.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 200
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_search_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
search_fields = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.search_fields' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_date_hierarchy_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'non_existent_field'
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.date_hierarchy' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'name'
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.date_hierarchy is neither an instance of DateField nor DateTimeField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'pub_date'
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_ordering_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.ordering' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('non_existent_field',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.ordering\[0\]' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?', 'name')
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.ordering' has the random ordering marker '\?', but contains other fields as well. Please either remove '\?' or the other fields.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?',)
validate(ValidationTestModelAdmin, ValidationTestModel)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('band__name',)
validate(ValidationTestModelAdmin, ValidationTestModel)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('name',)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_select_related_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = 1
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_select_related' should be a boolean.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = False
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_save_as_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = 1
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.save_as' should be a boolean.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
save_as = True
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_save_on_top_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = 1
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.save_on_top' should be a boolean.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = True
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_inlines_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
inlines = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.inlines' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.inlines\[0\]' does not inherit from BaseModelAdmin.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'model' is a required attribute of 'ValidationTestModelAdmin.inlines\[0\]'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class SomethingBad(object):
pass
class ValidationTestInline(TabularInline):
model = SomethingBad
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.inlines\[0\].model' does not inherit from models.Model.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_fields_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = 10
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.fields' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = ("non_existent_field",)
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.fields' refers to field 'non_existent_field' that is missing from the form.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_fk_name_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "non_existent_field"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.fk_name' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestInlineModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "parent"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_extra_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.extra' should be a integer.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_max_num_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.max_num' should be an integer or None \(default\).",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_formset_validation(self):
class FakeFormSet(object):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = FakeFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.formset' does not inherit from BaseModelFormSet.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class RealModelFormSet(BaseModelFormSet):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = RealModelFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
| bsd-3-clause | -2,683,338,257,506,031,000 | 32.533066 | 205 | 0.59808 | false |
linkdd/errcorrect | errcorrect/exception.py | 1 | 1145 | # -*- coding: utf-8 -*-
from errcorrect.disassembler import disassemble
import sys
def frames_from_traceback(traceback):
"""
Get iterable of frames in traceback.
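    Frames are returned innermost first, i.e. the frame where the exception
    was raised comes first.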
:param traceback: traceback to extract frames from
:type traceback: traceback
:returns: frames
:rtype: iterable
"""
frames = []
curframe = traceback
while curframe is not None:
frames.append(curframe.tb_frame)
curframe = curframe.tb_next
return reversed(frames)
def extract_exception_info():
"""
    Extract information about the current exception using ``sys.exc_info()``.
    The extracted information is:
- exception type
- exception value
- exception traceback
- traceback frames (with disassembled code)
:rtype: dict
"""
errtype, err, traceback = sys.exc_info()
return {
'type': errtype,
'value': err,
'traceback': traceback,
'frames': [
{
'frame': frame,
'instructions': disassemble(frame.f_code, frame.f_lasti)
}
for frame in frames_from_traceback(traceback)
]
}
| mit | -7,012,470,556,888,084,000 | 19.818182 | 72 | 0.593886 | false |
waynegm/OpendTect-Plugins | bin/python/wmpy/PyLops/ex_make_1d_seismic.py | 1 | 2677 | # Generate 1D Synthetic Seismic data for Acoustic Impedance step model
#
# Copyright (C) 2021 Wayne Mogg All rights reserved.
#
# This file may be used under the terms of the MIT License
# (https://github.com/waynegm/OpendTect-External-Attributes/blob/master/LICENSE)
#
# Author: Wayne Mogg
# Date: May, 2021
# Homepage: http://waynegm.github.io/OpendTect-Plugin-Docs/External_Attributes/ExternalAttributes/
#
# Input: Single trace seismic data (only used for sampling and size)
# Output: Output replaced by 1D logAI model, smoother background logAI model or
# reflectivity model filtered by supplied wavelet
#
import sys,os
import numpy as np
from scipy.signal import filtfilt
import pylops
#
# Import the module with the I/O scaffolding of the External Attribute
#
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extattrib as xa
#
# The attribute parameters - keep what you need
#
xa.params = {
'Inputs': ['Input'],
'Output': ['Impedance','Background','Seismic'],
'Wavelet' : {'Type': 'File', 'Value': 'Seismics/*.wvlt'},
'Background Smoother (samples)': {'Type': 'Number', 'Value': 100},
'Parallel' : False
}
#
# Define the compute function
#
def doCompute():
#
# Initialise some constants from the attribute parameters or the SeismicInfo, xa.SI, array
#
dt = xa.SI['zstep']
    nsmooth = int(xa.params['Background Smoother (samples)']['Value'])
waveletfile = xa.params['Wavelet']['Value']
#
# Load the wavelet
#
wavelet = np.loadtxt(waveletfile, comments='!', skiprows=9)
#
# This is the trace processing loop
#
start = True
logAI = []
logAIbackground = []
model = []
while True:
xa.doInput()
#
# After doInput the TraceInfo, xa.TI, array contains information specific to this trace segment - keep what you need
#
ns = xa.TI['nrsamp']
if start:
start = False
vp = 1500 + np.arange(ns) + filtfilt(np.ones(5)/5., 1, np.random.normal(0,100,ns))
rho = 1000 + vp + filtfilt(np.ones(5)/5., 1, np.random.normal(0,30,ns))
vp[ns//2:] += 500
rho[ns//2:] += 100
logAI = np.log(vp*rho)
logAIbackground = filtfilt(np.ones(nsmooth)/float(nsmooth), 1, logAI)
PPop = pylops.avo.poststack.PoststackLinearModelling(wavelet/2, nt0=ns)
model = PPop * logAI
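            # Editorial note: PoststackLinearModelling builds the linearised
            # post-stack modelling operator (time derivative of log-AI convolved
            # with the wavelet), so PPop * logAI is the noise-free synthetic
            # trace returned below as the 'Seismic' output.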
#
# Get the input
#
indata = xa.Input['Input'][0,0,:]
#
# Prepare the output
#
xa.Output['Impedance'] = logAI
xa.Output['Background'] = logAIbackground
xa.Output['Seismic'] = model
xa.doOutput()
#
# Assign the compute function to the attribute
#
xa.doCompute = doCompute
#
# Do it
#
xa.run(sys.argv[1:])
| gpl-3.0 | -1,530,323,425,511,334,700 | 27.478723 | 116 | 0.654838 | false |
ailabitmo/ceur-ws-lod | ceur-ws-pdfs/CeurWsPDFParser/parsers/PdfExtractionLib.py | 1 | 4128 | # -*- coding: UTF-8 -*-
import os,re,codecs, re, sys
if sys.version[0] == '3':
import html.entities as htmlentitydefs
unicode = str
unichr = chr
else:
import htmlentitydefs
path_to_pdf2txt = os.path.join("{0} {1}".format(sys.executable,os.path.dirname(__file__)), 'pdf2txt.py')
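# Editorial note: the line above only concatenates strings to build a command prefix
# such as "<python interpreter> <this directory>/pdf2txt.py"; os.path.join is not
# operating on a real filesystem path here.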
def main():
directory = True
if directory:
input_directory = os.path.join(os.path.dirname(__file__), "pdfs")
for filename in os.listdir(input_directory):
if not filename.endswith(".pdf"):
continue
print(filename)
fullname = os.path.join(input_directory, filename)
res = get_html_and_txt(fullname, add_files = True, update_files = False)
else:
a = 1
def get_html_and_txt(input_filename, add_files = True, update_files = True):
try:
out_inf = {
"html": u"",
"txt": u"",
}
temp_html_file = ''
temp_txt_file = ''
temp_txt_from_html_file = ''
if not add_files:
if not os.path.isdir(os.path.join( os.path.dirname(__file__), 'temp_dir') ):
os.mkdir( os.path.join( os.path.dirname(__file__), 'temp_dir') )
temp_html_file = os.path.join( os.path.dirname(__file__), 'temp_dir', "1.html" )
temp_txt_file = os.path.join( os.path.dirname(__file__), 'temp_dir', "1.txt" )
temp_txt_from_html_file = os.path.join( os.path.dirname(__file__), 'temp_dir', "1.txt_html" )
else:
#pos_dot = os.path.basename(input_filename).rindex(".")
temp_html_file = os.path.join( os.path.dirname(input_filename), os.path.basename(input_filename).replace(".pdf", '.html'))
temp_txt_file = os.path.join( os.path.dirname(input_filename), os.path.basename(input_filename).replace(".pdf", '.txt'))
temp_txt_from_html_file = os.path.join( os.path.dirname(input_filename), os.path.basename(input_filename).replace(".pdf", '.txt_html'))
txt_command = u"{0} -o \"{1}\" \"{2}\"".format(path_to_pdf2txt, temp_txt_file, input_filename)
html_command =u"{0} -o \"{1}\" \"{2}\"".format(path_to_pdf2txt, temp_html_file, input_filename)
if not os.path.exists(temp_txt_file):
#print(txt_command)
#os.system(txt_command)
a = 1
else:
if update_files:
#print(txt_command)
#os.system(txt_command)
a = 1
if not os.path.exists(temp_html_file):
#print(html_command)
os.system(html_command)
else:
if update_files:
#print(html_command)
os.system(html_command)
# fh = codecs.open(temp_txt_file, 'rb')
# out_inf["txt"] = fh.read(os.path.getsize(temp_txt_file)).decode("UTF-8")
# fh.close()
fh = codecs.open(temp_html_file, 'rb')
out_inf["html"] = fh.read(os.path.getsize(temp_html_file)).decode("UTF-8")
fh.close()
out_inf['txt_from_html'] = html2text(out_inf["html"])
wh = codecs.open(temp_txt_from_html_file, 'w', encoding="UTF-8")
wh.write(out_inf['txt_from_html'])
wh.close()
except Exception as err:
print("get_html_and_txt -> {0}".format(err))
finally:
return out_inf
def html2text(html_text):
def char_from_entity(match):
code = htmlentitydefs.name2codepoint.get(match.group(1), 0xFFFD)
return unichr(code)
    # the comments are removed this way
text = re.sub(r"<[Pp][^>]*?(?!</)>", "\n\n", unicode(html_text))
#text = re.sub(r"<[Hh]\d+[^>]*?(?!</)>", "\n\n", unicode(html_text))
text = re.sub(r"<[^>]*?>", " ", text)
text = re.sub(r"&#(\d+);", lambda m: unichr(int(m.group(1))), text)
text = re.sub(r"&([A-Za-z]+);", char_from_entity, text)
text = re.sub(r"\n(?:[ \xA0\t]+\n)+", "\n", text)
return re.sub(r"\n\n+", "\n\n", text.strip())
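# Rough illustration (editorial, not part of the original module):
# html2text(u"<p>Tom &amp; Jerry</p>") returns u"Tom & Jerry" - <p> boundaries become
# blank lines, remaining tags are dropped and character entities are decoded.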
if __name__ == '__main__':
main()
| mit | -1,037,087,723,436,460,500 | 36.317757 | 148 | 0.531463 | false |
shmir/PyIxNetwork | ixnetwork/ixn_protocol_stack.py | 2 | 2278 | """
Classes and utilities to manage IXN protocol stack objects.
@author [email protected]
"""
from ixnetwork.ixn_object import IxnObject
class IxnProtocolStack(IxnObject):
def action(self, oper):
self.execute(oper, self.obj_ref())
def start(self):
self.action('start')
def stop(self):
self.action('stop')
class IxnProtocolEndpoint(IxnProtocolStack):
pass
class IxnEthernetEndpoint(IxnProtocolEndpoint):
def __init__(self, **data):
""" Create new Ethernet endpoint object in the API.
:param parent: parent port object.
"""
data['parent'] = data['parent'].get_child_static('protocolStack')
data['objType'] = 'ethernetEndpoint'
super(self.__class__, self).__init__(**data)
class IxnDcbxEndpoint(IxnProtocolEndpoint):
def __init__(self, **data):
""" Create new DCBX endpoint object in the API.
:param parent: parent port object.
"""
data['parent'] = IxnObject(parent=data['parent'].get_child_static('protocolStack'), objType='ethernet')
data['objType'] = 'dcbxEndpoint'
super(self.__class__, self).__init__(**data)
class IxnFcoeClientEndpoint(IxnProtocolEndpoint):
def __init__(self, **data):
""" Create new FCoE client endpoint object in the API.
:param parent: parent port object.
"""
data['parent'] = IxnObject(parent=data['parent'].get_child_static('protocolStack'), objType='ethernet')
data['objType'] = 'fcoeClientEndpoint'
super(self.__class__, self).__init__(**data)
class IxnFcoeForwarderEndpoint(IxnProtocolEndpoint):
def __init__(self, **data):
""" Create new FCoE forwarder endpoint object in the API.
:param parent: parent port object.
"""
data['parent'] = IxnObject(parent=data['parent'].get_child_static('protocolStack'), objType='ethernet')
data['objType'] = 'fcoeFwdEndpoint'
super(self.__class__, self).__init__(**data)
class IxnRange(IxnProtocolStack):
def __init__(self, **data):
""" Create new range object in the API.
:param parent: parent endpoint object.
"""
data['objType'] = 'range'
super(self.__class__, self).__init__(**data)
| apache-2.0 | 167,224,735,663,011,780 | 25.183908 | 111 | 0.619842 | false |
dbiesecke/dbiesecke.github.io | repo/script.module.urlresolver/lib/urlresolver/plugins/vidmad.py | 13 | 1734 | '''
urlresolver Kodi plugin
Copyright (C) 2016 Gujal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from lib import helpers
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class VidMadResolver(UrlResolver):
name = "vidmad.net"
domains = ["vidmad.net", "tamildrive.com"]
pattern = '(?://|\.)((?:vidmad|tamildrive)\.(?:net|com))/(?:embed-)?([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'User-Agent': common.FF_USER_AGENT}
response = self.net.http_GET(web_url, headers=headers)
html = response.content
if 'Not Found' in html:
raise ResolverError('File Removed')
if 'Video is processing' in html:
raise ResolverError('File still being processed')
sources = helpers.scrape_sources(html)
return helpers.pick_source(sources) + helpers.append_headers(headers)
def get_url(self, host, media_id):
return self._default_get_url(host, media_id)
| mit | 4,340,701,334,570,576,400 | 35.893617 | 87 | 0.675894 | false |
nr-plugins/c4ddev | lib/six/test_six.py | 3 | 29734 | # Copyright (c) 2010-2018 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import operator
import sys
import types
import unittest
import py
import six
def test_add_doc():
def f():
"""Icky doc"""
pass
six._add_doc(f, """New doc""")
assert f.__doc__ == "New doc"
def test_import_module():
from logging import handlers
m = six._import_module("logging.handlers")
assert m is handlers
def test_integer_types():
assert isinstance(1, six.integer_types)
assert isinstance(-1, six.integer_types)
assert isinstance(six.MAXSIZE + 23, six.integer_types)
assert not isinstance(.1, six.integer_types)
def test_string_types():
assert isinstance("hi", six.string_types)
assert isinstance(six.u("hi"), six.string_types)
assert issubclass(six.text_type, six.string_types)
def test_class_types():
class X:
pass
class Y(object):
pass
assert isinstance(X, six.class_types)
assert isinstance(Y, six.class_types)
assert not isinstance(X(), six.class_types)
def test_text_type():
assert type(six.u("hi")) is six.text_type
def test_binary_type():
assert type(six.b("hi")) is six.binary_type
def test_MAXSIZE():
try:
# This shouldn't raise an overflow error.
six.MAXSIZE.__index__()
except AttributeError:
# Before Python 2.6.
pass
py.test.raises(
(ValueError, OverflowError),
operator.mul, [None], six.MAXSIZE + 1)
def test_lazy():
if six.PY3:
html_name = "html.parser"
else:
html_name = "HTMLParser"
assert html_name not in sys.modules
mod = six.moves.html_parser
assert sys.modules[html_name] is mod
assert "htmlparser" not in six._MovedItems.__dict__
try:
import _tkinter
except ImportError:
have_tkinter = False
else:
have_tkinter = True
have_gdbm = True
try:
import gdbm
except ImportError:
try:
import dbm.gnu
except ImportError:
have_gdbm = False
@py.test.mark.parametrize("item_name",
[item.name for item in six._moved_attributes])
def test_move_items(item_name):
"""Ensure that everything loads correctly."""
try:
item = getattr(six.moves, item_name)
if isinstance(item, types.ModuleType):
__import__("six.moves." + item_name)
except AttributeError:
if item_name == "zip_longest" and sys.version_info < (2, 6):
py.test.skip("zip_longest only available on 2.6+")
except ImportError:
if item_name == "winreg" and not sys.platform.startswith("win"):
py.test.skip("Windows only module")
if item_name.startswith("tkinter"):
if not have_tkinter:
py.test.skip("requires tkinter")
if item_name == "tkinter_ttk" and sys.version_info[:2] <= (2, 6):
py.test.skip("ttk only available on 2.7+")
if item_name.startswith("dbm_gnu") and not have_gdbm:
py.test.skip("requires gdbm")
raise
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_parse_moved_attributes])
def test_move_items_urllib_parse(item_name):
"""Ensure that everything loads correctly."""
if item_name == "ParseResult" and sys.version_info < (2, 5):
py.test.skip("ParseResult is only found on 2.5+")
if item_name in ("parse_qs", "parse_qsl") and sys.version_info < (2, 6):
py.test.skip("parse_qs[l] is new in 2.6")
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.parse)
getattr(six.moves.urllib.parse, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_error_moved_attributes])
def test_move_items_urllib_error(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.error)
getattr(six.moves.urllib.error, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_request_moved_attributes])
def test_move_items_urllib_request(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.request)
getattr(six.moves.urllib.request, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_response_moved_attributes])
def test_move_items_urllib_response(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.response)
getattr(six.moves.urllib.response, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_robotparser_moved_attributes])
def test_move_items_urllib_robotparser(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.robotparser)
getattr(six.moves.urllib.robotparser, item_name)
def test_import_moves_error_1():
from six.moves.urllib.parse import urljoin
from six import moves
# In 1.4.1: AttributeError: 'Module_six_moves_urllib_parse' object has no attribute 'urljoin'
assert moves.urllib.parse.urljoin
def test_import_moves_error_2():
from six import moves
assert moves.urllib.parse.urljoin
# In 1.4.1: ImportError: cannot import name urljoin
from six.moves.urllib.parse import urljoin
def test_import_moves_error_3():
from six.moves.urllib.parse import urljoin
# In 1.4.1: ImportError: cannot import name urljoin
from six.moves.urllib_parse import urljoin
def test_from_imports():
from six.moves.queue import Queue
assert isinstance(Queue, six.class_types)
from six.moves.configparser import ConfigParser
assert isinstance(ConfigParser, six.class_types)
def test_filter():
from six.moves import filter
f = filter(lambda x: x % 2, range(10))
assert six.advance_iterator(f) == 1
def test_filter_false():
from six.moves import filterfalse
f = filterfalse(lambda x: x % 3, range(10))
assert six.advance_iterator(f) == 0
assert six.advance_iterator(f) == 3
assert six.advance_iterator(f) == 6
def test_map():
from six.moves import map
assert six.advance_iterator(map(lambda x: x + 1, range(2))) == 1
def test_getoutput():
from six.moves import getoutput
output = getoutput('echo "foo"')
assert output == 'foo'
def test_zip():
from six.moves import zip
assert six.advance_iterator(zip(range(2), range(2))) == (0, 0)
@py.test.mark.skipif("sys.version_info < (2, 6)")
def test_zip_longest():
from six.moves import zip_longest
it = zip_longest(range(2), range(1))
assert six.advance_iterator(it) == (0, 0)
assert six.advance_iterator(it) == (1, None)
class TestCustomizedMoves:
def teardown_method(self, meth):
try:
del six._MovedItems.spam
except AttributeError:
pass
try:
del six.moves.__dict__["spam"]
except KeyError:
pass
def test_moved_attribute(self):
attr = six.MovedAttribute("spam", "foo", "bar")
if six.PY3:
assert attr.mod == "bar"
else:
assert attr.mod == "foo"
assert attr.attr == "spam"
attr = six.MovedAttribute("spam", "foo", "bar", "lemma")
assert attr.attr == "lemma"
attr = six.MovedAttribute("spam", "foo", "bar", "lemma", "theorm")
if six.PY3:
assert attr.attr == "theorm"
else:
assert attr.attr == "lemma"
def test_moved_module(self):
attr = six.MovedModule("spam", "foo")
if six.PY3:
assert attr.mod == "spam"
else:
assert attr.mod == "foo"
attr = six.MovedModule("spam", "foo", "bar")
if six.PY3:
assert attr.mod == "bar"
else:
assert attr.mod == "foo"
def test_custom_move_module(self):
attr = six.MovedModule("spam", "six", "six")
six.add_move(attr)
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
attr = six.MovedModule("spam", "six", "six")
six.add_move(attr)
from six.moves import spam
assert spam is six
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
def test_custom_move_attribute(self):
attr = six.MovedAttribute("spam", "six", "six", "u", "u")
six.add_move(attr)
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
attr = six.MovedAttribute("spam", "six", "six", "u", "u")
six.add_move(attr)
from six.moves import spam
assert spam is six.u
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
def test_empty_remove(self):
py.test.raises(AttributeError, six.remove_move, "eggs")
def test_get_unbound_function():
class X(object):
def m(self):
pass
assert six.get_unbound_function(X.m) is X.__dict__["m"]
def test_get_method_self():
class X(object):
def m(self):
pass
x = X()
assert six.get_method_self(x.m) is x
py.test.raises(AttributeError, six.get_method_self, 42)
def test_get_method_function():
class X(object):
def m(self):
pass
x = X()
assert six.get_method_function(x.m) is X.__dict__["m"]
py.test.raises(AttributeError, six.get_method_function, hasattr)
def test_get_function_closure():
def f():
x = 42
def g():
return x
return g
cell = six.get_function_closure(f())[0]
assert type(cell).__name__ == "cell"
def test_get_function_code():
def f():
pass
assert isinstance(six.get_function_code(f), types.CodeType)
if not hasattr(sys, "pypy_version_info"):
py.test.raises(AttributeError, six.get_function_code, hasattr)
def test_get_function_defaults():
def f(x, y=3, b=4):
pass
assert six.get_function_defaults(f) == (3, 4)
def test_get_function_globals():
def f():
pass
assert six.get_function_globals(f) is globals()
def test_dictionary_iterators(monkeypatch):
def stock_method_name(iterwhat):
"""Given a method suffix like "lists" or "values", return the name
of the dict method that delivers those on the version of Python
we're running in."""
if six.PY3:
return iterwhat
return 'iter' + iterwhat
class MyDict(dict):
if not six.PY3:
def lists(self, **kw):
return [1, 2, 3]
def iterlists(self, **kw):
return iter([1, 2, 3])
f = MyDict.iterlists
del MyDict.iterlists
setattr(MyDict, stock_method_name('lists'), f)
d = MyDict(zip(range(10), reversed(range(10))))
for name in "keys", "values", "items", "lists":
meth = getattr(six, "iter" + name)
it = meth(d)
assert not isinstance(it, list)
assert list(it) == list(getattr(d, name)())
py.test.raises(StopIteration, six.advance_iterator, it)
record = []
def with_kw(*args, **kw):
record.append(kw["kw"])
return old(*args)
old = getattr(MyDict, stock_method_name(name))
monkeypatch.setattr(MyDict, stock_method_name(name), with_kw)
meth(d, kw=42)
assert record == [42]
monkeypatch.undo()
@py.test.mark.skipif("sys.version_info[:2] < (2, 7)",
reason="view methods on dictionaries only available on 2.7+")
def test_dictionary_views():
def stock_method_name(viewwhat):
"""Given a method suffix like "keys" or "values", return the name
of the dict method that delivers those on the version of Python
we're running in."""
if six.PY3:
return viewwhat
return 'view' + viewwhat
d = dict(zip(range(10), (range(11, 20))))
for name in "keys", "values", "items":
meth = getattr(six, "view" + name)
view = meth(d)
assert set(view) == set(getattr(d, name)())
def test_advance_iterator():
assert six.next is six.advance_iterator
l = [1, 2]
it = iter(l)
assert six.next(it) == 1
assert six.next(it) == 2
py.test.raises(StopIteration, six.next, it)
py.test.raises(StopIteration, six.next, it)
def test_iterator():
class myiter(six.Iterator):
def __next__(self):
return 13
assert six.advance_iterator(myiter()) == 13
class myitersub(myiter):
def __next__(self):
return 14
assert six.advance_iterator(myitersub()) == 14
def test_callable():
class X:
def __call__(self):
pass
def method(self):
pass
assert six.callable(X)
assert six.callable(X())
assert six.callable(test_callable)
assert six.callable(hasattr)
assert six.callable(X.method)
assert six.callable(X().method)
assert not six.callable(4)
assert not six.callable("string")
def test_create_bound_method():
class X(object):
pass
def f(self):
return self
x = X()
b = six.create_bound_method(f, x)
assert isinstance(b, types.MethodType)
assert b() is x
def test_create_unbound_method():
class X(object):
pass
def f(self):
return self
u = six.create_unbound_method(f, X)
py.test.raises(TypeError, u)
if six.PY2:
assert isinstance(u, types.MethodType)
x = X()
assert f(x) is x
if six.PY3:
def test_b():
data = six.b("\xff")
assert isinstance(data, bytes)
assert len(data) == 1
assert data == bytes([255])
def test_u():
s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
assert isinstance(s, str)
assert s == "hi \u0439 \U00000439 \\ \\\\ \n"
else:
def test_b():
data = six.b("\xff")
assert isinstance(data, str)
assert len(data) == 1
assert data == "\xff"
def test_u():
s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
assert isinstance(s, unicode)
assert s == "hi \xd0\xb9 \xd0\xb9 \\ \\\\ \n".decode("utf8")
def test_u_escapes():
s = six.u("\u1234")
assert len(s) == 1
def test_unichr():
assert six.u("\u1234") == six.unichr(0x1234)
assert type(six.u("\u1234")) is type(six.unichr(0x1234))
def test_int2byte():
assert six.int2byte(3) == six.b("\x03")
py.test.raises(Exception, six.int2byte, 256)
def test_byte2int():
assert six.byte2int(six.b("\x03")) == 3
assert six.byte2int(six.b("\x03\x04")) == 3
py.test.raises(IndexError, six.byte2int, six.b(""))
def test_bytesindex():
assert six.indexbytes(six.b("hello"), 3) == ord("l")
def test_bytesiter():
it = six.iterbytes(six.b("hi"))
assert six.next(it) == ord("h")
assert six.next(it) == ord("i")
py.test.raises(StopIteration, six.next, it)
def test_StringIO():
fp = six.StringIO()
fp.write(six.u("hello"))
assert fp.getvalue() == six.u("hello")
def test_BytesIO():
fp = six.BytesIO()
fp.write(six.b("hello"))
assert fp.getvalue() == six.b("hello")
def test_exec_():
def f():
l = []
six.exec_("l.append(1)")
assert l == [1]
f()
ns = {}
six.exec_("x = 42", ns)
assert ns["x"] == 42
glob = {}
loc = {}
six.exec_("global y; y = 42; x = 12", glob, loc)
assert glob["y"] == 42
assert "x" not in glob
assert loc["x"] == 12
assert "y" not in loc
def test_reraise():
def get_next(tb):
if six.PY3:
return tb.tb_next.tb_next
else:
return tb.tb_next
e = Exception("blah")
try:
raise e
except Exception:
tp, val, tb = sys.exc_info()
try:
six.reraise(tp, val, tb)
except Exception:
tp2, value2, tb2 = sys.exc_info()
assert tp2 is Exception
assert value2 is e
assert tb is get_next(tb2)
try:
six.reraise(tp, val)
except Exception:
tp2, value2, tb2 = sys.exc_info()
assert tp2 is Exception
assert value2 is e
assert tb2 is not tb
try:
six.reraise(tp, val, tb2)
except Exception:
tp2, value2, tb3 = sys.exc_info()
assert tp2 is Exception
assert value2 is e
assert get_next(tb3) is tb2
try:
six.reraise(tp, None, tb)
except Exception:
tp2, value2, tb2 = sys.exc_info()
assert tp2 is Exception
assert value2 is not val
assert isinstance(value2, Exception)
assert tb is get_next(tb2)
def test_raise_from():
try:
try:
raise Exception("blah")
except Exception:
ctx = sys.exc_info()[1]
f = Exception("foo")
six.raise_from(f, None)
except Exception:
tp, val, tb = sys.exc_info()
if sys.version_info[:2] > (3, 0):
# We should have done a raise f from None equivalent.
assert val.__cause__ is None
assert val.__context__ is ctx
if sys.version_info[:2] >= (3, 3):
# And that should suppress the context on the exception.
assert val.__suppress_context__
# For all versions the outer exception should have raised successfully.
assert str(val) == "foo"
def test_print_():
save = sys.stdout
out = sys.stdout = six.moves.StringIO()
try:
six.print_("Hello,", "person!")
finally:
sys.stdout = save
assert out.getvalue() == "Hello, person!\n"
out = six.StringIO()
six.print_("Hello,", "person!", file=out)
assert out.getvalue() == "Hello, person!\n"
out = six.StringIO()
six.print_("Hello,", "person!", file=out, end="")
assert out.getvalue() == "Hello, person!"
out = six.StringIO()
six.print_("Hello,", "person!", file=out, sep="X")
assert out.getvalue() == "Hello,Xperson!\n"
out = six.StringIO()
six.print_(six.u("Hello,"), six.u("person!"), file=out)
result = out.getvalue()
assert isinstance(result, six.text_type)
assert result == six.u("Hello, person!\n")
six.print_("Hello", file=None) # This works.
out = six.StringIO()
six.print_(None, file=out)
assert out.getvalue() == "None\n"
class FlushableStringIO(six.StringIO):
def __init__(self):
six.StringIO.__init__(self)
self.flushed = False
def flush(self):
self.flushed = True
out = FlushableStringIO()
six.print_("Hello", file=out)
assert not out.flushed
six.print_("Hello", file=out, flush=True)
assert out.flushed
@py.test.mark.skipif("sys.version_info[:2] >= (2, 6)")
def test_print_encoding(monkeypatch):
# Fool the type checking in print_.
monkeypatch.setattr(six, "file", six.BytesIO, raising=False)
out = six.BytesIO()
out.encoding = "utf-8"
out.errors = None
six.print_(six.u("\u053c"), end="", file=out)
assert out.getvalue() == six.b("\xd4\xbc")
out = six.BytesIO()
out.encoding = "ascii"
out.errors = "strict"
py.test.raises(UnicodeEncodeError, six.print_, six.u("\u053c"), file=out)
out.errors = "backslashreplace"
six.print_(six.u("\u053c"), end="", file=out)
assert out.getvalue() == six.b("\\u053c")
def test_print_exceptions():
py.test.raises(TypeError, six.print_, x=3)
py.test.raises(TypeError, six.print_, end=3)
py.test.raises(TypeError, six.print_, sep=42)
def test_with_metaclass():
class Meta(type):
pass
class X(six.with_metaclass(Meta)):
pass
assert type(X) is Meta
assert issubclass(X, object)
class Base(object):
pass
class X(six.with_metaclass(Meta, Base)):
pass
assert type(X) is Meta
assert issubclass(X, Base)
class Base2(object):
pass
class X(six.with_metaclass(Meta, Base, Base2)):
pass
assert type(X) is Meta
assert issubclass(X, Base)
assert issubclass(X, Base2)
assert X.__mro__ == (X, Base, Base2, object)
class X(six.with_metaclass(Meta)):
pass
class MetaSub(Meta):
pass
class Y(six.with_metaclass(MetaSub, X)):
pass
assert type(Y) is MetaSub
assert Y.__mro__ == (Y, X, object)
@py.test.mark.skipif("sys.version_info[:2] < (3, 0)")
def test_with_metaclass_prepare():
"""Test that with_metaclass causes Meta.__prepare__ to be called with the correct arguments."""
class MyDict(dict):
pass
class Meta(type):
@classmethod
def __prepare__(cls, name, bases):
namespace = MyDict(super().__prepare__(name, bases), cls=cls, bases=bases)
namespace['namespace'] = namespace
return namespace
class Base(object):
pass
bases = (Base,)
class X(six.with_metaclass(Meta, *bases)):
pass
assert getattr(X, 'cls', type) is Meta
assert getattr(X, 'bases', ()) == bases
assert isinstance(getattr(X, 'namespace', {}), MyDict)
def test_wraps():
def f(g):
@six.wraps(g)
def w():
return 42
return w
def k():
pass
original_k = k
k = f(f(k))
assert hasattr(k, '__wrapped__')
k = k.__wrapped__
assert hasattr(k, '__wrapped__')
k = k.__wrapped__
assert k is original_k
assert not hasattr(k, '__wrapped__')
def f(g, assign, update):
def w():
return 42
w.glue = {"foo" : "bar"}
return six.wraps(g, assign, update)(w)
k.glue = {"melon" : "egg"}
k.turnip = 43
k = f(k, ["turnip"], ["glue"])
assert k.__name__ == "w"
assert k.turnip == 43
assert k.glue == {"melon" : "egg", "foo" : "bar"}
def test_add_metaclass():
class Meta(type):
pass
class X:
"success"
X = six.add_metaclass(Meta)(X)
assert type(X) is Meta
assert issubclass(X, object)
assert X.__module__ == __name__
assert X.__doc__ == "success"
class Base(object):
pass
class X(Base):
pass
X = six.add_metaclass(Meta)(X)
assert type(X) is Meta
assert issubclass(X, Base)
class Base2(object):
pass
class X(Base, Base2):
pass
X = six.add_metaclass(Meta)(X)
assert type(X) is Meta
assert issubclass(X, Base)
assert issubclass(X, Base2)
# Test a second-generation subclass of a type.
class Meta1(type):
m1 = "m1"
class Meta2(Meta1):
m2 = "m2"
class Base:
b = "b"
Base = six.add_metaclass(Meta1)(Base)
class X(Base):
x = "x"
X = six.add_metaclass(Meta2)(X)
assert type(X) is Meta2
assert issubclass(X, Base)
assert type(Base) is Meta1
assert "__dict__" not in vars(X)
instance = X()
instance.attr = "test"
assert vars(instance) == {"attr": "test"}
assert instance.b == Base.b
assert instance.x == X.x
# Test a class with slots.
class MySlots(object):
__slots__ = ["a", "b"]
MySlots = six.add_metaclass(Meta1)(MySlots)
assert MySlots.__slots__ == ["a", "b"]
instance = MySlots()
instance.a = "foo"
py.test.raises(AttributeError, setattr, instance, "c", "baz")
# Test a class with string for slots.
class MyStringSlots(object):
__slots__ = "ab"
MyStringSlots = six.add_metaclass(Meta1)(MyStringSlots)
assert MyStringSlots.__slots__ == "ab"
instance = MyStringSlots()
instance.ab = "foo"
py.test.raises(AttributeError, setattr, instance, "a", "baz")
py.test.raises(AttributeError, setattr, instance, "b", "baz")
class MySlotsWeakref(object):
__slots__ = "__weakref__",
MySlotsWeakref = six.add_metaclass(Meta)(MySlotsWeakref)
assert type(MySlotsWeakref) is Meta
@py.test.mark.skipif("sys.version_info[:2] < (2, 7) or sys.version_info[:2] in ((3, 0), (3, 1))")
def test_assertCountEqual():
class TestAssertCountEqual(unittest.TestCase):
def test(self):
with self.assertRaises(AssertionError):
six.assertCountEqual(self, (1, 2), [3, 4, 5])
six.assertCountEqual(self, (1, 2), [2, 1])
TestAssertCountEqual('test').test()
@py.test.mark.skipif("sys.version_info[:2] < (2, 7)")
def test_assertRegex():
class TestAssertRegex(unittest.TestCase):
def test(self):
with self.assertRaises(AssertionError):
six.assertRegex(self, 'test', r'^a')
six.assertRegex(self, 'test', r'^t')
TestAssertRegex('test').test()
@py.test.mark.skipif("sys.version_info[:2] < (2, 7)")
def test_assertRaisesRegex():
class TestAssertRaisesRegex(unittest.TestCase):
def test(self):
with six.assertRaisesRegex(self, AssertionError, '^Foo'):
raise AssertionError('Foo')
with self.assertRaises(AssertionError):
with six.assertRaisesRegex(self, AssertionError, r'^Foo'):
raise AssertionError('Bar')
TestAssertRaisesRegex('test').test()
def test_python_2_unicode_compatible():
@six.python_2_unicode_compatible
class MyTest(object):
def __str__(self):
return six.u('hello')
def __bytes__(self):
return six.b('hello')
my_test = MyTest()
if six.PY2:
assert str(my_test) == six.b("hello")
assert unicode(my_test) == six.u("hello")
elif six.PY3:
assert bytes(my_test) == six.b("hello")
assert str(my_test) == six.u("hello")
assert getattr(six.moves.builtins, 'bytes', str)(my_test) == six.b("hello")
class EnsureTests:
# grinning face emoji
UNICODE_EMOJI = six.u("\U0001F600")
BINARY_EMOJI = b"\xf0\x9f\x98\x80"
def test_ensure_binary_raise_type_error(self):
with py.test.raises(TypeError):
six.ensure_str(8)
def test_errors_and_encoding(self):
six.ensure_binary(self.UNICODE_EMOJI, encoding='latin-1', errors='ignore')
with py.test.raises(UnicodeEncodeError):
six.ensure_binary(self.UNICODE_EMOJI, encoding='latin-1', errors='strict')
def test_ensure_binary_raise(self):
converted_unicode = six.ensure_binary(self.UNICODE_EMOJI, encoding='utf-8', errors='strict')
converted_binary = six.ensure_binary(self.BINARY_EMOJI, encoding="utf-8", errors='strict')
if six.PY2:
# PY2: unicode -> str
assert converted_unicode == self.BINARY_EMOJI and isinstance(converted_unicode, str)
# PY2: str -> str
assert converted_binary == self.BINARY_EMOJI and isinstance(converted_binary, str)
else:
# PY3: str -> bytes
assert converted_unicode == self.BINARY_EMOJI and isinstance(converted_unicode, bytes)
# PY3: bytes -> bytes
assert converted_binary == self.BINARY_EMOJI and isinstance(converted_binary, bytes)
def test_ensure_str(self):
converted_unicode = six.ensure_str(self.UNICODE_EMOJI, encoding='utf-8', errors='strict')
converted_binary = six.ensure_str(self.BINARY_EMOJI, encoding="utf-8", errors='strict')
if six.PY2:
# PY2: unicode -> str
assert converted_unicode == self.BINARY_EMOJI and isinstance(converted_unicode, str)
# PY2: str -> str
assert converted_binary == self.BINARY_EMOJI and isinstance(converted_binary, str)
else:
# PY3: str -> str
assert converted_unicode == self.UNICODE_EMOJI and isinstance(converted_unicode, str)
# PY3: bytes -> str
            assert converted_binary == self.UNICODE_EMOJI and isinstance(converted_binary, str)
def test_ensure_text(self):
converted_unicode = six.ensure_text(self.UNICODE_EMOJI, encoding='utf-8', errors='strict')
converted_binary = six.ensure_text(self.BINARY_EMOJI, encoding="utf-8", errors='strict')
if six.PY2:
# PY2: unicode -> unicode
assert converted_unicode == self.UNICODE_EMOJI and isinstance(converted_unicode, unicode)
# PY2: str -> unicode
            assert converted_binary == self.UNICODE_EMOJI and isinstance(converted_binary, unicode)
else:
# PY3: str -> str
assert converted_unicode == self.UNICODE_EMOJI and isinstance(converted_unicode, str)
# PY3: bytes -> str
            assert converted_binary == self.UNICODE_EMOJI and isinstance(converted_binary, str)
| mit | -5,638,111,570,566,030,000 | 28.97379 | 101 | 0.600457 | false |
trailofbits/manticore | tests/ethereum/test_plugins.py | 2 | 4308 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
import shutil
from manticore.ethereum.plugins import VerboseTrace, KeepOnlyIfStorageChanges
from manticore.ethereum import ManticoreEVM
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class EthPluginsTests(unittest.TestCase):
def setUp(self):
self.mevm = ManticoreEVM()
def tearDown(self):
ws = self.mevm.workspace
del self.mevm
shutil.rmtree(ws)
def test_ignore_states(self):
m = self.mevm
m.register_plugin(KeepOnlyIfStorageChanges())
filename = os.path.join(THIS_DIR, "contracts", "absurdrepetition.sol")
with m.kill_timeout():
m.multi_tx_analysis(filename)
for st in m.all_states:
if st.platform.logs:
return
self.fail("We did not reach any state with logs")
@unittest.skip("failing")
def test_verbose_trace(self):
source_code = """contract X {}"""
self.mevm.register_plugin(VerboseTrace())
        # owner address is hardcoded so the contract address is predictable
owner = self.mevm.create_account(
balance=1000, address=0xAFB6D63079413D167770DE9C3F50DB6477811BDB
)
        # Initialize contract so its constructor function will be traced
self.mevm.solidity_create_contract(source_code, owner=owner, gas=90000)
files = set(os.listdir(self.mevm.workspace))
# self.assertEqual(len(files), 0) # just a sanity check? workspace
# contains .state_id and other config files
# Shall produce a verbose trace file
with self.assertLogs("manticore.core.manticore", level="INFO") as cm:
self.mevm.finalize()
prefix = "\x1b[34mINFO:\x1b[0m:m.c.manticore"
# self.assertEqual(f'{prefix}:Generated testcase No. 0 - RETURN', cm.output[0])
self.assertEqual(f"{prefix}:Results in {self.mevm.workspace}", cm.output[0])
# self.assertEqual(f'{prefix}:Total time: {self.mevm._last_run_stats["time_elapsed"]}', cm.output[2])
self.assertEqual(len(cm.output), 1)
import re
files = set((f for f in os.listdir(self.mevm.workspace) if re.match(r"[^.].*", f)))
expected_files = {
"global_X.runtime_visited",
"global_X_runtime.bytecode",
"test_00000000.verbose_trace",
"global_X.sol",
"global_X.runtime_asm",
"global_X.init_asm",
"global_X.init_visited",
"test_00000000.constraints",
"command.sh",
"global_X_init.bytecode",
"test_00000000.tx",
"test_00000000.pkl",
"manticore.yml",
"global.summary",
"test_00000000.summary",
"test_00000000.tx.json",
"test_00000000.logs",
"test_00000000.trace",
}
self.assertEqual(files, expected_files)
result_vt_path = os.path.join(self.mevm.workspace, "test_00000000.verbose_trace")
expected_vt_path = os.path.join(THIS_DIR, "data/verbose_trace_plugin_out")
with open(result_vt_path) as res_fp, open(expected_vt_path) as exp_fp:
res = res_fp.readlines()
exp = exp_fp.readlines()
self.assertEqual(len(res), len(exp))
self.assertEqual(len(res), 204)
# Till line 184 the outputs shall be the same
# Next there is a CODESIZE instruction that concretizes to different values each run
# and as a result, the values in memory might differ.
#
# For some reason even setting `(set-option :random-seed 1)` in z3 doesn't help
for i in range(184):
self.assertEqual(res[i], exp[i], f"Difference on line {i}")
        till = 130  # number of chars that don't differ
for i in range(184, 188):
self.assertEqual(res[i][:till], exp[i][:till], f"Difference on line {i}")
for i in range(188, 195):
self.assertEqual(res[i], exp[i], f"Difference on line {i}")
for i in range(195, 200):
self.assertEqual(res[i][:till], exp[i][:till], f"Difference on line {i}")
for i in range(200, len(res)):
self.assertEqual(res[i], exp[i], f"Difference on line {i}")
| agpl-3.0 | -4,979,989,525,697,667,000 | 36.789474 | 113 | 0.600975 | false |