repo_name | path | copies | size | content | license
---|---|---|---|---|---
ptisserand/ansible | lib/ansible/template/vars.py | 39 | 4935 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import Mapping
from jinja2.utils import missing
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
__all__ = ['AnsibleJ2Vars']
class AnsibleJ2Vars(Mapping):
'''
Helper class to template all variable content before jinja2 sees it. This is
done by hijacking the variable storage that jinja2 uses, and overriding __contains__
and __getitem__ to look like a dict. An added bonus is avoiding duplication of the large
hashes that inject tends to create.
To facilitate using builtin jinja2 things like range, globals are also handled here.
'''
def __init__(self, templar, globals, locals=None, *extras):
'''
Initializes this object with a valid Templar() object, as
well as several dictionaries of variables representing
different scopes (in jinja2 terminology).
'''
self._templar = templar
self._globals = globals
self._extras = extras
self._locals = dict()
if isinstance(locals, dict):
for key, val in iteritems(locals):
if val is not missing:
if key[:2] == 'l_':
self._locals[key[2:]] = val
elif key not in ('context', 'environment', 'template'):
self._locals[key] = val
def __contains__(self, k):
if k in self._templar._available_variables:
return True
if k in self._locals:
return True
for i in self._extras:
if k in i:
return True
if k in self._globals:
return True
return False
def __iter__(self):
keys = set()
keys.update(self._templar._available_variables, self._locals, self._globals, *self._extras)
return iter(keys)
def __len__(self):
keys = set()
keys.update(self._templar._available_variables, self._locals, self._globals, *self._extras)
return len(keys)
def __getitem__(self, varname):
if varname not in self._templar._available_variables:
if varname in self._locals:
return self._locals[varname]
for i in self._extras:
if varname in i:
return i[varname]
if varname in self._globals:
return self._globals[varname]
else:
raise KeyError("undefined variable: %s" % varname)
variable = self._templar._available_variables[varname]
# HostVars is special, return it as-is, as is the special variable
# 'vars', which contains the vars structure
from ansible.vars.hostvars import HostVars
if isinstance(variable, dict) and varname == "vars" or isinstance(variable, HostVars) or hasattr(variable, '__UNSAFE__'):
return variable
else:
value = None
try:
value = self._templar.template(variable)
except AnsibleUndefinedVariable:
raise
except Exception as e:
msg = getattr(e, 'message') or to_native(e)
raise AnsibleError("An unhandled exception occurred while templating '%s'. "
"Error was a %s, original message: %s" % (to_native(variable), type(e), msg))
return value
def add_locals(self, locals):
'''
If locals are provided, create a copy of self containing those
locals in addition to what is already in this variable proxy.
'''
if locals is None:
return self
# FIXME run this only on jinja2>=2.9?
# prior to version 2.9, locals contained all of the vars and not just the current
# local vars so this was not necessary for locals to propagate down to nested includes
new_locals = self._locals.copy()
new_locals.update(locals)
return AnsibleJ2Vars(self._templar, self._globals, locals=new_locals, *self._extras)
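# --- Illustrative sketch (added for explanation; not part of Ansible) -----
# The class above only renders a variable when it is looked up, which is the
# point of hijacking __getitem__. The stand-in templar below is hypothetical
# and exists purely to show that plumbing; the real object is
# ansible.template.Templar.
def _example_lazy_lookup():
    class _FakeTemplar(object):
        def __init__(self, variables):
            self._available_variables = variables

        def template(self, value):
            # Stand-in for real Jinja2 rendering.
            return value.replace('{{ name }}', self._available_variables['name'])

    templar = _FakeTemplar({'name': 'world', 'greeting': 'hello {{ name }}'})
    variables = AnsibleJ2Vars(templar, {})
    # The raw string is only rendered at lookup time:
    assert 'greeting' in variables
    assert variables['greeting'] == 'hello world'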
| gpl-3.0 |
samuel1208/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
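# --- Illustrative usage (added for explanation; not part of scikit-learn) --
# A scorer is just a callable with the (estimator, X, y) signature described
# in the module docstring. The function name and toy data below are
# hypothetical and only show the plumbing around get_scorer / SCORERS.
def _example_scorer_usage():
    from sklearn.svm import LinearSVC
    X, y = [[0.], [1.], [2.], [3.]], [0, 0, 1, 1]
    clf = LinearSVC().fit(X, y)
    scorer = get_scorer('f1_macro')            # same object as SCORERS['f1_macro']
    return scorer(clf, [[0.], [3.]], [0, 1])   # applies f1_score to clf.predict(...)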
| bsd-3-clause |
ennoborg/gramps | gramps/plugins/textreport/recordsreport.py | 9 | 13718 | # encoding:utf-8
#
# Gramps - a GTK+/GNOME based genealogy program - Records plugin
#
# Copyright (C) 2008-2011 Reinhard Müller
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012 Brian G. Matherly
# Copyright (C) 2013-2016 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
""" Records Report """
#------------------------------------------------------------------------
#
# Standard Python modules
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.plugins.lib.librecords import (RECORDS, find_records,
CALLNAME_DONTUSE, CALLNAME_REPLACE,
CALLNAME_UNDERLINE_ADD)
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle,
FONT_SANS_SERIF, PARA_ALIGN_CENTER,
IndexMark, INDEX_TYPE_TOC)
from gramps.gen.plug.menu import (BooleanOption, EnumeratedListOption,
FilterOption, NumberOption,
PersonOption, StringOption)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.gen.lib import Span
from gramps.gen.errors import ReportError
from gramps.gen.proxy import LivingProxyDb, CacheProxyDb
#------------------------------------------------------------------------
#
# Records Report
#
#------------------------------------------------------------------------
class RecordsReport(Report):
""" Records Report """
def __init__(self, database, options, user):
"""
This report needs the following parameters (class variables)
that come in the options class.
incl_private - Whether to include private data
living_people - How to handle living people
years_past_death - Consider as living this many years after death
"""
Report.__init__(self, database, options, user)
menu = options.menu
self.set_locale(options.menu.get_option_by_name('trans').get_value())
stdoptions.run_private_data_option(self, menu)
living_opt = stdoptions.run_living_people_option(self, menu,
self._locale)
self.database = CacheProxyDb(self.database)
self._lv = menu.get_option_by_name('living_people').get_value()
for (value, description) in living_opt.get_items(xml_items=True):
if value == self._lv:
living_desc = self._(description)
break
self.living_desc = self._(
"(Living people: %(option_name)s)") % {'option_name': living_desc}
filter_option = menu.get_option_by_name('filter')
self.filter = filter_option.get_filter()
self.top_size = menu.get_option_by_name('top_size').get_value()
self.callname = menu.get_option_by_name('callname').get_value()
self.footer = menu.get_option_by_name('footer').get_value()
self.include = {}
for (text, varname, default) in RECORDS:
self.include[varname] = menu.get_option_by_name(varname).get_value()
self._nf = stdoptions.run_name_format_option(self, menu)
def write_report(self):
"""
Build the actual report.
"""
records = find_records(self.database, self.filter,
self.top_size, self.callname,
trans_text=self._, name_format=self._nf,
living_mode=self._lv, user=self._user)
self.doc.start_paragraph('REC-Title')
title = self._("Records")
mark = IndexMark(title, INDEX_TYPE_TOC, 1)
self.doc.write_text(title, mark)
self.doc.end_paragraph()
self.doc.start_paragraph('REC-Subtitle')
filter_name = self.filter.get_name(self._locale)
self.doc.write_text("(%s)" % filter_name)
self.doc.end_paragraph()
if self._lv != LivingProxyDb.MODE_INCLUDE_ALL:
self.doc.start_paragraph('REC-Subtitle')
self.doc.write_text(self.living_desc)
self.doc.end_paragraph()
for (text, varname, top) in records:
if not self.include[varname]:
continue
self.doc.start_paragraph('REC-Heading')
self.doc.write_text(self._(text))
self.doc.end_paragraph()
last_value = None
rank = 0
for (number,
(sort, value, name, handletype, handle)) in enumerate(top):
mark = None
if handletype == 'Person':
person = self.database.get_person_from_handle(handle)
mark = utils.get_person_mark(self.database, person)
elif handletype == 'Family':
family = self.database.get_family_from_handle(handle)
# librecords.py checks that the family has both
# a father and a mother and also that each one is
# in the filter if any filter was used, so we don't
# have to do any similar checking here, it's been done
f_handle = family.get_father_handle()
dad = self.database.get_person_from_handle(f_handle)
f_mark = utils.get_person_mark(self.database, dad)
m_handle = family.get_mother_handle()
mom = self.database.get_person_from_handle(m_handle)
m_mark = utils.get_person_mark(self.database, mom)
else:
raise ReportError(_(
"Option '%(opt_name)s' is present "
"in %(file)s\n but is not known to "
"the module. Ignoring...")
% {'opt_name': handletype,
'file': 'libnarrate.py'})
# since the error is very unlikely I reused the string
if value != last_value:
last_value = value
rank = number
self.doc.start_paragraph('REC-Normal')
self.doc.write_text(
self._("%(number)s. ") % {'number': rank+1})
self.doc.write_markup(str(name), name.get_tags(), mark)
if handletype == 'Family':
self.doc.write_text('', f_mark)
self.doc.write_text('', m_mark)
if isinstance(value, Span):
tvalue = value.get_repr(dlocale=self._locale)
else:
tvalue = value
self.doc.write_text(" (%s)" % tvalue)
self.doc.end_paragraph()
self.doc.start_paragraph('REC-Footer')
self.doc.write_text(self.footer)
self.doc.end_paragraph()
#------------------------------------------------------------------------
#
# Records Report Options
#
#------------------------------------------------------------------------
class RecordsReportOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
self.__pid = None
self.__filter = None
self.__db = dbase
self._nf = None
MenuReportOptions.__init__(self, name, dbase)
def get_subject(self):
""" Return a string that describes the subject of the report. """
return self.__filter.get_filter().get_name()
def add_menu_options(self, menu):
category_name = _("Report Options")
self.__filter = FilterOption(_("Filter"), 0)
self.__filter.set_help(
_("Determines what people are included in the report."))
menu.add_option(category_name, "filter", self.__filter)
self.__filter.connect('value-changed', self.__filter_changed)
self.__pid = PersonOption(_("Filter Person"))
self.__pid.set_help(_("The center person for the filter"))
menu.add_option(category_name, "pid", self.__pid)
self.__pid.connect('value-changed', self.__update_filters)
top_size = NumberOption(_("Number of ranks to display"), 3, 1, 100)
menu.add_option(category_name, "top_size", top_size)
callname = EnumeratedListOption(_("Use call name"), CALLNAME_DONTUSE)
callname.set_items([
(CALLNAME_DONTUSE, _("Don't use call name")),
(CALLNAME_REPLACE, _("Replace first names with call name")),
(CALLNAME_UNDERLINE_ADD,
_("Underline call name in first names / "
"add call name to first name"))])
menu.add_option(category_name, "callname", callname)
footer = StringOption(_("Footer text"), "")
menu.add_option(category_name, "footer", footer)
category_name = _("Report Options (2)")
self._nf = stdoptions.add_name_format_option(menu, category_name)
self._nf.connect('value-changed', self.__update_filters)
self.__update_filters()
stdoptions.add_private_data_option(menu, category_name)
stdoptions.add_living_people_option(menu, category_name)
stdoptions.add_localization_option(menu, category_name)
p_count = 0
for (text, varname, default) in RECORDS:
if varname.startswith('person'):
p_count += 1
p_half = p_count // 2
p_idx = 0
for (text, varname, default) in RECORDS:
option = BooleanOption(_(text), default)
if varname.startswith('person'):
if p_idx >= p_half:
category_name = _("Person 2")
else:
category_name = _("Person 1")
p_idx += 1
elif varname.startswith('family'):
category_name = _("Family")
menu.add_option(category_name, varname, option)
def __update_filters(self):
"""
Update the filter list based on the selected person
"""
gid = self.__pid.get_value()
person = self.__db.get_person_from_gramps_id(gid)
nfv = self._nf.get_value()
filter_list = utils.get_person_filters(person,
include_single=False,
name_format=nfv)
self.__filter.set_filters(filter_list)
def __filter_changed(self):
"""
Handle filter change. If the filter is not specific to a person,
disable the person option
"""
filter_value = self.__filter.get_value()
if filter_value == 0: # "Entire Database" (as "include_single=False")
self.__pid.set_available(False)
else:
# The other filters need a center person (assume custom ones too)
self.__pid.set_available(True)
def make_default_style(self, default_style):
#Paragraph Styles
font = FontStyle()
font.set_type_face(FONT_SANS_SERIF)
font.set_size(16)
font.set_bold(True)
para = ParagraphStyle()
para.set_font(font)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_description(_("The style used for the title."))
default_style.add_paragraph_style('REC-Title', para)
font = FontStyle()
font.set_type_face(FONT_SANS_SERIF)
font.set_size(12)
font.set_bold(True)
para = ParagraphStyle()
para.set_font(font)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_description(_("The style used for the subtitle."))
default_style.add_paragraph_style('REC-Subtitle', para)
font = FontStyle()
font.set_size(12)
font.set_bold(True)
para = ParagraphStyle()
para.set_font(font)
para.set_top_margin(utils.pt2cm(6))
para.set_description(_('The style used for the section headers.'))
default_style.add_paragraph_style('REC-Heading', para)
font = FontStyle()
font.set_size(10)
para = ParagraphStyle()
para.set_font(font)
para.set_left_margin(0.5)
para.set_description(_('The basic style used for the text display.'))
default_style.add_paragraph_style('REC-Normal', para)
font = FontStyle()
font.set_size(8)
para = ParagraphStyle()
para.set_font(font)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_top_border(True)
para.set_top_margin(utils.pt2cm(8))
para.set_description(_('The style used for the footer.'))
default_style.add_paragraph_style('REC-Footer', para)
| gpl-2.0 |
axiom-data-science/paegan | paegan/cdm/dsg/collections/base/nested_point_collection.py | 3 | 1754 | import itertools
import collections
from paegan.cdm.dsg.collections.base.feature_collection import FeatureCollection
from paegan.cdm.dsg.collections.base.point_collection import PointCollection
from paegan.cdm.dsg.features.base.point import Point
from paegan.utils.asalist import AsaList
class NestedPointCollection(FeatureCollection):
"""
A collection of PointCollections
"""
def __init__(self, **kwargs):
super(NestedPointCollection,self).__init__(**kwargs)
def calculate_bounds(self):
"""
Calculate the time_range, bbox, and size of this collection.
Will scan all data.
Ensures that .size, .bbox and .time_range return non-null.
If the collection already knows its bbox; time_range; and/or size,
they are recomputed.
"""
single_point_collection = PointCollection(elements=list(AsaList.flatten(self)))
single_point_collection.calculate_bounds()
self.bbox = single_point_collection.bbox
self.time_range = single_point_collection.time_range
self.depth_range = single_point_collection.depth_range
self._point_size = single_point_collection.size
self.size = len(self._elements)
def flatten(self):
"""
Returns a Generator of Points that are part of this collection
"""
return AsaList.flatten(self)
def get_point_size(self):
"""
Returns the number of actual Points in this NestedPointCollection
Ex. pc = 10 profiles with 20 bins each will return 200
pc.size = 10
pc.point_size = 200
"""
return self._point_size
point_size = property(get_point_size, None) | gpl-3.0 |
chrisnatali/networkx | networkx/linalg/tests/test_algebraic_connectivity.py | 54 | 10790 | from contextlib import contextmanager
from math import sqrt
import networkx as nx
from nose import SkipTest
from nose.tools import *
methods = ('tracemin_pcg', 'tracemin_chol', 'tracemin_lu', 'lanczos', 'lobpcg')
try:
from numpy.random import get_state, seed, set_state, shuffle
@contextmanager
def save_random_state():
state = get_state()
try:
yield
finally:
set_state(state)
def preserve_random_state(func):
def wrapper(*args, **kwargs):
with save_random_state():
seed(1234567890)
return func(*args, **kwargs)
wrapper.__name__ = func.__name__
return wrapper
except ImportError:
@contextmanager
def save_random_state():
yield
def preserve_random_state(func):
return func
def check_eigenvector(A, l, x):
nx = numpy.linalg.norm(x)
# Check zeroness.
assert_not_almost_equal(nx, 0)
y = A * x
ny = numpy.linalg.norm(y)
# Check collinearity.
assert_almost_equal(numpy.dot(x, y), nx * ny)
# Check eigenvalue.
assert_almost_equal(ny, l * nx)
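# Illustrative example (added for explanation; not part of the original test
# suite): the Laplacian of a two-node path graph is [[1, -1], [-1, 1]], which
# has eigenvalue 2 with eigenvector [1, -1]. With A built from it as a
# scipy.sparse matrix and x = numpy.array([1., -1.]), check_eigenvector(A, 2, x)
# passes, since A * x equals 2 * x and the norm/collinearity checks all hold.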
class TestAlgebraicConnectivity(object):
numpy = 1
@classmethod
def setupClass(cls):
global numpy
try:
import numpy.linalg
import scipy.sparse
except ImportError:
raise SkipTest('SciPy not available.')
@preserve_random_state
def test_directed(self):
G = nx.DiGraph()
for method in self._methods:
assert_raises(nx.NetworkXNotImplemented, nx.algebraic_connectivity,
G, method=method)
assert_raises(nx.NetworkXNotImplemented, nx.fiedler_vector, G,
method=method)
@preserve_random_state
def test_null_and_singleton(self):
G = nx.Graph()
for method in self._methods:
assert_raises(nx.NetworkXError, nx.algebraic_connectivity, G,
method=method)
assert_raises(nx.NetworkXError, nx.fiedler_vector, G,
method=method)
G.add_edge(0, 0)
for method in self._methods:
assert_raises(nx.NetworkXError, nx.algebraic_connectivity, G,
method=method)
assert_raises(nx.NetworkXError, nx.fiedler_vector, G,
method=method)
@preserve_random_state
def test_disconnected(self):
G = nx.Graph()
G.add_nodes_from(range(2))
for method in self._methods:
assert_equal(nx.algebraic_connectivity(G), 0)
assert_raises(nx.NetworkXError, nx.fiedler_vector, G,
method=method)
G.add_edge(0, 1, weight=0)
for method in self._methods:
assert_equal(nx.algebraic_connectivity(G), 0)
assert_raises(nx.NetworkXError, nx.fiedler_vector, G,
method=method)
@preserve_random_state
def test_unrecognized_method(self):
G = nx.path_graph(4)
assert_raises(nx.NetworkXError, nx.algebraic_connectivity, G,
method='unknown')
assert_raises(nx.NetworkXError, nx.fiedler_vector, G, method='unknown')
@preserve_random_state
def test_two_nodes(self):
G = nx.Graph()
G.add_edge(0, 1, weight=1)
A = nx.laplacian_matrix(G)
for method in self._methods:
assert_almost_equal(nx.algebraic_connectivity(
G, tol=1e-12, method=method), 2)
x = nx.fiedler_vector(G, tol=1e-12, method=method)
check_eigenvector(A, 2, x)
G = nx.MultiGraph()
G.add_edge(0, 0, spam=1e8)
G.add_edge(0, 1, spam=1)
G.add_edge(0, 1, spam=-2)
A = -3 * nx.laplacian_matrix(G, weight='spam')
for method in self._methods:
assert_almost_equal(nx.algebraic_connectivity(
G, weight='spam', tol=1e-12, method=method), 6)
x = nx.fiedler_vector(G, weight='spam', tol=1e-12, method=method)
check_eigenvector(A, 6, x)
@preserve_random_state
def test_path(self):
G = nx.path_graph(8)
A = nx.laplacian_matrix(G)
sigma = 2 - sqrt(2 + sqrt(2))
for method in self._methods:
assert_almost_equal(nx.algebraic_connectivity(
G, tol=1e-12, method=method), sigma)
x = nx.fiedler_vector(G, tol=1e-12, method=method)
check_eigenvector(A, sigma, x)
@preserve_random_state
def test_cycle(self):
G = nx.cycle_graph(8)
A = nx.laplacian_matrix(G)
sigma = 2 - sqrt(2)
for method in self._methods:
assert_almost_equal(nx.algebraic_connectivity(
G, tol=1e-12, method=method), sigma)
x = nx.fiedler_vector(G, tol=1e-12, method=method)
check_eigenvector(A, sigma, x)
@preserve_random_state
def test_buckminsterfullerene(self):
G = nx.Graph(
[(1, 10), (1, 41), (1, 59), (2, 12), (2, 42), (2, 60), (3, 6),
(3, 43), (3, 57), (4, 8), (4, 44), (4, 58), (5, 13), (5, 56),
(5, 57), (6, 10), (6, 31), (7, 14), (7, 56), (7, 58), (8, 12),
(8, 32), (9, 23), (9, 53), (9, 59), (10, 15), (11, 24), (11, 53),
(11, 60), (12, 16), (13, 14), (13, 25), (14, 26), (15, 27),
(15, 49), (16, 28), (16, 50), (17, 18), (17, 19), (17, 54),
(18, 20), (18, 55), (19, 23), (19, 41), (20, 24), (20, 42),
(21, 31), (21, 33), (21, 57), (22, 32), (22, 34), (22, 58),
(23, 24), (25, 35), (25, 43), (26, 36), (26, 44), (27, 51),
(27, 59), (28, 52), (28, 60), (29, 33), (29, 34), (29, 56),
(30, 51), (30, 52), (30, 53), (31, 47), (32, 48), (33, 45),
(34, 46), (35, 36), (35, 37), (36, 38), (37, 39), (37, 49),
(38, 40), (38, 50), (39, 40), (39, 51), (40, 52), (41, 47),
(42, 48), (43, 49), (44, 50), (45, 46), (45, 54), (46, 55),
(47, 54), (48, 55)])
for normalized in (False, True):
if not normalized:
A = nx.laplacian_matrix(G)
sigma = 0.2434017461399311
else:
A = nx.normalized_laplacian_matrix(G)
sigma = 0.08113391537997749
for method in methods:
try:
assert_almost_equal(nx.algebraic_connectivity(
G, normalized=normalized, tol=1e-12, method=method),
sigma)
x = nx.fiedler_vector(G, normalized=normalized, tol=1e-12,
method=method)
check_eigenvector(A, sigma, x)
except nx.NetworkXError as e:
if e.args not in (('Cholesky solver unavailable.',),
('LU solver unavailable.',)):
raise
_methods = ('tracemin', 'lanczos', 'lobpcg')
class TestSpectralOrdering(object):
numpy = 1
@classmethod
def setupClass(cls):
global numpy
try:
import numpy.linalg
import scipy.sparse
except ImportError:
raise SkipTest('SciPy not available.')
@preserve_random_state
def test_nullgraph(self):
for graph in (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph):
G = graph()
assert_raises(nx.NetworkXError, nx.spectral_ordering, G)
@preserve_random_state
def test_singleton(self):
for graph in (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph):
G = graph()
G.add_node('x')
assert_equal(nx.spectral_ordering(G), ['x'])
G.add_edge('x', 'x', weight=33)
G.add_edge('x', 'x', weight=33)
assert_equal(nx.spectral_ordering(G), ['x'])
@preserve_random_state
def test_unrecognized_method(self):
G = nx.path_graph(4)
assert_raises(nx.NetworkXError, nx.spectral_ordering, G,
method='unknown')
@preserve_random_state
def test_three_nodes(self):
G = nx.Graph()
G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1)],
weight='spam')
for method in self._methods:
order = nx.spectral_ordering(G, weight='spam', method=method)
assert_equal(set(order), set(G))
ok_(set([1, 3]) in (set(order[:-1]), set(order[1:])))
G = nx.MultiDiGraph()
G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1), (2, 3, 2)])
for method in self._methods:
order = nx.spectral_ordering(G, method=method)
assert_equal(set(order), set(G))
ok_(set([2, 3]) in (set(order[:-1]), set(order[1:])))
@preserve_random_state
def test_path(self):
path = list(range(10))
shuffle(path)
G = nx.Graph()
G.add_path(path)
for method in self._methods:
order = nx.spectral_ordering(G, method=method)
ok_(order in [path, list(reversed(path))])
@preserve_random_state
def test_disconnected(self):
G = nx.Graph()
G.add_path(range(0, 10, 2))
G.add_path(range(1, 10, 2))
for method in self._methods:
order = nx.spectral_ordering(G, method=method)
assert_equal(set(order), set(G))
seqs = [list(range(0, 10, 2)), list(range(8, -1, -2)),
list(range(1, 10, 2)), list(range(9, -1, -2))]
ok_(order[:5] in seqs)
ok_(order[5:] in seqs)
@preserve_random_state
def test_cycle(self):
path = list(range(10))
G = nx.Graph()
G.add_path(path, weight=5)
G.add_edge(path[-1], path[0], weight=1)
A = nx.laplacian_matrix(G).todense()
for normalized in (False, True):
for method in methods:
try:
order = nx.spectral_ordering(G, normalized=normalized,
method=method)
except nx.NetworkXError as e:
if e.args not in (('Cholesky solver unavailable.',),
('LU solver unavailable.',)):
raise
else:
if not normalized:
ok_(order in [[1, 2, 0, 3, 4, 5, 6, 9, 7, 8],
[8, 7, 9, 6, 5, 4, 3, 0, 2, 1]])
else:
ok_(order in [[1, 2, 3, 0, 4, 5, 9, 6, 7, 8],
[8, 7, 6, 9, 5, 4, 0, 3, 2, 1]])
_methods = ('tracemin', 'lanczos', 'lobpcg')
| bsd-3-clause |
nfletton/django-oscar | src/oscar/apps/dashboard/views.py | 32 | 7437 | from datetime import timedelta
from decimal import Decimal as D, ROUND_UP
from django.utils.timezone import now
from django.views.generic import TemplateView
from oscar.core.loading import get_model
from django.db.models import Avg, Sum, Count
from oscar.core.compat import get_user_model
from oscar.apps.promotions.models import AbstractPromotion
ConditionalOffer = get_model('offer', 'ConditionalOffer')
Voucher = get_model('voucher', 'Voucher')
Basket = get_model('basket', 'Basket')
StockAlert = get_model('partner', 'StockAlert')
Product = get_model('catalogue', 'Product')
Order = get_model('order', 'Order')
Line = get_model('order', 'Line')
User = get_user_model()
class IndexView(TemplateView):
"""
An overview view which displays several reports about the shop.
Supports the permission-based dashboard. It is recommended to add an
index_nonstaff.html template because Oscar's default template will
display potentially sensitive store information.
"""
def get_template_names(self):
if self.request.user.is_staff:
return ['dashboard/index.html', ]
else:
return ['dashboard/index_nonstaff.html', 'dashboard/index.html']
def get_context_data(self, **kwargs):
ctx = super(IndexView, self).get_context_data(**kwargs)
ctx.update(self.get_stats())
return ctx
def get_active_site_offers(self):
"""
Return active conditional offers of type "site offer". The returned
``Queryset`` of site offers is filtered by end date greater than
the current date.
"""
return ConditionalOffer.objects.filter(
end_datetime__gt=now(), offer_type=ConditionalOffer.SITE)
def get_active_vouchers(self):
"""
Get all active vouchers. The returned ``Queryset`` of vouchers
is filtered by end date greater than the current date.
"""
return Voucher.objects.filter(end_datetime__gt=now())
def get_number_of_promotions(self, abstract_base=AbstractPromotion):
"""
Get the number of promotions for all promotions derived from
*abstract_base*. All subclasses of *abstract_base* are queried
and if another abstract base class is found this method is executed
recursively.
"""
total = 0
for cls in abstract_base.__subclasses__():
if cls._meta.abstract:
total += self.get_number_of_promotions(cls)
else:
total += cls.objects.count()
return total
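# Worked example (hypothetical class tree, not actual Oscar promotions):
# with AbstractPromotion -> AbstractBanner (abstract) -> Banner, plus Image
# as a direct concrete subclass of AbstractPromotion, the call recurses into
# AbstractBanner and returns Banner.objects.count() + Image.objects.count().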
def get_open_baskets(self, filters=None):
"""
Get all open baskets. If *filters* dictionary is provided they will
be applied on all open baskets and return only filtered results.
"""
if filters is None:
filters = {}
filters['status'] = Basket.OPEN
return Basket.objects.filter(**filters)
def get_hourly_report(self, hours=24, segments=10):
"""
Get report of order revenue split up in hourly chunks. A report is
generated for the last *hours* (default=24) from the current time.
The report provides ``max_revenue`` of the hourly order revenue sum,
``y-range`` as the labeling for the y-axis in a template and
``order_total_hourly``, a list of properties for hourly chunks.
*segments* defines the number of labeling segments used for the y-axis
when generating the y-axis labels (default=10).
"""
# Get datetime for 24 hours ago
time_now = now().replace(minute=0, second=0)
start_time = time_now - timedelta(hours=hours - 1)
orders_last_day = Order.objects.filter(date_placed__gt=start_time)
order_total_hourly = []
for hour in range(0, hours, 2):
end_time = start_time + timedelta(hours=2)
hourly_orders = orders_last_day.filter(date_placed__gt=start_time,
date_placed__lt=end_time)
total = hourly_orders.aggregate(
Sum('total_incl_tax')
)['total_incl_tax__sum'] or D('0.0')
order_total_hourly.append({
'end_time': end_time,
'total_incl_tax': total
})
start_time = end_time
max_value = max([x['total_incl_tax'] for x in order_total_hourly])
divisor = 1
while divisor < max_value / 50:
divisor *= 10
max_value = (max_value / divisor).quantize(D('1'), rounding=ROUND_UP)
max_value *= divisor
if max_value:
segment_size = (max_value) / D('100.0')
for item in order_total_hourly:
item['percentage'] = int(item['total_incl_tax'] / segment_size)
y_range = []
y_axis_steps = max_value / D(str(segments))
for idx in reversed(range(segments + 1)):
y_range.append(idx * y_axis_steps)
else:
y_range = []
for item in order_total_hourly:
item['percentage'] = 0
ctx = {
'order_total_hourly': order_total_hourly,
'max_revenue': max_value,
'y_range': y_range,
}
return ctx
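# Worked example of the y-axis scaling above (numbers are hypothetical): if
# the largest two-hour total is Decimal('437.50'), the divisor loop stops at
# 10 (since 10 >= 437.50 / 50), max_value becomes (437.50 / 10) rounded up to
# 44 and multiplied back to 440; each bar's 'percentage' is then
# int(total / 4.40), and y_range runs 440, 396, ..., 0 in steps of 44.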
def get_stats(self):
datetime_24hrs_ago = now() - timedelta(hours=24)
orders = Order.objects.filter()
orders_last_day = orders.filter(date_placed__gt=datetime_24hrs_ago)
open_alerts = StockAlert.objects.filter(status=StockAlert.OPEN)
closed_alerts = StockAlert.objects.filter(status=StockAlert.CLOSED)
total_lines_last_day = Line.objects.filter(
order__in=orders_last_day).count()
stats = {
'total_orders_last_day': orders_last_day.count(),
'total_lines_last_day': total_lines_last_day,
'average_order_costs': orders_last_day.aggregate(
Avg('total_incl_tax')
)['total_incl_tax__avg'] or D('0.00'),
'total_revenue_last_day': orders_last_day.aggregate(
Sum('total_incl_tax')
)['total_incl_tax__sum'] or D('0.00'),
'hourly_report_dict': self.get_hourly_report(hours=24),
'total_customers_last_day': User.objects.filter(
date_joined__gt=datetime_24hrs_ago,
).count(),
'total_open_baskets_last_day': self.get_open_baskets({
'date_created__gt': datetime_24hrs_ago
}).count(),
'total_products': Product.objects.count(),
'total_open_stock_alerts': open_alerts.count(),
'total_closed_stock_alerts': closed_alerts.count(),
'total_site_offers': self.get_active_site_offers().count(),
'total_vouchers': self.get_active_vouchers().count(),
'total_promotions': self.get_number_of_promotions(),
'total_customers': User.objects.count(),
'total_open_baskets': self.get_open_baskets().count(),
'total_orders': orders.count(),
'total_lines': Line.objects.filter(order__in=orders).count(),
'total_revenue': orders.aggregate(
Sum('total_incl_tax')
)['total_incl_tax__sum'] or D('0.00'),
'order_status_breakdown': orders.order_by(
'status'
).values('status').annotate(freq=Count('id'))
}
return stats
| bsd-3-clause |
iitjee/steppinsPython | SystemProgramming/Parallel System/00 intro.py | 1 | 2190 | Most computers spend a lot of time doing nothing. If you start a system monitor tool and watch the CPU utilization, you’ll see
what I mean—it’s rare to see one hit 100 percent, even when you are running multiple programs.* There are just too many delays
built into software: disk accesses, network traffic, database queries, waiting for users to click a button, and so on. In
fact, the majority of a modern CPU’s capacity is often spent in an idle state; faster chips help speed up performance demand
peaks, but much of their power can go largely unused.
Early on in computing, programmers realized that they could tap into such unused processing power by running more than one
program at the same time. By dividing the CPU’s attention among a set of tasks, its capacity need not go to waste while any
given task is waiting for an external event to occur. The technique is usually called parallel processing (and sometimes
“multiprocessing” or even “multitasking”) because many tasks seem to be performed at once, overlapping and parallel in time.
It’s at the heart of modern operating systems, and it gave rise to the notion of multiple-active-window computer interfaces
we’ve all come to take for granted.
There are two fundamental ways to get tasks running at the same time in Python: process forks and spawned threads.
Functionally, both rely on underlying operating system services to run bits of Python code in parallel. Procedurally, they are
very different in terms of interface, portability, and communication.
For example, direct process forks are not supported on Windows under standard Python, whereas Python's thread support works on all
major platforms.
The focus in this chapter is on introducing more direct techniques—forks, threads, pipes, signals, sockets, and other launching
techniques—and on using Python's built-in tools that support them, such as the os.fork call and the threading, queue, and
multiprocessing modules.
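A minimal sketch of both approaches follows (illustrative only; the function and labels are made up for this note, and os.fork is available only on Unix-like systems):

import os, threading

def work(label):
    print('working in', label)

# Spawned thread: work() runs concurrently inside the same process.
t = threading.Thread(target=work, args=('a thread',))
t.start()
t.join()

# Process fork: the child receives a copy of the parent process.
pid = os.fork()
if pid == 0:                 # child process
    work('a forked child')
    os._exit(0)
else:                        # parent process
    os.waitpid(pid, 0)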
External Tools (3rd Party):
The MPI for Python system allows Python scripts to also employ the Message Passing Interface (MPI) standard, allowing Python
programs to exploit multiple processors in various ways.
| gpl-3.0 |
jc0n/scrapy | tests/test_utils_signal.py | 121 | 2741 | from testfixtures import LogCapture
from twisted.trial import unittest
from twisted.python.failure import Failure
from twisted.internet import defer, reactor
from pydispatch import dispatcher
from scrapy.utils.signal import send_catch_log, send_catch_log_deferred
class SendCatchLogTest(unittest.TestCase):
@defer.inlineCallbacks
def test_send_catch_log(self):
test_signal = object()
handlers_called = set()
dispatcher.connect(self.error_handler, signal=test_signal)
dispatcher.connect(self.ok_handler, signal=test_signal)
with LogCapture() as l:
result = yield defer.maybeDeferred(
self._get_result, test_signal, arg='test',
handlers_called=handlers_called
)
assert self.error_handler in handlers_called
assert self.ok_handler in handlers_called
self.assertEqual(len(l.records), 1)
record = l.records[0]
self.assertIn('error_handler', record.getMessage())
self.assertEqual(record.levelname, 'ERROR')
self.assertEqual(result[0][0], self.error_handler)
self.assert_(isinstance(result[0][1], Failure))
self.assertEqual(result[1], (self.ok_handler, "OK"))
dispatcher.disconnect(self.error_handler, signal=test_signal)
dispatcher.disconnect(self.ok_handler, signal=test_signal)
def _get_result(self, signal, *a, **kw):
return send_catch_log(signal, *a, **kw)
def error_handler(self, arg, handlers_called):
handlers_called.add(self.error_handler)
a = 1/0
def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == 'test'
return "OK"
class SendCatchLogDeferredTest(SendCatchLogTest):
def _get_result(self, signal, *a, **kw):
return send_catch_log_deferred(signal, *a, **kw)
class SendCatchLogDeferredTest2(SendCatchLogTest):
def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == 'test'
d = defer.Deferred()
reactor.callLater(0, d.callback, "OK")
return d
def _get_result(self, signal, *a, **kw):
return send_catch_log_deferred(signal, *a, **kw)
class SendCatchLogTest2(unittest.TestCase):
def test_error_logged_if_deferred_not_supported(self):
test_signal = object()
test_handler = lambda: defer.Deferred()
dispatcher.connect(test_handler, test_signal)
with LogCapture() as l:
send_catch_log(test_signal)
self.assertEqual(len(l.records), 1)
self.assertIn("Cannot return deferreds from signal handler", str(l))
dispatcher.disconnect(test_handler, test_signal)
| bsd-3-clause |
Dino0631/RedRain-Bot | cogs/lib/youtube_dl/extractor/orf.py | 17 | 10860 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
HEADRequest,
unified_strdate,
strip_jsonp,
int_or_none,
float_or_none,
determine_ext,
remove_end,
unescapeHTML,
)
class ORFTVthekIE(InfoExtractor):
IE_NAME = 'orf:tvthek'
IE_DESC = 'ORF TVthek'
_VALID_URL = r'https?://tvthek\.orf\.at/(?:[^/]+/)+(?P<id>\d+)'
_TESTS = [{
'url': 'http://tvthek.orf.at/program/Aufgetischt/2745173/Aufgetischt-Mit-der-Steirischen-Tafelrunde/8891389',
'playlist': [{
'md5': '2942210346ed779588f428a92db88712',
'info_dict': {
'id': '8896777',
'ext': 'mp4',
'title': 'Aufgetischt: Mit der Steirischen Tafelrunde',
'description': 'md5:c1272f0245537812d4e36419c207b67d',
'duration': 2668,
'upload_date': '20141208',
},
}],
'skip': 'Blocked outside of Austria / Germany',
}, {
'url': 'http://tvthek.orf.at/topic/Im-Wandel-der-Zeit/8002126/Best-of-Ingrid-Thurnher/7982256',
'info_dict': {
'id': '7982259',
'ext': 'mp4',
'title': 'Best of Ingrid Thurnher',
'upload_date': '20140527',
'description': 'Viele Jahre war Ingrid Thurnher das "Gesicht" der ZIB 2. Vor ihrem Wechsel zur ZIB 2 im Jahr 1995 moderierte sie unter anderem "Land und Leute", "Österreich-Bild" und "Niederösterreich heute".',
},
'params': {
'skip_download': True, # rtsp downloads
},
'_skip': 'Blocked outside of Austria / Germany',
}, {
'url': 'http://tvthek.orf.at/topic/Fluechtlingskrise/10463081/Heimat-Fremde-Heimat/13879132/Senioren-betreuen-Migrantenkinder/13879141',
'skip_download': True,
}, {
'url': 'http://tvthek.orf.at/profile/Universum/35429',
'skip_download': True,
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
data_jsb = self._parse_json(
self._search_regex(
r'<div[^>]+class=(["\']).*?VideoPlaylist.*?\1[^>]+data-jsb=(["\'])(?P<json>.+?)\2',
webpage, 'playlist', group='json'),
playlist_id, transform_source=unescapeHTML)['playlist']['videos']
def quality_to_int(s):
m = re.search('([0-9]+)', s)
if m is None:
return -1
return int(m.group(1))
entries = []
for sd in data_jsb:
video_id, title = sd.get('id'), sd.get('title')
if not video_id or not title:
continue
video_id = compat_str(video_id)
formats = [{
'preference': -10 if fd['delivery'] == 'hls' else None,
'format_id': '%s-%s-%s' % (
fd['delivery'], fd['quality'], fd['quality_string']),
'url': fd['src'],
'protocol': fd['protocol'],
'quality': quality_to_int(fd['quality']),
} for fd in sd['sources']]
# Check for geoblocking.
# There is a property is_geoprotection, but that's always false
geo_str = sd.get('geoprotection_string')
if geo_str:
try:
http_url = next(
f['url']
for f in formats
if re.match(r'^https?://.*\.mp4$', f['url']))
except StopIteration:
pass
else:
req = HEADRequest(http_url)
self._request_webpage(
req, video_id,
note='Testing for geoblocking',
errnote=((
'This video seems to be blocked outside of %s. '
'You may want to try the streaming-* formats.')
% geo_str),
fatal=False)
self._check_formats(formats, video_id)
self._sort_formats(formats)
subtitles = {}
for sub in sd.get('subtitles', []):
sub_src = sub.get('src')
if not sub_src:
continue
subtitles.setdefault(sub.get('lang', 'de-AT'), []).append({
'url': sub_src,
})
upload_date = unified_strdate(sd.get('created_date'))
entries.append({
'_type': 'video',
'id': video_id,
'title': title,
'formats': formats,
'subtitles': subtitles,
'description': sd.get('description'),
'duration': int_or_none(sd.get('duration_in_seconds')),
'upload_date': upload_date,
'thumbnail': sd.get('image_full_url'),
})
return {
'_type': 'playlist',
'entries': entries,
'id': playlist_id,
}
class ORFRadioIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
station = mobj.group('station')
show_date = mobj.group('date')
show_id = mobj.group('show')
if station == 'fm4':
show_id = '4%s' % show_id
data = self._download_json(
'http://audioapi.orf.at/%s/api/json/current/broadcast/%s/%s' % (station, show_id, show_date),
show_id
)
def extract_entry_dict(info, title, subtitle):
return {
'id': info['loopStreamId'].replace('.mp3', ''),
'url': 'http://loopstream01.apa.at/?channel=%s&id=%s' % (station, info['loopStreamId']),
'title': title,
'description': subtitle,
'duration': (info['end'] - info['start']) / 1000,
'timestamp': info['start'] / 1000,
'ext': 'mp3'
}
entries = [extract_entry_dict(t, data['title'], data['subtitle']) for t in data['streams']]
return {
'_type': 'playlist',
'id': show_id,
'title': data['title'],
'description': data['subtitle'],
'entries': entries
}
class ORFFM4IE(ORFRadioIE):
IE_NAME = 'orf:fm4'
IE_DESC = 'radio FM4'
_VALID_URL = r'https?://(?P<station>fm4)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
_TEST = {
'url': 'http://fm4.orf.at/player/20170107/CC',
'md5': '2b0be47375432a7ef104453432a19212',
'info_dict': {
'id': '2017-01-07_2100_tl_54_7DaysSat18_31295',
'ext': 'mp3',
'title': 'Solid Steel Radioshow',
'description': 'Die Mixshow von Coldcut und Ninja Tune.',
'duration': 3599,
'timestamp': 1483819257,
'upload_date': '20170107',
},
'skip': 'Shows from ORF radios are only available for 7 days.'
}
class ORFOE1IE(ORFRadioIE):
IE_NAME = 'orf:oe1'
IE_DESC = 'Radio Österreich 1'
_VALID_URL = r'https?://(?P<station>oe1)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
_TEST = {
'url': 'http://oe1.orf.at/player/20170108/456544',
'md5': '34d8a6e67ea888293741c86a099b745b',
'info_dict': {
'id': '2017-01-08_0759_tl_51_7DaysSun6_256141',
'ext': 'mp3',
'title': 'Morgenjournal',
'duration': 609,
'timestamp': 1483858796,
'upload_date': '20170108',
},
'skip': 'Shows from ORF radios are only available for 7 days.'
}
class ORFIPTVIE(InfoExtractor):
IE_NAME = 'orf:iptv'
IE_DESC = 'iptv.ORF.at'
_VALID_URL = r'https?://iptv\.orf\.at/(?:#/)?stories/(?P<id>\d+)'
_TEST = {
'url': 'http://iptv.orf.at/stories/2275236/',
'md5': 'c8b22af4718a4b4af58342529453e3e5',
'info_dict': {
'id': '350612',
'ext': 'flv',
'title': 'Weitere Evakuierungen um Vulkan Calbuco',
'description': 'md5:d689c959bdbcf04efeddedbf2299d633',
'duration': 68.197,
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20150425',
},
}
def _real_extract(self, url):
story_id = self._match_id(url)
webpage = self._download_webpage(
'http://iptv.orf.at/stories/%s' % story_id, story_id)
video_id = self._search_regex(
r'data-video(?:id)?="(\d+)"', webpage, 'video id')
data = self._download_json(
'http://bits.orf.at/filehandler/static-api/json/current/data.json?file=%s' % video_id,
video_id)[0]
duration = float_or_none(data['duration'], 1000)
video = data['sources']['default']
load_balancer_url = video['loadBalancerUrl']
abr = int_or_none(video.get('audioBitrate'))
vbr = int_or_none(video.get('bitrate'))
fps = int_or_none(video.get('videoFps'))
width = int_or_none(video.get('videoWidth'))
height = int_or_none(video.get('videoHeight'))
thumbnail = video.get('preview')
rendition = self._download_json(
load_balancer_url, video_id, transform_source=strip_jsonp)
f = {
'abr': abr,
'vbr': vbr,
'fps': fps,
'width': width,
'height': height,
}
formats = []
for format_id, format_url in rendition['redirect'].items():
if format_id == 'rtmp':
ff = f.copy()
ff.update({
'url': format_url,
'format_id': format_id,
})
formats.append(ff)
elif determine_ext(format_url) == 'f4m':
formats.extend(self._extract_f4m_formats(
format_url, video_id, f4m_id=format_id))
elif determine_ext(format_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(
format_url, video_id, 'mp4', m3u8_id=format_id))
else:
continue
self._sort_formats(formats)
title = remove_end(self._og_search_title(webpage), ' - iptv.ORF.at')
description = self._og_search_description(webpage)
upload_date = unified_strdate(self._html_search_meta(
'dc.date', webpage, 'upload date'))
return {
'id': video_id,
'title': title,
'description': description,
'duration': duration,
'thumbnail': thumbnail,
'upload_date': upload_date,
'formats': formats,
}
| gpl-3.0 |
Viktor-Evst/fixed-luigi | luigi/tools/luigi_grep.py | 12 | 2854 | #!/usr/bin/env python
import argparse
import json
from collections import defaultdict
from luigi import six
from luigi.six.moves.urllib.request import urlopen
class LuigiGrep(object):
def __init__(self, host, port):
self._host = host
self._port = port
@property
def graph_url(self):
return "http://{0}:{1}/api/graph".format(self._host, self._port)
def _fetch_json(self):
"""Returns the json representation of the dep graph"""
print("Fetching from url: " + self.graph_url)
resp = urlopen(self.graph_url).read()
return json.loads(resp.decode('utf-8'))
def _build_results(self, jobs, job):
job_info = jobs[job]
deps = job_info['deps']
deps_status = defaultdict(list)
for j in deps:
if j in jobs:
deps_status[jobs[j]['status']].append(j)
else:
deps_status['UNKNOWN'].append(j)
return {"name": job, "status": job_info['status'], "deps_by_status": deps_status}
def prefix_search(self, job_name_prefix):
"""searches for jobs matching the given job_name_prefix."""
json = self._fetch_json()
jobs = json['response']
for job in jobs:
if job.startswith(job_name_prefix):
yield self._build_results(jobs, job)
def status_search(self, status):
"""searches for jobs matching the given status"""
json = self._fetch_json()
jobs = json['response']
for job in jobs:
job_info = jobs[job]
if job_info['status'].lower() == status.lower():
yield self._build_results(jobs, job)
def main():
parser = argparse.ArgumentParser(
"luigi-grep is used to search for workflows using the luigi scheduler's json api")
parser.add_argument(
"--scheduler-host", default="localhost", help="hostname of the luigi scheduler")
parser.add_argument(
"--scheduler-port", default="8082", help="port of the luigi scheduler")
parser.add_argument("--prefix", help="prefix of a task query to search for", default=None)
parser.add_argument("--status", help="search for jobs with the given status", default=None)
args = parser.parse_args()
grep = LuigiGrep(args.scheduler_host, args.scheduler_port)
results = []
if args.prefix:
results = grep.prefix_search(args.prefix)
elif args.status:
results = grep.status_search(args.status)
for job in results:
print("{name}: {status}, Dependencies:".format(name=job['name'], status=job['status']))
for (status, jobs) in six.iteritems(job['deps_by_status']):
print(" status={status}".format(status=status))
for job in jobs:
print(" {job}".format(job=job))
if __name__ == '__main__':
main()
| apache-2.0 |
PythonScanClient/PyScanClient | Test/test_commands.py | 1 | 14881 | from __future__ import print_function
import unittest
import xml.etree.ElementTree as ET
from scan.commands import *
# These tests compare the XML as strings, even though for example
# both "<comment><text>Hello</text></comment>"
# and "<comment>\n <text>Hello</text>\n</comment>"
# would be acceptable XML representations.
# Changes to the XML could result in the need to update the tests.
class CommandTest(unittest.TestCase):
def testXMLEscape(self):
# Basic comment
cmd = Comment("Hello")
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<comment><text>Hello</text></comment>")
# Check proper escape of "less than"
cmd = Comment("Check for current < 10")
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<comment><text>Check for current < 10</text></comment>")
def testDelayCommand(self):
# Basic set
cmd = Delay(47.11)
print(cmd)
self.assertEqual(str(cmd), "Delay(47.11)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<delay><seconds>47.11</seconds></delay>")
def testConfig(self):
# Basic set
cmd = ConfigLog(True)
print(cmd)
self.assertEqual(str(cmd), "ConfigLog(True)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<config_log><automatic>true</automatic></config_log>")
def testSetCommand(self):
# Basic set
cmd = Set("some_device", 3.14)
print(cmd)
self.assertEqual(str(cmd), "Set('some_device', 3.14)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>3.14</value><wait>false</wait></set>")
# Handle numeric as well as string for value
cmd = Set("some_device", "Text")
print(cmd)
self.assertEqual(str(cmd), "Set('some_device', 'Text')")
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>\"Text\"</value><wait>false</wait></set>")
# With completion
cmd = Set("some_device", 3.14, completion=True)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>3.14</value><completion>true</completion><wait>false</wait></set>")
# .. and timeout
cmd = Set("some_device", 3.14, completion=True, timeout=5.0)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>3.14</value><completion>true</completion><wait>false</wait><timeout>5.0</timeout></set>")
# Setting a readback PV (the default one) enables wait-on-readback
cmd = Set("some_device", 3.14, completion=True, readback=True, tolerance=1, timeout=10.0)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>3.14</value><completion>true</completion><wait>true</wait><readback>some_device</readback><tolerance>1</tolerance><timeout>10.0</timeout></set>")
# Setting a readback PV (a specific one) enables wait-on-readback
cmd = Set("some_device", 3.14, completion=True, readback="some_device.RBV", tolerance=1, timeout=10.0)
print(cmd)
        self.assertEqual(str(cmd), "Set('some_device', 3.14, completion=True, timeout=10.0, readback='some_device.RBV', tolerance=1.000000)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>3.14</value><completion>true</completion><wait>true</wait><readback>some_device.RBV</readback><tolerance>1</tolerance><timeout>10.0</timeout></set>")
# Readback value different from the written value
cmd = Set("some_device", 3.14, completion=True, readback="other_device", readback_value=1, tolerance=1, timeout=10.0)
print(cmd)
        self.assertEqual(str(cmd), "Set('some_device', 3.14, completion=True, timeout=10.0, readback='other_device', readback_value=1, tolerance=1.000000)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>3.14</value><completion>true</completion><wait>true</wait><readback>other_device</readback><readback_value>1</readback_value><tolerance>1</tolerance><timeout>10.0</timeout></set>")
# Readback value uses string
cmd = Set("some_device", 3.14, completion=True, readback="status", readback_value='OK', tolerance=0, timeout=10.0)
print(cmd)
        self.assertEqual(str(cmd), "Set('some_device', 3.14, completion=True, timeout=10.0, readback='status', readback_value='OK')")
self.assertEqual(ET.tostring(cmd.genXML()), b"<set><device>some_device</device><value>3.14</value><completion>true</completion><wait>true</wait><readback>status</readback><readback_value>\"OK\"</readback_value><tolerance>0</tolerance><timeout>10.0</timeout></set>")
def testSequence(self):
# Nothing
cmd = Sequence()
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<sequence />")
# A few
cmd = Sequence(Comment("One"), Comment("Two"))
print(cmd.format())
self.assertEqual(ET.tostring(cmd.genXML()), b"<sequence><body><comment><text>One</text></comment><comment><text>Two</text></comment></body></sequence>")
# Sequences are 'flattened'
s1 = Sequence(Comment("One"), Comment("Two"))
s2 = Sequence(Comment("Four"), Comment("Five"))
seq1 = Sequence(s1, Comment("Three"), s2)
print(seq1.format())
seq2 = Sequence(Comment("One"), Comment("Two"), Comment("Three"), s2)
print(seq2.format())
self.assertEqual(ET.tostring(seq1.genXML()), ET.tostring(seq2.genXML()) )
def testParallel(self):
# Nothing
cmd = Parallel()
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<parallel />")
# A few
cmd = Parallel(Comment("One"), Comment("Two"))
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<parallel><body><comment><text>One</text></comment><comment><text>Two</text></comment></body></parallel>")
# .. as list
cmds = Comment("One"), Comment("Two")
cmd = Parallel(cmds)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<parallel><body><comment><text>One</text></comment><comment><text>Two</text></comment></body></parallel>")
cmd = Parallel(body=cmds)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<parallel><body><comment><text>One</text></comment><comment><text>Two</text></comment></body></parallel>")
# With other parameters
cmd = Parallel(cmds, timeout=10)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<parallel><timeout>10</timeout><body><comment><text>One</text></comment><comment><text>Two</text></comment></body></parallel>")
cmd = Parallel(cmds, errhandler="MyHandler")
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<parallel><body><comment><text>One</text></comment><comment><text>Two</text></comment></body><error_handler>MyHandler</error_handler></parallel>")
cmd = Parallel()
cmd.append(Comment("One"), Comment("Two"))
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<parallel><body><comment><text>One</text></comment><comment><text>Two</text></comment></body></parallel>")
def testLog(self):
# One device
cmd = Log("pv1")
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<log><devices><device>pv1</device></devices></log>")
# Nothing
cmd = Log()
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<log />")
# Several
cmd = Log("pv1", "pv2", "pv3")
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<log><devices><device>pv1</device><device>pv2</device><device>pv3</device></devices></log>")
# .. provided as list
devices_to_log = [ "pv1", "pv2", "pv3" ]
cmd = Log(devices_to_log)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<log><devices><device>pv1</device><device>pv2</device><device>pv3</device></devices></log>")
def testInclude(self):
cmd = Include("start.scn")
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<include><scan_file>start.scn</scan_file></include>")
cmd = Include("start.scn", "macro=value")
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<include><scan_file>start.scn</scan_file><macros>macro=value</macros></include>")
def testScript(self):
cmd = Script("MyCustomScript")
print(cmd)
self.assertEqual(str(cmd), "Script('MyCustomScript')")
self.assertEqual(ET.tostring(cmd.genXML()), b"<script><path>MyCustomScript</path></script>")
cmd = Script("MyCustomCommand", "arg1", 42.3)
print(cmd)
self.assertEqual(str(cmd), "Script('MyCustomCommand', 'arg1', 42.3)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<script><path>MyCustomCommand</path><arguments><argument>arg1</argument><argument>42.3</argument></arguments></script>")
# Arguments already provided as list
cmd = Script("MyCustomCommand", [ "arg1", 42.3 ])
print(cmd)
self.assertEqual(str(cmd), "Script('MyCustomCommand', 'arg1', 42.3)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<script><path>MyCustomCommand</path><arguments><argument>arg1</argument><argument>42.3</argument></arguments></script>")
def testWait(self):
cmd = Wait('device', 3.14)
print(cmd)
self.assertEqual(str(cmd), "Wait('device', 3.14)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<wait><device>device</device><value>3.14</value><comparison>EQUALS</comparison></wait>")
cmd = Wait('counts', 1000, comparison='increase by', timeout=5.0, errhandler='someHandler')
print(cmd)
self.assertEqual(str(cmd), "Wait('counts', 1000, comparison='increase by', timeout=5, errhandler='someHandler')")
self.assertEqual(ET.tostring(cmd.genXML()), b"<wait><device>counts</device><value>1000</value><comparison>INCREASE_BY</comparison><timeout>5.0</timeout><error_handler>someHandler</error_handler></wait>")
def testIf(self):
cmd = If('device', '>', 3.14)
print(cmd)
self.assertEqual(str(cmd), "If('device', '>', 3.14, tolerance=0.1)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<if><device>device</device><comparison>ABOVE</comparison><value>3.14</value><tolerance>0.1</tolerance><body /></if>")
cmd = If('device', '>', 3.14, [ Comment('BODY') ])
print(cmd)
self.assertEqual(str(cmd), "If('device', '>', 3.14, [ Comment('BODY') ], tolerance=0.1)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<if><device>device</device><comparison>ABOVE</comparison><value>3.14</value><tolerance>0.1</tolerance><body><comment><text>BODY</text></comment></body></if>")
def testLoop(self):
cmd = Loop('pv1', 1, 10, 0.1)
print(cmd)
self.assertEqual(str(cmd), "Loop('pv1', 1, 10, 0.1)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<loop><device>pv1</device><start>1</start><end>10</end><step>0.1</step><wait>false</wait><body /></loop>")
cmd = Loop('pv1', 1, 10, 0.1, Delay(5))
print(cmd)
self.assertEqual(str(cmd), "Loop('pv1', 1, 10, 0.1, [ Delay(5) ])")
cmd = Loop('pv1', 1, 10, 0.1, Delay(1), Delay(2))
print(cmd)
self.assertEqual(str(cmd), "Loop('pv1', 1, 10, 0.1, [ Delay(1), Delay(2) ])")
cmd = Loop('pv1', 1, 10, 0.1, body= [ Delay(1), Delay(2) ])
print(cmd)
self.assertEqual(str(cmd), "Loop('pv1', 1, 10, 0.1, [ Delay(1), Delay(2) ])")
self.assertEqual(ET.tostring(cmd.genXML()), b"<loop><device>pv1</device><start>1</start><end>10</end><step>0.1</step><wait>false</wait><body><delay><seconds>1</seconds></delay><delay><seconds>2</seconds></delay></body></loop>")
cmd = Loop('pv1', 1, 10, 0.1, Delay(1), Delay(2), readback=True)
print(cmd)
self.assertEqual(ET.tostring(cmd.genXML()), b"<loop><device>pv1</device><start>1</start><end>10</end><step>0.1</step><wait>true</wait><readback>pv1</readback><tolerance>0.01</tolerance><body><delay><seconds>1</seconds></delay><delay><seconds>2</seconds></delay></body></loop>")
cmd = Loop('pv1', 1, 10, 0.1, completion=True, timeout=10)
print(cmd)
self.assertEqual(str(cmd), "Loop('pv1', 1, 10, 0.1, completion=True, timeout=10)")
self.assertEqual(ET.tostring(cmd.genXML()), b"<loop><device>pv1</device><start>1</start><end>10</end><step>0.1</step><completion>true</completion><wait>false</wait><timeout>10</timeout><body /></loop>")
def testXMLSequence(self):
cmds = CommandSequence()
print(cmds)
self.assertEqual(len(cmds), 0)
print(cmds.genSCN())
cmds = CommandSequence(Comment('One'))
print(cmds)
self.assertEqual(len(cmds), 1)
print(cmds.genSCN())
cmds = CommandSequence(Comment('One'), Comment('Two'))
print(cmds)
self.assertEqual(len(cmds), 2)
print(cmds.genSCN())
self.assertEqual(b"<commands><comment><text>One</text></comment><comment><text>Two</text></comment></commands>",
cmds.genSCN().replace(b"\n", b"").replace(b" ", b""))
cmds = CommandSequence(Comment('One'))
cmds.append(Comment('Two'))
print(cmds)
self.assertEqual(len(cmds), 2)
print(cmds.genSCN())
cmds = CommandSequence( ( Comment('One'), Comment('Two') ) )
print(cmds)
self.assertEqual(len(cmds), 2)
print(cmds.genSCN())
cmds = CommandSequence(Comment('Example'), Loop('pos', 1, 5, 0.5, Set('run', 1), Delay(2), Set('run', 0)))
print(cmds)
def testCommandSequenceFormat(self):
cmds = CommandSequence(Parallel(
Sequence(Comment('Chain1'), Set('run', 1), Delay(2), Set('run', 0)),
Sequence(Comment('Chain2'), Set('foo', 1), Delay(2), Set('foo', 0))
))
print(cmds)
self.assertEqual(str(cmds), "[\n Parallel(\n Sequence(\n Comment('Chain1'),\n Set('run', 1),\n Delay(2),\n Set('run', 0)\n ),\n Sequence(\n Comment('Chain2'),\n Set('foo', 1),\n Delay(2),\n Set('foo', 0)\n )\n )\n]")
def testCommandAbstractMethodsMustBeImplemented(self):
class IncompleteCommand(Command):
pass
self.assertRaises(TypeError, IncompleteCommand)
if __name__ == "__main__":
unittest.main()
| epl-1.0 |
GHsimone/LCCS3basicCoder | __init__.py | 1 | 1622 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
LCCS3_BasicCoder
A QGIS plugin
 The plugin loads an LCCS3 legend, creates a form with all LCCS3 classes and allows the user to code selected polygons
-------------------
begin : 2015-04-16
copyright : (C) 2015 by Simone Maffei
email : [email protected]
git sha : $Format:%H$
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
# noinspection PyPep8Naming
def classFactory(iface): # pylint: disable=invalid-name
"""Load LCCS3_BasicCoder class from file LCCS3_BasicCoder.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
#
from .lccs3_basiccoder import LCCS3_BasicCoder
return LCCS3_BasicCoder(iface)
| gpl-2.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-iothub/azure/mgmt/iothub/operations/certificates_operations.py | 2 | 23304 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class CertificatesOperations(object):
"""CertificatesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: The version of the API. Constant value: "2017-07-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-07-01"
self.config = config
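    # Illustrative usage sketch (assumes an authenticated IotHubClient from
    # azure.mgmt.iothub; the resource names and the `value` attribute access
    # are placeholders/assumptions, not confirmed by this file):
    #   certs = iothub_client.certificates.list_by_iot_hub('my-rg', 'my-hub')
    #   for cert in certs.value:
    #       print(cert.name)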
def list_by_iot_hub(
self, resource_group_name, resource_name, custom_headers=None, raw=False, **operation_config):
"""Get the certificate list.
Returns the list of certificates.
:param resource_group_name: The name of the resource group that
contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`CertificateListDescription
<azure.mgmt.iothub.models.CertificateListDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`CertificateListDescription
<azure.mgmt.iothub.models.CertificateListDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorDetailsException<azure.mgmt.iothub.models.ErrorDetailsException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorDetailsException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CertificateListDescription', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get(
self, resource_group_name, resource_name, certificate_name, custom_headers=None, raw=False, **operation_config):
"""Get the certificate.
Returns the certificate.
:param resource_group_name: The name of the resource group that
contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param certificate_name: The name of the certificate
:type certificate_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`CertificateDescription
<azure.mgmt.iothub.models.CertificateDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`CertificateDescription
<azure.mgmt.iothub.models.CertificateDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorDetailsException<azure.mgmt.iothub.models.ErrorDetailsException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorDetailsException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CertificateDescription', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, resource_name, certificate_name, if_match=None, certificate=None, custom_headers=None, raw=False, **operation_config):
"""Upload the certificate to the IoT hub.
Adds new or replaces existing certificate.
:param resource_group_name: The name of the resource group that
contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param certificate_name: The name of the certificate
:type certificate_name: str
:param if_match: ETag of the Certificate. Do not specify for creating
a brand new certificate. Required to update an existing certificate.
:type if_match: str
:param certificate: base-64 representation of the X509 leaf
certificate .cer file or just .pem file content.
:type certificate: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`CertificateDescription
<azure.mgmt.iothub.models.CertificateDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`CertificateDescription
<azure.mgmt.iothub.models.CertificateDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorDetailsException<azure.mgmt.iothub.models.ErrorDetailsException>`
"""
certificate_description = models.CertificateBodyDescription(certificate=certificate)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(certificate_description, 'CertificateBodyDescription')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201, 200]:
raise models.ErrorDetailsException(self._deserialize, response)
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('CertificateDescription', response)
if response.status_code == 200:
deserialized = self._deserialize('CertificateDescription', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, resource_name, certificate_name, if_match, custom_headers=None, raw=False, **operation_config):
"""Delete an X509 certificate.
Deletes an existing X509 certificate or does nothing if it does not
exist.
:param resource_group_name: The name of the resource group that
contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param certificate_name: The name of the certificate
:type certificate_name: str
:param if_match: ETag of the Certificate.
:type if_match: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: None or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorDetailsException<azure.mgmt.iothub.models.ErrorDetailsException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
raise models.ErrorDetailsException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def generate_verification_code(
self, resource_group_name, resource_name, certificate_name, if_match, custom_headers=None, raw=False, **operation_config):
"""Generate verification code for proof of possession flow.
Generates verification code for proof of possession flow. The
verification code will be used to generate a leaf certificate.
:param resource_group_name: The name of the resource group that
contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param certificate_name: The name of the certificate
:type certificate_name: str
:param if_match: ETag of the Certificate.
:type if_match: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`CertificateWithNonceDescription
<azure.mgmt.iothub.models.CertificateWithNonceDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`CertificateWithNonceDescription
<azure.mgmt.iothub.models.CertificateWithNonceDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorDetailsException<azure.mgmt.iothub.models.ErrorDetailsException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}/generateVerificationCode'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorDetailsException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CertificateWithNonceDescription', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
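    # Proof-of-possession flow (sketch, inferred from the docstrings): the nonce
    # returned by generate_verification_code is signed into a verification (leaf)
    # certificate with the uploaded certificate's private key, and that leaf
    # certificate is then submitted through verify() below.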
def verify(
self, resource_group_name, resource_name, certificate_name, if_match, certificate=None, custom_headers=None, raw=False, **operation_config):
"""Verify certificate's private key possession.
        Verifies the certificate's private key possession by providing a leaf
        certificate issued by the pre-uploaded verifying certificate.
:param resource_group_name: The name of the resource group that
contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param certificate_name: The name of the certificate
:type certificate_name: str
:param if_match: ETag of the Certificate.
:type if_match: str
:param certificate: base-64 representation of X509 certificate .cer
file or just .pem file content.
:type certificate: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`CertificateDescription
<azure.mgmt.iothub.models.CertificateDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`CertificateDescription
<azure.mgmt.iothub.models.CertificateDescription>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises:
:class:`ErrorDetailsException<azure.mgmt.iothub.models.ErrorDetailsException>`
"""
certificate_verification_body = models.CertificateVerificationDescription(certificate=certificate)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}/verify'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(certificate_verification_body, 'CertificateVerificationDescription')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorDetailsException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CertificateDescription', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| mit |
sergio-incaser/odoo | addons/point_of_sale/report/__init__.py | 381 | 1238 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pos_users_product
import account_statement
import pos_receipt
import pos_invoice
import pos_lines
import pos_details
import pos_payment_report
import pos_report
import pos_order_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
odootr/odoo | addons/hr_recruitment/wizard/hr_recruitment_create_partner_job.py | 337 | 3434 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_recruitment_partner_create(osv.osv_memory):
_name = 'hr.recruitment.partner.create'
_description = 'Create Partner from job application'
_columns = {
'close': fields.boolean('Close job request'),
}
def view_init(self, cr, uid, fields_list, context=None):
case_obj = self.pool.get('hr.applicant')
if context is None:
context = {}
for case in case_obj.browse(cr, uid, context['active_ids'], context=context):
if case.partner_id:
raise osv.except_osv(_('Error!'),
_('A contact is already defined on this job request.'))
pass
def make_order(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
partner_obj = self.pool.get('res.partner')
case_obj = self.pool.get('hr.applicant')
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
result = mod_obj._get_id(cr, uid, 'base', 'view_res_partner_filter')
res = mod_obj.read(cr, uid, result, ['res_id'], context=context)
for case in case_obj.browse(cr, uid, context['active_ids'], context=context):
partner_id = partner_obj.search(cr, uid, [('name', '=', case.partner_name or case.name)], context=context)
if partner_id:
raise osv.except_osv(_('Error!'),_('A contact is already existing with the same name.'))
partner_id = partner_obj.create(cr, uid, {
'name': case.partner_name or case.name,
'user_id': case.user_id.id,
'comment': case.description,
'phone': case.partner_phone,
'mobile': case.partner_mobile,
'email': case.email_from
}, context=context)
case_obj.write(cr, uid, [case.id], {
'partner_id': partner_id,
}, context=context)
return {
'domain': "[]",
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'res.partner',
'res_id': int(partner_id),
'view_id': False,
'type': 'ir.actions.act_window',
'search_view_id': res['res_id']
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
slyphon/pants | src/python/pants/backend/python/register.py | 5 | 2297 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.core.targets.dependencies import Dependencies
from pants.backend.python.python_artifact import PythonArtifact
from pants.backend.python.python_requirement import PythonRequirement
from pants.backend.python.python_requirements import python_requirements
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.targets.python_tests import PythonTests
from pants.backend.python.tasks.pytest_run import PytestRun
from pants.backend.python.tasks.python_binary_create import PythonBinaryCreate
from pants.backend.python.tasks.python_repl import PythonRepl
from pants.backend.python.tasks.python_run import PythonRun
from pants.backend.python.tasks.setup_py import SetupPy
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.task_registrar import TaskRegistrar as task
def build_file_aliases():
return BuildFileAliases(
targets={
'python_binary': PythonBinary,
'python_library': PythonLibrary,
'python_requirement_library': PythonRequirementLibrary,
'python_test_suite': Dependencies, # Legacy alias.
'python_tests': PythonTests,
},
objects={
'python_requirement': PythonRequirement,
'python_artifact': PythonArtifact,
'setup_py': PythonArtifact,
},
context_aware_object_factories={
'python_requirements': BuildFileAliases.curry_context(python_requirements),
}
)
def register_goals():
task(name='python-binary-create', action=PythonBinaryCreate).install('binary')
task(name='pytest', action=PytestRun).install('test')
task(name='py', action=PythonRun).install('run')
task(name='py', action=PythonRepl).install('repl')
task(name='setup-py', action=SetupPy).install().with_description(
'Build setup.py-based Python projects from python_library targets.')
| apache-2.0 |
thinkopensolutions/geraldo | site/newsite/django_1_0/tests/regressiontests/humanize/tests.py | 19 | 3125 | import unittest
from datetime import timedelta, date
from django.template import Template, Context, add_to_builtins
from django.utils.dateformat import DateFormat
from django.utils.translation import ugettext as _
from django.utils.html import escape
add_to_builtins('django.contrib.humanize.templatetags.humanize')
class HumanizeTests(unittest.TestCase):
def humanize_tester(self, test_list, result_list, method):
# Using max below ensures we go through both lists
# However, if the lists are not equal length, this raises an exception
for index in xrange(max(len(test_list), len(result_list))):
test_content = test_list[index]
t = Template('{{ test_content|%s }}' % method)
rendered = t.render(Context(locals())).strip()
self.assertEqual(rendered, escape(result_list[index]),
msg="%s test failed, produced %s, should've produced %s" % (method, rendered, result_list[index]))
def test_ordinal(self):
test_list = ('1','2','3','4','11','12',
'13','101','102','103','111',
'something else')
result_list = ('1st', '2nd', '3rd', '4th', '11th',
'12th', '13th', '101st', '102nd', '103rd',
'111th', 'something else')
self.humanize_tester(test_list, result_list, 'ordinal')
def test_intcomma(self):
test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
'100', '1000', '10123', '10311', '1000000', '1234567.1234567')
result_list = ('100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.25',
'100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.1234567')
self.humanize_tester(test_list, result_list, 'intcomma')
def test_intword(self):
test_list = ('100', '1000000', '1200000', '1290000',
'1000000000','2000000000','6000000000000')
result_list = ('100', '1.0 million', '1.2 million', '1.3 million',
'1.0 billion', '2.0 billion', '6.0 trillion')
self.humanize_tester(test_list, result_list, 'intword')
def test_apnumber(self):
test_list = [str(x) for x in range(1, 11)]
result_list = (u'one', u'two', u'three', u'four', u'five', u'six',
u'seven', u'eight', u'nine', u'10')
self.humanize_tester(test_list, result_list, 'apnumber')
def test_naturalday(self):
from django.template import defaultfilters
today = date.today()
yesterday = today - timedelta(days=1)
tomorrow = today + timedelta(days=1)
someday = today - timedelta(days=10)
notdate = u"I'm not a date value"
test_list = (today, yesterday, tomorrow, someday, notdate)
someday_result = defaultfilters.date(someday)
result_list = (_(u'today'), _(u'yesterday'), _(u'tomorrow'),
someday_result, u"I'm not a date value")
self.humanize_tester(test_list, result_list, 'naturalday')
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 |
girving/tensorflow | tensorflow/python/ops/distributions/beta.py | 6 | 14810 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Beta distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Beta",
"BetaWithSoftplusConcentration",
]
_beta_sample_note = """Note: `x` must have dtype `self.dtype` and be in
`[0, 1].` It must have a shape compatible with `self.batch_shape()`."""
@tf_export("distributions.Beta")
class Beta(distribution.Distribution):
"""Beta distribution.
The Beta distribution is defined over the `(0, 1)` interval using parameters
`concentration1` (aka "alpha") and `concentration0` (aka "beta").
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z
Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)
```
where:
* `concentration1 = alpha`,
* `concentration0 = beta`,
* `Z` is the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The concentration parameters represent mean total counts of a `1` or a `0`,
i.e.,
```none
concentration1 = alpha = mean * total_concentration
concentration0 = beta = (1. - mean) * total_concentration
```
where `mean` in `(0, 1)` and `total_concentration` is a positive real number
representing a mean `total_count = concentration1 + concentration0`.
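  For example (illustrative numbers), `mean = 0.25` with `total_concentration = 8`
  gives `concentration1 = 2` and `concentration0 = 6`.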
Distribution parameters are automatically broadcast in all functions; see
examples for details.
Warning: The samples can be zero due to finite precision.
This happens more often when some of the concentrations are very small.
Make sure to round the samples to `np.finfo(dtype).tiny` before computing the
density.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Create a batch of three Beta distributions.
alpha = [1, 2, 3]
beta = [1, 2, 3]
dist = tfd.Beta(alpha, beta)
dist.sample([4, 5]) # Shape [4, 5, 3]
# `x` has three batch entries, each with two samples.
x = [[.1, .4, .5],
[.2, .3, .5]]
# Calculate the probability of each pair of samples under the corresponding
# distribution in `dist`.
dist.prob(x) # Shape [2, 3]
```
```python
# Create batch_shape=[2, 3] via parameter broadcast:
alpha = [[1.], [2]] # Shape [2, 1]
beta = [3., 4, 5] # Shape [3]
dist = tfd.Beta(alpha, beta)
# alpha broadcast as: [[1., 1, 1,],
# [2, 2, 2]]
# beta broadcast as: [[3., 4, 5],
# [3, 4, 5]]
# batch_Shape [2, 3]
dist.sample([4, 5]) # Shape [4, 5, 2, 3]
x = [.2, .3, .5]
# x will be broadcast as [[.2, .3, .5],
# [.2, .3, .5]],
# thus matching batch_shape [2, 3].
dist.prob(x) # Shape [2, 3]
```
Compute the gradients of samples w.r.t. the parameters:
```python
alpha = tf.constant(1.0)
beta = tf.constant(2.0)
dist = tfd.Beta(alpha, beta)
samples = dist.sample(5) # Shape [5]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, [alpha, beta])
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
concentration1=None,
concentration0=None,
validate_args=False,
allow_nan_stats=True,
name="Beta"):
"""Initialize a batch of Beta distributions.
Args:
concentration1: Positive floating-point `Tensor` indicating mean
number of successes; aka "alpha". Implies `self.dtype` and
`self.batch_shape`, i.e.,
`concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.
concentration0: Positive floating-point `Tensor` indicating mean
number of failures; aka "beta". Otherwise has same semantics as
`concentration1`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[concentration1, concentration0]) as name:
self._concentration1 = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration1, name="concentration1"),
validate_args)
self._concentration0 = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration0, name="concentration0"),
validate_args)
check_ops.assert_same_float_dtype([
self._concentration1, self._concentration0])
self._total_concentration = self._concentration1 + self._concentration0
super(Beta, self).__init__(
dtype=self._total_concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration1,
self._concentration0,
self._total_concentration],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(zip(
["concentration1", "concentration0"],
[ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2))
@property
def concentration1(self):
"""Concentration parameter associated with a `1` outcome."""
return self._concentration1
@property
def concentration0(self):
"""Concentration parameter associated with a `0` outcome."""
return self._concentration0
@property
def total_concentration(self):
"""Sum of concentration parameters."""
return self._total_concentration
def _batch_shape_tensor(self):
return array_ops.shape(self.total_concentration)
def _batch_shape(self):
return self.total_concentration.get_shape()
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
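    # Sampling sketch: this uses the standard identity that if
    # G1 ~ Gamma(concentration1, 1) and G2 ~ Gamma(concentration0, 1) are
    # independent, then G1 / (G1 + G2) ~ Beta(concentration1, concentration0).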
expanded_concentration1 = array_ops.ones_like(
self.total_concentration, dtype=self.dtype) * self.concentration1
expanded_concentration0 = array_ops.ones_like(
self.total_concentration, dtype=self.dtype) * self.concentration0
gamma1_sample = random_ops.random_gamma(
shape=[n],
alpha=expanded_concentration1,
dtype=self.dtype,
seed=seed)
gamma2_sample = random_ops.random_gamma(
shape=[n],
alpha=expanded_concentration0,
dtype=self.dtype,
seed=distribution_util.gen_new_seed(seed, "beta"))
beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
return beta_sample
@distribution_util.AppendDocstring(_beta_sample_note)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
@distribution_util.AppendDocstring(_beta_sample_note)
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
@distribution_util.AppendDocstring(_beta_sample_note)
def _log_cdf(self, x):
return math_ops.log(self._cdf(x))
@distribution_util.AppendDocstring(_beta_sample_note)
def _cdf(self, x):
return math_ops.betainc(self.concentration1, self.concentration0, x)
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return (math_ops.xlogy(self.concentration1 - 1., x) +
(self.concentration0 - 1.) * math_ops.log1p(-x))
def _log_normalization(self):
return (math_ops.lgamma(self.concentration1)
+ math_ops.lgamma(self.concentration0)
- math_ops.lgamma(self.total_concentration))
def _entropy(self):
return (
self._log_normalization()
- (self.concentration1 - 1.) * math_ops.digamma(self.concentration1)
- (self.concentration0 - 1.) * math_ops.digamma(self.concentration0)
+ ((self.total_concentration - 2.) *
math_ops.digamma(self.total_concentration)))
def _mean(self):
return self._concentration1 / self._total_concentration
def _variance(self):
return self._mean() * (1. - self._mean()) / (1. + self.total_concentration)
@distribution_util.AppendDocstring(
"""Note: The mode is undefined when `concentration1 <= 1` or
`concentration0 <= 1`. If `self.allow_nan_stats` is `True`, `NaN`
is used for undefined modes. If `self.allow_nan_stats` is `False` an
exception is raised when one or more modes are undefined.""")
def _mode(self):
mode = (self.concentration1 - 1.) / (self.total_concentration - 2.)
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
is_defined = math_ops.logical_and(self.concentration1 > 1.,
self.concentration0 > 1.)
return array_ops.where(is_defined, mode, nan)
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.concentration1,
message="Mode undefined for concentration1 <= 1."),
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.concentration0,
message="Mode undefined for concentration0 <= 1.")
], mode)
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of a concentration parameter."""
if not validate_args:
return concentration
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
concentration,
message="Concentration parameter must be positive."),
], concentration)
def _maybe_assert_valid_sample(self, x):
"""Checks the validity of a sample."""
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(x, message="sample must be positive"),
check_ops.assert_less(
x,
array_ops.ones([], self.dtype),
message="sample must be less than `1`."),
], x)
class BetaWithSoftplusConcentration(Beta):
"""Beta with softplus transform of `concentration1` and `concentration0`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.Beta(tf.nn.softplus(concentration1), "
"tf.nn.softplus(concentration2))` instead.",
warn_once=True)
def __init__(self,
concentration1,
concentration0,
validate_args=False,
allow_nan_stats=True,
name="BetaWithSoftplusConcentration"):
parameters = dict(locals())
with ops.name_scope(name, values=[concentration1,
concentration0]) as name:
super(BetaWithSoftplusConcentration, self).__init__(
concentration1=nn.softplus(concentration1,
name="softplus_concentration1"),
concentration0=nn.softplus(concentration0,
name="softplus_concentration0"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@kullback_leibler.RegisterKL(Beta, Beta)
def _kl_beta_beta(d1, d2, name=None):
"""Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.
Args:
d1: instance of a Beta distribution object.
d2: instance of a Beta distribution object.
name: (optional) Name to use for created operations.
default is "kl_beta_beta".
Returns:
Batchwise KL(d1 || d2)
"""
def delta(fn, is_property=True):
fn1 = getattr(d1, fn)
fn2 = getattr(d2, fn)
return (fn2 - fn1) if is_property else (fn2() - fn1())
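  # Closed form assembled below (standard result; a, b denote concentration1,
  # concentration0 of each distribution and psi is the digamma function):
  #   KL = lnB(a2, b2) - lnB(a1, b1)
  #        + (a1 - a2) * psi(a1) + (b1 - b2) * psi(b1)
  #        + ((a2 + b2) - (a1 + b1)) * psi(a1 + b1)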
with ops.name_scope(name, "kl_beta_beta", values=[
d1.concentration1,
d1.concentration0,
d1.total_concentration,
d2.concentration1,
d2.concentration0,
d2.total_concentration,
]):
return (delta("_log_normalization", is_property=False)
- math_ops.digamma(d1.concentration1) * delta("concentration1")
- math_ops.digamma(d1.concentration0) * delta("concentration0")
+ (math_ops.digamma(d1.total_concentration)
* delta("total_concentration")))
| apache-2.0 |
jerowe/bioconda-recipes | recipes/biopet-vcfstats/1.2/biopet-vcfstats.py | 80 | 3367 | #!/usr/bin/env python
#
# Wrapper script for starting the biopet-vcfstats JAR package
#
# This script is written for use with the Conda package manager and is copied
# from the peptide-shaker wrapper. Only the parameters are changed.
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
#
# This file was automatically generated by the sbt-bioconda plugin.
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
jar_file = 'VcfStats-assembly-1.2.jar'
default_jvm_mem_opts = []
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
    The return value is a 4-tuple of the form:
      (memory_options, prop_options, passthrough_options, exec_dir)
"""
mem_opts = []
prop_opts = []
pass_args = []
exec_dir = None
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
elif arg.startswith('--exec_dir='):
exec_dir = arg.split('=')[1].strip('"').strip("'")
if not os.path.exists(exec_dir):
shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args, exec_dir)
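def _example_argument_split():
    """Illustrative sketch only; the argument values below are made up and do
    not come from the original wrapper. Shows how jvm_opts() partitions a
    typical command line into the 4-tuple documented above."""
    mem, props, rest, exec_dir = jvm_opts(
        ['-Xmx2g', '-Dsamjdk.compression_level=5', '-I', 'input.vcf'])
    # mem -> ['-Xmx2g'], props -> ['-Dsamjdk.compression_level=5'],
    # rest -> ['-I', 'input.vcf'], exec_dir -> None
    return mem, props, rest, exec_dir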
def main():
"""
PeptideShaker updates files relative to the path of the jar file.
In a multiuser setting, the option --exec_dir="exec_dir"
can be used as the location for the peptide-shaker distribution.
    If the exec_dir does not exist,
we copy the jar file, lib, and resources to the exec_dir directory.
"""
java = java_executable()
(mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
| mit |
YuriGural/erpnext | erpnext/stock/doctype/purchase_receipt/test_purchase_receipt.py | 10 | 10589 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe, erpnext
import frappe.defaults
from frappe.utils import cint, flt, cstr, today
from erpnext.stock.doctype.purchase_receipt.purchase_receipt import make_purchase_invoice
from erpnext import set_perpetual_inventory
from erpnext.accounts.doctype.account.test_account import get_inventory_account
class TestPurchaseReceipt(unittest.TestCase):
def setUp(self):
frappe.db.set_value("Buying Settings", None, "allow_multiple_items", 1)
def test_make_purchase_invoice(self):
pr = make_purchase_receipt(do_not_save=True)
self.assertRaises(frappe.ValidationError, make_purchase_invoice, pr.name)
pr.submit()
pi = make_purchase_invoice(pr.name)
self.assertEquals(pi.doctype, "Purchase Invoice")
self.assertEquals(len(pi.get("items")), len(pr.get("items")))
# modify rate
pi.get("items")[0].rate = 200
self.assertRaises(frappe.ValidationError, frappe.get_doc(pi).submit)
def test_purchase_receipt_no_gl_entry(self):
company = frappe.db.get_value('Warehouse', '_Test Warehouse - _TC', 'company')
set_perpetual_inventory(0, company)
existing_bin_stock_value = frappe.db.get_value("Bin", {"item_code": "_Test Item",
"warehouse": "_Test Warehouse - _TC"}, "stock_value")
pr = make_purchase_receipt()
stock_value_difference = frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Purchase Receipt", "voucher_no": pr.name,
"item_code": "_Test Item", "warehouse": "_Test Warehouse - _TC"}, "stock_value_difference")
self.assertEqual(stock_value_difference, 250)
current_bin_stock_value = frappe.db.get_value("Bin", {"item_code": "_Test Item",
"warehouse": "_Test Warehouse - _TC"}, "stock_value")
self.assertEqual(current_bin_stock_value, existing_bin_stock_value + 250)
self.assertFalse(get_gl_entries("Purchase Receipt", pr.name))
def test_purchase_receipt_gl_entry(self):
pr = frappe.copy_doc(test_records[0])
set_perpetual_inventory(1, pr.company)
self.assertEqual(cint(erpnext.is_perpetual_inventory_enabled(pr.company)), 1)
pr.insert()
pr.submit()
gl_entries = get_gl_entries("Purchase Receipt", pr.name)
self.assertTrue(gl_entries)
stock_in_hand_account = get_inventory_account(pr.company, pr.get("items")[0].warehouse)
fixed_asset_account = get_inventory_account(pr.company, pr.get("items")[1].warehouse)
if stock_in_hand_account == fixed_asset_account:
expected_values = {
stock_in_hand_account: [750.0, 0.0],
"Stock Received But Not Billed - _TC": [0.0, 500.0],
"Expenses Included In Valuation - _TC": [0.0, 250.0]
}
else:
expected_values = {
stock_in_hand_account: [375.0, 0.0],
fixed_asset_account: [375.0, 0.0],
"Stock Received But Not Billed - _TC": [0.0, 500.0],
"Expenses Included In Valuation - _TC": [0.0, 250.0]
}
for gle in gl_entries:
self.assertEquals(expected_values[gle.account][0], gle.debit)
self.assertEquals(expected_values[gle.account][1], gle.credit)
pr.cancel()
self.assertFalse(get_gl_entries("Purchase Receipt", pr.name))
set_perpetual_inventory(0, pr.company)
def test_subcontracting(self):
from erpnext.stock.doctype.stock_entry.test_stock_entry import make_stock_entry
make_stock_entry(item_code="_Test Item", target="_Test Warehouse 1 - _TC", qty=100, basic_rate=100)
make_stock_entry(item_code="_Test Item Home Desktop 100", target="_Test Warehouse 1 - _TC",
qty=100, basic_rate=100)
pr = make_purchase_receipt(item_code="_Test FG Item", qty=10, rate=500, is_subcontracted="Yes")
self.assertEquals(len(pr.get("supplied_items")), 2)
rm_supp_cost = sum([d.amount for d in pr.get("supplied_items")])
self.assertEquals(pr.get("items")[0].rm_supp_cost, flt(rm_supp_cost, 2))
def test_serial_no_supplier(self):
pr = make_purchase_receipt(item_code="_Test Serialized Item With Series", qty=1)
self.assertEquals(frappe.db.get_value("Serial No", pr.get("items")[0].serial_no, "supplier"),
pr.supplier)
pr.cancel()
self.assertFalse(frappe.db.get_value("Serial No", pr.get("items")[0].serial_no, "warehouse"))
def test_rejected_serial_no(self):
pr = frappe.copy_doc(test_records[0])
pr.get("items")[0].item_code = "_Test Serialized Item With Series"
pr.get("items")[0].qty = 3
pr.get("items")[0].rejected_qty = 2
pr.get("items")[0].received_qty = 5
pr.get("items")[0].rejected_warehouse = "_Test Rejected Warehouse - _TC"
pr.insert()
pr.submit()
accepted_serial_nos = pr.get("items")[0].serial_no.split("\n")
self.assertEquals(len(accepted_serial_nos), 3)
for serial_no in accepted_serial_nos:
self.assertEquals(frappe.db.get_value("Serial No", serial_no, "warehouse"),
pr.get("items")[0].warehouse)
rejected_serial_nos = pr.get("items")[0].rejected_serial_no.split("\n")
self.assertEquals(len(rejected_serial_nos), 2)
for serial_no in rejected_serial_nos:
self.assertEquals(frappe.db.get_value("Serial No", serial_no, "warehouse"),
pr.get("items")[0].rejected_warehouse)
def test_purchase_return(self):
set_perpetual_inventory()
pr = make_purchase_receipt()
return_pr = make_purchase_receipt(is_return=1, return_against=pr.name, qty=-2)
# check sle
outgoing_rate = frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Purchase Receipt",
"voucher_no": return_pr.name}, "outgoing_rate")
self.assertEqual(outgoing_rate, 50)
# check gl entries for return
gl_entries = get_gl_entries("Purchase Receipt", return_pr.name)
self.assertTrue(gl_entries)
stock_in_hand_account = get_inventory_account(return_pr.company)
expected_values = {
stock_in_hand_account: [0.0, 100.0],
"Stock Received But Not Billed - _TC": [100.0, 0.0],
}
for gle in gl_entries:
self.assertEquals(expected_values[gle.account][0], gle.debit)
self.assertEquals(expected_values[gle.account][1], gle.credit)
set_perpetual_inventory(0)
def test_purchase_return_for_rejected_qty(self):
set_perpetual_inventory()
pr = make_purchase_receipt(received_qty=4, qty=2)
return_pr = make_purchase_receipt(is_return=1, return_against=pr.name, received_qty = -4, qty=-2)
actual_qty = frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Purchase Receipt",
"voucher_no": return_pr.name, 'warehouse': return_pr.items[0].rejected_warehouse}, "actual_qty")
self.assertEqual(actual_qty, -2)
set_perpetual_inventory(0)
def test_purchase_return_for_serialized_items(self):
def _check_serial_no_values(serial_no, field_values):
serial_no = frappe.get_doc("Serial No", serial_no)
for field, value in field_values.items():
self.assertEquals(cstr(serial_no.get(field)), value)
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos
pr = make_purchase_receipt(item_code="_Test Serialized Item With Series", qty=1)
serial_no = get_serial_nos(pr.get("items")[0].serial_no)[0]
_check_serial_no_values(serial_no, {
"warehouse": "_Test Warehouse - _TC",
"purchase_document_no": pr.name
})
return_pr = make_purchase_receipt(item_code="_Test Serialized Item With Series", qty=-1,
is_return=1, return_against=pr.name, serial_no=serial_no)
_check_serial_no_values(serial_no, {
"warehouse": "",
"purchase_document_no": pr.name,
"delivery_document_no": return_pr.name
})
def test_closed_purchase_receipt(self):
from erpnext.stock.doctype.purchase_receipt.purchase_receipt import update_purchase_receipt_status
pr = make_purchase_receipt(do_not_submit=True)
pr.submit()
update_purchase_receipt_status(pr.name, "Closed")
self.assertEquals(frappe.db.get_value("Purchase Receipt", pr.name, "status"), "Closed")
def test_pr_billing_status(self):
# PO -> PR1 -> PI and PO -> PI and PO -> PR2
from erpnext.buying.doctype.purchase_order.test_purchase_order import create_purchase_order
from erpnext.buying.doctype.purchase_order.purchase_order \
import make_purchase_receipt, make_purchase_invoice as make_purchase_invoice_from_po
po = create_purchase_order()
pr1 = make_purchase_receipt(po.name)
pr1.posting_date = today()
pr1.posting_time = "10:00"
pr1.get("items")[0].received_qty = 2
pr1.get("items")[0].qty = 2
pr1.submit()
pi1 = make_purchase_invoice(pr1.name)
pi1.submit()
pr1.load_from_db()
self.assertEqual(pr1.per_billed, 100)
pi2 = make_purchase_invoice_from_po(po.name)
pi2.get("items")[0].qty = 4
pi2.submit()
pr2 = make_purchase_receipt(po.name)
pr2.posting_date = today()
pr2.posting_time = "08:00"
pr2.get("items")[0].received_qty = 5
pr2.get("items")[0].qty = 5
pr2.submit()
pr1.load_from_db()
self.assertEqual(pr1.get("items")[0].billed_amt, 1000)
self.assertEqual(pr1.per_billed, 100)
self.assertEqual(pr1.status, "Completed")
self.assertEqual(pr2.get("items")[0].billed_amt, 2000)
self.assertEqual(pr2.per_billed, 80)
self.assertEqual(pr2.status, "To Bill")
def get_gl_entries(voucher_type, voucher_no):
return frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type=%s and voucher_no=%s
order by account desc""", (voucher_type, voucher_no), as_dict=1)
def make_purchase_receipt(**args):
frappe.db.set_value("Buying Settings", None, "allow_multiple_items", 1)
pr = frappe.new_doc("Purchase Receipt")
args = frappe._dict(args)
pr.posting_date = args.posting_date or today()
if args.posting_time:
pr.posting_time = args.posting_time
pr.company = args.company or "_Test Company"
pr.supplier = args.supplier or "_Test Supplier"
pr.is_subcontracted = args.is_subcontracted or "No"
pr.supplier_warehouse = "_Test Warehouse 1 - _TC"
pr.currency = args.currency or "INR"
pr.is_return = args.is_return
pr.return_against = args.return_against
qty = args.qty or 5
received_qty = args.received_qty or qty
rejected_qty = args.rejected_qty or flt(received_qty) - flt(qty)
pr.append("items", {
"item_code": args.item or args.item_code or "_Test Item",
"warehouse": args.warehouse or "_Test Warehouse - _TC",
"qty": qty,
"received_qty": received_qty,
"rejected_qty": rejected_qty,
"rejected_warehouse": args.rejected_warehouse or "_Test Rejected Warehouse - _TC" if rejected_qty != 0 else "",
"rate": args.rate or 50,
"conversion_factor": 1.0,
"serial_no": args.serial_no,
"stock_uom": "_Test UOM"
})
if not args.do_not_save:
pr.insert()
if not args.do_not_submit:
pr.submit()
return pr
test_dependencies = ["BOM", "Item Price"]
test_records = frappe.get_test_records('Purchase Receipt')
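def _example_receipt_flow():
    # Illustrative sketch (assumes a running Frappe/ERPNext test site with the
    # "_Test ..." fixtures installed); the typical flow the helper above supports.
    pr = make_purchase_receipt(item_code="_Test Item", qty=2, rate=100)
    ledger_rows = get_gl_entries("Purchase Receipt", pr.name)
    return pr, ledger_rows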
| gpl-3.0 |
yasoob/PythonRSSReader | venv/lib/python2.7/dist-packages/zeitgeist/mimetypes.py | 3 | 10501 | # -.- coding: utf-8 -.-
# Zeitgeist
#
# Copyright © 2010 Markus Korn <[email protected]>
# Copyright © 2010 Canonical Ltd.
# By Mikkel Kamstrup Erlandsen <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
from datamodel import Interpretation, Manifestation
__all__ = [
"get_interpretation_for_mimetype",
"get_manifestation_for_uri",
]
class RegExpr(object):
""" Helper class which holds a compiled regular expression
and its pattern."""
def __init__(self, pattern):
self.pattern = pattern
self.regex = re.compile(self.pattern)
def __str__(self):
return self.pattern
def __getattr__(self, name):
return getattr(self.regex, name)
def make_regex_tuple(*items):
return tuple((RegExpr(k), v) for k, v in items)
def get_interpretation_for_mimetype(mimetype):
""" get interpretation for a given mimetype, returns :const:`None`
if none of the predefined interpretations matches
"""
interpretation = MIMES.get(mimetype, None)
if interpretation is not None:
return interpretation
for pattern, interpretation in MIMES_REGEX:
if pattern.match(mimetype):
return interpretation
return None
def get_manifestation_for_uri(uri):
""" Lookup Manifestation for a given uri based on the scheme part,
returns :const:`None` if no suitable manifestation is found
"""
for scheme, manifestation in SCHEMES:
if uri.startswith(scheme):
return manifestation
return None
MIMES = {
# x-applix-*
"application/x-applix-word": Interpretation.PAGINATED_TEXT_DOCUMENT,
"application/x-applix-spreadsheet": Interpretation.SPREADSHEET,
"application/x-applix-presents": Interpretation.PRESENTATION,
# x-kword, x-kspread, x-kpresenter, x-killustrator
"application/x-kword": Interpretation.PAGINATED_TEXT_DOCUMENT,
"application/x-kspread": Interpretation.SPREADSHEET,
"application/x-kpresenter": Interpretation.PRESENTATION,
"application/x-killustrator": Interpretation.VECTOR_IMAGE,
# MS
"application/ms-powerpoint": Interpretation.PRESENTATION,
"application/vnd.ms-powerpoint": Interpretation.PRESENTATION,
"application/msword": Interpretation.PAGINATED_TEXT_DOCUMENT,
"application/msexcel": Interpretation.SPREADSHEET,
"application/ms-excel": Interpretation.SPREADSHEET,
"application/vnd.ms-excel": Interpretation.SPREADSHEET,
# pdf, postscript et al
"application/pdf": Interpretation.PAGINATED_TEXT_DOCUMENT,
"application/postscript": Interpretation.PAGINATED_TEXT_DOCUMENT,
"application/ps": Interpretation.PAGINATED_TEXT_DOCUMENT,
"application/rtf": Interpretation.PAGINATED_TEXT_DOCUMENT,
"image/vnd.djvu": Interpretation.PAGINATED_TEXT_DOCUMENT,
# GNOME office
"application/x-abiword": Interpretation.PAGINATED_TEXT_DOCUMENT,
"application/x-gnucash": Interpretation.SPREADSHEET,
"application/x-gnumeric": Interpretation.SPREADSHEET,
# TeX stuff
"text/x-tex": Interpretation.SOURCE_CODE,
"text/x-latex": Interpretation.SOURCE_CODE,
# Plain text
"text/plain": Interpretation.TEXT_DOCUMENT,
"text/csv": Interpretation.TEXT_DOCUMENT,
# HTML files on disk are always HTML_DOCUMENTS while online we should
# assume them to be WEBSITEs. By default we anticipate local files...
"text/html": Interpretation.HTML_DOCUMENT,
# Image types
"application/vnd.corel-draw": Interpretation.VECTOR_IMAGE,
"image/jpeg": Interpretation.RASTER_IMAGE,
"image/pjpeg": Interpretation.RASTER_IMAGE,
"image/png": Interpretation.RASTER_IMAGE,
"image/tiff": Interpretation.RASTER_IMAGE,
"image/gif": Interpretation.RASTER_IMAGE,
"image/x-xcf": Interpretation.RASTER_IMAGE,
"image/svg+xml": Interpretation.VECTOR_IMAGE,
"image/vnd.microsoft.icon": Interpretation.ICON,
# Audio
"application/ogg": Interpretation.AUDIO,
"audio/x-scpls": Interpretation.MEDIA_LIST,
# Development files
"application/ecmascript": Interpretation.SOURCE_CODE,
"application/javascript": Interpretation.SOURCE_CODE,
"application/json": Interpretation.SOURCE_CODE,
"application/soap+xml": Interpretation.SOURCE_CODE,
"application/xml-dtd": Interpretation.SOURCE_CODE,
"application/x-csh": Interpretation.SOURCE_CODE,
"application/x-designer": Interpretation.SOURCE_CODE,
"application/x-dia-diagram": Interpretation.SOURCE_CODE,
"application/x-fluid": Interpretation.SOURCE_CODE,
"application/x-glade": Interpretation.SOURCE_CODE,
"application/xhtml+xml": Interpretation.SOURCE_CODE,
"application/x-java-archive": Interpretation.SOURCE_CODE,
"application/x-javascript": Interpretation.SOURCE_CODE,
"application/x-m4": Interpretation.SOURCE_CODE,
"application/xml": Interpretation.SOURCE_CODE,
"application/x-perl": Interpretation.SOURCE_CODE,
"application/x-php": Interpretation.SOURCE_CODE,
"application/x-ruby": Interpretation.SOURCE_CODE,
"application/x-shellscript": Interpretation.SOURCE_CODE,
"application/x-sql": Interpretation.SOURCE_CODE,
"text/css": Interpretation.SOURCE_CODE,
"text/javascript": Interpretation.SOURCE_CODE,
"text/xml": Interpretation.SOURCE_CODE,
"text/x-c": Interpretation.SOURCE_CODE,
"text/x-c++": Interpretation.SOURCE_CODE,
"text/x-chdr": Interpretation.SOURCE_CODE,
"text/x-copying": Interpretation.SOURCE_CODE,
"text/x-credits": Interpretation.SOURCE_CODE,
"text/x-csharp": Interpretation.SOURCE_CODE,
"text/x-c++src": Interpretation.SOURCE_CODE,
"text/x-csrc": Interpretation.SOURCE_CODE,
"text/x-dsrc": Interpretation.SOURCE_CODE,
"text/x-eiffel": Interpretation.SOURCE_CODE,
"text/x-gettext-translation": Interpretation.SOURCE_CODE,
"text/x-gettext-translation-template": Interpretation.SOURCE_CODE,
"text/x-haskell": Interpretation.SOURCE_CODE,
"text/x-idl": Interpretation.SOURCE_CODE,
"text/x-java": Interpretation.SOURCE_CODE,
"text/x-lisp": Interpretation.SOURCE_CODE,
"text/x-lua": Interpretation.SOURCE_CODE,
"text/x-makefile": Interpretation.SOURCE_CODE,
"text/x-objcsrc": Interpretation.SOURCE_CODE,
"text/x-ocaml": Interpretation.SOURCE_CODE,
"text/x-pascal": Interpretation.SOURCE_CODE,
"text/x-patch": Interpretation.SOURCE_CODE,
"text/x-python": Interpretation.SOURCE_CODE,
"text/x-sql": Interpretation.SOURCE_CODE,
"text/x-tcl": Interpretation.SOURCE_CODE,
"text/x-troff": Interpretation.SOURCE_CODE,
"text/x-vala": Interpretation.SOURCE_CODE,
"text/x-vhdl": Interpretation.SOURCE_CODE,
"text/x-m4": Interpretation.SOURCE_CODE,
"text/x-jquery-tmpl": Interpretation.SOURCE_CODE,
# Email
"message/alternative": Interpretation.EMAIL,
"message/partial": Interpretation.EMAIL,
"message/related": Interpretation.EMAIL,
# People
"text/vcard": Interpretation.CONTACT,
# Archives
"application/zip": Interpretation.ARCHIVE,
"application/x-gzip": Interpretation.ARCHIVE,
"application/x-bzip": Interpretation.ARCHIVE,
"application/x-lzma": Interpretation.ARCHIVE,
"application/x-archive": Interpretation.ARCHIVE,
"application/x-7z-compressed": Interpretation.ARCHIVE,
"application/x-bzip-compressed-tar": Interpretation.ARCHIVE,
"application/x-lzma-compressed-tar": Interpretation.ARCHIVE,
"application/x-compressed-tar": Interpretation.ARCHIVE,
"application/x-stuffit": Interpretation.ARCHIVE,
# Software and packages
"application/x-deb": Interpretation.SOFTWARE,
"application/x-rpm": Interpretation.SOFTWARE,
"application/x-ms-dos-executable": Interpretation.SOFTWARE,
"application/x-executable": Interpretation.SOFTWARE,
"application/x-desktop": Interpretation.SOFTWARE,
"application/x-shockwave-flash": Interpretation.EXECUTABLE,
# File systems
"application/x-cd-image": Interpretation.FILESYSTEM_IMAGE,
"inode/directory": Interpretation.FOLDER,
}
MIMES_REGEX = make_regex_tuple(
# Star Office and OO.org
("application/vnd.oasis.opendocument.text.*", Interpretation.PAGINATED_TEXT_DOCUMENT),
("application/vnd.oasis.opendocument.presentation.*", Interpretation.PRESENTATION),
("application/vnd.oasis.opendocument.spreadsheet.*", Interpretation.SPREADSHEET),
("application/vnd.oasis.opendocument.graphics.*", Interpretation.VECTOR_IMAGE),
("application/vnd\\..*", Interpretation.DOCUMENT),
# x-applix-*
("application/x-applix-.*", Interpretation.DOCUMENT),
# MS
("application/vnd.ms-excel.*", Interpretation.SPREADSHEET),
("application/vnd.ms-powerpoint.*", Interpretation.PRESENTATION),
("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.*", Interpretation.SPREADSHEET),
("application/vnd.openxmlformats-officedocument.presentationml.presentation.*", Interpretation.PRESENTATION),
("application/vnd.openxmlformats-officedocument.wordprocessingml.document.*", Interpretation.PAGINATED_TEXT_DOCUMENT),
# TeX stuff
(".*/x-dvi", Interpretation.PAGINATED_TEXT_DOCUMENT),
# Image types
("image/.*", Interpretation.IMAGE),
# Audio
("audio/.*", Interpretation.AUDIO),
# Video
("video/.*", Interpretation.VIDEO),
)
SCHEMES = tuple((
("file://", Manifestation.FILE_DATA_OBJECT),
("http://", Manifestation.WEB_DATA_OBJECT),
("https://", Manifestation.WEB_DATA_OBJECT),
("ssh://", Manifestation.REMOTE_DATA_OBJECT),
("sftp://", Manifestation.REMOTE_DATA_OBJECT),
("ftp://", Manifestation.REMOTE_DATA_OBJECT),
("dav://", Manifestation.REMOTE_DATA_OBJECT),
("davs://", Manifestation.REMOTE_DATA_OBJECT),
("smb://", Manifestation.REMOTE_DATA_OBJECT),
))
# vim:noexpandtab:ts=4:sw=4
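def _example_lookups():
    # Illustrative sketch, not part of the original module: exercises the two
    # public helpers above; the expected values follow from the tables defined here.
    assert get_interpretation_for_mimetype("image/png") == Interpretation.RASTER_IMAGE
    assert get_interpretation_for_mimetype("audio/x-flac") == Interpretation.AUDIO   # regex fallback
    assert get_interpretation_for_mimetype("chemical/x-pdb") is None                 # no match
    assert get_manifestation_for_uri("sftp://host/notes.txt") == Manifestation.REMOTE_DATA_OBJECT
    assert get_manifestation_for_uri("mailto:[email protected]") is None               # unknown scheme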
| mit |
sodafree/backend | build/lib.linux-i686-2.7/django/contrib/gis/tests/distapp/tests.py | 96 | 19051 | from __future__ import absolute_import
from django.db import connection
from django.db.models import Q
from django.contrib.gis.geos import GEOSGeometry, LineString
from django.contrib.gis.measure import D # alias for Distance
from django.contrib.gis.tests.utils import oracle, postgis, spatialite, no_oracle, no_spatialite
from django.test import TestCase
from .models import (AustraliaCity, Interstate, SouthTexasInterstate,
SouthTexasCity, SouthTexasCityFt, CensusZipcode, SouthTexasZipcode)
class DistanceTest(TestCase):
# A point we are testing distances with -- using a WGS84
    # coordinate that'll be implicitly transformed to
    # the coordinate system of the field, EPSG:32140 (Texas South Central
# w/units in meters)
stx_pnt = GEOSGeometry('POINT (-95.370401017314293 29.704867409475465)', 4326)
# Another one for Australia
au_pnt = GEOSGeometry('POINT (150.791 -34.4919)', 4326)
def get_names(self, qs):
cities = [c.name for c in qs]
cities.sort()
return cities
def test01_init(self):
"Test initialization of distance models."
self.assertEqual(9, SouthTexasCity.objects.count())
self.assertEqual(9, SouthTexasCityFt.objects.count())
self.assertEqual(11, AustraliaCity.objects.count())
self.assertEqual(4, SouthTexasZipcode.objects.count())
self.assertEqual(4, CensusZipcode.objects.count())
self.assertEqual(1, Interstate.objects.count())
self.assertEqual(1, SouthTexasInterstate.objects.count())
@no_spatialite
def test02_dwithin(self):
"Testing the `dwithin` lookup type."
# Distances -- all should be equal (except for the
# degree/meter pair in au_cities, that's somewhat
# approximate).
tx_dists = [(7000, 22965.83), D(km=7), D(mi=4.349)]
au_dists = [(0.5, 32000), D(km=32), D(mi=19.884)]
# Expected cities for Australia and Texas.
tx_cities = ['Downtown Houston', 'Southside Place']
au_cities = ['Mittagong', 'Shellharbour', 'Thirroul', 'Wollongong']
        # Performing distance queries on two projected coordinate systems, one
        # with units in meters and the other in units of U.S. survey feet.
for dist in tx_dists:
if isinstance(dist, tuple): dist1, dist2 = dist
else: dist1 = dist2 = dist
qs1 = SouthTexasCity.objects.filter(point__dwithin=(self.stx_pnt, dist1))
qs2 = SouthTexasCityFt.objects.filter(point__dwithin=(self.stx_pnt, dist2))
for qs in qs1, qs2:
self.assertEqual(tx_cities, self.get_names(qs))
# Now performing the `dwithin` queries on a geodetic coordinate system.
for dist in au_dists:
if isinstance(dist, D) and not oracle: type_error = True
else: type_error = False
if isinstance(dist, tuple):
if oracle: dist = dist[1]
else: dist = dist[0]
# Creating the query set.
qs = AustraliaCity.objects.order_by('name')
if type_error:
# A ValueError should be raised on PostGIS when trying to pass
# Distance objects into a DWithin query using a geodetic field.
self.assertRaises(ValueError, AustraliaCity.objects.filter(point__dwithin=(self.au_pnt, dist)).count)
else:
self.assertEqual(au_cities, self.get_names(qs.filter(point__dwithin=(self.au_pnt, dist))))
def test03a_distance_method(self):
"Testing the `distance` GeoQuerySet method on projected coordinate systems."
# The point for La Grange, TX
lagrange = GEOSGeometry('POINT(-96.876369 29.905320)', 4326)
# Reference distances in feet and in meters. Got these values from
# using the provided raw SQL statements.
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 32140)) FROM distapp_southtexascity;
m_distances = [147075.069813, 139630.198056, 140888.552826,
138809.684197, 158309.246259, 212183.594374,
70870.188967, 165337.758878, 139196.085105]
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 2278)) FROM distapp_southtexascityft;
        # Oracle 11 thinks this is not a projected coordinate system, so it's
# not tested.
ft_distances = [482528.79154625, 458103.408123001, 462231.860397575,
455411.438904354, 519386.252102563, 696139.009211594,
232513.278304279, 542445.630586414, 456679.155883207]
# Testing using different variations of parameters and using models
# with different projected coordinate systems.
dist1 = SouthTexasCity.objects.distance(lagrange, field_name='point')
dist2 = SouthTexasCity.objects.distance(lagrange) # Using GEOSGeometry parameter
if spatialite or oracle:
dist_qs = [dist1, dist2]
else:
dist3 = SouthTexasCityFt.objects.distance(lagrange.ewkt) # Using EWKT string parameter.
dist4 = SouthTexasCityFt.objects.distance(lagrange)
dist_qs = [dist1, dist2, dist3, dist4]
# Original query done on PostGIS, have to adjust AlmostEqual tolerance
# for Oracle.
if oracle: tol = 2
else: tol = 5
# Ensuring expected distances are returned for each distance queryset.
for qs in dist_qs:
for i, c in enumerate(qs):
self.assertAlmostEqual(m_distances[i], c.distance.m, tol)
self.assertAlmostEqual(ft_distances[i], c.distance.survey_ft, tol)
@no_spatialite
    def test03b_distance_method(self):
        "Testing the `distance` GeoQuerySet method on geodetic coordinate systems."
if oracle: tol = 2
else: tol = 5
# Testing geodetic distance calculation with a non-point geometry
# (a LineString of Wollongong and Shellharbour coords).
ls = LineString( ( (150.902, -34.4245), (150.87, -34.5789) ) )
if oracle or connection.ops.geography:
# Reference query:
# SELECT ST_distance_sphere(point, ST_GeomFromText('LINESTRING(150.9020 -34.4245,150.8700 -34.5789)', 4326)) FROM distapp_australiacity ORDER BY name;
distances = [1120954.92533513, 140575.720018241, 640396.662906304,
60580.9693849269, 972807.955955075, 568451.8357838,
40435.4335201384, 0, 68272.3896586844, 12375.0643697706, 0]
qs = AustraliaCity.objects.distance(ls).order_by('name')
for city, distance in zip(qs, distances):
# Testing equivalence to within a meter.
self.assertAlmostEqual(distance, city.distance.m, 0)
else:
            # PostGIS 1.4 and below is limited to distance queries only
# to/from point geometries, check for raising of ValueError.
self.assertRaises(ValueError, AustraliaCity.objects.distance, ls)
self.assertRaises(ValueError, AustraliaCity.objects.distance, ls.wkt)
# Got the reference distances using the raw SQL statements:
# SELECT ST_distance_spheroid(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326), 'SPHEROID["WGS 84",6378137.0,298.257223563]') FROM distapp_australiacity WHERE (NOT (id = 11));
# SELECT ST_distance_sphere(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326)) FROM distapp_australiacity WHERE (NOT (id = 11)); st_distance_sphere
if connection.ops.postgis and connection.ops.proj_version_tuple() >= (4, 7, 0):
# PROJ.4 versions 4.7+ have updated datums, and thus different
# distance values.
spheroid_distances = [60504.0628957201, 77023.9489850262, 49154.8867574404,
90847.4358768573, 217402.811919332, 709599.234564757,
640011.483550888, 7772.00667991925, 1047861.78619339,
1165126.55236034]
sphere_distances = [60580.9693849267, 77144.0435286473, 49199.4415344719,
90804.7533823494, 217713.384600405, 709134.127242793,
639828.157159169, 7786.82949717788, 1049204.06569028,
1162623.7238134]
else:
spheroid_distances = [60504.0628825298, 77023.948962654, 49154.8867507115,
90847.435881812, 217402.811862568, 709599.234619957,
640011.483583758, 7772.00667666425, 1047861.7859506,
1165126.55237647]
sphere_distances = [60580.7612632291, 77143.7785056615, 49199.2725132184,
90804.4414289463, 217712.63666124, 709131.691061906,
639825.959074112, 7786.80274606706, 1049200.46122281,
1162619.7297006]
# Testing with spheroid distances first.
hillsdale = AustraliaCity.objects.get(name='Hillsdale')
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point, spheroid=True)
for i, c in enumerate(qs):
self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol)
if postgis:
# PostGIS uses sphere-only distances by default, testing these as well.
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point)
for i, c in enumerate(qs):
self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol)
@no_oracle # Oracle already handles geographic distance calculation.
def test03c_distance_method(self):
"Testing the `distance` GeoQuerySet method used with `transform` on a geographic field."
# Normally you can't compute distances from a geometry field
# that is not a PointField (on PostGIS 1.4 and below).
if not connection.ops.geography:
self.assertRaises(ValueError, CensusZipcode.objects.distance, self.stx_pnt)
# We'll be using a Polygon (created by buffering the centroid
# of 77005 to 100m) -- which aren't allowed in geographic distance
# queries normally, however our field has been transformed to
# a non-geographic system.
z = SouthTexasZipcode.objects.get(name='77005')
# Reference query:
# SELECT ST_Distance(ST_Transform("distapp_censuszipcode"."poly", 32140), ST_GeomFromText('<buffer_wkt>', 32140)) FROM "distapp_censuszipcode";
dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242]
# Having our buffer in the SRID of the transformation and of the field
# -- should get the same results. The first buffer has no need for
# transformation SQL because it is the same SRID as what was given
# to `transform()`. The second buffer will need to be transformed,
# however.
buf1 = z.poly.centroid.buffer(100)
buf2 = buf1.transform(4269, clone=True)
ref_zips = ['77002', '77025', '77401']
for buf in [buf1, buf2]:
qs = CensusZipcode.objects.exclude(name='77005').transform(32140).distance(buf)
self.assertEqual(ref_zips, self.get_names(qs))
for i, z in enumerate(qs):
self.assertAlmostEqual(z.distance.m, dists_m[i], 5)
def test04_distance_lookups(self):
"Testing the `distance_lt`, `distance_gt`, `distance_lte`, and `distance_gte` lookup types."
# Retrieving the cities within a 20km 'donut' w/a 7km radius 'hole'
# (thus, Houston and Southside place will be excluded as tested in
# the `test02_dwithin` above).
qs1 = SouthTexasCity.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
# Can't determine the units on SpatiaLite from PROJ.4 string, and
# Oracle 11 incorrectly thinks it is not projected.
if spatialite or oracle:
dist_qs = (qs1,)
else:
qs2 = SouthTexasCityFt.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
dist_qs = (qs1, qs2)
for qs in dist_qs:
cities = self.get_names(qs)
self.assertEqual(cities, ['Bellaire', 'Pearland', 'West University Place'])
# Doing a distance query using Polygons instead of a Point.
z = SouthTexasZipcode.objects.get(name='77005')
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=275)))
self.assertEqual(['77025', '77401'], self.get_names(qs))
# If we add a little more distance 77002 should be included.
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=300)))
self.assertEqual(['77002', '77025', '77401'], self.get_names(qs))
def test05_geodetic_distance_lookups(self):
"Testing distance lookups on geodetic coordinate systems."
# Line is from Canberra to Sydney. Query is for all other cities within
# a 100km of that line (which should exclude only Hobart & Adelaide).
line = GEOSGeometry('LINESTRING(144.9630 -37.8143,151.2607 -33.8870)', 4326)
dist_qs = AustraliaCity.objects.filter(point__distance_lte=(line, D(km=100)))
if oracle or connection.ops.geography:
# Oracle and PostGIS 1.5 can do distance lookups on arbitrary geometries.
self.assertEqual(9, dist_qs.count())
self.assertEqual(['Batemans Bay', 'Canberra', 'Hillsdale',
'Melbourne', 'Mittagong', 'Shellharbour',
'Sydney', 'Thirroul', 'Wollongong'],
self.get_names(dist_qs))
else:
# PostGIS 1.4 and below only allows geodetic distance queries (utilizing
# ST_Distance_Sphere/ST_Distance_Spheroid) from Points to PointFields
# on geometry columns.
self.assertRaises(ValueError, dist_qs.count)
# Ensured that a ValueError was raised, none of the rest of the test is
        # supported on this backend, so bail now.
if spatialite: return
# Too many params (4 in this case) should raise a ValueError.
self.assertRaises(ValueError, len,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4')))
# Not enough params should raise a ValueError.
self.assertRaises(ValueError, len,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)',)))
# Getting all cities w/in 550 miles of Hobart.
hobart = AustraliaCity.objects.get(name='Hobart')
qs = AustraliaCity.objects.exclude(name='Hobart').filter(point__distance_lte=(hobart.point, D(mi=550)))
cities = self.get_names(qs)
self.assertEqual(cities, ['Batemans Bay', 'Canberra', 'Melbourne'])
# Cities that are either really close or really far from Wollongong --
# and using different units of distance.
wollongong = AustraliaCity.objects.get(name='Wollongong')
d1, d2 = D(yd=19500), D(nm=400) # Yards (~17km) & Nautical miles.
        # Normal geodetic distance lookup (uses `distance_sphere` on PostGIS).
gq1 = Q(point__distance_lte=(wollongong.point, d1))
gq2 = Q(point__distance_gte=(wollongong.point, d2))
qs1 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq1 | gq2)
# Geodetic distance lookup but telling GeoDjango to use `distance_spheroid`
# instead (we should get the same results b/c accuracy variance won't matter
# in this test case).
if postgis:
gq3 = Q(point__distance_lte=(wollongong.point, d1, 'spheroid'))
gq4 = Q(point__distance_gte=(wollongong.point, d2, 'spheroid'))
qs2 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq3 | gq4)
querysets = [qs1, qs2]
else:
querysets = [qs1]
for qs in querysets:
cities = self.get_names(qs)
self.assertEqual(cities, ['Adelaide', 'Hobart', 'Shellharbour', 'Thirroul'])
def test06_area(self):
"Testing the `area` GeoQuerySet method."
# Reference queries:
# SELECT ST_Area(poly) FROM distapp_southtexaszipcode;
area_sq_m = [5437908.90234375, 10183031.4389648, 11254471.0073242, 9881708.91772461]
# Tolerance has to be lower for Oracle and differences
# with GEOS 3.0.0RC4
tol = 2
for i, z in enumerate(SouthTexasZipcode.objects.area()):
self.assertAlmostEqual(area_sq_m[i], z.area.sq_m, tol)
def test07_length(self):
"Testing the `length` GeoQuerySet method."
# Reference query (should use `length_spheroid`).
# SELECT ST_length_spheroid(ST_GeomFromText('<wkt>', 4326) 'SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]]');
len_m1 = 473504.769553813
len_m2 = 4617.668
if spatialite:
# Does not support geodetic coordinate systems.
self.assertRaises(ValueError, Interstate.objects.length)
else:
qs = Interstate.objects.length()
if oracle: tol = 2
else: tol = 5
self.assertAlmostEqual(len_m1, qs[0].length.m, tol)
# Now doing length on a projected coordinate system.
i10 = SouthTexasInterstate.objects.length().get(name='I-10')
self.assertAlmostEqual(len_m2, i10.length.m, 2)
@no_spatialite
def test08_perimeter(self):
"Testing the `perimeter` GeoQuerySet method."
# Reference query:
# SELECT ST_Perimeter(distapp_southtexaszipcode.poly) FROM distapp_southtexaszipcode;
perim_m = [18404.3550889361, 15627.2108551001, 20632.5588368978, 17094.5996143697]
if oracle: tol = 2
else: tol = 7
for i, z in enumerate(SouthTexasZipcode.objects.perimeter()):
self.assertAlmostEqual(perim_m[i], z.perimeter.m, tol)
# Running on points; should return 0.
for i, c in enumerate(SouthTexasCity.objects.perimeter(model_att='perim')):
self.assertEqual(0, c.perim.m)
def test09_measurement_null_fields(self):
"Testing the measurement GeoQuerySet methods on fields with NULL values."
# Creating SouthTexasZipcode w/NULL value.
SouthTexasZipcode.objects.create(name='78212')
# Performing distance/area queries against the NULL PolygonField,
# and ensuring the result of the operations is None.
htown = SouthTexasCity.objects.get(name='Downtown Houston')
z = SouthTexasZipcode.objects.distance(htown.point).area().get(name='78212')
self.assertEqual(None, z.distance)
self.assertEqual(None, z.area)
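def _example_distance_queries():
    # Illustrative sketch (assumes a configured GeoDjango project with the
    # distapp models and fixtures loaded); the lookup shapes exercised above.
    from django.contrib.gis.geos import Point
    pnt = Point(-95.3704, 29.7049, srid=4326)
    within_7km = SouthTexasCity.objects.filter(point__dwithin=(pnt, D(km=7)))
    donut = (SouthTexasCity.objects
             .filter(point__distance_gte=(pnt, D(km=7)))
             .filter(point__distance_lte=(pnt, D(km=20))))
    return within_7km, donut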
| bsd-3-clause |
davidvon/pipa-pay-server | admin/api/cards.py | 1 | 18567 | # -*- coding: utf-8 -*-
import datetime
import time
import traceback
from flask import request
from flask.ext.restful import Resource
from api import API_PREFIX
from api.order import create_order
from app import restful_api, db, logger
from cache.order import cache_qrcode_code, get_cache_order
from cache.weixin import get_cache_customer_cards, cache_customer_cards
from models import Customer, CustomerCard, CustomerTradeRecords, CustomerCardShare, Order
from utils.util import nonce_str
from wexin.helper import WeixinHelper
from wexin_pay.views import payable
__author__ = 'fengguanhua'
class ApiCardMembers(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardMembers] in: args[%s]' % args)
openid = args.get("openid")
share = args.get("share")
cards = get_cache_customer_cards(openid)
if not cards:
customer_cards = CustomerCard.query.filter(CustomerCard.customer_id == openid) \
.order_by(CustomerCard.status.asc()).all()
cards = [
{'globalId': item.id,
'cardId': item.card_id,
'merchantId': item.card.merchant.id,
'cardCode': item.card_code,
'amount': item.amount,
'title': item.card.title,
'logo': item.card.merchant.logo,
'img': item.img or 'http://wx.cdn.pipapay.com/static/images/card_blue.png',
'status': item.status,
'expireDate': str(item.expire_date)} for item in customer_cards]
cache_customer_cards(openid, cards)
data = [card for card in cards if card['status'] < 3] if share else cards
logger.debug('[ApiCardMembers] out: result[0], data[%s]' % data)
return {"result": 0, "data": data}
class ApiCardDispatch(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardDispatch] in: args[%s]' % args)
order_id = args.get('order_id')
try:
order = Order.query.filter_by(order_id=order_id).first()
if not order:
logger.warn('[ApiCardDispatch] order[%s] not exist' % order_id)
return {"result": 254}
expire_date = datetime.date.today() + datetime.timedelta(365 * 3) # TODO
count = CustomerCard.query.filter_by(order_id=order_id).count()
if count < order.card_count:
for i in range(count, order.card_count):
card = CustomerCard(customer_id=order.customer.openid, order_id=order_id, card_id=order.card_id,
amount=order.face_amount, expire_date=expire_date, status=0)
db.session.add(card)
db.session.commit()
output = {"result": 0, "data": {"count": order.card_count, "amount": order.face_amount}}
logger.debug('[ApiCardDispatch] out: return [%s]' % output)
return output
except Exception as e:
logger.error(traceback.print_exc())
logger.error('[ApiCardDispatch] order[%s] card dispatch exception:[%s]' % (order_id, e.message))
return {'result': 255, 'data': e.message}
class ApiWxCardStatusUpdate(Resource):
def post(self):
openid = args = None
try:
args = request.values
logger.debug('[ApiWxCardStatusUpdate] in: args[%s]' % args)
openid = args['openid']
card_global_id = args['cardGlobalId']
card = CustomerCard.query.get(card_global_id)
card.status = 1
db.session.add(card)
db.session.commit()
logger.info('[ApiWxCardStatusUpdate] customer[%s] arg[%s] card[code:%s] status update success' %
(openid, args, card.card_code))
return {'result': 0, 'data': card.card_code}
except Exception as e:
logger.error(traceback.print_exc())
logger.error('[ApiWxCardStatusUpdate] customer[%s] arg[%s] card status update error:[%s]' %
(openid, args, e.message))
return {'result': 255, 'data': e.message}
class ApiCardPayCode(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardPayCode] in: args[%s]' % args)
card_id = args['cardId']
card_code = args['cardCode']
card = CustomerCard.query.filter_by(card_id=card_id, card_code=card_code).first()
if not card:
logger.warn('[ApiCardPayCode] card[id:%s,code:%s] not exist' % (card_id, card_code))
return {'result': 255}
data = {
'status': card.status,
'merchantName': card.card.merchant.name,
'cardName': card.card.title,
'amount': card.amount,
'qrcode': cache_qrcode_code(card_id, card_code)
}
logger.debug('[ApiCardPayCode] out: result[0] data[%s]' % data)
return {'result': 0, 'data': data}
class ApiCardPayRecords(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardPayRecords] in: args[%s]' % args)
card_id = args['cardId']
left = datetime.date.today()
right = datetime.date.today() - datetime.timedelta(30)
records = CustomerTradeRecords.query.filter(CustomerTradeRecords.card_id == card_id,
CustomerTradeRecords.time.between(left, right)).all()
recharge_total = 0
expend_total = 0
for item in records:
if item.type == 0:
recharge_total += item.amount
else:
expend_total += item.amount
data = {
'rechargeTotal': recharge_total,
'expendTotal': expend_total,
'records': [{'merchantName': item.card.merchant.name,
'date': str(item.time),
'amount': item.amount} for item in records]
}
logger.debug('[ApiCardPayRecords] out: result[0] data[%s]' % args)
return {'result': 0, 'data': data}
class ApiCardShareCheck(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardShareCheck] in: args[%s]' % args)
card_id = args['cardId']
open_id = args['openId']
card_code = args['cardCode']
if not card_code:
logger.warn('[ApiCardShareCheck] openid:%s card[id:%s] not banding' % (open_id, card_id))
return {'result': 254}
customer_card = CustomerCard.query.filter_by(customer_id=open_id, card_id=card_id, card_code=card_code).first()
if not customer_card:
logger.warn('[ApiCardShareCheck] openid:%s card[id:%s code:%s] not exist' % (open_id, card_id, card_code))
return {'result': 255}
if customer_card.status >= 3:
logger.debug('[ApiCardShareCheck] out: result[0] status[%s]' % customer_card.status)
            return {'result': 0, 'status': customer_card.status}  # transfer in progress or already transferred
data = {'result': 0,
'status': customer_card.status,
'card': {
'sign': nonce_str(12),
'cardId': customer_card.card_id,
'cardCode': customer_card.card_code,
'cardName': customer_card.card.title,
'timestamp': str(int(time.time())),
'logo': customer_card.card.merchant.logo}
}
logger.debug('[ApiCardShareCheck] out: return[%s]' % data)
return data
class ApiCardShare(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardShare] in: args[%s]' % args)
open_id = args['openId']
card_id = args['cardId']
card_code = args['cardCode']
sign = args['sign']
timestamp = args['timestamp']
content = args['content']
try:
card = CustomerCard.query.filter_by(customer_id=open_id, card_id=card_id, card_code=card_code).first()
            card.status = 4  # update the card record status to 4: transfer in progress
record = CustomerCardShare(share_customer_id=open_id, customer_card_id=card.id,
timestamp=timestamp, content=content, sign=sign, status=0)
db.session.add(card)
db.session.add(record)
db.session.commit()
logger.info('[ApiCardShare] customer[%s] result[0] card[%s] share ok' % (open_id, card_id))
return {'result': 0}
except Exception as e:
logger.error(traceback.print_exc())
logger.error('[ApiCardShare] customer[%s] card[%s] share error:%s' % (open_id, card_id, e.message))
return {'result': 255, 'data': e.message}
class ApiCardShareInfo(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardShareInfo] in: args[%s]' % args)
open_id = args['openId']
card_id = args['cardId']
card_code = args['cardCode']
card = CustomerCard.query.filter_by(card_id=card_id, card_code=card_code).first()
if not card:
logger.warn('[ApiCardShareInfo] openid:%s card[id:%s code:%s] not exist' % (open_id, card_id, card_code))
return {'result': 254}
share = CustomerCardShare.query.filter_by(share_customer_id=open_id, customer_card_id=card.id).first()
acquire_customer = None
if share and share.acquire_customer_id:
acquire_customer = Customer.query.filter_by(openid=share.acquire_customer_id).first()
data = {'result': 0,
'data': {'status': '已领取' if share.status == 2 else '未领取',
'cardLogo': share.customer_card.card.merchant.logo,
'cardCode': card_code,
'cardName': share.customer_card.card.title,
'datetime': str(share.datetime),
'content': share.content,
'acquireUserImg': acquire_customer.head_image if acquire_customer else '',
'acquireUserName': acquire_customer.show_name() if acquire_customer else '',
}
}
logger.debug('[ApiCardShareInfo] out: return[%s]' % data)
return data
class ApiCardReceiveCheck(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardReceiveCheck] in: args[%s]' % args)
sign = args['sign']
info = CustomerCardShare.query.filter_by(sign=sign).first()
if not info:
logger.warn('[ApiCardReceiveCheck] sign[%s] not exist' % sign)
            return {'result': 255}  # sign does not exist
card = info.customer_card
data = {'result': 0,
'data': {
'giveUserHeadImg': info.share_customer.head_image,
'giveUsername': info.share_customer.show_name(),
'shareContent': info.content,
'cardStatus': card.status,
'giveStatus': info.status,
'acquireUserOpenId': info.acquire_customer_id}
}
logger.debug('[ApiCardReceiveCheck] out: return[%s]' % data)
return data
class ApiCardReceive(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardReceive] in: args[%s]' % args)
sign = args['sign']
openid = args['openId']
need_commit = False
try:
info = CustomerCardShare.query.filter_by(sign=sign).first()
if not info:
logger.error('[ApiCardReceive] customer[%s] card[%s] not sharing' %
(openid, info.customer_card.card_code))
                return {'result': 255}  # sign does not exist
new_card = CustomerCard.query.filter_by(customer_id=openid, card_code=info.customer_card.card_code,
card_id=info.customer_card.card_id).first()
if new_card:
if info.share_customer.openid == openid:
new_card.status = 0
db.session.add(new_card)
need_commit = True
else:
logger.info('[ApiCardReceive] customer[%s] card[%s] not exist' % (openid, info.customer_card_id))
old_card = CustomerCard.query.filter_by(customer_id=info.share_customer.openid,
card_id=info.customer_card.card_id,
card_code=info.customer_card.card_code).first()
new_card = CustomerCard(customer_id=openid, card_id=info.customer_card.card_id, img=old_card.img,
amount=old_card.amount, card_code=old_card.card_code,
expire_date=old_card.expire_date, status=0)
old_card.status = 5
db.session.add(old_card)
db.session.add(new_card)
need_commit = True
if info.status != 1:
info.acquire_customer_id = openid
info.status = 1
db.session.add(info)
need_commit = True
if need_commit:
db.session.commit()
logger.info('[ApiCardReceive] customer[%s] card[%s] received success' % (openid, new_card.card_code))
data = {'result': 0,
'data': {
'status': new_card.status,
"cardGlobalId": new_card.id,
                        'wxCardId': new_card.card.wx_card_id,  # WeChat card coupon ID; can be fetched via chooseCard
                        'code': info.customer_card.card_code  # the designated coupon code, claimable only once; required when the
                        # coupon's use_custom_code field is true, optional for non-custom codes.
}}
logger.debug('[ApiCardReceive] out: return[%s]' % data)
return data
except Exception as e:
logger.error(traceback.print_exc())
logger.error('[ApiCardReceive] customer[%s] receive card[%s] error:%s' % (openid, sign, e.message))
return {'result': 255, 'data': e.message}
class ApiCardBuy(Resource):
def post(self):
try:
args = request.values
logger.info('[ApiCardBuy] args:%s' % args)
card_id = args.get('cardId')
price = args.get('price')
count = args.get('count')
openid = args.get('openId')
order = create_order(card_id, float(price), openid, count)
if not order:
return {'result': 250}
res, outputs = payable(request, openid, order)
logger.info('[ApiCardBuy] data:%s' % str(outputs))
if res == 0:
outputs['orderId'] = order.order_id
logger.info('[ApiCardBuy] create temp order success:%s' % order.order_id)
return {'result': 0, 'content': outputs}
logger.warn('[ApiCardBuy] order:%s pre-pay failed:%d' % (order.order_id, res))
return {'result': res, 'msg': outputs}
except Exception as e:
logger.error('[ApiCardBuy] except:%s' % e.message)
return {'result': 254, 'msg': e.message}
class ApiCardBuyCommit(Resource):
def post(self):
args = request.values
logger.debug('[ApiCardBuyCommit] in: args[%s]' % args)
order_id = args.get('orderId')
order = get_cache_order(order_id)
if not order:
logger.warn('[ApiCardBuyCommit] order:%s not exist' % order_id)
return {'result': 254}
try:
order.paid = True
db.session.add(order)
db.session.commit()
logger.info('[ApiCardBuyCommit] order:%s create success' % order_id)
return {'result': 0}
except Exception as e:
logger.error('[ApiCardBuyCommit] order:%s create error:%s' % (order_id, e.message))
return {'result': 255}
class ApiCardActive(Resource):
def post(self):
open_id = card_id = code = None
args = request.values
logger.debug('[ApiCardActive] in: args[%s]' % args)
try:
card_id = args.get('card_id')
encrypt_code = args.get('encrypt_code')
open_id = args.get('openid')
logger.info('[ApiCardActive] data=%s' % str(args))
helper = WeixinHelper()
code = helper.decrypt_card_code(encrypt_code)
if not code:
logger.error('[ApiCardActive] decrypt card code[%s,%s] error' % (open_id, card_id))
return {'result': 255}
card = CustomerCard.query.filter_by(customer_id=open_id, card_id=card_id, card_code=code).first()
active = helper.active_card(card.amount * 100, code, card_id, 0)
if not active:
logger.error('[ApiCardActive] active card[%s,%s,%s] error' % (open_id, card_id, code))
return {'result': 255}
card.status = 2
db.session.add(card)
db.session.commit()
logger.debug('[ApiCardActive] out: result[0]')
return {'result': 0}
except Exception as e:
logger.error('[ApiCardActive] active card[%s,%s,%s] exception:%s' % (open_id, card_id, code, e.message))
return {'result': 255}
restful_api.add_resource(ApiCardBuy, API_PREFIX + 'card/buy')
restful_api.add_resource(ApiCardBuyCommit, API_PREFIX + 'card/buy/commit')
restful_api.add_resource(ApiCardActive, API_PREFIX + 'card/active')
restful_api.add_resource(ApiCardMembers, API_PREFIX + 'cards')
restful_api.add_resource(ApiCardDispatch, API_PREFIX + 'card/dispatch')
restful_api.add_resource(ApiWxCardStatusUpdate, API_PREFIX + 'card/add/status/update')
restful_api.add_resource(ApiCardPayCode, API_PREFIX + 'card/pay/code')
restful_api.add_resource(ApiCardPayRecords, API_PREFIX + 'card/pay/records')
restful_api.add_resource(ApiCardShareCheck, API_PREFIX + 'card/share/check')
restful_api.add_resource(ApiCardShare, API_PREFIX + 'card/share')
restful_api.add_resource(ApiCardShareInfo, API_PREFIX + 'card/share/info')
restful_api.add_resource(ApiCardReceiveCheck, API_PREFIX + 'card/receive/check')
restful_api.add_resource(ApiCardReceive, API_PREFIX + 'card/receive')
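def _example_card_pay_code_request():
    # Illustrative client-side sketch. The host/port and the '/api/' prefix are
    # assumptions (API_PREFIX is defined elsewhere), and the requests library is
    # not a dependency of this module.
    import requests
    resp = requests.post('http://localhost:5000/api/card/pay/code',
                         data={'cardId': 'CARD001', 'cardCode': '12345678'})
    return resp.json()  # {'result': 0, 'data': {...}} when the card exists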
| apache-2.0 |
ntt-sic/nova | nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py | 27 | 2817 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.contrib import cloudpipe_update
from nova import db
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_network
fake_networks = [fake_network.fake_network(1),
fake_network.fake_network(2)]
def fake_project_get_networks(context, project_id, associate=True):
return fake_networks
def fake_network_update(context, network_id, values):
for network in fake_networks:
if network['id'] == network_id:
for key in values:
network[key] = values[key]
class CloudpipeUpdateTest(test.NoDBTestCase):
def setUp(self):
super(CloudpipeUpdateTest, self).setUp()
self.controller = cloudpipe_update.CloudpipeUpdateController()
self.stubs.Set(db, "project_get_networks", fake_project_get_networks)
self.stubs.Set(db, "network_update", fake_network_update)
def test_cloudpipe_configure_project(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-cloudpipe/configure-project')
body = {"configure_project": {"vpn_ip": "1.2.3.4", "vpn_port": 222}}
result = self.controller.update(req, 'configure-project',
body=body)
self.assertEqual('202 Accepted', result.status)
self.assertEqual(fake_networks[0]['vpn_public_address'], "1.2.3.4")
self.assertEqual(fake_networks[0]['vpn_public_port'], 222)
def test_cloudpipe_configure_project_bad_url(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-cloudpipe/configure-projectx')
body = {"vpn_ip": "1.2.3.4", "vpn_port": 222}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req,
'configure-projectx', body)
def test_cloudpipe_configure_project_bad_data(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-cloudpipe/configure-project')
body = {"vpn_ipxx": "1.2.3.4", "vpn_port": 222}
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.update, req,
'configure-project', body)
| apache-2.0 |
shadowmint/nwidget | lib/pyglet-1.4.4/contrib/wydget/wydget/widgets/progress.py | 29 | 1570 | from pyglet.gl import *
from wydget import loadxml
from wydget import util
from wydget.widgets.label import Label
class Progress(Label):
name = 'progress'
def __init__(self, parent, value=0.0, show_value=True,
bar_color='gray', bgcolor=(.3, .3, .3, 1), color='white',
width=None, height=16, halign='center', valign='center', **kw):
self._value = util.parse_value(value, 0)
self.show_value = show_value
self.bar_color = util.parse_color(bar_color)
super(Progress, self).__init__(parent, ' ', width=width,
height=height, bgcolor=bgcolor, color=color, halign=halign,
valign=valign, **kw)
if self.show_value:
self.text = '%d%%'%(value * 100)
def set_value(self, value):
self._value = value
if self.show_value:
self.text = '%d%%'%(value * 100)
value = property(lambda self: self._value, set_value)
def renderBackground(self, rect):
super(Progress, self).renderBackground(rect)
r = rect.copy()
r.width *= self._value
b, self.bgcolor = self.bgcolor, self.bar_color
super(Progress, self).renderBackground(r)
self.bgcolor = b
@classmethod
def fromXML(cls, element, parent):
'''Create the object from the XML element and attach it to the parent.
'''
kw = loadxml.parseAttributes(element)
obj = cls(parent, **kw)
for child in element.getchildren():
loadxml.getConstructor(element.tag)(child, obj)
return obj
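def _example_progress(parent):
    # Illustrative sketch (assumes an existing wydget GUI with a parent
    # element); shows the value property defined above driving the label text.
    bar = Progress(parent, value=0.4, width=200)
    bar.value = 0.75  # label re-renders as '75%'
    return bar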
| apache-2.0 |
winndows/cinder | cinder/db/sqlalchemy/migrate_repo/versions/034_volume_type_add_desc_column.py | 31 | 1274 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, MetaData, Table, String
def upgrade(migrate_engine):
"""Add description column to volume_types."""
meta = MetaData()
meta.bind = migrate_engine
volume_types = Table('volume_types', meta, autoload=True)
description = Column('description', String(255))
volume_types.create_column(description)
volume_types.update().values(description=None).execute()
def downgrade(migrate_engine):
"""Remove description column to volumes."""
meta = MetaData()
meta.bind = migrate_engine
volume_types = Table('volume_types', meta, autoload=True)
description = volume_types.columns.description
volume_types.drop_column(description)
| apache-2.0 |
faun/django_test | build/lib/django/template/loader.py | 16 | 8033 | # Wrapper for loading templates from storage of some sort (e.g. filesystem, database).
#
# This uses the TEMPLATE_LOADERS setting, which is a list of loaders to use.
# Each loader is expected to have this interface:
#
# callable(name, dirs=[])
#
# name is the template name.
# dirs is an optional list of directories to search instead of TEMPLATE_DIRS.
#
# The loader should return a tuple of (template_source, path). The path returned
# might be shown to the user for debugging purposes, so it should identify where
# the template was loaded from.
#
# A loader may return an already-compiled template instead of the actual
# template source. In that case the path returned should be None, since the
# path information is associated with the template during the compilation,
# which has already been done.
#
# Each loader should have an "is_usable" attribute set. This is a boolean that
# specifies whether the loader can be used in this Python installation. Each
# loader is responsible for setting this when it's initialized.
#
# For example, the eggs loader (which is capable of loading templates from
# Python eggs) sets is_usable to False if the "pkg_resources" module isn't
# installed, because pkg_resources is necessary to read eggs.
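#
# For illustration only (this sketch is not part of the module): a minimal
# class-based loader honouring the interface described above. The dictionary
# TEMPLATE_SOURCES is a made-up stand-in for any storage backend.
#
#     class DictLoader(BaseLoader):
#         is_usable = True
#
#         def load_template_source(self, template_name, template_dirs=None):
#             try:
#                 return TEMPLATE_SOURCES[template_name], template_name
#             except KeyError:
#                 raise TemplateDoesNotExist(template_name)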
from django.core.exceptions import ImproperlyConfigured
from django.template import Origin, Template, Context, TemplateDoesNotExist, add_to_builtins
from django.utils.importlib import import_module
from django.conf import settings
template_source_loaders = None
class BaseLoader(object):
is_usable = False
def __init__(self, *args, **kwargs):
pass
def __call__(self, template_name, template_dirs=None):
return self.load_template(template_name, template_dirs)
def load_template(self, template_name, template_dirs=None):
source, display_name = self.load_template_source(template_name, template_dirs)
origin = make_origin(display_name, self.load_template_source, template_name, template_dirs)
try:
template = get_template_from_string(source, origin, template_name)
return template, None
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist, back off to
# returning the source and display name for the template we were asked to load.
# This allows for correct identification (later) of the actual template that does
# not exist.
return source, display_name
def load_template_source(self, template_name, template_dirs=None):
"""
Returns a tuple containing the source and origin for the given template
name.
"""
raise NotImplementedError
def reset(self):
"""
Resets any state maintained by the loader instance (e.g., cached
templates or cached loader modules).
"""
pass
class LoaderOrigin(Origin):
def __init__(self, display_name, loader, name, dirs):
super(LoaderOrigin, self).__init__(display_name)
self.loader, self.loadname, self.dirs = loader, name, dirs
def reload(self):
return self.loader(self.loadname, self.dirs)[0]
def make_origin(display_name, loader, name, dirs):
if settings.TEMPLATE_DEBUG and display_name:
return LoaderOrigin(display_name, loader, name, dirs)
else:
return None
def find_template_loader(loader):
if isinstance(loader, (tuple, list)):
loader, args = loader[0], loader[1:]
else:
args = []
if isinstance(loader, basestring):
module, attr = loader.rsplit('.', 1)
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing template source loader %s: "%s"' % (loader, e))
try:
TemplateLoader = getattr(mod, attr)
except AttributeError, e:
raise ImproperlyConfigured('Error importing template source loader %s: "%s"' % (loader, e))
if hasattr(TemplateLoader, 'load_template_source'):
func = TemplateLoader(*args)
else:
# Try loading module the old way - string is full path to callable
if args:
raise ImproperlyConfigured("Error importing template source loader %s - can't pass arguments to function-based loader." % loader)
func = TemplateLoader
if not func.is_usable:
import warnings
warnings.warn("Your TEMPLATE_LOADERS setting includes %r, but your Python installation doesn't support that type of template loading. Consider removing that line from TEMPLATE_LOADERS." % loader)
return None
else:
return func
else:
raise ImproperlyConfigured('Loader does not define a "load_template" callable template source loader')
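# For illustration only (not part of the module): a hypothetical TEMPLATE_LOADERS
# setting exercising both forms accepted by find_template_loader above -- a plain
# dotted path, and a (dotted path, args) tuple whose extra elements are passed to
# the loader's constructor.
#
#     TEMPLATE_LOADERS = (
#         'django.template.loaders.filesystem.Loader',
#         ('django.template.loaders.cached.Loader', (
#             'django.template.loaders.app_directories.Loader',
#         )),
#     )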
def find_template(name, dirs=None):
# Calculate template_source_loaders the first time the function is executed
# because putting this logic in the module-level namespace may cause
# circular import errors. See Django ticket #1292.
global template_source_loaders
if template_source_loaders is None:
loaders = []
for loader_name in settings.TEMPLATE_LOADERS:
loader = find_template_loader(loader_name)
if loader is not None:
loaders.append(loader)
template_source_loaders = tuple(loaders)
for loader in template_source_loaders:
try:
source, display_name = loader(name, dirs)
return (source, make_origin(display_name, loader, name, dirs))
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(name)
def find_template_source(name, dirs=None):
# For backward compatibility
import warnings
warnings.warn(
"`django.template.loaders.find_template_source` is deprecated; use `django.template.loaders.find_template` instead.",
DeprecationWarning
)
template, origin = find_template(name, dirs)
if hasattr(template, 'render'):
raise Exception("Found a compiled template that is incompatible with the deprecated `django.template.loaders.find_template_source` function.")
return template, origin
def get_template(template_name):
"""
Returns a compiled Template object for the given template name,
handling template inheritance recursively.
"""
template, origin = find_template(template_name)
if not hasattr(template, 'render'):
# template needs to be compiled
template = get_template_from_string(template, origin, template_name)
return template
def get_template_from_string(source, origin=None, name=None):
"""
Returns a compiled Template object for the given template code,
handling template inheritance recursively.
"""
return Template(source, origin, name)
def render_to_string(template_name, dictionary=None, context_instance=None):
"""
Loads the given template_name and renders it with the given dictionary as
context. The template_name may be a string to load a single template using
get_template, or it may be a tuple to use select_template to find one of
the templates in the list. Returns a string.
"""
dictionary = dictionary or {}
if isinstance(template_name, (list, tuple)):
t = select_template(template_name)
else:
t = get_template(template_name)
if context_instance:
context_instance.update(dictionary)
else:
context_instance = Context(dictionary)
return t.render(context_instance)
def select_template(template_name_list):
"Given a list of template names, returns the first that can be loaded."
for template_name in template_name_list:
try:
return get_template(template_name)
except TemplateDoesNotExist:
continue
# If we get here, none of the templates could be loaded
raise TemplateDoesNotExist(', '.join(template_name_list))
add_to_builtins('django.template.loader_tags')
| bsd-3-clause |
StanislavQA/python_task | timeweb2.py | 1 | 2037 | # -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
import unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class timeweb2(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_timeweb2(self):
wd = self.wd
self.open_home_page(wd)
self.tariff_plan(wd)
self.login(wd, username = "Чернядьева Анна Константиновна", email = "[email protected]")
def login(self, wd, username, email):
wd.find_element_by_xpath(
"//div[@class='overlay']/div/div/div[14]/form/div[2]/div[1]/div[2]/div[2]/input").click()
wd.find_element_by_xpath(
"//div[@class='overlay']/div/div/div[14]/form/div[2]/div[1]/div[2]/div[2]/input").clear()
wd.find_element_by_xpath(
"//div[@class='overlay']/div/div/div[14]/form/div[2]/div[1]/div[2]/div[2]/input").send_keys(
username)
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(email)
wd.find_element_by_xpath("//label[@for='c4']").click()
if not wd.find_element_by_id("c4").is_selected():
wd.find_element_by_id("c4").click()
wd.find_element_by_link_text("ЗАКАЗАТЬ").click()
def tariff_plan(self, wd):
wd.find_element_by_link_text("ХОСТИНГ").click()
wd.find_element_by_link_text("РАЗМЕСТИТЬ САЙТ").click()
wd.find_element_by_css_selector("li.item.selected").click()
def open_home_page(self, wd):
wd.get("https://timeweb.com/ru/")
# Check for compliance with the selected plan
    def check_exists_by_link_text(self, link_text="Year+"):
        return len(self.wd.find_elements_by_link_text(link_text)) > 0
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
Lilykos/inspire-next | inspire/modules/workflows/views/holdingpen_edit.py | 1 | 3372 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2015 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from six import text_type
from flask import Blueprint, jsonify, request
from flask_login import login_required
from harvestingkit.html_utils import MathMLParser
from invenio.base.decorators import wash_arguments
from invenio.ext.principal import permission_required
from invenio.modules.workflows.acl import viewholdingpen
from invenio.modules.workflows.models import BibWorkflowObject
blueprint = Blueprint(
'inspire_holdingpen',
__name__,
url_prefix="/admin/holdingpen",
template_folder='../templates',
static_folder="../static",
)
# Constants
SUBJECT_TERM = "subject_term"
TERM = "term"
SCHEME = "scheme"
INSPIRE_SCHEME = "INSPIRE"
# Fields
SUBJECT_FIELD = "subject_term.term"
TITLE_FIELD = "title.title"
@blueprint.route('/edit_record_title', methods=['POST'])
@login_required
@permission_required(viewholdingpen.name)
@wash_arguments({'value': (text_type, ""),
'objectid': (int, 0)})
def edit_record_title(value, objectid):
editable_obj = BibWorkflowObject.query.get(objectid)
data = editable_obj.get_data()
data[TITLE_FIELD] = MathMLParser.html_to_text(value)
editable_obj.set_data(data)
editable_obj.save()
return jsonify({
"category": "success",
"message": "Edit on title was successful"
})
@blueprint.route('/edit_record_subject', methods=['POST'])
@login_required
@permission_required(viewholdingpen.name)
@wash_arguments({'objectid': (text_type, "")})
def edit_record_subject(objectid):
editable_obj = BibWorkflowObject.query.get(objectid)
data = editable_obj.get_data()
old_subjects_list = data[SUBJECT_FIELD]
new_subjects_list = request.values.getlist('subjects[]') or []
    # We use a set difference to find which subjects to remove and which
    # to add (a standalone sketch of this approach follows the function).
    # Casting to str also strips the unicode wrapper from the values.
to_remove = [str(x) for x in list(set(old_subjects_list) - set(new_subjects_list))]
to_add = [str(x) for x in list(set(new_subjects_list) - set(old_subjects_list))]
# Make a copy of the original list
subject_objects = []
subject_objects.extend(data[SUBJECT_TERM])
# Remove subjects
    subject_objects = [subj for subj in subject_objects
                       if subj[TERM] not in to_remove]
# Add the new subjects
for subj in to_add:
subject_objects.append({
TERM: subj,
SCHEME: INSPIRE_SCHEME
})
data[SUBJECT_TERM] = subject_objects
editable_obj.set_data(data)
editable_obj.save()
return jsonify({
"category": "success",
"message": "Edit on subjects was successful"
})
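# A standalone sketch of the set-difference bookkeeping used above (for
# illustration only; the subject names below are hypothetical).
def _diff_subjects_example(old_subjects, new_subjects):
    """Return (to_remove, to_add) between two lists of subject terms."""
    to_remove = [str(x) for x in set(old_subjects) - set(new_subjects)]
    to_add = [str(x) for x in set(new_subjects) - set(old_subjects)]
    return to_remove, to_add
# _diff_subjects_example(['Astrophysics', 'Computing'], ['Computing', 'Theory-HEP'])
# returns (['Astrophysics'], ['Theory-HEP'])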
| gpl-2.0 |
ljwolf/pysal | pysal/spreg/ml_error.py | 6 | 19663 | """
ML Estimation of Spatial Error Model
"""
__author__ = "Luc Anselin [email protected],\
Serge Rey [email protected], \
Levi Wolf [email protected]"
import numpy as np
import numpy.linalg as la
from scipy import sparse as sp
from scipy.sparse.linalg import splu as SuperLU
import pysal as ps
from utils import RegressionPropsY, RegressionPropsVM
import diagnostics as DIAG
import user_output as USER
import summary_output as SUMMARY
import regimes as REGI
from w_utils import symmetrize
try:
from scipy.optimize import minimize_scalar
minimize_scalar_available = True
except ImportError:
minimize_scalar_available = False
from .sputils import spdot, spfill_diagonal, spinv
__all__ = ["ML_Error"]
class BaseML_Error(RegressionPropsY, RegressionPropsVM, REGI.Regimes_Frame):
"""
ML estimation of the spatial error model (note no consistency
checks, diagnostics or constants added); Anselin (1988) [Anselin1988]_
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
w : Sparse matrix
Spatial weights sparse matrix
method : string
if 'full', brute force calculation (full matrix expressions)
if 'ord', Ord eigenvalue calculation
if 'LU', LU decomposition for sparse matrices
epsilon : float
tolerance criterion in mimimize_scalar function and inverse_product
regimes_att : dictionary
Dictionary containing elements to be used in case of a regimes model,
i.e. 'x' before regimes, 'regimes' list and 'cols2regi'
Attributes
----------
betas : array
kx1 array of estimated coefficients
lam : float
estimate of spatial autoregressive coefficient
u : array
nx1 array of residuals
e_filtered : array
spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant, excluding the rho)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
method : string
log Jacobian method
if 'full': brute force (full matrix computations)
if 'ord' : Ord eigenvalue method
epsilon : float
tolerance criterion used in minimize_scalar function and inverse_product
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (k+1 x k+1) - includes lambda
vm1 : array
2x2 array of variance covariance for lambda, sigma
sig2 : float
Sigma squared used in computations
logll : float
maximized log-likelihood (including constant terms)
Examples
--------
>>> import numpy as np
>>> import pysal as ps
>>> np.set_printoptions(suppress=True) #prevent scientific format
>>> db = ps.open(ps.examples.get_path("south.dbf"),'r')
>>> y_name = "HR90"
>>> y = np.array(db.by_col(y_name))
>>> y.shape = (len(y),1)
>>> x_names = ["RD90","PS90","UE90","DV90"]
>>> x = np.array([db.by_col(var) for var in x_names]).T
>>> x = np.hstack((np.ones((len(y),1)),x))
>>> ww = ps.open(ps.examples.get_path("south_q.gal"))
>>> w = ww.read()
>>> ww.close()
>>> w.transform = 'r'
>>> mlerr = BaseML_Error(y,x,w) #doctest: +SKIP
>>> "{0:.6f}".format(mlerr.lam) #doctest: +SKIP
'0.299078'
>>> np.around(mlerr.betas, decimals=4) #doctest: +SKIP
array([[ 6.1492],
[ 4.4024],
[ 1.7784],
[-0.3781],
[ 0.4858],
[ 0.2991]])
>>> "{0:.6f}".format(mlerr.mean_y) #doctest: +SKIP
'9.549293'
>>> "{0:.6f}".format(mlerr.std_y) #doctest: +SKIP
'7.038851'
>>> np.diag(mlerr.vm) #doctest: +SKIP
array([ 1.06476526, 0.05548248, 0.04544514, 0.00614425, 0.01481356,
0.00143001])
>>> "{0:.6f}".format(mlerr.sig2[0][0]) #doctest: +SKIP
'32.406854'
>>> "{0:.6f}".format(mlerr.logll) #doctest: +SKIP
'-4471.407067'
>>> mlerr1 = BaseML_Error(y,x,w,method='ord') #doctest: +SKIP
>>> "{0:.6f}".format(mlerr1.lam) #doctest: +SKIP
'0.299078'
>>> np.around(mlerr1.betas, decimals=4) #doctest: +SKIP
array([[ 6.1492],
[ 4.4024],
[ 1.7784],
[-0.3781],
[ 0.4858],
[ 0.2991]])
>>> "{0:.6f}".format(mlerr1.mean_y) #doctest: +SKIP
'9.549293'
>>> "{0:.6f}".format(mlerr1.std_y) #doctest: +SKIP
'7.038851'
>>> np.around(np.diag(mlerr1.vm), decimals=4) #doctest: +SKIP
array([ 1.0648, 0.0555, 0.0454, 0.0061, 0.0148, 0.0014])
>>> "{0:.4f}".format(mlerr1.sig2[0][0]) #doctest: +SKIP
'32.4069'
>>> "{0:.4f}".format(mlerr1.logll) #doctest: +SKIP
'-4471.4071'
"""
def __init__(self, y, x, w, method='full', epsilon=0.0000001, regimes_att=None):
# set up main regression variables and spatial filters
self.y = y
if regimes_att:
self.x = x.toarray()
else:
self.x = x
self.n, self.k = self.x.shape
self.method = method
self.epsilon = epsilon
#W = w.full()[0] #wait to build pending what is needed
#Wsp = w.sparse
ylag = ps.lag_spatial(w, self.y)
xlag = self.get_x_lag(w, regimes_att)
# call minimizer using concentrated log-likelihood to get lambda
methodML = method.upper()
if methodML in ['FULL', 'LU', 'ORD']:
if methodML == 'FULL':
W = w.full()[0] # need dense here
res = minimize_scalar(err_c_loglik, 0.0, bounds=(-1.0, 1.0),
args=(self.n, self.y, ylag, self.x,
xlag, W), method='bounded',
tol=epsilon)
elif methodML == 'LU':
I = sp.identity(w.n)
Wsp = w.sparse # need sparse here
res = minimize_scalar(err_c_loglik_sp, 0.0, bounds=(-1.0,1.0),
args=(self.n, self.y, ylag,
self.x, xlag, I, Wsp),
method='bounded', tol=epsilon)
W = Wsp
elif methodML == 'ORD':
# check on symmetry structure
if w.asymmetry(intrinsic=False) == []:
ww = symmetrize(w)
WW = np.array(ww.todense())
evals = la.eigvalsh(WW)
W = WW
else:
W = w.full()[0] # need dense here
evals = la.eigvals(W)
res = minimize_scalar(
err_c_loglik_ord, 0.0, bounds=(-1.0, 1.0),
args=(self.n, self.y, ylag, self.x,
xlag, evals), method='bounded',
tol=epsilon)
else:
raise Exception("{0} is an unsupported method".format(method))
self.lam = res.x
# compute full log-likelihood, including constants
ln2pi = np.log(2.0 * np.pi)
llik = -res.fun - self.n / 2.0 * ln2pi - self.n / 2.0
self.logll = llik
# b, residuals and predicted values
ys = self.y - self.lam * ylag
xs = self.x - self.lam * xlag
xsxs = np.dot(xs.T, xs)
xsxsi = np.linalg.inv(xsxs)
xsys = np.dot(xs.T, ys)
b = np.dot(xsxsi, xsys)
self.betas = np.vstack((b, self.lam))
self.u = y - np.dot(self.x, b)
self.predy = self.y - self.u
# residual variance
self.e_filtered = self.u - self.lam * ps.lag_spatial(w, self.u)
self.sig2 = np.dot(self.e_filtered.T, self.e_filtered) / self.n
# variance-covariance matrix betas
varb = self.sig2 * xsxsi
# variance-covariance matrix lambda, sigma
a = -self.lam * W
spfill_diagonal(a, 1.0)
ai = spinv(a)
wai = spdot(W, ai)
tr1 = wai.diagonal().sum()
wai2 = spdot(wai, wai)
tr2 = wai2.diagonal().sum()
waiTwai = spdot(wai.T, wai)
tr3 = waiTwai.diagonal().sum()
v1 = np.vstack((tr2 + tr3,
tr1 / self.sig2))
v2 = np.vstack((tr1 / self.sig2,
self.n / (2.0 * self.sig2 ** 2)))
v = np.hstack((v1, v2))
self.vm1 = np.linalg.inv(v)
# create variance matrix for beta, lambda
vv = np.hstack((varb, np.zeros((self.k, 1))))
vv1 = np.hstack(
(np.zeros((1, self.k)), self.vm1[0, 0] * np.ones((1, 1))))
self.vm = np.vstack((vv, vv1))
def get_x_lag(self, w, regimes_att):
if regimes_att:
xlag = ps.lag_spatial(w, regimes_att['x'])
xlag = REGI.Regimes_Frame.__init__(self, xlag,
regimes_att['regimes'], constant_regi=None, cols2regi=regimes_att['cols2regi'])[0]
xlag = xlag.toarray()
else:
xlag = ps.lag_spatial(w, self.x)
return xlag
class ML_Error(BaseML_Error):
"""
ML estimation of the spatial lag model with all results and diagnostics;
Anselin (1988) [Anselin1988]_
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
w : Sparse matrix
Spatial weights sparse matrix
method : string
if 'full', brute force calculation (full matrix expressions)
if 'ord', Ord eigenvalue method
if 'LU', LU sparse matrix decomposition
epsilon : float
tolerance criterion in mimimize_scalar function and inverse_product
spat_diag : boolean
if True, include spatial diagnostics
vm : boolean
if True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
betas : array
(k+1)x1 array of estimated coefficients (rho first)
lam : float
estimate of spatial autoregressive coefficient
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant, excluding lambda)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
method : string
log Jacobian method
if 'full': brute force (full matrix computations)
epsilon : float
tolerance criterion used in minimize_scalar function and inverse_product
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
varb : array
Variance covariance matrix (k+1 x k+1) - includes var(lambda)
vm1 : array
variance covariance matrix for lambda, sigma (2 x 2)
sig2 : float
Sigma squared used in computations
logll : float
maximized log-likelihood (including constant terms)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
utu : float
Sum of squared residuals
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
Examples
--------
>>> import numpy as np
>>> import pysal as ps
>>> np.set_printoptions(suppress=True) #prevent scientific format
>>> db = ps.open(ps.examples.get_path("south.dbf"),'r')
>>> ds_name = "south.dbf"
>>> y_name = "HR90"
>>> y = np.array(db.by_col(y_name))
>>> y.shape = (len(y),1)
>>> x_names = ["RD90","PS90","UE90","DV90"]
>>> x = np.array([db.by_col(var) for var in x_names]).T
>>> ww = ps.open(ps.examples.get_path("south_q.gal"))
>>> w = ww.read()
>>> ww.close()
>>> w_name = "south_q.gal"
>>> w.transform = 'r'
>>> mlerr = ML_Error(y,x,w,name_y=y_name,name_x=x_names,\
name_w=w_name,name_ds=ds_name) #doctest: +SKIP
>>> np.around(mlerr.betas, decimals=4) #doctest: +SKIP
array([[ 6.1492],
[ 4.4024],
[ 1.7784],
[-0.3781],
[ 0.4858],
[ 0.2991]])
>>> "{0:.4f}".format(mlerr.lam) #doctest: +SKIP
'0.2991'
>>> "{0:.4f}".format(mlerr.mean_y) #doctest: +SKIP
'9.5493'
>>> "{0:.4f}".format(mlerr.std_y) #doctest: +SKIP
'7.0389'
>>> np.around(np.diag(mlerr.vm), decimals=4) #doctest: +SKIP
array([ 1.0648, 0.0555, 0.0454, 0.0061, 0.0148, 0.0014])
>>> np.around(mlerr.sig2, decimals=4) #doctest: +SKIP
array([[ 32.4069]])
>>> "{0:.4f}".format(mlerr.logll) #doctest: +SKIP
'-4471.4071'
>>> "{0:.4f}".format(mlerr.aic) #doctest: +SKIP
'8952.8141'
>>> "{0:.4f}".format(mlerr.schwarz) #doctest: +SKIP
'8979.0779'
>>> "{0:.4f}".format(mlerr.pr2) #doctest: +SKIP
'0.3058'
>>> "{0:.4f}".format(mlerr.utu) #doctest: +SKIP
'48534.9148'
>>> np.around(mlerr.std_err, decimals=4) #doctest: +SKIP
array([ 1.0319, 0.2355, 0.2132, 0.0784, 0.1217, 0.0378])
>>> np.around(mlerr.z_stat, decimals=4) #doctest: +SKIP
array([[ 5.9593, 0. ],
[ 18.6902, 0. ],
[ 8.3422, 0. ],
[ -4.8233, 0. ],
[ 3.9913, 0.0001],
[ 7.9089, 0. ]])
>>> mlerr.name_y #doctest: +SKIP
'HR90'
>>> mlerr.name_x #doctest: +SKIP
['CONSTANT', 'RD90', 'PS90', 'UE90', 'DV90', 'lambda']
>>> mlerr.name_w #doctest: +SKIP
'south_q.gal'
>>> mlerr.name_ds #doctest: +SKIP
'south.dbf'
>>> mlerr.title #doctest: +SKIP
'MAXIMUM LIKELIHOOD SPATIAL ERROR (METHOD = FULL)'
"""
def __init__(self, y, x, w, method='full', epsilon=0.0000001,
spat_diag=False, vm=False, name_y=None, name_x=None,
name_w=None, name_ds=None):
n = USER.check_arrays(y, x)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
x_constant = USER.check_constant(x)
method = method.upper()
BaseML_Error.__init__(self, y=y, x=x_constant,
w=w, method=method, epsilon=epsilon)
self.title = "MAXIMUM LIKELIHOOD SPATIAL ERROR" + \
" (METHOD = " + method + ")"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.name_x.append('lambda')
self.name_w = USER.set_name_w(name_w, w)
self.aic = DIAG.akaike(reg=self)
self.schwarz = DIAG.schwarz(reg=self)
SUMMARY.ML_Error(reg=self, w=w, vm=vm, spat_diag=spat_diag)
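# Editorial note (not part of the original source): the three helper functions
# below all evaluate the negative concentrated log-likelihood of the spatial
# error model, up to constants,
#     -ln L_c(lambda) = (n / 2) * ln(sigma2_hat(lambda)) - ln|I - lambda * W|,
# where sigma2_hat(lambda) = e(lambda)' e(lambda) / n and e(lambda) are the OLS
# residuals of (y - lambda * W y) regressed on (X - lambda * W X). They differ
# only in how the log-Jacobian ln|I - lambda * W| is computed: dense determinant
# ('full'), sparse LU decomposition ('LU'), or Ord's eigenvalue method ('ord').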
def err_c_loglik(lam, n, y, ylag, x, xlag, W):
# concentrated log-lik for error model, no constants, brute force
ys = y - lam * ylag
xs = x - lam * xlag
ysys = np.dot(ys.T, ys)
xsxs = np.dot(xs.T, xs)
xsxsi = np.linalg.inv(xsxs)
xsys = np.dot(xs.T, ys)
x1 = np.dot(xsxsi, xsys)
x2 = np.dot(xsys.T, x1)
ee = ysys - x2
sig2 = ee[0][0] / n
nlsig2 = (n / 2.0) * np.log(sig2)
a = -lam * W
np.fill_diagonal(a, 1.0)
jacob = np.log(np.linalg.det(a))
# this is the negative of the concentrated log lik for minimization
clik = nlsig2 - jacob
return clik
def err_c_loglik_sp(lam, n, y, ylag, x, xlag, I, Wsp):
# concentrated log-lik for error model, no constants, LU
if isinstance(lam, np.ndarray):
if lam.shape == (1,1):
lam = lam[0][0] #why does the interior value change?
ys = y - lam * ylag
xs = x - lam * xlag
ysys = np.dot(ys.T, ys)
xsxs = np.dot(xs.T, xs)
xsxsi = np.linalg.inv(xsxs)
xsys = np.dot(xs.T, ys)
x1 = np.dot(xsxsi, xsys)
x2 = np.dot(xsys.T, x1)
ee = ysys - x2
sig2 = ee[0][0] / n
nlsig2 = (n / 2.0) * np.log(sig2)
a = I - lam * Wsp
LU = SuperLU(a.tocsc())
jacob = np.sum(np.log(np.abs(LU.U.diagonal())))
# this is the negative of the concentrated log lik for minimization
clik = nlsig2 - jacob
return clik
def err_c_loglik_ord(lam, n, y, ylag, x, xlag, evals):
# concentrated log-lik for error model, no constants, eigenvalues
ys = y - lam * ylag
xs = x - lam * xlag
ysys = np.dot(ys.T, ys)
xsxs = np.dot(xs.T, xs)
xsxsi = np.linalg.inv(xsxs)
xsys = np.dot(xs.T, ys)
x1 = np.dot(xsxsi, xsys)
x2 = np.dot(xsys.T, x1)
ee = ysys - x2
sig2 = ee[0][0] / n
nlsig2 = (n / 2.0) * np.log(sig2)
revals = lam * evals
jacob = np.log(1 - revals).sum()
if isinstance(jacob, complex):
jacob = jacob.real
# this is the negative of the concentrated log lik for minimization
clik = nlsig2 - jacob
return clik
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
| bsd-3-clause |
electron/libchromiumcontent | script/lib/filesystem.py | 2 | 1582 | """Filesystem related helper functions.
"""
import contextlib
import errno
import os
import shutil
import sys
import tarfile
import tempfile
import urllib2
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def rm_f(path):
try:
os.remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def rm_rf(path):
try:
shutil.rmtree(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def safe_unlink(path):
try:
os.unlink(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def byte_to_mb(n):
return str(n / 1024 / 1024) + 'MB'
def download_and_extract(destination, url, verbose):
print url
with tempfile.TemporaryFile() as t:
with contextlib.closing(urllib2.urlopen(url)) as u:
total = int(u.headers['content-length'])
done = 0
last_length = 0
while True:
chunk = u.read(1024*1024)
done += len(chunk)
if not len(chunk):
break
if verbose:
percent = '{0:.2f}%'.format(round(float(done) / float(total), 4) * 100)
ratio = '(' + byte_to_mb(done) + '/' + byte_to_mb(total) + ')'
line = '-> ' + percent + ' ' + ratio
sys.stderr.write(line.ljust(last_length) + '\r')
last_length = len(line)
sys.stderr.flush()
t.write(chunk)
if verbose:
sys.stderr.write('\nExtracting...\n')
sys.stderr.flush()
with tarfile.open(fileobj=t, mode='r:bz2') as z:
z.extractall(destination)
| mit |
sharpdeep/seafile | scripts/upgrade/db_update_helper.py | 18 | 10798 | #coding: UTF-8
import sys
import os
import ConfigParser
import glob
HAS_MYSQLDB = True
try:
import MySQLdb
except ImportError:
HAS_MYSQLDB = False
HAS_SQLITE3 = True
try:
import sqlite3
except ImportError:
HAS_SQLITE3 = False
class EnvManager(object):
def __init__(self):
self.upgrade_dir = os.path.dirname(__file__)
self.install_path = os.path.dirname(self.upgrade_dir)
self.top_dir = os.path.dirname(self.install_path)
self.ccnet_dir = os.environ['CCNET_CONF_DIR']
self.seafile_dir = os.environ['SEAFILE_CONF_DIR']
env_mgr = EnvManager()
class Utils(object):
@staticmethod
def highlight(content, is_error=False):
'''Add ANSI color to content to get it highlighted on terminal'''
if is_error:
return '\x1b[1;31m%s\x1b[m' % content
else:
return '\x1b[1;32m%s\x1b[m' % content
@staticmethod
def info(msg):
print Utils.highlight('[INFO] ') + msg
@staticmethod
def error(msg):
print Utils.highlight('[ERROR] ') + msg
sys.exit(1)
@staticmethod
def read_config(config_path, defaults):
cp = ConfigParser.ConfigParser(defaults)
cp.read(config_path)
return cp
class MySQLDBInfo(object):
def __init__(self, host, port, username, password, db, unix_socket=None):
self.host = host
self.port = port
self.username = username
self.password = password
self.db = db
self.unix_socket = unix_socket
class DBUpdater(object):
def __init__(self, version, name):
self.sql_dir = os.path.join(env_mgr.upgrade_dir, 'sql', version, name)
@staticmethod
def get_instance(version):
'''Detect whether we are using mysql or sqlite3'''
ccnet_db_info = DBUpdater.get_ccnet_mysql_info()
seafile_db_info = DBUpdater.get_seafile_mysql_info()
seahub_db_info = DBUpdater.get_seahub_mysql_info()
if ccnet_db_info and seafile_db_info and seahub_db_info:
Utils.info('You are using MySQL')
if not HAS_MYSQLDB:
Utils.error('Python MySQLdb module is not found')
updater = MySQLDBUpdater(version, ccnet_db_info, seafile_db_info, seahub_db_info)
elif (ccnet_db_info is None) and (seafile_db_info is None) and (seahub_db_info is None):
Utils.info('You are using SQLite3')
if not HAS_SQLITE3:
Utils.error('Python sqlite3 module is not found')
updater = SQLiteDBUpdater(version)
else:
def to_db_string(info):
if info is None:
return 'SQLite3'
else:
return 'MySQL'
Utils.error('Error:\n ccnet is using %s\n seafile is using %s\n seahub is using %s\n'
% (to_db_string(ccnet_db_info),
to_db_string(seafile_db_info),
to_db_string(seahub_db_info)))
return updater
def update_db(self):
ccnet_sql = os.path.join(self.sql_dir, 'ccnet.sql')
seafile_sql = os.path.join(self.sql_dir, 'seafile.sql')
seahub_sql = os.path.join(self.sql_dir, 'seahub.sql')
if os.path.exists(ccnet_sql):
Utils.info('updating ccnet database...')
self.update_ccnet_sql(ccnet_sql)
if os.path.exists(seafile_sql):
Utils.info('updating seafile database...')
self.update_seafile_sql(seafile_sql)
if os.path.exists(seahub_sql):
Utils.info('updating seahub database...')
self.update_seahub_sql(seahub_sql)
@staticmethod
def get_ccnet_mysql_info():
ccnet_conf = os.path.join(env_mgr.ccnet_dir, 'ccnet.conf')
defaults = {
'HOST': '127.0.0.1',
'PORT': '3306',
'UNIX_SOCKET': '',
}
config = Utils.read_config(ccnet_conf, defaults)
db_section = 'Database'
if not config.has_section(db_section):
return None
type = config.get(db_section, 'ENGINE')
if type != 'mysql':
return None
try:
host = config.get(db_section, 'HOST')
port = config.getint(db_section, 'PORT')
username = config.get(db_section, 'USER')
password = config.get(db_section, 'PASSWD')
db = config.get(db_section, 'DB')
unix_socket = config.get(db_section, 'UNIX_SOCKET')
except ConfigParser.NoOptionError, e:
Utils.error('Database config in ccnet.conf is invalid: %s' % e)
info = MySQLDBInfo(host, port, username, password, db, unix_socket)
return info
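    # For illustration only: a hypothetical ccnet.conf [Database] section that
    # get_ccnet_mysql_info above would parse (all values are placeholders).
    #
    #     [Database]
    #     ENGINE = mysql
    #     HOST = 127.0.0.1
    #     PORT = 3306
    #     USER = seafile
    #     PASSWD = secret
    #     DB = ccnet_db
    #     UNIX_SOCKET = /var/run/mysqld/mysqld.sock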
@staticmethod
def get_seafile_mysql_info():
seafile_conf = os.path.join(env_mgr.seafile_dir, 'seafile.conf')
defaults = {
'HOST': '127.0.0.1',
'PORT': '3306',
'UNIX_SOCKET': '',
}
config = Utils.read_config(seafile_conf, defaults)
db_section = 'database'
if not config.has_section(db_section):
return None
type = config.get(db_section, 'type')
if type != 'mysql':
return None
try:
host = config.get(db_section, 'host')
port = config.getint(db_section, 'port')
username = config.get(db_section, 'user')
password = config.get(db_section, 'password')
db = config.get(db_section, 'db_name')
unix_socket = config.get(db_section, 'unix_socket')
except ConfigParser.NoOptionError, e:
Utils.error('Database config in seafile.conf is invalid: %s' % e)
info = MySQLDBInfo(host, port, username, password, db, unix_socket)
return info
@staticmethod
def get_seahub_mysql_info():
sys.path.insert(0, env_mgr.top_dir)
try:
import seahub_settings # pylint: disable=F0401
except ImportError, e:
Utils.error('Failed to import seahub_settings.py: %s' % e)
if not hasattr(seahub_settings, 'DATABASES'):
return None
try:
d = seahub_settings.DATABASES['default']
if d['ENGINE'] != 'django.db.backends.mysql':
return None
host = d.get('HOST', '127.0.0.1')
port = int(d.get('PORT', 3306))
username = d['USER']
password = d['PASSWORD']
db = d['NAME']
unix_socket = host if host.startswith('/') else None
except KeyError:
Utils.error('Database config in seahub_settings.py is invalid: %s' % e)
info = MySQLDBInfo(host, port, username, password, db, unix_socket)
return info
def update_ccnet_sql(self, ccnet_sql):
raise NotImplementedError
def update_seafile_sql(self, seafile_sql):
raise NotImplementedError
def update_seahub_sql(self, seahub_sql):
raise NotImplementedError
class CcnetSQLiteDB(object):
def __init__(self, ccnet_dir):
self.ccnet_dir = ccnet_dir
def get_db(self, dbname):
dbs = (
'ccnet.db',
'GroupMgr/groupmgr.db',
'misc/config.db',
'OrgMgr/orgmgr.db',
)
for db in dbs:
if os.path.splitext(os.path.basename(db))[0] == dbname:
return os.path.join(self.ccnet_dir, db)
class SQLiteDBUpdater(DBUpdater):
def __init__(self, version):
DBUpdater.__init__(self, version, 'sqlite3')
self.ccnet_db = CcnetSQLiteDB(env_mgr.ccnet_dir)
self.seafile_db = os.path.join(env_mgr.seafile_dir, 'seafile.db')
self.seahub_db = os.path.join(env_mgr.top_dir, 'seahub.db')
def update_db(self):
super(SQLiteDBUpdater, self).update_db()
for sql_path in glob.glob(os.path.join(self.sql_dir, 'ccnet', '*.sql')):
self.update_ccnet_sql(sql_path)
def apply_sqls(self, db_path, sql_path):
with open(sql_path, 'r') as fp:
lines = fp.read().split(';')
with sqlite3.connect(db_path) as conn:
for line in lines:
line = line.strip()
if not line:
continue
else:
conn.execute(line)
def update_ccnet_sql(self, sql_path):
dbname = os.path.splitext(os.path.basename(sql_path))[0]
self.apply_sqls(self.ccnet_db.get_db(dbname), sql_path)
def update_seafile_sql(self, sql_path):
self.apply_sqls(self.seafile_db, sql_path)
def update_seahub_sql(self, sql_path):
self.apply_sqls(self.seahub_db, sql_path)
class MySQLDBUpdater(DBUpdater):
def __init__(self, version, ccnet_db_info, seafile_db_info, seahub_db_info):
DBUpdater.__init__(self, version, 'mysql')
self.ccnet_db_info = ccnet_db_info
self.seafile_db_info = seafile_db_info
self.seahub_db_info = seahub_db_info
def update_ccnet_sql(self, ccnet_sql):
self.apply_sqls(self.ccnet_db_info, ccnet_sql)
def update_seafile_sql(self, seafile_sql):
self.apply_sqls(self.seafile_db_info, seafile_sql)
def update_seahub_sql(self, seahub_sql):
self.apply_sqls(self.seahub_db_info, seahub_sql)
def get_conn(self, info):
kw = dict(
user=info.username,
passwd=info.password,
db=info.db,
)
if info.unix_socket:
kw['unix_socket'] = info.unix_socket
else:
kw['host'] = info.host
kw['port'] = info.port
try:
conn = MySQLdb.connect(**kw)
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
msg = str(e.args[1])
else:
msg = str(e)
Utils.error('Failed to connect to mysql database %s: %s' % (info.db, msg))
return conn
def execute_sql(self, conn, sql):
cursor = conn.cursor()
try:
cursor.execute(sql)
conn.commit()
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
msg = str(e.args[1])
else:
msg = str(e)
Utils.error('Failed to execute sql: %s' % msg)
def apply_sqls(self, info, sql_path):
with open(sql_path, 'r') as fp:
lines = fp.read().split(';')
conn = self.get_conn(info)
for line in lines:
line = line.strip()
if not line:
continue
else:
self.execute_sql(conn, line)
def main():
version = sys.argv[1]
db_updater = DBUpdater.get_instance(version)
db_updater.update_db()
return 0
if __name__ == '__main__':
main()
| gpl-2.0 |
ic-hep/DIRAC | DataManagementSystem/scripts/dirac-admin-user-quota.py | 9 | 1201 | #!/usr/bin/env python
########################################################################
# $HeadURL$
########################################################################
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.setUsageMessage( """
Show storage quotas for specified users or for all registered users if nobody is specified
Usage:
%s [user1 ...]
""" % Script.scriptName )
Script.parseCommandLine()
users = Script.getPositionalArgs()
from DIRAC import gLogger, gConfig
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
if not users:
res = gConfig.getSections( '/Registry/Users' )
if not res['OK']:
gLogger.error( "Failed to retrieve user list from CS", res['Message'] )
DIRAC.exit( 2 )
users = res['Value']
gLogger.notice( "-"*30 )
gLogger.notice( "%s|%s" % ( 'Username'.ljust( 15 ), 'Quota (GB)'.rjust( 15 ) ) )
gLogger.notice( "-"*30 )
for user in sorted( users ):
quota = gConfig.getValue( '/Registry/Users/%s/Quota' % user, 0 )
if not quota:
quota = gConfig.getValue( '/Registry/DefaultStorageQuota' )
gLogger.notice( "%s|%s" % ( user.ljust( 15 ), str( quota ).rjust( 15 ) ) )
gLogger.notice( "-"*30 )
DIRAC.exit( 0 )
| gpl-3.0 |
cmbclh/vnpy1.7 | vnpy/trader/gateway/ltsGateway/ltsGateway.py | 7 | 47839 | # encoding: UTF-8
'''
vn.lts gateway integration
'''
import os
import json
from vnpy.api.lts import MdApi, QryApi, TdApi, defineDict
from vnpy.trader.vtFunction import getTempPath, getJsonPath
from vnpy.trader.vtGateway import *
# Mapping dictionaries between VT constants and LTS constants
# (a short round-trip note follows the dictionaries below).
# Price type mapping
priceTypeMap= {}
priceTypeMap[PRICETYPE_LIMITPRICE] = defineDict["SECURITY_FTDC_OPT_LimitPrice"]
priceTypeMap[PRICETYPE_MARKETPRICE] = defineDict["SECURITY_FTDC_OPT_AnyPrice"]
priceTypeMap[PRICETYPE_FAK] = defineDict["SECURITY_FTDC_OPT_BestPrice"]
priceTypeMap[PRICETYPE_FOK] = defineDict["SECURITY_FTDC_OPT_AllLimitPrice"]
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}
# Direction type mapping
directionMap = {}
directionMap[DIRECTION_LONG] = defineDict["SECURITY_FTDC_D_Buy"]
directionMap[DIRECTION_SHORT] = defineDict["SECURITY_FTDC_D_Sell"]
directionMapReverse = {v: k for k, v in directionMap.items()}
# Offset (open/close) type mapping
offsetMap = {}
offsetMap[OFFSET_OPEN] = defineDict["SECURITY_FTDC_OF_Open"]
offsetMap[OFFSET_CLOSE] = defineDict["SECURITY_FTDC_OF_Close"]
offsetMap[OFFSET_CLOSETODAY] = defineDict["SECURITY_FTDC_OF_CloseToday"]
offsetMap[OFFSET_CLOSEYESTERDAY] = defineDict["SECURITY_FTDC_OF_CloseYesterday"]
offsetMapReverse = {v:k for k,v in offsetMap.items()}
# Exchange type mapping
exchangeMap = {}
exchangeMap[EXCHANGE_SSE] = 'SSE'
exchangeMap[EXCHANGE_SZSE] = 'SZE'
exchangeMap[EXCHANGE_HKEX] = 'HGE'
exchangeMapReverse = {v:k for k,v in exchangeMap.items()}
# Position direction type mapping
posiDirectionMap = {}
posiDirectionMap[DIRECTION_NET] = defineDict["SECURITY_FTDC_PD_Net"]
posiDirectionMap[DIRECTION_LONG] = defineDict["SECURITY_FTDC_PD_Long"]
posiDirectionMap[DIRECTION_SHORT] = defineDict["SECURITY_FTDC_PD_Short"]
posiDirectionMapReverse = {v:k for k,v in posiDirectionMap.items()}
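# Editorial note (not in the original source): the forward maps above translate
# vnpy (VT) constants into the LTS/FTDC codes expected by the API, and the
# *Reverse maps translate codes received in callbacks back into VT constants:
#     directionMap[DIRECTION_LONG]                             # VT -> LTS code
#     directionMapReverse[defineDict["SECURITY_FTDC_D_Buy"]]   # LTS code -> DIRECTION_LONG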
########################################################################################
class LtsGateway(VtGateway):
"""Lts接口"""
#----------------------------------------------------------------------
def __init__(self, eventEngine, gatewayName='LTS'):
"""Constructor"""
super(LtsGateway, self).__init__(eventEngine, gatewayName)
self.mdApi = LtsMdApi(self)
self.tdApi = LtsTdApi(self)
self.qryApi = LtsQryApi(self)
self.mdConnected = False
self.tdConnected = False
self.qryConnected = False
        self.qryEnabled = False         # whether the polling query loop should run
self.fileName = self.gatewayName + '_connect.json'
self.filePath = getJsonPath(self.fileName, __file__)
#----------------------------------------------------------------------
def connect(self):
"""连接"""
# 载入json 文件
try:
f = file(self.filePath)
except IOError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'读取连接配置出错,请检查'
self.onLog(log)
return
        # parse the json file
setting = json.load(f)
try:
userID = str(setting['userID'])
mdPassword = str(setting['mdPassword'])
tdPassword = str(setting['tdPassword'])
brokerID = str(setting['brokerID'])
tdAddress = str(setting['tdAddress'])
mdAddress = str(setting['mdAddress'])
qryAddress = str(setting['qryAddress'])
productInfo = str(setting['productInfo'])
authCode = str(setting['authCode'])
except KeyError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'连接配置缺少字段,请检查'
self.onLog(log)
return
        # connect the market data, trading and query APIs
self.mdApi.connect(userID, mdPassword, brokerID, mdAddress)
self.tdApi.connect(userID, tdPassword, brokerID, tdAddress, productInfo, authCode)
self.qryApi.connect(userID, tdPassword, brokerID, qryAddress, productInfo, authCode)
        # initialize and start the polling query
self.initQuery()
self.startQuery()
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅行情"""
self.mdApi.subscribe(subscribeReq)
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
return self.tdApi.sendOrder(orderReq)
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.tdApi.cancelOrder(cancelOrderReq)
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户资金"""
self.qryApi.qryAccount()
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
self.qryApi.qryPosition()
#----------------------------------------------------------------------
def close(self):
"""关闭"""
if self.mdConnected:
self.mdApi.close()
if self.tdConnected:
self.tdApi.close()
if self.qryConnected:
self.qryApi.close()
#----------------------------------------------------------------------
def initQuery(self):
"""初始化连续查询"""
if self.qryEnabled:
# 需要循环的查询函数列表
self.qryFunctionList = [self.qryAccount, self.qryPosition]
self.qryCount = 0 # 查询触发倒计时
self.qryTrigger = 2 # 查询触发点
self.qryNextFunction = 0 # 上次运行的查询函数索引
self.startQuery()
#----------------------------------------------------------------------
def query(self, event):
"""注册到事件处理引擎上的查询函数"""
self.qryCount += 1
if self.qryCount > self.qryTrigger:
            # reset the countdown
self.qryCount = 0
            # execute the query function
function = self.qryFunctionList[self.qryNextFunction]
function()
            # advance to the next query function, wrapping back to 0 at the end of the list
self.qryNextFunction += 1
if self.qryNextFunction == len(self.qryFunctionList):
self.qryNextFunction = 0
#----------------------------------------------------------------------
def startQuery(self):
"""启动连续查询"""
self.eventEngine.register(EVENT_TIMER, self.query)
#----------------------------------------------------------------------
def setQryEnabled(self, qryEnabled):
"""设置是否要启动循环查询"""
self.qryEnabled = qryEnabled
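# A standalone sketch (illustration only, not used by the gateway) of the
# countdown / round-robin scheme implemented by initQuery and query above.
def _round_robin_query_example(counter, trigger, index, functions):
    """Advance the countdown; run at most one query function per call."""
    counter += 1
    if counter > trigger:
        counter = 0
        functions[index]()                      # run the next query in the cycle
        index = (index + 1) % len(functions)    # wrap around at the end of the list
    return counter, index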
########################################################################
class LtsMdApi(MdApi):
"""Lts行情API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""Constructor"""
super(LtsMdApi, self).__init__()
self.gateway = gateway #gateway对象
self.gatewayName = gateway.gatewayName #gateway对象名称
self.reqID = EMPTY_INT # 操作请求编号
self.connectionStatus = False # 连接状态
self.loginStatus = False # 登陆状态
self.subscribedSymbols = set()
self.userID = EMPTY_STRING # 账号
self.password = EMPTY_STRING # 密码
self.brokerID = EMPTY_STRING # 经纪商代码
self.address = EMPTY_STRING # 服务器地址
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器连接成功'
self.gateway.onLog(log)
self.login()
#----------------------------------------------------------------------
def onFrontDisconnected(self,n):
"""服务器断开"""
self.connectionStatus= False
self.loginStatus = False
self.gateway.mdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器连接断开'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
"""心跳报警"""
pass
#----------------------------------------------------------------------
def onRspError(self,error,n,last):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
# 如果登录成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = True
self.gateway.mdConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器登录完成'
self.gateway.onLog(log)
            # re-subscribe previously subscribed contracts
for subscribeReq in self.subscribedSymbols:
self.subscribe(subscribeReq)
        # otherwise, push the error
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
            self.gateway.mdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器登出完成'
self.gateway.onLog(log)
        # otherwise, push the error
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspSubMarketData(self, data, error, n, last):
"""订阅合约回报"""
# 通常不在乎订阅错误,选择忽略
pass
#----------------------------------------------------------------------
def onRspUnSubMarketData(self, data, error, n, last):
"""退订合约回报"""
# 同上
pass
#----------------------------------------------------------------------
def onRtnDepthMarketData(self, data):
"""行情推送"""
tick = VtTickData()
tick.gatewayName = self.gatewayName
tick.symbol = data['InstrumentID']
tick.exchange = exchangeMapReverse.get(data['ExchangeID'], u'未知')
tick.vtSymbol = '.'.join([tick.symbol, tick.exchange])
tick.lastPrice = data['LastPrice']
tick.volume = data['Volume']
tick.openInterest = data['OpenInterest']
tick.time = '.'.join([data['UpdateTime'], str(data['UpdateMillisec']/100)])
tick.date = data['TradingDay']
tick.openPrice = data['OpenPrice']
tick.highPrice = data['HighestPrice']
tick.lowPrice = data['LowestPrice']
tick.preClosePrice = data['PreClosePrice']
tick.upperLimit = data['UpperLimitPrice']
tick.lowerLimit = data['LowerLimitPrice']
        # LTS provides five levels of market depth
tick.bidPrice1 = data['BidPrice1']
tick.bidVolume1 = data['BidVolume1']
tick.askPrice1 = data['AskPrice1']
tick.askVolume1 = data['AskVolume1']
tick.bidPrice2 = data['BidPrice2']
tick.bidVolume2 = data['BidVolume2']
tick.askPrice2 = data['AskPrice2']
tick.askVolume2 = data['AskVolume2']
tick.bidPrice3 = data['BidPrice3']
tick.bidVolume3 = data['BidVolume3']
tick.askPrice3 = data['AskPrice3']
tick.askVolume3 = data['AskVolume3']
tick.bidPrice4 = data['BidPrice4']
tick.bidVolume4 = data['BidVolume4']
tick.askPrice4 = data['AskPrice4']
tick.askVolume4 = data['AskVolume4']
tick.bidPrice5 = data['BidPrice5']
tick.bidVolume5 = data['BidVolume5']
tick.askPrice5 = data['AskPrice5']
tick.askVolume5 = data['AskVolume5']
self.gateway.onTick(tick)
#----------------------------------------------------------------------
def connect(self, userID, password, brokerID, address):
"""初始化连接"""
self.userID = userID # 账号
self.password = password # 密码
self.brokerID = brokerID # 经纪商代码
self.address = address # 服务器地址
# 如果尚未建立服务器连接,则进行连接
if not self.connectionStatus:
            # create the API object on the C++ side; the argument is the folder path for storing .con files
path = getTempPath(self.gatewayName + '_')
self.createFtdcMdApi(path)
            # register the server address
self.registerFront(self.address)
            # initialize the connection; onFrontConnected is called on success
self.init()
        # if already connected but not yet logged in, log in now
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅合约"""
req = {}
req['InstrumentID'] = str(subscribeReq.symbol)
req['ExchangeID'] = exchangeMap.get(str(subscribeReq.exchange), '')
        # design note: if subscribe is called before login completes,
        # the request is cached and re-sent automatically after login
if self.loginStatus:
self.subscribeMarketData(req)
self.subscribedSymbols.add(subscribeReq)
#----------------------------------------------------------------------
def login(self):
"""登录"""
# 如果填入了用户名密码等,则登录
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.exit()
########################################################################
class LtsTdApi(TdApi):
"""LTS交易API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""API对象的初始化函数"""
super(LtsTdApi, self).__init__()
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway object name
        self.reqID = EMPTY_INT              # request ID
        self.orderRef = EMPTY_INT           # order reference
        self.connectionStatus = False       # connection status
        self.loginStatus = False            # login status
        self.userID = EMPTY_STRING          # account ID
        self.password = EMPTY_STRING        # password
        self.brokerID = EMPTY_STRING        # broker ID
        self.address = EMPTY_STRING         # server address
        self.productInfo = EMPTY_STRING     # client product name
        self.authCode = EMPTY_STRING        # authentication code
        self.randCode = EMPTY_STRING        # random code
        self.frontID = EMPTY_INT            # front server ID
        self.sessionID = EMPTY_INT          # session ID
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器连接成功'
self.gateway.onLog(log)
        # after the front server connects, request the random auth code
self.reqID += 1
self.reqFetchAuthRandCode({}, self.reqID)
#----------------------------------------------------------------------
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器连接断开'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
""""""
pass
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
# 如果登录成功,推送日志信息
if error['ErrorID'] == 0:
self.frontID = str(data['FrontID'])
self.sessionID = str(data['SessionID'])
self.loginStatus = True
            self.gateway.tdConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器登录完成'
self.gateway.onLog(log)
        # otherwise, push the error
else:
err = VtErrorData()
            err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器登出完成'
self.gateway.onLog(log)
        # otherwise, push the error
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspFetchAuthRandCode(self, data, error, n, last):
"""请求随机认证码"""
self.randCode = data['RandCode']
self.login()
#----------------------------------------------------------------------
def onRspUserPasswordUpdate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspTradingAccountPasswordUpdate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspOrderInsert(self, data, error, n, last):
"""发单错误(柜台)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspOrderAction(self, data, error, n, last):
"""撤单错误(柜台)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspError(self, error, n, last):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRtnOrder(self, data):
"""报单回报"""
# 更新最大报单编号
newref = data['OrderRef']
self.orderRef = max(self.orderRef, int(newref))
        # create the order data object
order = VtOrderData()
order.gatewayName = self.gatewayName
        # save symbol, exchange and order reference
order.symbol = data['InstrumentID']
order.exchange = exchangeMapReverse.get(data['ExchangeID'], '')
order.vtSymbol = '.'.join([order.symbol, order.exchange])
order.orderID = data['OrderRef']
        # direction
if data['Direction'] == '0':
order.direction = DIRECTION_LONG
elif data['Direction'] == '1':
order.direction = DIRECTION_SHORT
else:
order.direction = DIRECTION_UNKNOWN
        # offset (open/close)
if data['CombOffsetFlag'] == '0':
order.offset = OFFSET_OPEN
elif data['CombOffsetFlag'] == '1':
order.offset = OFFSET_CLOSE
else:
order.offset = OFFSET_UNKNOWN
        # status
if data['OrderStatus'] == '0':
order.status = STATUS_ALLTRADED
elif data['OrderStatus'] == '1':
order.status = STATUS_PARTTRADED
elif data['OrderStatus'] == '3':
order.status = STATUS_NOTTRADED
elif data['OrderStatus'] == '5':
order.status = STATUS_CANCELLED
else:
order.status = STATUS_UNKNOWN
        # price, volume and other numeric fields
order.price = float(data['LimitPrice'])
order.totalVolume = data['VolumeTotalOriginal']
order.tradedVolume = data['VolumeTraded']
order.orderTime = data['InsertTime']
order.cancelTime = data['CancelTime']
order.frontID = data['FrontID']
order.sessionID = data['SessionID']
        # CTP-style order ID consistency relies on frontID, sessionID and orderID
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
        # push the order event
self.gateway.onOrder(order)
#----------------------------------------------------------------------
def onRtnTrade(self, data):
"""成交回报"""
# 创建报单数据对象
trade = VtTradeData()
trade.gatewayName = self.gatewayName
        # save symbol, exchange and order reference
trade.symbol = data['InstrumentID']
trade.exchange = exchangeMapReverse.get(data['ExchangeID'], '')
trade.vtSymbol = '.'.join([trade.symbol, trade.exchange])
trade.tradeID = data['TradeID']
trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
trade.orderID = data['OrderRef']
trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
        # direction
trade.direction = directionMapReverse.get(data['Direction'], '')
        # offset (open/close)
trade.offset = offsetMapReverse.get(data['OffsetFlag'], '')
        # price, volume and other numeric fields
trade.price = float(data['Price'])
trade.volume = data['Volume']
trade.tradeTime = data['TradeTime']
        # push the trade event
self.gateway.onTrade(trade)
#----------------------------------------------------------------------
def onErrRtnOrderInsert(self, data, error):
"""发单错误回报(交易所)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onErrRtnOrderAction(self, data, error):
"""撤单错误回报(交易所)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspFundOutByLiber(self, data, error, n, last):
"""LTS发起出金应答"""
pass
#----------------------------------------------------------------------
def onRtnFundOutByLiber(self, data):
"""LTS发起出金通知"""
pass
#----------------------------------------------------------------------
def onErrRtnFundOutByLiber(self, data, error):
"""LTS发起出金错误回报"""
pass
#----------------------------------------------------------------------
def onRtnFundInByBank(self, data):
"""银行发起入金通知"""
pass
#----------------------------------------------------------------------
def onRspFundInterTransfer(self, data, error, n, last):
"""资金内转应答"""
pass
#----------------------------------------------------------------------
def onRtnFundInterTransferSerial(self, data):
"""资金内转流水通知"""
pass
#----------------------------------------------------------------------
def onErrRtnFundInterTransfer(self, data, error):
"""资金内转错误回报"""
pass
#----------------------------------------------------------------------
def connect(self, userID, password, brokerID, address, productInfo, authCode):
"""初始化连接"""
self.userID = userID # 账号
self.password = password # 密码
self.brokerID = brokerID # 经纪商代码
self.address = address # 服务器地址
self.productInfo = productInfo
self.authCode = authCode
# 如果尚未建立服务器连接,则进行连接
if not self.connectionStatus:
            # Create the API object in the C++ environment; the argument is the folder path used to save the .con files
path = getTempPath(self.gatewayName + '_')
self.createFtdcTraderApi(path)
            # Set the data resync mode to push all data starting from today
self.subscribePrivateTopic(0)
self.subscribePublicTopic(0)
            # Register the server address
self.registerFront(self.address)
            # Initialize the connection; onFrontConnected is called on success
self.init()
        # If already connected but not yet logged in, log in now
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
def login(self):
"""连接服务器"""
# 如果填入了用户名密码等,则登录
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
req['UserProductInfo'] = self.productInfo
req['AuthCode'] = self.authCode
req['RandCode'] = self.randCode
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
self.reqID += 1
self.orderRef += 1
req = {}
req['InstrumentID'] = str(orderReq.symbol)
        req['LimitPrice'] = str(orderReq.price)    # prices are strings in LTS
req['VolumeTotalOriginal'] = int(orderReq.volume)
req['ExchangeID'] = exchangeMap.get(orderReq.exchange, '')
        # If any of the types passed in below is not supported by this interface, return an empty string
try:
req['OrderPriceType'] = priceTypeMap[orderReq.priceType]
req['Direction'] = directionMap[orderReq.direction]
req['CombOffsetFlag'] = offsetMap[orderReq.offset]
req['ExchangeID'] = exchangeMap[orderReq.exchange]
except KeyError:
return ''
req['OrderRef'] = str(self.orderRef)
req['InvestorID'] = self.userID
req['UserID'] = self.userID
req['BrokerID'] = self.brokerID
        req['CombHedgeFlag'] = defineDict['SECURITY_FTDC_HF_Speculation']          # speculation
        req['ContingentCondition'] = defineDict['SECURITY_FTDC_CC_Immediately']    # send immediately
        req['ForceCloseReason'] = defineDict['SECURITY_FTDC_FCC_NotForceClose']    # not a forced liquidation
        req['IsAutoSuspend'] = 0                                                   # no automatic suspension
        req['TimeCondition'] = defineDict['SECURITY_FTDC_TC_GFD']                  # good for the day
        req['VolumeCondition'] = defineDict['SECURITY_FTDC_VC_AV']                 # any volume
        req['MinVolume'] = 1                                                       # minimum trade volume is 1
req['UserForceClose'] = 0
self.reqOrderInsert(req, self.reqID)
        # Return the order ID (a string) so that algorithms can manage the order dynamically
vtOrderID = '.'.join([self.gatewayName, str(self.orderRef)])
return vtOrderID
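        # Illustrative note (not in the original source): with gatewayName 'LTS' and an
        # orderRef of 12, the returned ID would look like 'LTS.12'; actual values depend
        # on the gateway name and the running order counter.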
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.reqID += 1
req = {}
req['InstrumentID'] = cancelOrderReq.symbol
req['ExchangeID'] = cancelOrderReq.exchange
req['OrderRef'] = cancelOrderReq.orderID
req['FrontID'] = cancelOrderReq.frontID
req['SessionID'] = cancelOrderReq.sessionID
req['ActionFlag'] = defineDict['SECURITY_FTDC_AF_Delete']
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqOrderAction(req, self.reqID)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.exit()
########################################################################
class LtsQryApi(QryApi):
"""Lts账户查询实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""API对象的初始化函数"""
super(LtsQryApi, self).__init__()
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway object name
        self.reqID = EMPTY_INT                  # request ID
        self.orderRef = EMPTY_INT               # order reference number
        self.connectionStatus = False           # connection status
        self.loginStatus = False                # login status
        self.userID = EMPTY_STRING              # account
        self.password = EMPTY_STRING            # password
        self.brokerID = EMPTY_STRING            # broker ID
        self.address = EMPTY_STRING             # server address
        self.productInfo = EMPTY_STRING         # product name of the client program
        self.authCode = EMPTY_STRING            # authentication code
        self.randCode = EMPTY_STRING            # random code
        self.frontID = EMPTY_INT                # front server ID
        self.sessionID = EMPTY_INT              # session ID
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'查询服务器连接成功'
self.gateway.onLog(log)
        # After the front server is connected, request the random auth code
self.reqID += 1
self.reqFetchAuthRandCode({}, self.reqID)
#----------------------------------------------------------------------
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'查询服务器连接断开'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
""""""
pass
#----------------------------------------------------------------------
def onRspError(self, error, n, last):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
# 如果登录成功,推送日志信息
if error['ErrorID'] == 0:
self.frontID = str(data['FrontID'])
self.sessionID = str(data['SessionID'])
self.loginStatus = True
self.gateway.qryConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'查询服务器登录完成'
self.gateway.onLog(log)
            # Query the available instruments
self.reqID += 1
self.reqQryInstrument({}, self.reqID)
        # Otherwise, push the error message
else:
err = VtErrorData()
            err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.qryConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'查询服务器登出完成'
self.gateway.onLog(log)
        # Otherwise, push the error message
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspFetchAuthRandCode(self, data, error, n, last):
"""请求随机认证码"""
self.randCode = data['RandCode']
self.login()
#----------------------------------------------------------------------
def onRspQryExchange(self, data, error, n, last):
pass
#----------------------------------------------------------------------
def onRspQryInstrument(self, data, error, n, last):
"""合约查询回报"""
contract = VtContractData()
contract.gatewayName = self.gatewayName
contract.symbol = data['InstrumentID']
contract.exchange = exchangeMapReverse[data['ExchangeID']]
contract.vtSymbol = '.'.join([contract.symbol, contract.exchange])
contract.name = data['InstrumentName'].decode('GBK')
        # Numeric fields of the instrument
contract.size = data['VolumeMultiple']
contract.priceTick = data['PriceTick']
contract.strikePrice = data['ExecPrice']
contract.underlyingSymbol = data['MarketID']
        # Product class
if data['ProductClass'] == '1':
contract.productClass = PRODUCT_FUTURES
elif data['ProductClass'] == '2':
contract.productClass = PRODUCT_OPTION
elif data['ProductClass'] == '3':
contract.productClass = PRODUCT_COMBINATION
elif data['ProductClass'] == '6':
contract.productClass = PRODUCT_EQUITY
elif data['ProductClass'] == '8':
contract.productClass = PRODUCT_EQUITY
else:
print data['ProductClass']
        # Option type
if data['InstrumentType'] == '1':
contract.optionType = OPTION_CALL
elif data['InstrumentType'] == '2':
contract.optionType = OPTION_PUT
        # Push the event
self.gateway.onContract(contract)
if last:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易合约信息获取完成'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onRspQryInvestor(self, data, error, n, last):
"""投资者查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryTradingCode(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTradingAccount(self, data, error, n, last):
"""资金账户查询回报"""
account = VtAccountData()
account.gatewayName = self.gatewayName
        # Account ID
account.accountID = data['AccountID']
account.vtAccountID = '.'.join([self.gatewayName, account.accountID])
        # Numeric fields
account.preBalance = data['PreBalance']
account.available = data['Available']
account.commission = data['Commission']
account.margin = data['CurrMargin']
#account.closeProfit = data['CloseProfit']
#account.positionProfit = data['PositionProfit']
        # It is not certain whether this balance matches the account balance shown in the Kuaiqi client; needs testing
account.balance = data['Balance']
        # Push the event
self.gateway.onAccount(account)
#----------------------------------------------------------------------
def onRspQryBondInterest(self, data, error, n, last):
"""债券利息查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryMarketRationInfo(self, data, error, n, last):
"""市值配售查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryInstrumentCommissionRate(self, data, error, n, last):
"""合约手续费查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryETFInstrument(self, data, error, n, last):
"""ETF基金查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryETFBasket(self, data, error, n, last):
"""ETF股票篮查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryOFInstrument(self, data, error, n, last):
"""OF合约查询回报"""
pass
#----------------------------------------------------------------------
def onRspQrySFInstrument(self, data, error, n, last):
"""SF合约查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryInstrumentUnitMargin(self, data, error, n, last):
"""查询单手保证金"""
pass
#----------------------------------------------------------------------
    def onRspQryPreDelivInfo(self, data, error, n, last):
        """Pre-delivery information query callback."""
pass
#----------------------------------------------------------------------
def onRsyQryCreditStockAssignInfo(self, data, error, n, last):
"""查询可融券分配"""
pass
#----------------------------------------------------------------------
    def onRspQryCreditCashAssignInfo(self, data, error, n, last):
        """Credit cash (margin financing) assignment query callback."""
pass
#----------------------------------------------------------------------
def onRsyQryConversionRate(self, data, error, n, last):
"""查询证券这算率"""
pass
#----------------------------------------------------------------------
    def onRspQryHisCreditDebtInfo(self, data, error, n, last):
        """Historical credit debt query callback."""
pass
#----------------------------------------------------------------------
def onRspQryMarketDataStaticInfo(self, data, error, n, last):
"""查询行情静态信息"""
pass
#----------------------------------------------------------------------
def onRspQryExpireRepurchInfo(self, data, error, n, last):
"""查询到期回购信息响应"""
pass
#----------------------------------------------------------------------
def onRspQryBondPledgeRate(self, data, error, n, last):
"""查询债券质押为标准券比例"""
pass
#----------------------------------------------------------------------
def onRspQryPledgeBond(self, data, error, n, last):
"""查询债券质押代码对照关系"""
pass
#----------------------------------------------------------------------
def onRspQryOrder(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTrade(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorPosition(self, data, error, n, last):
"""持仓查询回报"""
pos = VtPositionData()
pos.gatewayName = self.gatewayName
        # Save the symbol
pos.symbol = data['InstrumentID']
pos.exchange = exchangeMapReverse.get(data['ExchangeID'], '')
pos.vtSymbol = '.'.join([pos.symbol, pos.exchange])
        # Direction and frozen position volume
pos.direction = posiDirectionMapReverse.get(data['PosiDirection'], '')
if pos.direction == DIRECTION_NET or pos.direction == DIRECTION_LONG:
pos.frozen = data['LongFrozen']
elif pos.direction == DIRECTION_SHORT:
pos.frozen = data['ShortFrozen']
        # Position volume
pos.position = data['Position']
pos.ydPosition = data['YdPosition']
        # Average position price
if pos.position:
pos.price = data['OpenCost'] / pos.position
        # Position name in the VT system
pos.vtPositionName = '.'.join([pos.vtSymbol, pos.direction])
        # Push the event
self.gateway.onPosition(pos)
#----------------------------------------------------------------------
def OnRspQryFundTransferSerial(self, data, error, n, last):
"""资金转账查询"""
pass
#----------------------------------------------------------------------
    def onRspQryFundInterTransferSerial(self, data, error, n, last):
        """Internal fund transfer flow query callback."""
pass
#----------------------------------------------------------------------
def connect(self, userID, password, brokerID, address, productInfo, authCode):
"""初始化连接"""
self.userID = userID # 账号
self.password = password # 密码
self.brokerID = brokerID # 经纪商代码
self.address = address # 服务器地址
self.productInfo = productInfo
self.authCode = authCode
# 如果尚未建立服务器连接,则进行连接
if not self.connectionStatus:
            # Create the API object in the C++ environment; the argument is the folder path used to save the .con files
path = getTempPath(self.gatewayName + '_')
self.createFtdcQueryApi(path)
            # Register the server address
self.registerFront(self.address)
            # Initialize the connection; onFrontConnected is called on success
self.init()
        # If already connected but not yet logged in, log in now
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
def login(self):
"""连接服务器"""
# 如果填入了用户名密码等,则登录
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
req['UserProductInfo'] = self.productInfo
req['AuthCode'] = self.authCode
req['RandCode'] = self.randCode
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户"""
self.reqID += 1
        # Is InvestorID / BrokerID actually required here?
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqQryTradingAccount(req, self.reqID)
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
self.reqID += 1
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqQryInvestorPosition(req, self.reqID)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.exit()
| mit |
FedoraScientific/salome-kernel | src/KERNEL_PY/iparameters.py | 1 | 9144 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2016 CEA/DEN, EDF R&D, OPEN CASCADE
#
# Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
# CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
import salome
import string
import SALOME
import SALOMEDS
import SALOME_Session_idl
PT_INTEGER = 0
PT_REAL = 1
PT_BOOLEAN = 2
PT_STRING = 3
PT_REALARRAY = 4
PT_INTARRAY = 5
PT_STRARRAY = 6
_AP_LISTS_LIST_ = "AP_LISTS_LIST"
_AP_ENTRIES_LIST_ = "AP_ENTRIES_LIST"
_AP_PROPERTIES_LIST_ = "AP_PROPERTIES_LIST"
_AP_DUMP_PYTHON_ = "AP_DUMP_PYTHON"
vp_session = None
def getSession():
global vp_session
if vp_session is None:
vp_session = salome.naming_service.Resolve("/Kernel/Session")
vp_session = vp_session._narrow(SALOME.Session)
pass
return vp_session
class IParameters:
"""
Interface IParameters was designed to provide a common way to set up
named parameters. The interface is initialized by AttributeParameter that
is used as a container of stored data.
The interface supports 3 types of named parameters:
1. Named list - a named list consists of string values.
    One can append a value to a list (method 'append'), get the number of values
in the list (method 'nbValues'), get all values of the given list
(method 'getValues') and get names of all created named lists.
2. Parameters - that is a set of named string values associated with some named
    entity. A parameter consists of three elements: an entity name, a parameter name
    and a parameter value. Thus, for one named entity there can be an arbitrary number
    of 'parameter name : value' pairs.
One can add a new parameter to entry (method 'setParameter'), get a value of
a given named parameter of the given entry (method 'getParameter'), get a number
of parameters of the given entry (method 'getNbParameters'), get all names of
parameters for the given entry (method 'getAllParameterNames'), get all
values of parameters for the entry (method 'getAllParameterValues') and get all
stored entries (method 'getEntries')
3. Property - a property has a name and a string value.
    One can set a property (method 'setProperty'), get a property (method 'getProperty') and
get a list of all stored properties (method 'getProperties').
Note:
    Methods not mentioned above are private and are not supposed to be used
    by module developers.
"""
def __init__(self, attributeParameter, clr=False):
"""Initializes the instance. If clr parameter is True, all IAPP attribute values are cleared."""
self._ap = attributeParameter
if ( clr ): self.clear()
pass
def clear(self):
"""Clear parameters"""
self._ap.Clear()
def append(self, listName, value):
"""Appends a value to the named list"""
if self._ap is None: return -1
v = []
if self._ap.IsSet(listName, PT_STRARRAY) == 0:
if self._ap.IsSet(_AP_LISTS_LIST_, PT_STRARRAY) == 0: self._ap.SetStrArray(_AP_LISTS_LIST_, v);
if listName != _AP_ENTRIES_LIST_ and listName != _AP_PROPERTIES_LIST_:
self.append(_AP_LISTS_LIST_, listName)
pass
self._ap.SetStrArray(listName, v)
pass
v = self._ap.GetStrArray(listName)
v.append(value)
self._ap.SetStrArray(listName, v)
return (len(v)-1)
def nbValues(self, listName):
"""Returns a number of values in the named list"""
if self._ap is None: return -1
if self._ap.IsSet(listName, PT_STRARRAY) == 0: return 0
v = self._ap.GetStrArray(listName)
return len(v)
def getValues(self, listName):
"""Returns a list of values in the named list"""
v = []
if self._ap is None: return v
if self._ap.IsSet(listName, PT_STRARRAY) == 0: return v
return self._ap.GetStrArray(listName)
def getLists(self):
"""Returns a list of named lists' names"""
v = []
if self._ap is None: return v
if self._ap.IsSet(_AP_LISTS_LIST_, PT_STRARRAY) == 0: return v
return self._ap.GetStrArray(_AP_LISTS_LIST_)
def setParameter(self, entry, parameterName, value):
"""Sets a value of the named parameter for the entry"""
if self._ap is None: return
v = []
if self._ap.IsSet(entry, PT_STRARRAY) ==0:
self.append(_AP_ENTRIES_LIST_, entry) #Add the entry to the internal list of entries
self._ap.SetStrArray(entry, v)
pass
v = self._ap.GetStrArray(entry)
v.append(parameterName)
v.append(value)
self._ap.SetStrArray(entry, v)
pass
def getParameter(self, entry, parameterName):
"""Returns a value of the named parameter for the entry"""
if self._ap is None: return ""
if self._ap.IsSet(entry, PT_STRARRAY) == 0: return ""
v = self._ap.GetStrArray(entry)
length = len(v);
i = 0
while i<length:
if v[i] == parameterName: return v[i+1]
i+=1
pass
return ""
def getAllParameterNames(self, entry):
"""Returns all parameter names of the given entry"""
v = []
names = []
if self._ap is None: return v
if self._ap.IsSet(entry, PT_STRARRAY) == 0: return v
v = self._ap.GetStrArray(entry)
length = len(v)
i = 0
while i<length:
names.append(v[i])
i+=2
pass
return names
def getAllParameterValues(self, entry):
"""Returns all parameter values of the given entry"""
v = []
values = []
if self._ap is None: return v
if self._ap.IsSet(entry, PT_STRARRAY) == 0: return v
v = self._ap.GetStrArray(entry)
length = len(v)
i = 1
while i<length:
            values.append(v[i])   # values are stored at odd indices (name, value, name, value, ...)
i+=2
pass
return values
def getNbParameters(self, entry):
"""Returns a number of parameters of the entry"""
if self._ap is None: return -1
if self._ap.IsSet(entry, PT_STRARRAY) == 0: return -1
return len(self._ap.GetStrArray(entry))/2
def getEntries(self):
"""Returns all entries"""
v = []
if self._ap is None: return v
if self._ap.IsSet(_AP_ENTRIES_LIST_, PT_STRARRAY) == 0: return v
return self._ap.GetStrArray(_AP_ENTRIES_LIST_)
def setProperty(self, name, value):
"""Sets a property value"""
if self._ap is None: return
if self._ap.IsSet(name, PT_STRING) == 0:
self.append(_AP_PROPERTIES_LIST_, name) #Add the property to the internal list of properties
pass
self._ap.SetString(name, value)
pass
def getProperty(self, name):
"""Returns a value of the named property"""
if self._ap is None: return ""
if self._ap.IsSet(name, PT_STRING) == 0: return ""
return self._ap.GetString(name)
def getProperties(self):
"""Returns all propetries"""
v = []
if self._ap is None: return v
if self._ap.IsSet(_AP_PROPERTIES_LIST_, PT_STRARRAY) == 0: return v
return self._ap.GetStrArray(_AP_PROPERTIES_LIST_)
def parseValue(self, value, separator, fromEnd):
"""Breaks a value string in two parts which is divided by separator."""
v = []
pos = - 1
if fromEnd == 1: pos = value.rfind(separator)
else: pos = value.find(separator)
if pos < 0:
v.append(value)
return v
part1 = value[0:pos]
part2 = value[pos+1:len(value)]
v.append(part1)
v.append(part2)
return v
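    # For example (illustrative): parseValue("0:1:2", ":", 1) returns ['0:1', '2'],
    # while parseValue("0:1:2", ":", 0) returns ['0', '1:2'].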
def setDumpPython(self, isDumping):
"""Enables/Disables the dumping to Python"""
if self._ap is None: return
        self._ap.SetBool(_AP_DUMP_PYTHON_, isDumping)
pass
def isDumpPython(self):
"""Returns whether there is the dumping to Python"""
if self._ap is None: return 0
if self._ap.IsSet(_AP_DUMP_PYTHON_, PT_BOOLEAN) == 0: return 0
return self._ap.GetBool(_AP_DUMP_PYTHON_)
pass
| lgpl-2.1 |
vigilv/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
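# For instance (illustrative): with the environment variable SCIKIT_LEARN_DATA set to
# '/tmp/sk_data', get_data_home() returns '/tmp/sk_data' (creating it if missing);
# otherwise it falls back to '~/scikit_learn_data' expanded to the user's home folder.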
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
Loads both, ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
google-research/robel | robel/robot_env.py | 1 | 21356 | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base environment API for robotics tasks."""
import abc
import collections
from typing import Any, Dict, Optional, Sequence, Union, Tuple
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from robel.components.builder import ComponentBuilder
from robel.simulation.sim_scene import SimScene, SimBackend
from robel.simulation.renderer import RenderMode
DEFAULT_RENDER_SIZE = 480
# The simulation backend to use by default.
DEFAULT_SIM_BACKEND = SimBackend.MUJOCO_PY
def make_box_space(low: Union[float, Sequence[float]],
high: Union[float, Sequence[float]],
shape: Optional[Tuple[int]] = None) -> gym.spaces.Box:
"""Returns a Box gym space."""
# HACK: Fallback for gym 0.9.x
# TODO(michaelahn): Consider whether we still need to support 0.9.x
try:
return spaces.Box(low, high, shape, dtype=np.float32)
except TypeError:
return spaces.Box(low, high, shape)
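# Illustrative usage (not part of the original module): make_box_space(-1.0, 1.0, shape=(3,))
# returns a gym Box whose sample() yields arrays of shape (3,) with entries in [-1, 1].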
class RobotEnv(gym.Env, metaclass=abc.ABCMeta):
"""Base Gym environment for robotics tasks."""
def __init__(self,
sim_model: Any,
observation_keys: Optional[Sequence[str]] = None,
reward_keys: Optional[Sequence[str]] = None,
use_dict_obs: bool = False,
frame_skip: int = 1,
camera_settings: Optional[Dict] = None,
sim_backend: SimBackend = DEFAULT_SIM_BACKEND,
sticky_action_probability: float = 0.):
"""Initializes a robotics environment.
Args:
sim_model: The path to the simulation to load.
observation_keys: The keys of `get_obs_dict` to extract and flatten
for the default implementation of `_get_obs`. If this is not
set, `get_obs_dict` must return an OrderedDict.
reward_keys: The keys of `get_reward_dict` to extract and sum for
the default implementation of `_get_total_reward`. If this is
not set, `_get_total_reward` will sum all of the values.
use_dict_obs: If True, the observations will be returned as
dictionaries rather than as a flattened array. The observation
space of this environment will be a dictionary space.
frame_skip: The number of simulation steps per environment step.
This multiplied by the timestep defined in the model file is the
step duration.
camera_settings: Settings to apply to the free camera in simulation.
sim_backend: The simulation backend to use.
sticky_action_probability: Repeat previous action with this
probability. Default is 0 (no sticky actions).
"""
self._observation_keys = observation_keys
self._reward_keys = reward_keys
self._use_dict_obs = use_dict_obs
self._sticky_action_probability = sticky_action_probability
self._components = []
# The following spaces are initialized by their respective `initialize`
# methods, e.g. `_initialize_observation_space`.
self._observation_space = None
self._action_space = None
self._state_space = None
# The following are populated by step() and/or reset().
self.last_action = None
self.last_obs_dict = None
self.last_reward_dict = None
self.last_score_dict = None
self.is_done = False
self.step_count = 0
# Load the simulation.
self.sim_scene = SimScene.create(
sim_model, backend=sim_backend, frame_skip=frame_skip)
self.sim = self.sim_scene.sim
self.model = self.sim_scene.model
self.data = self.sim_scene.data
if camera_settings:
self.sim_scene.renderer.set_free_camera_settings(**camera_settings)
# Set common metadata for Gym environments.
self.metadata = {
'render.modes': ['human', 'rgb_array', 'depth_array'],
'video.frames_per_second': int(
np.round(1.0 / self.sim_scene.step_duration))
}
# Ensure gym does not try to patch `_step` and `_reset`.
self._gym_disable_underscore_compat = True
self.seed()
#===========================================================================
# Environment API.
# These methods should not be overridden by subclasses.
#===========================================================================
@property
def observation_space(self) -> gym.Space:
"""Returns the observation space of the environment.
The observation space is the return specification for `reset`,
`_get_obs`, and the first element of the returned tuple from `step`.
Subclasses should override `_initialize_observation_space` to customize
the observation space.
"""
# Initialize and cache the observation space on the first call.
if self._observation_space is None:
self._observation_space = self._initialize_observation_space()
assert self._observation_space is not None
return self._observation_space
@property
def action_space(self) -> gym.Space:
"""Returns the action space of the environment.
        The action space is the argument specification for `step`.
Subclasses should override `_initialize_action_space` to customize the
action space.
"""
# Initialize and cache the action space on the first call.
if self._action_space is None:
self._action_space = self._initialize_action_space()
assert self._action_space is not None
return self._action_space
@property
def state_space(self) -> gym.Space:
"""Returns the state space of the environment.
The state space is the return specification for `get_state` and is the
argument specification for `set_state`.
Subclasses should override `_initialize_state_space` to customize the
state space.
"""
# Initialize and cache the state space on the first call.
if self._state_space is None:
self._state_space = self._initialize_state_space()
assert self._state_space is not None
return self._state_space
@property
def dt(self) -> float:
"""Returns the step duration of each step, in seconds."""
return self.sim_scene.step_duration
@property
def obs_dim(self) -> int:
"""Returns the size of the observation space.
NOTE: This is for compatibility with gym.MujocoEnv.
"""
if not isinstance(self.observation_space, spaces.Box):
raise NotImplementedError('`obs_dim` only supports Box spaces.')
return np.prod(self.observation_space.shape).item()
@property
def action_dim(self) -> int:
"""Returns the size of the action space."""
if not isinstance(self.action_space, spaces.Box):
raise NotImplementedError('`action_dim` only supports Box spaces.')
return np.prod(self.action_space.shape).item()
def seed(self, seed: Optional[int] = None) -> Sequence[int]:
"""Seeds the environment.
Args:
seed: The value to seed the random number generator with. If None,
uses a random seed.
"""
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self) -> Any:
"""Resets the environment.
Returns:
The initial observation of the environment after resetting.
"""
self.last_action = None
self.sim.reset()
self.sim.forward()
self._reset()
obs_dict = self.get_obs_dict()
self.last_obs_dict = obs_dict
self.last_reward_dict = None
self.last_score_dict = None
self.is_done = False
self.step_count = 0
return self._get_obs(obs_dict)
def step(self, action: Any) -> Tuple[Any, float, bool, Dict]:
"""Runs one timestep of the environment with the given action.
Subclasses must override 4 subcomponents of step:
- `_step`: Applies an action to the robot
- `get_obs_dict`: Returns the current observation of the robot.
- `get_reward_dict`: Calculates the reward for the step.
- `get_done`: Returns whether the episode should terminate.
Args:
action: An action to control the environment.
Returns:
observation: The observation of the environment after the timestep.
reward: The amount of reward obtained during the timestep.
done: Whether the episode has ended. `env.reset()` should be called
if this is True.
info: Auxiliary information about the timestep.
"""
# Perform the step.
action = self._preprocess_action(action)
self._step(action)
self.last_action = action
# Get the observation after the step.
obs_dict = self.get_obs_dict()
self.last_obs_dict = obs_dict
flattened_obs = self._get_obs(obs_dict)
# Get the rewards for the observation.
batched_action = np.expand_dims(np.atleast_1d(action), axis=0)
batched_obs_dict = {
k: np.expand_dims(np.atleast_1d(v), axis=0)
for k, v in obs_dict.items()
}
batched_reward_dict = self.get_reward_dict(batched_action,
batched_obs_dict)
# Calculate the total reward.
reward_dict = {k: v.item() for k, v in batched_reward_dict.items()}
self.last_reward_dict = reward_dict
reward = self._get_total_reward(reward_dict)
# Calculate the score.
batched_score_dict = self.get_score_dict(batched_obs_dict,
batched_reward_dict)
score_dict = {k: v.item() for k, v in batched_score_dict.items()}
self.last_score_dict = score_dict
# Get whether the episode should end.
dones = self.get_done(batched_obs_dict, batched_reward_dict)
done = dones.item()
self.is_done = done
# Combine the dictionaries as the auxiliary information.
info = collections.OrderedDict()
info.update(('obs/' + key, val) for key, val in obs_dict.items())
info.update(('reward/' + key, val) for key, val in reward_dict.items())
info['reward/total'] = reward
info.update(('score/' + key, val) for key, val in score_dict.items())
self.step_count += 1
return flattened_obs, reward, done, info
def render(
self,
mode: str = 'human',
width: int = DEFAULT_RENDER_SIZE,
height: int = DEFAULT_RENDER_SIZE,
camera_id: int = -1,
) -> Optional[np.ndarray]:
"""Renders the environment.
Args:
mode: The type of rendering to use.
- 'human': Renders to a graphical window.
- 'rgb_array': Returns the RGB image as an np.ndarray.
- 'depth_array': Returns the depth image as an np.ndarray.
width: The width of the rendered image. This only affects offscreen
rendering.
height: The height of the rendered image. This only affects
offscreen rendering.
camera_id: The ID of the camera to use. By default, this is the free
camera. If specified, only affects offscreen rendering.
Returns:
If mode is `rgb_array` or `depth_array`, a Numpy array of the
rendered pixels. Otherwise, returns None.
"""
if mode == 'human':
self.sim_scene.renderer.render_to_window()
elif mode == 'rgb_array':
return self.sim_scene.renderer.render_offscreen(
width, height, mode=RenderMode.RGB, camera_id=camera_id)
elif mode == 'depth_array':
return self.sim_scene.renderer.render_offscreen(
width, height, mode=RenderMode.DEPTH, camera_id=camera_id)
else:
raise NotImplementedError(mode)
return None
def close(self):
"""Cleans up any resources used by the environment."""
for component in self._components:
component.close()
self._components.clear()
self.sim_scene.close()
#===========================================================================
# Overridable Methods
#===========================================================================
@abc.abstractmethod
def _reset(self):
"""Task-specific reset for the environment."""
@abc.abstractmethod
def _step(self, action: np.ndarray):
"""Task-specific step for the environment."""
@abc.abstractmethod
def get_obs_dict(self) -> Dict[str, Any]:
"""Returns the current observation of the environment.
Returns:
A dictionary of observation values. This should be an ordered
dictionary if `observation_keys` isn't set.
"""
@abc.abstractmethod
def get_reward_dict(
self,
action: np.ndarray,
obs_dict: Dict[str, np.ndarray],
) -> Dict[str, np.ndarray]:
"""Returns the reward for the given action and observation.
Args:
action: A batch of actions.
obs_dict: A dictionary of batched observations. The batch dimension
matches the batch dimension of the actions.
Returns:
A dictionary of reward components. The values should be batched to
match the given actions and observations.
"""
@abc.abstractmethod
def get_score_dict(
self,
obs_dict: Dict[str, np.ndarray],
reward_dict: Dict[str, np.ndarray],
) -> Dict[str, np.ndarray]:
"""Returns a standardized measure of success for the environment.
Args:
obs_dict: A dictionary of batched observations.
reward_dict: A dictionary of batched rewards to correspond with the
observations.
Returns:
A dictionary of scores.
"""
def get_done(
self,
obs_dict: Dict[str, np.ndarray],
reward_dict: Dict[str, np.ndarray],
) -> np.ndarray:
"""Returns whether the episode should terminate.
Args:
obs_dict: A dictionary of batched observations.
reward_dict: A dictionary of batched rewards to correspond with the
observations.
Returns:
A boolean to denote if the episode should terminate. This should
have the same batch dimension as the observations and rewards.
"""
del obs_dict
return np.zeros_like(next(iter(reward_dict.values())), dtype=bool)
def get_state(self) -> Any:
"""Returns the current state of the environment."""
return (self.data.qpos.copy(), self.data.qvel.copy())
def set_state(self, state: Any):
"""Sets the state of the environment."""
qpos, qvel = state
self.data.qpos[:] = qpos
self.data.qvel[:] = qvel
self.sim.forward()
def _initialize_observation_space(self) -> gym.Space:
"""Returns the observation space to use for this environment.
The default implementation calls `_get_obs()` and returns a dictionary
space if the observation is a mapping, or a box space otherwise.
"""
observation = self._get_obs()
if isinstance(observation, collections.Mapping):
assert self._use_dict_obs
return spaces.Dict({
key: make_box_space(-np.inf, np.inf, shape=np.shape(value))
for key, value in observation.items()
})
return make_box_space(-np.inf, np.inf, shape=observation.shape)
def _initialize_action_space(self) -> gym.Space:
"""Returns the action space to use for this environment.
The default implementation uses the simulation's control actuator
dimensions as the action space, using normalized actions in [-1, 1].
"""
return make_box_space(-1.0, 1.0, shape=(self.model.nu,))
def _initialize_state_space(self) -> gym.Space:
"""Returns the state space to use for this environment.
The default implementation calls `get_state()` and returns a space
corresponding to the type of the state object:
- Mapping: Dict space
- List/Tuple: Tuple space
"""
state = self.get_state()
if isinstance(state, collections.Mapping):
return spaces.Dict({
key: make_box_space(-np.inf, np.inf, shape=np.shape(value))
for key, value in state.items() # pylint: disable=no-member
})
elif isinstance(state, (list, tuple)):
return spaces.Tuple([
make_box_space(-np.inf, np.inf, shape=np.shape(value))
for value in state
])
raise NotImplementedError(
'Override _initialize_state_space for state: {}'.format(state))
def _get_last_action(self) -> np.ndarray:
"""Returns the previous action, or zeros if no action has been taken."""
if self.last_action is None:
return np.zeros((self.action_dim,), dtype=self.action_space.dtype)
return self.last_action
def _preprocess_action(self, action: np.ndarray) -> np.ndarray:
"""Transforms an action before passing it to `_step()`.
Args:
action: The action in the environment's action space.
Returns:
The transformed action to pass to `_step()`.
"""
# Clip to the normalized action space.
action = np.clip(action, -1.0, 1.0)
# Prevent elements of the action from changing if sticky actions are
# being used.
if self._sticky_action_probability > 0 and self.last_action is not None:
sticky_indices = (
self.np_random.uniform() < self._sticky_action_probability)
action = np.where(sticky_indices, self.last_action, action)
return action
def _get_obs(self, obs_dict: Optional[Dict[str, np.ndarray]] = None) -> Any:
"""Returns the current observation of the environment.
This matches the environment's observation space.
"""
if obs_dict is None:
obs_dict = self.get_obs_dict()
if self._use_dict_obs:
if self._observation_keys:
obs = collections.OrderedDict(
(key, obs_dict[key]) for key in self._observation_keys)
else:
obs = obs_dict
else:
if self._observation_keys:
obs_values = (obs_dict[key] for key in self._observation_keys)
else:
assert isinstance(obs_dict, collections.OrderedDict), \
'Must use OrderedDict if not using `observation_keys`'
obs_values = obs_dict.values()
obs = np.concatenate([np.ravel(v) for v in obs_values])
return obs
def _get_total_reward(self, reward_dict: Dict[str, np.ndarray]) -> float:
"""Returns the total reward for the given reward dictionary.
The default implementation extracts the keys from `reward_keys` and sums
the values.
Args:
reward_dict: A dictionary of rewards. The values may have a batch
dimension.
Returns:
The total reward for the dictionary.
"""
# TODO(michaelahn): Enforce that the reward values are scalar.
if self._reward_keys:
reward_values = (reward_dict[key] for key in self._reward_keys)
else:
reward_values = reward_dict.values()
return np.sum(np.fromiter(reward_values, dtype=float))
def _add_component(self, component_builder: ComponentBuilder,
**component_kwargs) -> Any:
"""Creates a new component for this environment instance.
Args:
component_builder: The configured ComponentBuilder to build the
component with.
"""
# Build the component.
component = component_builder.build(
sim_scene=self.sim_scene,
random_state=self.np_random,
**component_kwargs)
self._components.append(component)
return component
| apache-2.0 |
rismalrv/edx-platform | common/lib/xmodule/xmodule/peer_grading_module.py | 56 | 29601 | import json
import logging
from datetime import datetime
from django.utils.timezone import UTC
from lxml import etree
from pkg_resources import resource_string
from xblock.fields import Dict, String, Scope, Boolean, Float, Reference
from xmodule.capa_module import ComplexEncoder
from xmodule.fields import Date, Timedelta
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.raw_module import RawDescriptor
from xmodule.timeinfo import TimeInfo
from xmodule.x_module import XModule, module_attr
from xmodule.open_ended_grading_classes.peer_grading_service import PeerGradingService, MockPeerGradingService
from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError
from xmodule.validation import StudioValidation, StudioValidationMessage
from open_ended_grading_classes import combined_open_ended_rubric
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
EXTERNAL_GRADER_NO_CONTACT_ERROR = "Failed to contact external graders. Please notify course staff."
MAX_ALLOWED_FEEDBACK_LENGTH = 5000
class PeerGradingFields(object):
use_for_single_location = Boolean(
display_name=_("Show Single Problem"),
help=_('When True, only the single problem specified by "Link to Problem Location" is shown. '
'When False, a panel is displayed with all problems available for peer grading.'),
default=False,
scope=Scope.settings
)
link_to_location = Reference(
display_name=_("Link to Problem Location"),
help=_('The location of the problem being graded. Only used when "Show Single Problem" is True.'),
default="",
scope=Scope.settings
)
graded = Boolean(
display_name=_("Graded"),
help=_('Defines whether the student gets credit for grading this problem. Only used when "Show Single Problem" is True.'),
default=False,
scope=Scope.settings
)
due = Date(
help=_("Due date that should be displayed."),
scope=Scope.settings)
graceperiod = Timedelta(
help=_("Amount of grace to give on the due date."),
scope=Scope.settings
)
student_data_for_location = Dict(
help=_("Student data for a given peer grading problem."),
scope=Scope.user_state
)
weight = Float(
display_name=_("Problem Weight"),
help=_("Defines the number of points each problem is worth. If the value is not set, each problem is worth one point."),
scope=Scope.settings, values={"min": 0, "step": ".1"},
default=1
)
display_name = String(
display_name=_("Display Name"),
help=_("Display name for this module"),
scope=Scope.settings,
default=_("Peer Grading Interface")
)
data = String(
help=_("Html contents to display for this module"),
default='<peergrading></peergrading>',
scope=Scope.content
)
class InvalidLinkLocation(Exception):
"""
Exception for the case in which a peer grading module tries to link to an invalid location.
"""
pass
class PeerGradingModule(PeerGradingFields, XModule):
"""
PeerGradingModule.__init__ takes the same arguments as xmodule.x_module:XModule.__init__
"""
_VERSION = 1
js = {
'coffee': [
resource_string(__name__, 'js/src/peergrading/peer_grading.coffee'),
resource_string(__name__, 'js/src/peergrading/peer_grading_problem.coffee'),
resource_string(__name__, 'js/src/javascript_loader.coffee'),
],
'js': [
resource_string(__name__, 'js/src/collapsible.js'),
]
}
js_module_name = "PeerGrading"
css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
def __init__(self, *args, **kwargs):
super(PeerGradingModule, self).__init__(*args, **kwargs)
# Copy this to a new variable so that we can edit it if needed.
# We need to edit it if the linked module cannot be found, so
# we can revert to panel model.
self.use_for_single_location_local = self.use_for_single_location
# We need to set the location here so the child modules can use it.
self.runtime.set('location', self.location)
if self.runtime.open_ended_grading_interface:
self.peer_gs = PeerGradingService(self.system.open_ended_grading_interface, self.system.render_template)
else:
self.peer_gs = MockPeerGradingService()
if self.use_for_single_location_local:
linked_descriptors = self.descriptor.get_required_module_descriptors()
if len(linked_descriptors) == 0:
                error_msg = ("Peer grading module {0} is trying to use single problem mode "
                             "without a location specified.".format(self.location))
log.error(error_msg)
# Change module over to panel mode from single problem mode.
self.use_for_single_location_local = False
else:
self.linked_problem = self.system.get_module(linked_descriptors[0])
try:
self.timeinfo = TimeInfo(self.due, self.graceperiod)
except Exception:
log.error("Error parsing due date information in location {0}".format(self.location))
raise
self.display_due_date = self.timeinfo.display_due_date
try:
self.student_data_for_location = json.loads(self.student_data_for_location)
except Exception: # pylint: disable=broad-except
# OK with this broad exception because we just want to continue on any error
pass
@property
def ajax_url(self):
"""
Returns the `ajax_url` from the system, with any trailing '/' stripped off.
"""
ajax_url = self.system.ajax_url
if not ajax_url.endswith("/"):
ajax_url += "/"
return ajax_url
def closed(self):
return self._closed(self.timeinfo)
def _closed(self, timeinfo):
if timeinfo.close_date is not None and datetime.now(UTC()) > timeinfo.close_date:
return True
return False
def _err_response(self, msg):
"""
Return a HttpResponse with a json dump with success=False, and the given error message.
"""
return {'success': False, 'error': msg}
def _check_required(self, data, required):
actual = set(data.keys())
missing = required - actual
if len(missing) > 0:
return False, "Missing required keys: {0}".format(', '.join(missing))
else:
return True, ""
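    # Example of the key check above (hypothetical payload):
    #   data = {'location': 'i4x://org/course/problem/demo', 'score': '3'}
    #   self._check_required(data, set(['location', 'score', 'feedback']))
    #   # -> (False, "Missing required keys: feedback")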
def get_html(self):
"""
        Renders the HTML that students see: the closed view, the peer grading panel, or
        a single linked problem, depending on configuration.
        @return: HTML string.
"""
if self.closed():
return self.peer_grading_closed()
if not self.use_for_single_location_local:
return self.peer_grading()
else:
# b/c handle_ajax expects serialized data payload and directly calls peer_grading
return self.peer_grading_problem({'location': self.link_to_location.to_deprecated_string()})['html']
def handle_ajax(self, dispatch, data):
"""
        Handles AJAX events by dispatching to the handler named by `dispatch`.
        @return: JSON-encoded response string.
"""
handlers = {
'get_next_submission': self.get_next_submission,
'show_calibration_essay': self.show_calibration_essay,
'is_student_calibrated': self.is_student_calibrated,
'save_grade': self.save_grade,
'save_calibration_essay': self.save_calibration_essay,
'problem': self.peer_grading_problem,
}
if dispatch not in handlers:
# This is a dev_facing_error
log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
# This is a dev_facing_error
return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
data_dict = handlers[dispatch](data)
return json.dumps(data_dict, cls=ComplexEncoder)
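    # Illustrative dispatch (hypothetical data): a request routed here with
    # dispatch='save_grade' calls self.save_grade(data) and returns its result as a
    # JSON string, e.g. '{"success": true, "required_done": false}'.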
def query_data_for_location(self, location):
student_id = self.system.anonymous_student_id
success = False
response = {}
try:
response = self.peer_gs.get_data_for_location(location, student_id)
_count_graded = response['count_graded']
_count_required = response['count_required']
success = True
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error getting location data from controller for location %s, student %s", location, student_id)
return success, response
def get_progress(self):
pass
def get_score(self):
max_score = None
score = None
weight = self.weight
#The old default was None, so set to 1 if it is the old default weight
if weight is None:
weight = 1
score_dict = {
'score': score,
'total': max_score,
}
if not self.use_for_single_location_local or not self.graded:
return score_dict
try:
count_graded = self.student_data_for_location['count_graded']
count_required = self.student_data_for_location['count_required']
except:
success, response = self.query_data_for_location(self.link_to_location)
if not success:
log.exception(
"No instance data found and could not get data from controller for loc {0} student {1}".format(
self.system.location.to_deprecated_string(), self.system.anonymous_student_id
))
return None
count_graded = response['count_graded']
count_required = response['count_required']
if count_required > 0 and count_graded >= count_required:
# Ensures that once a student receives a final score for peer grading, that it does not change.
self.student_data_for_location = response
score = int(count_graded >= count_required and count_graded > 0) * float(weight)
total = float(weight)
score_dict['score'] = score
score_dict['total'] = total
return score_dict
def max_score(self):
    ''' Maximum score. Note:
* This is generic; in abstract, a problem could be 3/5 points on one
randomization, and 5/7 on another
'''
max_grade = None
if self.use_for_single_location_local and self.graded:
max_grade = self.weight
return max_grade
def get_next_submission(self, data):
"""
Makes a call to the grading controller for the next essay that should be graded
Returns a json dict with the following keys:
'success': bool
'submission_id': a unique identifier for the submission, to be passed back
with the grade.
'submission': the submission, rendered as read-only html for grading
'rubric': the rubric, also rendered as html.
'submission_key': a key associated with the submission for validation reasons
'error': if success is False, will have an error message with more info.
"""
required = set(['location'])
success, message = self._check_required(data, required)
if not success:
return self._err_response(message)
grader_id = self.system.anonymous_student_id
location = data['location']
try:
response = self.peer_gs.get_next_submission(location, grader_id)
return response
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error getting next submission. server url: %s location: %s, grader_id: %s", self.peer_gs.url, location, grader_id)
# This is a student_facing_error
return {'success': False,
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
def save_grade(self, data):
"""
Saves the grade of a given submission.
Input:
The request should have the following keys:
location - problem location
submission_id - id associated with this submission
submission_key - submission key given for validation purposes
score - the grade that was given to the submission
feedback - the feedback from the student
Returns
A json object with the following keys:
success: bool indicating whether the save was a success
error: if there was an error in the submission, this is the error message
"""
required = ['location', 'submission_id', 'submission_key', 'score', 'feedback', 'submission_flagged', 'answer_unknown']
if data.get("submission_flagged", False) in ["false", False, "False", "FALSE"]:
required.append("rubric_scores[]")
success, message = self._check_required(data, set(required))
if not success:
return self._err_response(message)
success, message = self._check_feedback_length(data)
if not success:
return self._err_response(message)
data_dict = {k: data.get(k) for k in required}
if 'rubric_scores[]' in required:
data_dict['rubric_scores'] = data.getall('rubric_scores[]')
data_dict['grader_id'] = self.system.anonymous_student_id
try:
response = self.peer_gs.save_grade(**data_dict)
success, location_data = self.query_data_for_location(data_dict['location'])
            # Don't check `success` from query_data_for_location here: if the grading
            # service were unreachable, the `response =` call above would already have
            # raised the same exception, handled by the except clause below.
response.update({'required_done': False})
if 'count_graded' in location_data and 'count_required' in location_data and int(location_data['count_graded']) >= int(location_data['count_required']):
response['required_done'] = True
return response
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error saving grade to open ended grading service. server url: %s", self.peer_gs.url)
# This is a student_facing_error
return {
'success': False,
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR
}
def is_student_calibrated(self, data):
"""
Calls the grading controller to see if the given student is calibrated
on the given problem
Input:
In the request, we need the following arguments:
location - problem location
Returns:
Json object with the following keys
success - bool indicating whether or not the call was successful
calibrated - true if the grader has fully calibrated and can now move on to grading
- false if the grader is still working on calibration problems
total_calibrated_on_so_far - the number of calibration essays for this problem
that this grader has graded
"""
required = set(['location'])
success, message = self._check_required(data, required)
if not success:
return self._err_response(message)
grader_id = self.system.anonymous_student_id
location = data['location']
try:
response = self.peer_gs.is_student_calibrated(location, grader_id)
return response
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error from open ended grading service. server url: %s, grader_id: %s, location: %s", self.peer_gs.url, grader_id, location)
# This is a student_facing_error
return {
'success': False,
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR
}
def show_calibration_essay(self, data):
"""
Fetch the next calibration essay from the grading controller and return it
Inputs:
In the request
location - problem location
Returns:
A json dict with the following keys
'success': bool
'submission_id': a unique identifier for the submission, to be passed back
with the grade.
'submission': the submission, rendered as read-only html for grading
'rubric': the rubric, also rendered as html.
'submission_key': a key associated with the submission for validation reasons
'error': if success is False, will have an error message with more info.
"""
required = set(['location'])
success, message = self._check_required(data, required)
if not success:
return self._err_response(message)
grader_id = self.system.anonymous_student_id
location = data['location']
try:
response = self.peer_gs.show_calibration_essay(location, grader_id)
return response
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error from open ended grading service. server url: %s, location: %s", self.peer_gs.url, location)
# This is a student_facing_error
return {'success': False,
'error': EXTERNAL_GRADER_NO_CONTACT_ERROR}
# if we can't parse the rubric into HTML,
except etree.XMLSyntaxError:
# This is a dev_facing_error
log.exception("Cannot parse rubric string.")
# This is a student_facing_error
return {'success': False,
'error': 'Error displaying submission. Please notify course staff.'}
def save_calibration_essay(self, data):
"""
Saves the grader's grade of a given calibration.
Input:
The request should have the following keys:
location - problem location
submission_id - id associated with this submission
submission_key - submission key given for validation purposes
score - the grade that was given to the submission
feedback - the feedback from the student
Returns
A json object with the following keys:
success: bool indicating whether the save was a success
error: if there was an error in the submission, this is the error message
actual_score: the score that the instructor gave to this calibration essay
"""
required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]'])
success, message = self._check_required(data, required)
if not success:
return self._err_response(message)
data_dict = {k: data.get(k) for k in required}
data_dict['rubric_scores'] = data.getall('rubric_scores[]')
data_dict['student_id'] = self.system.anonymous_student_id
data_dict['calibration_essay_id'] = data_dict['submission_id']
try:
response = self.peer_gs.save_calibration_essay(**data_dict)
if 'actual_rubric' in response:
rubric_renderer = combined_open_ended_rubric.CombinedOpenEndedRubric(self.system.render_template, True)
response['actual_rubric'] = rubric_renderer.render_rubric(response['actual_rubric'])['html']
return response
except GradingServiceError:
# This is a dev_facing_error
log.exception("Error saving calibration grade")
# This is a student_facing_error
return self._err_response('There was an error saving your score. Please notify course staff.')
def peer_grading_closed(self):
'''
Show the Peer grading closed template
'''
html = self.system.render_template('peer_grading/peer_grading_closed.html', {
'use_for_single_location': self.use_for_single_location_local
})
return html
def _find_corresponding_module_for_location(self, location):
"""
Find the peer grading module that exists at the given location.
"""
try:
return self.descriptor.system.load_item(location)
except ItemNotFoundError:
# The linked problem doesn't exist.
log.error("Problem {0} does not exist in this course.".format(location))
raise
except NoPathToItem:
# The linked problem does not have a path to it (ie is in a draft or other strange state).
log.error("Cannot find a path to problem {0} in this course.".format(location))
raise
def peer_grading(self, _data=None):
'''
Show a peer grading interface
'''
# call problem list service
success = False
error_text = ""
problem_list = []
try:
problem_list_dict = self.peer_gs.get_problem_list(self.course_id, self.system.anonymous_student_id)
success = problem_list_dict['success']
if 'error' in problem_list_dict:
error_text = problem_list_dict['error']
problem_list = problem_list_dict['problem_list']
except GradingServiceError:
# This is a student_facing_error
error_text = EXTERNAL_GRADER_NO_CONTACT_ERROR
log.error(error_text)
success = False
        # catch errors if the json load fails
except ValueError:
# This is a student_facing_error
error_text = "Could not get list of problems to peer grade. Please notify course staff."
log.error(error_text)
success = False
except Exception:
log.exception("Could not contact peer grading service.")
success = False
good_problem_list = []
for problem in problem_list:
problem_location = problem['location']
try:
descriptor = self._find_corresponding_module_for_location(problem_location)
except (NoPathToItem, ItemNotFoundError):
continue
if descriptor:
problem['due'] = descriptor.due
grace_period = descriptor.graceperiod
try:
problem_timeinfo = TimeInfo(problem['due'], grace_period)
except Exception:
log.error("Malformed due date or grace period string for location {0}".format(problem_location))
raise
if self._closed(problem_timeinfo):
problem['closed'] = True
else:
problem['closed'] = False
else:
# if we can't find the due date, assume that it doesn't have one
problem['due'] = None
problem['closed'] = False
good_problem_list.append(problem)
ajax_url = self.ajax_url
html = self.system.render_template('peer_grading/peer_grading.html', {
'ajax_url': ajax_url,
'success': success,
'problem_list': good_problem_list,
'error_text': error_text,
# Checked above
'staff_access': False,
'use_single_location': self.use_for_single_location_local,
})
return html
def peer_grading_problem(self, data=None):
'''
Show individual problem interface
'''
if data is None or data.get('location') is None:
if not self.use_for_single_location_local:
# This is an error case, because it must be set to use a single location to be called without get parameters
# This is a dev_facing_error
log.error(
"Peer grading problem in peer_grading_module called with no get parameters, but use_for_single_location is False.")
return {'html': "", 'success': False}
problem_location = self.link_to_location
elif data.get('location') is not None:
problem_location = self.course_id.make_usage_key_from_deprecated_string(data.get('location'))
self._find_corresponding_module_for_location(problem_location)
ajax_url = self.ajax_url
html = self.system.render_template('peer_grading/peer_grading_problem.html', {
'view_html': '',
'problem_location': problem_location,
'course_id': self.course_id,
'ajax_url': ajax_url,
# Checked above
'staff_access': False,
'use_single_location': self.use_for_single_location_local,
})
return {'html': html, 'success': True}
def get_instance_state(self):
"""
Returns the current instance state. The module can be recreated from the instance state.
Input: None
Output: A dictionary containing the instance state.
"""
state = {
'student_data_for_location': self.student_data_for_location,
}
return json.dumps(state)
def _check_feedback_length(self, data):
feedback = data.get("feedback")
if feedback and len(feedback) > MAX_ALLOWED_FEEDBACK_LENGTH:
return False, "Feedback is too long, Max length is {0} characters.".format(
MAX_ALLOWED_FEEDBACK_LENGTH
)
else:
return True, ""
def validate(self):
"""
        Returns validation messages (errors and/or warnings) for this module by
        delegating to its descriptor. Error messages take priority over warnings.
"""
return self.descriptor.validate()
class PeerGradingDescriptor(PeerGradingFields, RawDescriptor):
"""
Module for adding peer grading questions
"""
mako_template = "widgets/raw-edit.html"
module_class = PeerGradingModule
filename_extension = "xml"
has_score = True
always_recalculate_grades = True
#Specify whether or not to pass in open ended interface
needs_open_ended_interface = True
metadata_translations = {
'is_graded': 'graded',
'attempts': 'max_attempts',
'due_data': 'due'
}
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(PeerGradingDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([PeerGradingFields.due, PeerGradingFields.graceperiod])
return non_editable_fields
def get_required_module_descriptors(self):
"""
Returns a list of XModuleDescriptor instances upon which this module depends, but are
not children of this module.
"""
# If use_for_single_location is True, this is linked to an open ended problem.
if self.use_for_single_location:
# Try to load the linked module.
# If we can't load it, return empty list to avoid exceptions on progress page.
try:
linked_module = self.system.load_item(self.link_to_location)
return [linked_module]
except (NoPathToItem, ItemNotFoundError):
error_message = ("Cannot find the combined open ended module "
"at location {0} being linked to from peer "
"grading module {1}").format(self.link_to_location, self.location)
log.error(error_message)
return []
else:
return []
# Proxy to PeerGradingModule so that external callers don't have to know if they're working
# with a module or a descriptor
closed = module_attr('closed')
get_instance_state = module_attr('get_instance_state')
get_next_submission = module_attr('get_next_submission')
graded = module_attr('graded')
is_student_calibrated = module_attr('is_student_calibrated')
peer_grading = module_attr('peer_grading')
peer_grading_closed = module_attr('peer_grading_closed')
peer_grading_problem = module_attr('peer_grading_problem')
peer_gs = module_attr('peer_gs')
query_data_for_location = module_attr('query_data_for_location')
save_calibration_essay = module_attr('save_calibration_essay')
save_grade = module_attr('save_grade')
show_calibration_essay = module_attr('show_calibration_essay')
use_for_single_location_local = module_attr('use_for_single_location_local')
_find_corresponding_module_for_location = module_attr('_find_corresponding_module_for_location')
def validate(self):
"""
Validates the state of this instance. This is the override of the general XBlock method,
and it will also ask its superclass to validate.
"""
validation = super(PeerGradingDescriptor, self).validate()
validation = StudioValidation.copy(validation)
i18n_service = self.runtime.service(self, "i18n")
validation.summary = StudioValidationMessage(
StudioValidationMessage.ERROR,
i18n_service.ugettext(
"ORA1 is no longer supported. To use this assessment, "
"replace this ORA1 component with an ORA2 component."
)
)
return validation
| agpl-3.0 |
CallaJun/hackprince | indico/numpy/polynomial/tests/test_hermite_e.py | 123 | 17069 | """Tests for hermite_e module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.hermite_e as herme
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
He0 = np.array([1])
He1 = np.array([0, 1])
He2 = np.array([-1, 0, 1])
He3 = np.array([0, -3, 0, 1])
He4 = np.array([3, 0, -6, 0, 1])
He5 = np.array([0, 15, 0, -10, 0, 1])
He6 = np.array([-15, 0, 45, 0, -15, 0, 1])
He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1])
He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1])
He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1])
Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9]
def trim(x):
return herme.hermetrim(x, tol=1e-6)
class TestConstants(TestCase):
def test_hermedomain(self):
assert_equal(herme.hermedomain, [-1, 1])
def test_hermezero(self):
assert_equal(herme.hermezero, [0])
def test_hermeone(self):
assert_equal(herme.hermeone, [1])
def test_hermex(self):
assert_equal(herme.hermex, [0, 1])
class TestArithmetic(TestCase):
x = np.linspace(-3, 3, 100)
def test_hermeadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = herme.hermeadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermesub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = herme.hermesub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermemulx(self):
assert_equal(herme.hermemulx([0]), [0])
assert_equal(herme.hermemulx([1]), [0, 1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [i, 0, 1]
assert_equal(herme.hermemulx(ser), tgt)
def test_hermemul(self):
# check values of result
for i in range(5):
pol1 = [0]*i + [1]
val1 = herme.hermeval(self.x, pol1)
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
pol2 = [0]*j + [1]
val2 = herme.hermeval(self.x, pol2)
pol3 = herme.hermemul(pol1, pol2)
val3 = herme.hermeval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_hermediv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = herme.hermeadd(ci, cj)
quo, rem = herme.hermediv(tgt, ci)
res = herme.hermeadd(herme.hermemul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([4., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_hermeval(self):
#check empty input
assert_equal(herme.hermeval([], [1]).size, 0)
        #check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Helist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = herme.hermeval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(herme.hermeval(x, [1]).shape, dims)
assert_equal(herme.hermeval(x, [1, 0]).shape, dims)
assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims)
def test_hermeval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = herme.hermeval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herme.hermeval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_hermeval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = herme.hermeval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herme.hermeval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_hermegrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = herme.hermegrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herme.hermegrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_hermegrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = herme.hermegrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herme.hermegrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
def test_hermeint(self):
# check exceptions
assert_raises(ValueError, herme.hermeint, [0], .5)
assert_raises(ValueError, herme.hermeint, [0], -1)
assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = herme.hermeint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i])
res = herme.herme2poly(hermeint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1)
assert_almost_equal(herme.hermeval(-1, hermeint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2)
res = herme.herme2poly(hermeint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1)
res = herme.hermeint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1, k=[k])
res = herme.hermeint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1)
res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1, k=[k], scl=2)
res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_hermeint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T
res = herme.hermeint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeint(c) for c in c2d])
res = herme.hermeint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d])
res = herme.hermeint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(TestCase):
def test_hermeder(self):
# check exceptions
assert_raises(ValueError, herme.hermeder, [0], .5)
assert_raises(ValueError, herme.hermeder, [0], -1)
        # check that the zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = herme.hermeder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = herme.hermeder(herme.hermeint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = herme.hermeder(
herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_hermeder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T
res = herme.hermeder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeder(c) for c in c2d])
res = herme.hermeder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_hermevander(self):
# check for 1d x
x = np.arange(3)
v = herme.hermevander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], herme.hermeval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = herme.hermevander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], herme.hermeval(x, coef))
def test_hermevander2d(self):
# also tests hermeval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = herme.hermevander2d(x1, x2, [1, 2])
tgt = herme.hermeval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herme.hermevander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_hermevander3d(self):
# also tests hermeval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = herme.hermevander3d(x1, x2, x3, [1, 2, 3])
tgt = herme.hermeval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
def test_hermefit(self):
def f(x):
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, herme.hermefit, [1], [1], -1)
assert_raises(TypeError, herme.hermefit, [[1]], [1], 0)
assert_raises(TypeError, herme.hermefit, [], [1], 0)
assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0)
assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0)
assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0)
assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = herme.hermefit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(herme.hermeval(x, coef3), y)
#
coef4 = herme.hermefit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(herme.hermeval(x, coef4), y)
#
coef2d = herme.hermefit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
        yw[0::2] = 0
wcoef3 = herme.hermefit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
# test scaling with complex values x points whose square
# is zero when summed.
x = [1, 1j, -1, -1j]
assert_almost_equal(herme.hermefit(x, x, 1), [0, 1])
class TestCompanion(TestCase):
def test_raises(self):
assert_raises(ValueError, herme.hermecompanion, [])
assert_raises(ValueError, herme.hermecompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(herme.hermecompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(herme.hermecompanion([1, 2])[0, 0] == -.5)
class TestGauss(TestCase):
def test_100(self):
x, w = herme.hermegauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = herme.hermevander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = np.sqrt(2*np.pi)
assert_almost_equal(w.sum(), tgt)
class TestMisc(TestCase):
def test_hermefromroots(self):
res = herme.hermefromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = herme.hermefromroots(roots)
res = herme.hermeval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(herme.herme2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_hermeroots(self):
assert_almost_equal(herme.hermeroots([1]), [])
assert_almost_equal(herme.hermeroots([1, 1]), [-1])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = herme.hermeroots(herme.hermefromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_hermetrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, herme.hermetrim, coef, -1)
# Test results
assert_equal(herme.hermetrim(coef), coef[:-1])
assert_equal(herme.hermetrim(coef, 1), coef[:-3])
assert_equal(herme.hermetrim(coef, 2), [0])
def test_hermeline(self):
assert_equal(herme.hermeline(3, 4), [3, 4])
def test_herme2poly(self):
for i in range(10):
assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i])
def test_poly2herme(self):
for i in range(10):
assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-5, 5, 11)
tgt = np.exp(-.5*x**2)
res = herme.hermeweight(x)
assert_almost_equal(res, tgt)
if __name__ == "__main__":
run_module_suite()
| lgpl-3.0 |
Ayrx/cryptography | src/_cffi_src/commoncrypto/cf.py | 8 | 3224 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <CoreFoundation/CoreFoundation.h>
"""
TYPES = """
typedef bool Boolean;
typedef signed long OSStatus;
typedef unsigned char UInt8;
typedef uint32_t UInt32;
typedef const void * CFAllocatorRef;
const CFAllocatorRef kCFAllocatorDefault;
typedef ... *CFDataRef;
typedef signed long long CFIndex;
typedef ... *CFStringRef;
typedef ... *CFArrayRef;
typedef ... *CFMutableArrayRef;
typedef ... *CFBooleanRef;
typedef ... *CFErrorRef;
typedef ... *CFNumberRef;
typedef ... *CFTypeRef;
typedef ... *CFDictionaryRef;
typedef ... *CFMutableDictionaryRef;
typedef struct {
...;
} CFDictionaryKeyCallBacks;
typedef struct {
...;
} CFDictionaryValueCallBacks;
typedef struct {
...;
} CFRange;
typedef struct {
...;
} CFArrayCallBacks;
typedef UInt32 CFStringEncoding;
enum {
kCFStringEncodingASCII = 0x0600
};
enum {
kCFNumberSInt8Type = 1,
kCFNumberSInt16Type = 2,
kCFNumberSInt32Type = 3,
kCFNumberSInt64Type = 4,
kCFNumberFloat32Type = 5,
kCFNumberFloat64Type = 6,
kCFNumberCharType = 7,
kCFNumberShortType = 8,
kCFNumberIntType = 9,
kCFNumberLongType = 10,
kCFNumberLongLongType = 11,
kCFNumberFloatType = 12,
kCFNumberDoubleType = 13,
kCFNumberCFIndexType = 14,
kCFNumberNSIntegerType = 15,
kCFNumberCGFloatType = 16,
kCFNumberMaxType = 16
};
typedef int CFNumberType;
const CFDictionaryKeyCallBacks kCFTypeDictionaryKeyCallBacks;
const CFDictionaryValueCallBacks kCFTypeDictionaryValueCallBacks;
const CFArrayCallBacks kCFTypeArrayCallBacks;
const CFBooleanRef kCFBooleanTrue;
const CFBooleanRef kCFBooleanFalse;
"""
FUNCTIONS = """
CFDataRef CFDataCreate(CFAllocatorRef, const UInt8 *, CFIndex);
CFStringRef CFStringCreateWithCString(CFAllocatorRef, const char *,
CFStringEncoding);
CFDictionaryRef CFDictionaryCreate(CFAllocatorRef, const void **,
const void **, CFIndex,
const CFDictionaryKeyCallBacks *,
const CFDictionaryValueCallBacks *);
CFMutableDictionaryRef CFDictionaryCreateMutable(
CFAllocatorRef,
CFIndex,
const CFDictionaryKeyCallBacks *,
const CFDictionaryValueCallBacks *
);
void CFDictionarySetValue(CFMutableDictionaryRef, const void *, const void *);
CFIndex CFArrayGetCount(CFArrayRef);
const void *CFArrayGetValueAtIndex(CFArrayRef, CFIndex);
CFIndex CFDataGetLength(CFDataRef);
void CFDataGetBytes(CFDataRef, CFRange, UInt8 *);
CFRange CFRangeMake(CFIndex, CFIndex);
void CFShow(CFTypeRef);
Boolean CFBooleanGetValue(CFBooleanRef);
CFNumberRef CFNumberCreate(CFAllocatorRef, CFNumberType, const void *);
void CFRelease(CFTypeRef);
CFTypeRef CFRetain(CFTypeRef);
CFMutableArrayRef CFArrayCreateMutable(CFAllocatorRef, CFIndex,
const CFArrayCallBacks *);
void CFArrayAppendValue(CFMutableArrayRef, const void *);
"""
MACROS = """
"""
CUSTOMIZATIONS = """
"""
| bsd-3-clause |
kingmotley/SickRage | sickbeard/providers/torrentproject.py | 1 | 4736 | # coding=utf-8
# Author: Gonçalo M. (aka duramato/supergonkas) <[email protected]>
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from requests.compat import urljoin
import validators
from sickbeard import logger, tvcache
from sickbeard.common import USER_AGENT
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class TorrentProjectProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
# Provider Init
TorrentProvider.__init__(self, "TorrentProject")
# Credentials
self.public = True
# Torrent Stats
self.minseed = None
self.minleech = None
# URLs
self.url = 'https://torrentproject.se/'
self.custom_url = None
self.headers.update({'User-Agent': USER_AGENT})
# Proper Strings
# Cache
self.cache = tvcache.TVCache(self, search_params={'RSS': ['0day']})
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
results = []
search_params = {
'out': 'json',
'filter': 2101,
'showmagnets': 'on',
'num': 50
}
for mode in search_strings: # Mode = RSS, Season, Episode
items = []
logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log(u"Search string: {0}".format
(search_string.decode("utf-8")), logger.DEBUG)
search_params['s'] = search_string
if self.custom_url:
if not validators.url(self.custom_url):
logger.log("Invalid custom url set, please check your settings", logger.WARNING)
return results
search_url = self.custom_url
else:
search_url = self.url
torrents = self.get_url(search_url, params=search_params, returns='json')
if not (torrents and "total_found" in torrents and int(torrents["total_found"]) > 0):
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
del torrents["total_found"]
results = []
for i in torrents:
title = torrents[i]["title"]
seeders = try_int(torrents[i]["seeds"], 1)
leechers = try_int(torrents[i]["leechs"], 0)
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Torrent doesn't meet minimum seeds & leechers not selecting : {0}".format(title), logger.DEBUG)
continue
t_hash = torrents[i]["torrent_hash"]
torrent_size = torrents[i]["torrent_size"]
if not all([t_hash, torrent_size]):
continue
download_url = torrents[i]["magnet"] + self._custom_trackers
size = convert_size(torrent_size) or -1
if not all([title, download_url]):
continue
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': t_hash}
if mode != 'RSS':
logger.log(u"Found result: {0} with {1} seeders and {2} leechers".format
(title, seeders, leechers), logger.DEBUG)
items.append(item)
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
provider = TorrentProjectProvider()
| gpl-3.0 |
sidartaoliveira/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options_facts.py | 33 | 5092 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_vpc_dhcp_options_facts
short_description: Gather facts about dhcp options sets in AWS
description:
- Gather facts about dhcp options sets in AWS
version_added: "2.2"
requirements: [ boto3 ]
author: "Nick Aslanidis (@naslanidis)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
        See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html) for possible filters.
required: false
default: null
dhcp_options_ids:
description:
- Get details of specific DHCP Option ID
- Provide this value as a list
required: false
default: None
aliases: ['DhcpOptionsIds']
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# # Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather facts about all DHCP Option sets for an account or profile
ec2_vpc_dhcp_options_facts:
region: ap-southeast-2
profile: production
register: dhcp_facts
- name: Gather facts about a filtered list of DHCP Option sets
ec2_vpc_dhcp_options_facts:
region: ap-southeast-2
profile: production
filters:
"tag:Name": "abc-123"
register: dhcp_facts
- name: Gather facts about a specific DHCP Option set by DhcpOptionId
ec2_vpc_dhcp_options_facts:
region: ap-southeast-2
profile: production
DhcpOptionsIds: dopt-123fece2
register: dhcp_facts
'''
RETURN = '''
dhcp_options:
description: The dhcp option sets for the account
returned: always
type: list
changed:
description: True if listing the dhcp options succeeds
type: bool
returned: always
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, boto3_conn, HAS_BOTO3
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, get_aws_connection_info
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
def get_dhcp_options_info(dhcp_option):
dhcp_option_info = {'DhcpOptionsId': dhcp_option['DhcpOptionsId'],
'DhcpConfigurations': dhcp_option['DhcpConfigurations'],
'Tags': boto3_tag_list_to_ansible_dict(dhcp_option['Tags'])}
return dhcp_option_info
def list_dhcp_options(client, module):
params = dict(Filters=ansible_dict_to_boto3_filter_list(module.params.get('filters')))
if module.params.get("dry_run"):
params['DryRun'] = True
if module.params.get("dhcp_options_ids"):
params['DhcpOptionsIds'] = module.params.get("dhcp_options_ids")
try:
all_dhcp_options = client.describe_dhcp_options(**params)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
results = [camel_dict_to_snake_dict(get_dhcp_options_info(option))
for option in all_dhcp_options['DhcpOptions']]
module.exit_json(dhcp_options=results)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters=dict(type='dict', default={}),
dry_run=dict(type='bool', default=False, aliases=['DryRun']),
            dhcp_options_ids=dict(type='list', aliases=['DhcpOptionsIds'])
)
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Validate Requirements
if not HAS_BOTO3:
module.fail_json(msg='boto3 and botocore are required.')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg="Can't authorize connection - " + str(e))
    # list_dhcp_options() gathers the facts and exits the module via exit_json(),
    # so no further handling is needed here.
    list_dhcp_options(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 |
refstudycentre/versification | util.py | 1 | 11774 |
import numpy as np
import unicodecsv
import codecs
import goslate
import sqlite3
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
def imp_load(filename):
texts = []
books = []
chapters = []
verses = []
# Read in a whole bible
with codecs.open(filename,encoding='utf-8') as f:
bibletext = f.read()
# Split by verse
bible_verses = bibletext.split('$$$')
# Process verses
for verse in bible_verses:
try:
verse = verse.split('\n',1)
ref = verse[0].strip()
text = verse[1].strip()
ref = ref.split('.')
book = ref[0].strip()
cnum = ref[1].strip()
vnum = ref[2].strip()
texts.append(text)
books.append(book)
chapters.append(cnum)
verses.append(vnum)
except IndexError:
pass
return books, chapters, verses, texts
def calculate_similarity(texts, translations):
    # Fit a TF-IDF vectorizer on the (English-)translated texts
tfidf = TfidfVectorizer().fit_transform(texts)
# Build a matrix representation of the similarities between verses
# This will yield a simmetrical matrix
# TODO: For performance and logical reasons: Only calculate similarity for nearby verses, assume others 0 ?
M = np.array([linear_kernel(tfidf[j:j+1], tfidf).flatten() for j in range(len(texts))])
# Hack(ish): Set similarity with verses of same translation to 0
for i in range(len(M)):
for j in range(i+1):
if translations[i] == translations[j]:
M[i][j] = M[j][i] = 0
# print np.round(M*100,0)
return M
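# Illustration of the matrix built above (hypothetical values): for three verses where
# verses 0 and 1 come from the same translation, M is symmetric with zeros on the
# diagonal and between same-translation verses, e.g.
#   M = np.array([[0. , 0. , 0.8],
#                 [0. , 0. , 0.4],
#                 [0.8, 0.4, 0. ]])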
def find_best_couple(M,t):
"""
    Find the most similar pair of verses (i, j) in similarity matrix M.
    t gives the translation(s) of each verse; pairs from the same translation are
    assumed to already have similarity 0 in M.
"""
# assume values are 0 for verses in same translation
i_max, j_max = np.unravel_index(M.argmax(), M.shape)
P_max = M[i_max, j_max]
return i_max, j_max, P_max
def merge_nodes(M,a,b):
"""
merge indices a and b in similarity matrix M into one supernode,
averaging similarity values between the supernode and other verses
"""
N = len(M)
# calculate a new row (and column) for the supernode
supernode_similarity = [np.average([M[k][a],M[k][b]]) for k in range(N)]
# append the row (this will jumble the verse order...)
newM = np.append(M, np.array(supernode_similarity)[None,:], axis=0)
# append 0 (supernode's similarity with itself) to the row and add it as a column
supernode_similarity.append(0.)
newM = np.append(newM, np.array(supernode_similarity)[:,None], axis=1)
# to preserve verse indices, don't delete
# newM = np.delete(newM,[a,b],axis=0)
# rather make rows a and b 0
# to preserve verse indices, don't delete
# newM = np.delete(newM,[a,b],axis=1)
# rather make columns a and b 0
newM[:,a] = np.zeros_like(newM[:,a])
newM[:,b] = np.zeros_like(newM[:,b])
newM[a,:] = np.zeros_like(newM[a,:])
newM[b,:] = np.zeros_like(newM[b,:])
return newM
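# Worked example of merge_nodes (hypothetical 3x3 similarity matrix):
#   M = np.array([[0. , 0.8, 0.2],
#                 [0.8, 0. , 0.6],
#                 [0.2, 0.6, 0. ]])
#   merge_nodes(M, 0, 1) appends a 4th "supernode" row/column whose entries are the
#   averages of rows 0 and 1 (here 0.4 against verse 2), then zeroes rows/columns 0
#   and 1 so they cannot be picked again:
#   -> [[0., 0., 0. , 0. ],
#       [0., 0., 0. , 0. ],
#       [0., 0., 0. , 0.4],
#       [0., 0., 0.4, 0. ]]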
def group_verses(M, t, numT, P_min = 0.1):
"""
Automatically group verses
t = the translation of each verse
numT = max number of verses in a group = number of translations
"""
t = [[val] for val in t]
N = len(M)
groups = {} # keyed by supernode index
iteration = 0
max_iteration = N
while iteration < max_iteration:
iteration += 1
#print "\t\tGrouping: iteration ",iteration
i,j,P = find_best_couple(M, t)
#print "\t\tbest couple: ",i,j,P
# Stop iterating if similarity gets too low...
if P < P_min:
break;
group = []
# merge supernodes if they exist, else merge nodes:
if i in groups:
group.extend(groups[i])
else:
group.append(i)
if j in groups:
group.extend(groups[j])
else:
group.append(j)
# group now contains all of the verses for the new supernode
if len(group) > numT:
# this grouping is invalid
# prevent it from happening again by making P 0
M[i][j] = 0
else:
# valid grouping. save it.
# Remove the previous supernode groups
if i in groups:
del groups[i]
if j in groups:
del groups[j]
# Create the supernode
M = merge_nodes(M,i,j)
t.append(t[i] + t[j])
# Save the index of the new supernode
supernode_index = len(M)-1
groups[supernode_index] = group
print "\r\t\t",len(groups),
print
return groups
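# Illustration of the result (hypothetical indices): with six verses where 0-2 come
# from 'kjv' and 3-5 from 'afr1953', a possible grouping is {6: [0, 3], 7: [2, 5]},
# i.e. a dict keyed by supernode index whose values are the original verse indices
# judged to belong to the same verse group.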
def align(input_translations, input_filenames, output_filename):
"""
Load one csv file for each translation
Group, align and sort the verses
Export a csv file containing a column for each translation
"""
if len(input_translations) != len(input_filenames):
raise ValueError("Number of translations and number of files must be the same")
M = len(input_translations)
# Load pre-translated data
print "\tLoading data from files..."
#translations,books,chapters,verses,texts_original,texts_en = load_translated_verses(input_translations, input_filenames)
translations,chapters,verses,texts_original,texts_en = csv_import_translated_books(input_filenames, input_translations)
# Calculate similarity between verses
print "\tCalculating similarity matrix..."
similarity = calculate_similarity(texts_en, translations)
def canonical_group_cmp(a, b):
"""
Define sort order for groups of verses
"""
# find two verses from the same translation to compare their canonical order
for i in a:
for j in b:
if translations[i] == translations[j]:
                    return i - j
        # Groups with no translation in common compare as equal (avoids returning None).
        return 0
# Group the verses
print "\tGrouping verses..."
groups = group_verses(similarity, translations, 3).values()
# print groups
# Put groups back into canonical order
print "\tSorting verses..."
groups.sort(canonical_group_cmp)
# prepare data for csv export
print "\tPreparing csv data..."
csv_rows = []
csv_rows.append(input_translations) # headers
for group in groups:
# create a row in the csv file for every group
if len(group) == M:
# rows where all translations are present, are quick:
group.sort()
row = [u"{0}:{1}:{2}".format(chapters[verse],verses[verse],texts_original[verse]) for verse in group]
else:
# for other rows, we have to find the missing translation, and substitute it with a blank
row = []
for translation in input_translations:
found = False
for verse in group:
if translation == translations[verse]:
# verse found for this translation
row.append(u"{0}:{1}:{2}".format(chapters[verse],verses[verse],texts_original[verse]))
found = True
break
if not found:
# fill in a blank
row.append("")
csv_rows.append(row)
# print csv_rows
# Export to csv file
print "\tWriting csv file..."
with open(output_filename,'wb') as f:
cw = unicodecsv.writer(f, encoding='utf-8')
cw.writerows(csv_rows)
print "\tDone!"
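# Example call (hypothetical file names): align the same book from two translations
# that were first run through translate_csv():
#   align(['kjv', 'afr1953'],
#         ['gen_kjv_translated.csv', 'gen_afr1953_translated.csv'],
#         'gen_aligned.csv')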
def translate_csv(in_filename, language, out_filename):
"""
Load a bible book from csv file
translate it
save it as a new file
"""
# Create a translator object
gs = goslate.Goslate(retry_times=100, timeout=100)
# Load the bible book to be translated
chapters,verses,texts_original = csv_import_book(in_filename)
# Batch translate the verses if necessary
if language != 'en':
print "Batch translating {0} verses from '{1}' to 'en'".format(len(texts_original), language)
texts_translated = gs.translate(texts_original, 'en', language)
else:
print "Not translating {0} verses already in 'en'".format(len(texts_original))
texts_translated = texts_original
# Write to CSV file
rows = zip(chapters, verses, texts_original, texts_translated)
with open(out_filename,'wb') as f:
cw = unicodecsv.writer(f, encoding='utf-8')
cw.writerow(['chapter','verse','text_original','text_english'])
cw.writerows(rows)
def csv_import_book(filename):
"""
load bible book from csv file
"""
texts = []
chapters = []
verses = []
# Read in a whole file of verses
with open(filename,'rb') as f:
cr = unicodecsv.reader(f, encoding='utf-8')
header = cr.next() # skip header
# Process verses
for cnum,vnum,text in cr:
chapters.append(int(cnum)) # parse integer
verses.append(int(vnum)) # parse integer
texts.append(text.strip()) # remove surrounding whitespace
# return results
return chapters,verses,texts
def csv_export_book(filename, rows=[], chapters=[], verses=[], texts=[]):
if not len(rows) > 0:
rows = zip(chapters, verses, texts)
with open(filename,'wb') as f:
cw = unicodecsv.writer(f,encoding='utf-8')
cw.writerow(['chapter','verse','text'])
cw.writerows(rows)
def csv_import_translated_book(input_file):
"""
import a single translated book from a single translation from single csv file
"""
texts_en = []
texts_original = []
chapters = []
verses = []
# Read in a whole (Google translated) file of verses
with open(input_file, 'rb') as f:
cr = unicodecsv.reader(f, encoding='utf-8')
header = cr.next() # skip header
# Process verses
for cnum,vnum,text_original,text_en in cr:
chapters.append(int(cnum))
verses.append(int(vnum))
texts_original.append(text_original.strip())
texts_en.append(text_en.strip())
# return results
return chapters,verses,texts_original,texts_en
def csv_import_translated_books(input_files, input_translations):
"""
import a single book from M translations from M csv files
"""
if len(input_files) != len(input_translations):
raise ValueError("Number of input files and translations are not the same")
translations = []
chapters = []
verses = []
texts_original = []
texts_en = []
for in_file,translation in zip(input_files,input_translations):
c,v,o,e = csv_import_translated_book(in_file)
chapters.extend(c)
verses.extend(v)
texts_original.extend(o)
texts_en.extend(e)
translations.extend([translation]*len(e))
return translations,chapters,verses,texts_original,texts_en
def csv_import_aligned_book(input_file):
"""
Import a single aligned book (e.g. after it is checked by humans)
"""
groups = []
with open(input_file, 'rb') as f:
cr = unicodecsv.reader(f, encoding='utf-8')
translations = cr.next() # header contains translation names
for row in cr:
group = {}
for i in range(len(translations)):
verse = row[i].split(':',3)
group[translations[i]] = {
'chapternum':int(verse[0]),
'versenum':int(verse[1]),
'text':verse[2].strip()
}
groups.append(group)
return groups | gpl-2.0 |
msabramo/github3.py | github3/git.py | 5 | 6891 | # -*- coding: utf-8 -*-
"""
github3.git
===========
This module contains all the classes relating to Git Data.
See also: http://developer.github.com/v3/git/
"""
from __future__ import unicode_literals
from json import dumps
from base64 import b64decode
from .models import GitHubObject, GitHubCore, BaseCommit
from .users import User
from .decorators import requires_auth
class Blob(GitHubObject):
"""The :class:`Blob <Blob>` object.
See also: http://developer.github.com/v3/git/blobs/
"""
def __init__(self, blob):
super(Blob, self).__init__(blob)
self._api = blob.get('url', '')
#: Raw content of the blob.
self.content = blob.get('content').encode()
#: Encoding of the raw content.
self.encoding = blob.get('encoding')
#: Decoded content of the blob.
self.decoded = self.content
if self.encoding == 'base64':
self.decoded = b64decode(self.content)
#: Size of the blob in bytes
self.size = blob.get('size')
#: SHA1 of the blob
self.sha = blob.get('sha')
def _repr(self):
return '<Blob [{0:.10}]>'.format(self.sha)
class GitData(GitHubCore):
"""The :class:`GitData <GitData>` object. This isn't directly returned to
the user (developer) ever. This is used to prevent duplication of some
common items among other Git Data objects.
"""
def __init__(self, data, session=None):
super(GitData, self).__init__(data, session)
#: SHA of the object
self.sha = data.get('sha')
self._api = data.get('url', '')
class Commit(BaseCommit):
"""The :class:`Commit <Commit>` object. This represents a commit made in a
repository.
See also: http://developer.github.com/v3/git/commits/
"""
def __init__(self, commit, session=None):
super(Commit, self).__init__(commit, session)
#: dict containing at least the name, email and date the commit was
#: created
self.author = commit.get('author', {}) or {}
# If GH returns nil/None then make sure author is a dict
self._author_name = self.author.get('name', '')
#: dict containing similar information to the author attribute
self.committer = commit.get('committer', {}) or {}
# blank the data if GH returns no data
self._commit_name = self.committer.get('name', '')
#: :class:`Tree <Tree>` the commit belongs to.
self.tree = None
if commit.get('tree'):
self.tree = Tree(commit.get('tree'), self._session)
def _repr(self):
return '<Commit [{0}:{1}]>'.format(self._author_name, self.sha)
def author_as_User(self):
"""Attempt to return the author attribute as a
:class:`User <github3.users.User>`. No guarantees are made about the
validity of this object, i.e., having a login or created_at object.
"""
return User(self.author, self._session)
def committer_as_User(self):
"""Attempt to return the committer attribute as a
:class:`User <github3.users.User>` object. No guarantees are made
about the validity of this object.
"""
return User(self.committer, self._session)
class Reference(GitHubCore):
"""The :class:`Reference <Reference>` object. This represents a reference
created on a repository.
See also: http://developer.github.com/v3/git/refs/
"""
def __init__(self, ref, session=None):
super(Reference, self).__init__(ref, session)
self._api = ref.get('url', '')
#: The reference path, e.g., refs/heads/sc/featureA
self.ref = ref.get('ref')
#: :class:`GitObject <GitObject>` the reference points to
self.object = GitObject(ref.get('object', {}))
def _repr(self):
return '<Reference [{0}]>'.format(self.ref)
def _update_(self, ref):
self.__init__(ref, self._session)
@requires_auth
def delete(self):
"""Delete this reference.
:returns: bool
"""
return self._boolean(self._delete(self._api), 204, 404)
@requires_auth
def update(self, sha, force=False):
"""Update this reference.
:param str sha: (required), sha of the reference
:param bool force: (optional), force the update or not
:returns: bool
"""
data = {'sha': sha, 'force': force}
json = self._json(self._patch(self._api, data=dumps(data)), 200)
if json:
self._update_(json)
return True
return False
class GitObject(GitData):
"""The :class:`GitObject <GitObject>` object."""
def __init__(self, obj):
super(GitObject, self).__init__(obj, None)
#: The type of object.
self.type = obj.get('type')
def _repr(self):
return '<Git Object [{0}]>'.format(self.sha)
class Tag(GitData):
"""The :class:`Tag <Tag>` object.
See also: http://developer.github.com/v3/git/tags/
"""
def __init__(self, tag):
super(Tag, self).__init__(tag, None)
#: String of the tag
self.tag = tag.get('tag')
#: Commit message for the tag
self.message = tag.get('message')
#: dict containing the name and email of the person
self.tagger = tag.get('tagger')
#: :class:`GitObject <GitObject>` for the tag
self.object = GitObject(tag.get('object', {}))
def _repr(self):
return '<Tag [{0}]>'.format(self.tag)
class Tree(GitData):
"""The :class:`Tree <Tree>` object.
See also: http://developer.github.com/v3/git/trees/
"""
def __init__(self, tree, session=None):
super(Tree, self).__init__(tree, session)
#: list of :class:`Hash <Hash>` objects
self.tree = [Hash(t) for t in tree.get('tree', [])]
def _repr(self):
return '<Tree [{0}]>'.format(self.sha)
def recurse(self):
"""Recurse into the tree.
:returns: :class:`Tree <Tree>`
"""
json = self._json(self._get(self._api, params={'recursive': '1'}),
200)
return Tree(json, self._session) if json else None
class Hash(GitHubObject):
"""The :class:`Hash <Hash>` object.
See also: http://developer.github.com/v3/git/trees/#create-a-tree
"""
def __init__(self, info):
super(Hash, self).__init__(info)
#: Path to file
self.path = info.get('path')
#: File mode
self.mode = info.get('mode')
#: Type of hash, e.g., blob
self.type = info.get('type')
#: Size of hash
self.size = info.get('size')
#: SHA of the hash
self.sha = info.get('sha')
#: URL of this object in the GitHub API
self.url = info.get('url')
def _repr(self):
return '<Hash [{0}]>'.format(self.sha)
| bsd-3-clause |
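A small illustrative sketch of how the payload-wrapping classes above behave; the dicts are made-up minimal payloads, not real GitHub API responses.
# Hypothetical payloads; the keys mirror what Blob.__init__ and Hash.__init__ read.
blob = Blob({
    'url': 'https://api.github.com/repos/owner/repo/git/blobs/abc123',
    'content': 'aGVsbG8gd29ybGQ=',   # base64 of "hello world"
    'encoding': 'base64',
    'size': 11,
    'sha': 'abc123def456',
})
print(blob.decoded)        # base64-decoded content: "hello world"
entry = Hash({'path': 'README', 'mode': '100644', 'type': 'blob', 'sha': 'abc123'})
print(entry.path + ' ' + entry.type)   # "README blob"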
oesteban/mriqc | mriqc/qc/anatomical.py | 1 | 21553 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# pylint: disable=no-member
r"""
Measures based on noise measurements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _iqms_cjv:
- :py:func:`~mriqc.qc.anatomical.cjv` -- **coefficient of joint variation**
(:abbr:`CJV (coefficient of joint variation)`):
The ``cjv`` of GM and WM was proposed as objective function by [Ganzetti2016]_ for
the optimization of :abbr:`INU (intensity non-uniformity)` correction algorithms.
Higher values are related to the presence of heavy head motion and large
:abbr:`INU (intensity non-uniformity)` artifacts. Lower values are better.
.. _iqms_cnr:
- :py:func:`~mriqc.qc.anatomical.cnr` -- **contrast-to-noise ratio**
  (:abbr:`CNR (contrast-to-noise ratio)`): The ``cnr`` [Magnotta2006]_,
is an extension of the :abbr:`SNR (signal-to-noise Ratio)` calculation
to evaluate how separated the tissue distributions of GM and WM are.
Higher values indicate better quality.
.. _iqms_snr:
- :py:func:`~mriqc.qc.anatomical.snr` -- **signal-to-noise ratio**
(:abbr:`SNR (signal-to-noise ratio)`): calculated within the
tissue mask.
.. _iqms_snrd:
- :py:func:`~mriqc.qc.anatomical.snr_dietrich`: **Dietrich's SNR**
(:abbr:`SNRd (signal-to-noise ratio, Dietrich 2007)`) as proposed
by [Dietrich2007]_, using the air background as reference.
.. _iqms_qi2:
- :py:func:`~mriqc.qc.anatomical.art_qi2`: **Mortamet's quality index 2**
(:abbr:`QI2 (quality index 2)`) is a calculation of the goodness-of-fit
of a :math:`\chi^2` distribution on the air mask,
once the artifactual intensities detected for computing
the :abbr:`QI1 (quality index 1)` index have been removed [Mortamet2009]_.
Measures based on information theory
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _iqms_efc:
- :py:func:`~mriqc.qc.anatomical.efc`:
The :abbr:`EFC (Entropy Focus Criterion)`
[Atkinson1997]_ uses the Shannon entropy of voxel intensities as
an indication of ghosting and blurring induced by head motion.
Lower values are better.
The original equation is normalized by the maximum entropy, so that the
:abbr:`EFC (Entropy Focus Criterion)` can be compared across images with
different dimensions.
.. _iqms_fber:
- :py:func:`~mriqc.qc.anatomical.fber`:
The :abbr:`FBER (Foreground-Background Energy Ratio)` [Shehzad2015]_,
defined as the mean energy of image values within the head relative
to outside the head [QAP-measures]_.
Higher values are better.
Measures targeting specific artifacts
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _iqms_inu:
- **inu_\*** (*nipype interface to N4ITK*): summary statistics (max, min and median)
of the :abbr:`INU (intensity non-uniformity)` field as extracted by the N4ITK algorithm
[Tustison2010]_. Values closer to 1.0 are better.
.. _iqms_qi:
- :py:func:`~mriqc.qc.anatomical.art_qi1`:
Detect artifacts in the image using the method described in [Mortamet2009]_.
The :abbr:`QI1 (quality index 1)` is the proportion of voxels with intensity
corrupted by artifacts normalized by the number of voxels in the background.
Lower values are better.
.. figure:: ../resources/mortamet-mrm2009.png
The workflow to compute the artifact detection from [Mortamet2009]_.
.. _iqms_wm2max:
- :py:func:`~mriqc.qc.anatomical.wm2max`:
The white-matter to maximum intensity ratio is the median intensity
within the WM mask over the 95% percentile of the full intensity
distribution, that captures the existence of long tails due to
hyper-intensity of the carotid vessels and fat. Values
should be around the interval [0.6, 0.8].
Other measures
^^^^^^^^^^^^^^
.. _iqms_fwhm:
- **fwhm** (*nipype interface to AFNI*): The :abbr:`FWHM (full-width half maximum)` of
the spatial distribution of the image intensity values in units of voxels [Forman1995]_.
Lower values are better. Uses the gaussian width estimator filter implemented in
AFNI's ``3dFWHMx``:
.. math ::
\text{FWHM} = \sqrt{-{\left[4 \ln{(1-\frac{\sigma^2_{X^m_{i+1,j}-X^m_{i,j}}}
{2\sigma^2_{X^m_{i,j}}}})\right]}^{-1}}
.. _iqms_icvs:
- :py:func:`~mriqc.qc.anatomical.volume_fraction` (**icvs_\***):
the
:abbr:`ICV (intracranial volume)` fractions of :abbr:`CSF (cerebrospinal fluid)`,
:abbr:`GM (gray-matter)` and :abbr:`WM (white-matter)`. They should move within
a normative range.
.. _iqms_rpve:
- :py:func:`~mriqc.qc.anatomical.rpve` (**rpve_\***): the
:abbr:`rPVe (residual partial voluming error)` of :abbr:`CSF (cerebrospinal fluid)`,
:abbr:`GM (gray-matter)` and :abbr:`WM (white-matter)`. Lower values are better.
.. _iqms_summary:
- :py:func:`~mriqc.qc.anatomical.summary_stats` (**summary_\*_\***):
Mean, standard deviation, 5% percentile and 95% percentile of the distribution
of background, :abbr:`CSF (cerebrospinal fluid)`, :abbr:`GM (gray-matter)` and
:abbr:`WM (white-matter)`.
.. _iqms_tpm:
- **overlap_\*_\***:
The overlap of the :abbr:`TPMs (tissue probability maps)` estimated from the image and
the corresponding maps from the ICBM nonlinear-asymmetric 2009c template.
.. math ::
\text{JI}^k = \frac{\sum_i \min{(\text{TPM}^k_i, \text{MNI}^k_i)}}
{\sum_i \max{(\text{TPM}^k_i, \text{MNI}^k_i)}}
.. topic:: References
.. [Dietrich2007] Dietrich et al., *Measurement of SNRs in MR images: influence
of multichannel coils, parallel imaging and reconstruction filters*, JMRI 26(2):375--385.
2007. doi:`10.1002/jmri.20969 <http://dx.doi.org/10.1002/jmri.20969>`_.
.. [Ganzetti2016] Ganzetti et al., *Intensity inhomogeneity correction of structural MR images:
a data-driven approach to define input algorithm parameters*. Front Neuroinform 10:10. 2016.
    doi:`10.3389/fninf.2016.00010 <http://dx.doi.org/10.3389/fninf.2016.00010>`_.
  .. [Magnotta2006] Magnotta, VA., & Friedman, L., *Measurement of signal-to-noise
and contrast-to-noise in the fBIRN multicenter imaging study*.
J Dig Imag 19(2):140-147, 2006. doi:`10.1007/s10278-006-0264-x
<http://dx.doi.org/10.1007/s10278-006-0264-x>`_.
.. [Mortamet2009] Mortamet B et al., *Automatic quality assessment in
structural brain magnetic resonance imaging*, Mag Res Med 62(2):365-372,
2009. doi:`10.1002/mrm.21992 <http://dx.doi.org/10.1002/mrm.21992>`_.
.. [Tustison2010] Tustison NJ et al., *N4ITK: improved N3 bias correction*,
IEEE Trans Med Imag, 29(6):1310-20,
2010. doi:`10.1109/TMI.2010.2046908 <http://dx.doi.org/10.1109/TMI.2010.2046908>`_.
.. [Shehzad2015] Shehzad Z et al., *The Preprocessed Connectomes Project
Quality Assessment Protocol - a resource for measuring the quality of MRI data*,
Front. Neurosci. Conference Abstract: Neuroinformatics 2015.
doi:`10.3389/conf.fnins.2015.91.00047 <https://doi.org/10.3389/conf.fnins.2015.91.00047>`_.
.. [Forman1995] Forman SD et al., *Improved assessment of significant activation in functional
magnetic resonance imaging (fMRI): use of a cluster-size threshold*,
Magn. Reson. Med. 33 (5), 636–647, 1995.
doi:`10.1002/mrm.1910330508 <https://doi.org/10.1002/mrm.1910330508>`_.
mriqc.qc.anatomical module
^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
import os.path as op
from sys import version_info
from math import pi, sqrt
import numpy as np
import scipy.ndimage as nd
from scipy.stats import kurtosis # pylint: disable=E0611
from io import open # pylint: disable=W0622
from builtins import zip, range # pylint: disable=W0622
from six import string_types
DIETRICH_FACTOR = 1.0 / sqrt(2 / (4 - pi))
FSL_FAST_LABELS = {'csf': 1, 'gm': 2, 'wm': 3, 'bg': 0}
PY3 = version_info[0] > 2
def snr(mu_fg, sigma_fg, n):
r"""
Calculate the :abbr:`SNR (Signal-to-Noise Ratio)`.
The estimation may be provided with only one foreground region in
which the noise is computed as follows:
.. math::
\text{SNR} = \frac{\mu_F}{\sigma_F\sqrt{n/(n-1)}},
where :math:`\mu_F` is the mean intensity of the foreground and
:math:`\sigma_F` is the standard deviation of the same region.
:param float mu_fg: mean of foreground.
:param float sigma_fg: standard deviation of foreground.
:param int n: number of voxels in foreground mask.
:return: the computed SNR
"""
return float(mu_fg / (sigma_fg * sqrt(n / (n - 1))))
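# Illustrative note (added; not part of the original module): for a large
# foreground mask the correction factor sqrt(n / (n - 1)) tends to 1, so the
# value approaches the plain mean/std ratio, e.g.
#   snr(mu_fg=100.0, sigma_fg=10.0, n=10000)  ->  ~9.9995 (close to mu/sigma = 10)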
def snr_dietrich(mu_fg, sigma_air):
r"""
Calculate the :abbr:`SNR (Signal-to-Noise Ratio)`.
This must be an air mask around the head, and it should not contain artifacts.
The computation is done following the eq. A.12 of [Dietrich2007]_, which
includes a correction factor in the estimation of the standard deviation of
air and its Rayleigh distribution:
.. math::
\text{SNR} = \frac{\mu_F}{\sqrt{\frac{2}{4-\pi}}\,\sigma_\text{air}}.
:param float mu_fg: mean of foreground.
:param float sigma_air: standard deviation of the air surrounding the head ("hat" mask).
:return: the computed SNR for the foreground segmentation
"""
if sigma_air < 1.0:
from .. import MRIQC_LOG
MRIQC_LOG.warning('SNRd - background sigma is too small (%f)', sigma_air)
sigma_air += 1.0
return float(DIETRICH_FACTOR * mu_fg / sigma_air)
def cnr(mu_wm, mu_gm, sigma_air):
r"""
    Calculate the :abbr:`CNR (Contrast-to-Noise Ratio)` [Magnotta2006]_.
Higher values are better.
.. math::
\text{CNR} = \frac{|\mu_\text{GM} - \mu_\text{WM} |}{\sqrt{\sigma_B^2 +
\sigma_\text{WM}^2 + \sigma_\text{GM}^2}},
where :math:`\sigma_B` is the standard deviation of the noise distribution within
the air (background) mask.
:param float mu_wm: mean of signal within white-matter mask.
:param float mu_gm: mean of signal within gray-matter mask.
:param float sigma_air: standard deviation of the air surrounding the head ("hat" mask).
:return: the computed CNR
"""
return float(abs(mu_wm - mu_gm) / sigma_air)
def cjv(mu_wm, mu_gm, sigma_wm, sigma_gm):
r"""
Calculate the :abbr:`CJV (coefficient of joint variation)`, a measure
related to :abbr:`SNR (Signal-to-Noise Ratio)` and
:abbr:`CNR (Contrast-to-Noise Ratio)` that is presented as a proxy for
the :abbr:`INU (intensity non-uniformity)` artifact [Ganzetti2016]_.
Lower is better.
.. math::
\text{CJV} = \frac{\sigma_\text{WM} + \sigma_\text{GM}}{|\mu_\text{WM} - \mu_\text{GM}|}.
:param float mu_wm: mean of signal within white-matter mask.
:param float mu_gm: mean of signal within gray-matter mask.
:param float sigma_wm: standard deviation of signal within white-matter mask.
:param float sigma_gm: standard deviation of signal within gray-matter mask.
:return: the computed CJV
"""
return float((sigma_wm + sigma_gm) / abs(mu_wm - mu_gm))
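# Illustrative note (added; not part of the original module): with plausible
# T1w tissue statistics the CJV is dimensionless, e.g.
#   cjv(mu_wm=800.0, mu_gm=600.0, sigma_wm=40.0, sigma_gm=60.0)  ->  0.5
# i.e. (40 + 60) / |800 - 600|; lower values indicate less INU and motion.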
def fber(img, headmask, rotmask=None):
r"""
Calculate the :abbr:`FBER (Foreground-Background Energy Ratio)` [Shehzad2015]_,
defined as the mean energy of image values within the head relative
to outside the head. Higher values are better.
.. math::
\text{FBER} = \frac{E[|F|^2]}{E[|B|^2]}
:param numpy.ndarray img: input data
:param numpy.ndarray headmask: a mask of the head (including skull, skin, etc.)
:param numpy.ndarray rotmask: a mask of empty voxels inserted after a rotation of
data
"""
fg_mu = np.median(np.abs(img[headmask > 0]) ** 2)
airmask = np.ones_like(headmask, dtype=np.uint8)
airmask[headmask > 0] = 0
if rotmask is not None:
airmask[rotmask > 0] = 0
bg_mu = np.median(np.abs(img[airmask == 1]) ** 2)
if bg_mu < 1.0e-3:
return 0
return float(fg_mu / bg_mu)
def efc(img, framemask=None):
r"""
Calculate the :abbr:`EFC (Entropy Focus Criterion)` [Atkinson1997]_.
Uses the Shannon entropy of voxel intensities as an indication of ghosting
and blurring induced by head motion. A range of low values is better,
with EFC = 0 for all the energy concentrated in one pixel.
.. math::
\text{E} = - \sum_{j=1}^N \frac{x_j}{x_\text{max}}
\ln \left[\frac{x_j}{x_\text{max}}\right]
with :math:`x_\text{max} = \sqrt{\sum_{j=1}^N x^2_j}`.
The original equation is normalized by the maximum entropy, so that the
:abbr:`EFC (Entropy Focus Criterion)` can be compared across images with
different dimensions:
.. math::
\text{EFC} = \left( \frac{N}{\sqrt{N}} \, \log{\sqrt{N}^{-1}} \right) \text{E}
:param numpy.ndarray img: input data
:param numpy.ndarray framemask: a mask of empty voxels inserted after a rotation of
data
"""
if framemask is None:
framemask = np.zeros_like(img, dtype=np.uint8)
n_vox = np.sum(1 - framemask)
# Calculate the maximum value of the EFC (which occurs any time all
# voxels have the same value)
efc_max = 1.0 * n_vox * (1.0 / np.sqrt(n_vox)) * \
np.log(1.0 / np.sqrt(n_vox))
# Calculate the total image energy
b_max = np.sqrt((img[framemask == 0]**2).sum())
# Calculate EFC (add 1e-16 to the image data to keep log happy)
return float((1.0 / efc_max) * np.sum((img[framemask == 0] / b_max) * np.log(
(img[framemask == 0] + 1e-16) / b_max)))
def wm2max(img, mu_wm):
r"""
Calculate the :abbr:`WM2MAX (white-matter-to-max ratio)`,
    defined as the mean intensity within the WM mask relative to the 99.95th
    percentile of the full intensity distribution. Values close to 1.0 are
better:
.. math ::
\text{WM2MAX} = \frac{\mu_\text{WM}}{P_{99.95}(X)}
"""
return float(mu_wm / np.percentile(img.reshape(-1), 99.95))
def art_qi1(airmask, artmask):
r"""
Detect artifacts in the image using the method described in [Mortamet2009]_.
    Calculates :math:`\text{QI}_1`, the proportion of voxels with intensity
corrupted by artifacts normalized by the number of voxels in the background:
.. math ::
\text{QI}_1 = \frac{1}{N} \sum\limits_{x\in X_\text{art}} 1
Lower values are better.
:param numpy.ndarray airmask: input air mask, without artifacts
:param numpy.ndarray artmask: input artifacts mask
"""
# Count the number of voxels that remain after the opening operation.
# These are artifacts.
return float(artmask.sum() / (airmask.sum() + artmask.sum()))
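# Illustrative note (added; not part of the original module): QI1 is the
# fraction of background voxels flagged as artifactual, e.g. with 990 clean
# air voxels and 10 artifact voxels:
#   art_qi1(np.ones(990), np.ones(10))  ->  10.0 / 1000.0 = 0.01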
def art_qi2(img, airmask, min_voxels=int(1e3), max_voxels=int(3e5), save_plot=True):
r"""
Calculates :math:`\text{QI}_2`, based on the goodness-of-fit of a centered
:math:`\chi^2` distribution onto the intensity distribution of
non-artifactual background (within the "hat" mask):
.. math ::
\chi^2_n = \frac{2}{(\sigma \sqrt{2})^{2n} \, (n - 1)!}x^{2n - 1}\, e^{-\frac{x}{2}}
where :math:`n` is the number of coil elements.
:param numpy.ndarray img: input data
:param numpy.ndarray airmask: input air mask without artifacts
"""
from sklearn.neighbors import KernelDensity
from scipy.stats import chi2
from mriqc.viz.misc import plot_qi2
# S. Ogawa was born
np.random.seed(1191935)
data = img[airmask > 0]
data = data[data > 0]
# Write out figure of the fitting
out_file = op.abspath('error.svg')
with open(out_file, 'w') as ofh:
ofh.write('<p>Background noise fitting could not be plotted.</p>')
if len(data) < min_voxels:
return 0.0, out_file
modelx = data if len(data) < max_voxels else np.random.choice(
data, size=max_voxels)
x_grid = np.linspace(0.0, np.percentile(data, 99), 1000)
# Estimate data pdf with KDE on a random subsample
kde_skl = KernelDensity(bandwidth=0.05 * np.percentile(data, 98),
kernel='gaussian').fit(modelx[:, np.newaxis])
kde = np.exp(kde_skl.score_samples(x_grid[:, np.newaxis]))
# Find cutoff
kdethi = np.argmax(kde[::-1] > kde.max() * 0.5)
# Fit X^2
param = chi2.fit(modelx[modelx < np.percentile(data, 95)], 32)
chi_pdf = chi2.pdf(x_grid, *param[:-2], loc=param[-2], scale=param[-1])
# Compute goodness-of-fit (gof)
gof = float(np.abs(kde[-kdethi:] - chi_pdf[-kdethi:]).mean())
if save_plot:
out_file = plot_qi2(x_grid, kde, chi_pdf, modelx, kdethi)
return gof, out_file
def volume_fraction(pvms):
r"""
Computes the :abbr:`ICV (intracranial volume)` fractions
corresponding to the (partial volume maps).
.. math ::
\text{ICV}^k = \frac{\sum_i p^k_i}{\sum\limits_{x \in X_\text{brain}} 1}
:param list pvms: list of :code:`numpy.ndarray` of partial volume maps.
"""
tissue_vfs = {}
total = 0
for k, lid in list(FSL_FAST_LABELS.items()):
if lid == 0:
continue
tissue_vfs[k] = pvms[lid - 1].sum()
total += tissue_vfs[k]
for k in list(tissue_vfs.keys()):
tissue_vfs[k] /= total
return {k: float(v) for k, v in list(tissue_vfs.items())}
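# Illustrative note (added; not part of the original module): with toy
# partial-volume maps that sum to one everywhere, the ICV fractions are just
# the normalised per-tissue sums, e.g.
#   pvms = [np.full((2, 2, 2), f) for f in (0.2, 0.3, 0.5)]  # csf, gm, wm
#   volume_fraction(pvms)  ->  {'csf': 0.2, 'gm': 0.3, 'wm': 0.5}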
def rpve(pvms, seg):
"""
Computes the :abbr:`rPVe (residual partial voluming error)`
of each tissue class.
.. math ::
\\text{rPVE}^k = \\frac{1}{N} \\left[ \\sum\\limits_{p^k_i \
\\in [0.5, P_{98}]} p^k_i + \\sum\\limits_{p^k_i \\in [P_{2}, 0.5)} 1 - p^k_i \\right]
"""
pvfs = {}
for k, lid in list(FSL_FAST_LABELS.items()):
if lid == 0:
continue
pvmap = pvms[lid - 1]
pvmap[pvmap < 0.] = 0.
pvmap[pvmap >= 1.] = 1.
totalvol = np.sum(pvmap > 0.0)
upth = np.percentile(pvmap[pvmap > 0], 98)
loth = np.percentile(pvmap[pvmap > 0], 2)
pvmap[pvmap < loth] = 0
pvmap[pvmap > upth] = 0
pvfs[k] = (pvmap[pvmap > 0.5].sum() + (1.0 - pvmap[pvmap <= 0.5]).sum()) / totalvol
return {k: float(v) for k, v in list(pvfs.items())}
def summary_stats(img, pvms, airmask=None, erode=True):
r"""
Estimates the mean, the standard deviation, the 95\%
and the 5\% percentiles of each tissue distribution.
.. warning ::
Sometimes (with datasets that have been partially processed), the air
mask will be empty. In those cases, the background stats will be zero
for the mean, median, percentiles and kurtosis, the sum of voxels in
the other remaining labels for ``n``, and finally the MAD and the
:math:`\sigma` will be calculated as:
.. math ::
\sigma_\text{BG} = \sqrt{\sum \sigma_\text{i}^2}
"""
from .. import MRIQC_LOG
from statsmodels.robust.scale import mad
# Check type of input masks
dims = np.squeeze(np.array(pvms)).ndim
if dims == 4:
# If pvms is from FSL FAST, create the bg mask
stats_pvms = [np.zeros_like(img)] + pvms
elif dims == 3:
stats_pvms = [np.ones_like(pvms) - pvms, pvms]
else:
raise RuntimeError('Incorrect image dimensions ({0:d})'.format(
np.array(pvms).ndim))
if airmask is not None:
stats_pvms[0] = airmask
labels = list(FSL_FAST_LABELS.items())
if len(stats_pvms) == 2:
labels = list(zip(['bg', 'fg'], list(range(2))))
output = {}
for k, lid in labels:
mask = np.zeros_like(img, dtype=np.uint8)
mask[stats_pvms[lid] > 0.85] = 1
if erode:
struc = nd.generate_binary_structure(3, 2)
mask = nd.binary_erosion(
mask, structure=struc).astype(np.uint8)
nvox = float(mask.sum())
if nvox < 1e3:
MRIQC_LOG.warning('calculating summary stats of label "%s" in a very small '
'mask (%d voxels)', k, int(nvox))
if k == 'bg':
continue
output[k] = {
'mean': float(img[mask == 1].mean()),
'stdv': float(img[mask == 1].std()),
'median': float(np.median(img[mask == 1])),
'mad': float(mad(img[mask == 1])),
'p95': float(np.percentile(img[mask == 1], 95)),
'p05': float(np.percentile(img[mask == 1], 5)),
'k': float(kurtosis(img[mask == 1])),
'n': nvox,
}
if 'bg' not in output:
output['bg'] = {
'mean': 0.,
'median': 0.,
'p95': 0.,
'p05': 0.,
'k': 0.,
'stdv': sqrt(sum(val['stdv']**2
for _, val in list(output.items()))),
'mad': sqrt(sum(val['mad']**2
for _, val in list(output.items()))),
'n': sum(val['n'] for _, val in list(output.items()))
}
if 'bg' in output and output['bg']['mad'] == 0.0 and output['bg']['stdv'] > 1.0:
MRIQC_LOG.warning('estimated MAD in the background was too small ('
'MAD=%f)', output['bg']['mad'])
output['bg']['mad'] = output['bg']['stdv'] / DIETRICH_FACTOR
return output
def _prepare_mask(mask, label, erode=True):
fgmask = mask.copy()
if np.issubdtype(fgmask.dtype, np.integer):
if isinstance(label, string_types):
label = FSL_FAST_LABELS[label]
fgmask[fgmask != label] = 0
fgmask[fgmask == label] = 1
else:
fgmask[fgmask > .95] = 1.
fgmask[fgmask < 1.] = 0
if erode:
# Create a structural element to be used in an opening operation.
struc = nd.generate_binary_structure(3, 2)
# Perform an opening operation on the background data.
fgmask = nd.binary_opening(fgmask, structure=struc).astype(np.uint8)
return fgmask
| bsd-3-clause |
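A rough sketch of how a few of the measures above can be exercised on synthetic data; the random arrays merely stand in for a real T1w volume and head mask, so the printed values only demonstrate the call signatures (assumes mriqc and numpy are installed).
import numpy as np
from mriqc.qc.anatomical import efc, fber, snr_dietrich
rng = np.random.RandomState(0)
img = rng.normal(500.0, 30.0, size=(32, 32, 32))
headmask = np.zeros(img.shape, dtype=np.uint8)
headmask[8:24, 8:24, 8:24] = 1
print('EFC  =', efc(img))
print('FBER =', fber(img, headmask))
print('SNRd =', snr_dietrich(img[headmask > 0].mean(), img[headmask == 0].std()))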
analogdevicesinc/gnuradio | gr-analog/examples/fmtest.py | 40 | 7941 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
from gnuradio import analog
from gnuradio import channels
import sys, math, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class fmtx(gr.hier_block2):
def __init__(self, lo_freq, audio_rate, if_rate):
gr.hier_block2.__init__(self, "build_fm",
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(1, 1, gr.sizeof_gr_complex))
fmtx = analog.nbfm_tx(audio_rate, if_rate, max_dev=5e3, tau=75e-6)
# Local oscillator
lo = analog.sig_source_c(if_rate, # sample rate
analog.GR_SIN_WAVE, # waveform type
lo_freq, # frequency
1.0, # amplitude
0) # DC Offset
mixer = blocks.multiply_cc()
self.connect(self, fmtx, (mixer, 0))
self.connect(lo, (mixer, 1))
self.connect(mixer, self)
class fmtest(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._nsamples = 1000000
self._audio_rate = 8000
# Set up N channels with their own baseband and IF frequencies
self._N = 5
chspacing = 16000
freq = [10, 20, 30, 40, 50]
f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing]
self._if_rate = 4*self._N*self._audio_rate
# Create a signal source and frequency modulate it
self.sum = blocks.add_cc()
for n in xrange(self._N):
sig = analog.sig_source_f(self._audio_rate, analog.GR_SIN_WAVE, freq[n], 0.5)
fm = fmtx(f_lo[n], self._audio_rate, self._if_rate)
self.connect(sig, fm)
self.connect(fm, (self.sum, n))
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamples)
self.snk_tx = blocks.vector_sink_c()
self.channel = channels.channel_model(0.1)
self.connect(self.sum, self.head, self.channel, self.snk_tx)
        # Design the channelizer
self._M = 10
bw = chspacing/2.0
t_bw = chspacing/10.0
self._chan_rate = self._if_rate / self._M
self._taps = filter.firdes.low_pass_2(1, self._if_rate, bw, t_bw,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
tpc = math.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
self.connect(self.channel, self.pfb)
        # Create an FM demodulator, squelch, and vector sink for each of the
        # M output channels of the filter and connect them
self.fmdet = list()
self.squelch = list()
self.snks = list()
for i in xrange(self._M):
self.fmdet.append(analog.nbfm_rx(self._audio_rate, self._chan_rate))
self.squelch.append(analog.standard_squelch(self._audio_rate*10))
self.snks.append(blocks.vector_sink_f())
self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i])
def num_tx_channels(self):
return self._N
def num_rx_channels(self):
return self._M
def main():
fm = fmtest()
tstart = time.time()
fm.run()
tend = time.time()
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 100000
fftlen = 8192
winfunc = scipy.blackman
# Plot transmitted signal
fs = fm._if_rate
d = fm.snk_tx.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
visible=False)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-120.0, 20.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-5, 5])
# Set up the number of rows and columns for plotting the subfigures
Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels())))
Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols))
if(fm.num_rx_channels() % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = fm._audio_rate
for i in xrange(len(fm.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = fm.snks[i].data()[Ns:Ne]
sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i)
X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
visible=False)
#X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
X_o = 10.0*scipy.log10(abs(X))
#f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
f_o = scipy.arange(0, fs_o/2.0, fs_o/2.0/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+0.1])
sp2_f.set_ylim([-120.0, 20.0])
sp2_f.grid(True)
sp2_f.set_title(("Channel %d" % i), weight="bold")
sp2_f.set_xlabel("Frequency (kHz)")
sp2_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs_o
Tmax = len(d)*Ts
t_o = scipy.arange(0, Tmax, Ts)
x_t = scipy.array(d)
sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i)
p2_t = sp2_t.plot(t_o, x_t.real, "b")
p2_t = sp2_t.plot(t_o, x_t.imag, "r")
sp2_t.set_xlim([min(t_o), max(t_o)+1])
sp2_t.set_ylim([-1, 1])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
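If the matplotlib plots are not needed, the flowgraph above can also be driven headless and the per-channel sinks inspected directly; an illustrative sketch only, assuming GNU Radio and the classes above are importable.
fm = fmtest()
fm.run()
for i, snk in enumerate(fm.snks):
    print "channel %d: %d demodulated samples" % (i, len(snk.data()))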
eng-tools/sfsimodels | tests/test_interaction.py | 1 | 2586 | from sfsimodels import models
import sfsimodels as sm
import json
import numpy as np
def test_link_building_and_soil():
number_of_storeys = 6
interstorey_height = 3.4 # m
n_bays = 3
fb = models.FrameBuilding(number_of_storeys, n_bays)
fb.id = 1
fb.interstorey_heights = interstorey_height * np.ones(number_of_storeys)
fb.floor_length = 18.0 # m
fb.floor_width = 16.0 # m
fd = models.RaftFoundation()
fd.length = 4
fd.width = 6
fd.height = 0.0
fd.density = 3
fd2 = models.RaftFoundation()
fd2.length = 14
fd2.width = 16
fd2.height = 10.0
fd2.density = 13
# link building to foundation
fd.set_building(fb, two_way=False)
assert fd.building.n_bays == 3
assert fb.foundation is None
fd.set_building(fb, two_way=True)
assert fb.foundation.length == 4
# one way link
fb.set_foundation(fd2, two_way=False)
assert fb.foundation.length == 14
assert fd2.building is None
fb.set_foundation(fd2, two_way=True)
assert fb.foundation.length == 14
assert np.isclose(fd2.building.floor_width, 16.0)
structure = models.SDOFBuilding()
structure.set_foundation(fd, two_way=True)
assert structure.foundation.width == 6
assert isinstance(fd.building, models.SDOFBuilding)
def test_save_and_load_w_linked_building_and_soil():
number_of_storeys = 6
interstorey_height = 3.4 # m
wb = models.WallBuilding(number_of_storeys)
wb.id = 1
wb.interstorey_heights = interstorey_height * np.ones(number_of_storeys)
wb.floor_length = 18.0 # m
wb.floor_width = 16.0 # m
fd = models.RaftFoundation()
fd.length = 4
fd.width = 6
fd.height = 0.0
fd.density = 3
fd.id = 1
# link building to foundation
fd.set_building(wb, two_way=False)
assert fd.building.n_storeys == number_of_storeys
assert wb.foundation is None
fd.set_building(wb, two_way=True)
assert wb.foundation.length == 4
ecp_output = sm.Output()
ecp_output.add_to_dict(wb)
ecp_output.add_to_dict(fd)
ecp_output.name = "a single wall building"
ecp_output.units = "N, kg, m, s"
ecp_output.comments = ""
p_str = json.dumps(ecp_output.to_dict(), skipkeys=["__repr__"], indent=4)
objs = sm.loads_json(p_str)
building = objs["building"][1]
foundation = objs["foundation"][1]
assert foundation.width == 6
    assert building.foundation.width == 6, building.foundation
assert np.isclose(building.floor_length, 18.0)
if __name__ == '__main__':
test_save_and_load_w_linked_building_and_soil() | mit |
incaser/odoo-odoo | addons/l10n_de/__init__.py | 693 | 1057 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
stianvi/ansible-modules-core | cloud/openstack/os_object.py | 58 | 4111 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_object
short_description: Create or Delete objects and containers from OpenStack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
extends_documentation_fragment: openstack
description:
- Create or Delete objects and containers from OpenStack
options:
container:
description:
- The name of the container in which to create the object
required: true
name:
description:
- Name to be give to the object. If omitted, operations will be on
the entire container
required: false
filename:
description:
- Path to local file to be uploaded.
required: false
container_access:
description:
- desired container access level.
required: false
choices: ['private', 'public']
default: private
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
# Creates a object named 'fstab' in the 'config' container
- os_object: cloud=mordred state=present name=fstab container=config filename=/etc/fstab
# Deletes a container called config and all of its contents
- os_object: cloud=rax-iad state=absent container=config
'''
def process_object(
cloud_obj, container, name, filename, container_access, **kwargs):
changed = False
container_obj = cloud_obj.get_container(container)
if kwargs['state'] == 'present':
if not container_obj:
container_obj = cloud_obj.create_container(container)
changed = True
if cloud_obj.get_container_access(container) != container_access:
cloud_obj.set_container_access(container, container_access)
changed = True
if name:
if cloud_obj.is_object_stale(container, name, filename):
cloud_obj.create_object(container, name, filename)
changed = True
else:
if container_obj:
if name:
if cloud_obj.get_object_metadata(container, name):
cloud_obj.delete_object(container, name)
                    changed = True
else:
cloud_obj.delete_container(container)
                changed = True
return changed
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=False, default=None),
container=dict(required=True),
filename=dict(required=False, default=None),
container_access=dict(default='private', choices=['private', 'public']),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
changed = process_object(cloud, **module.params)
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == "__main__":
main()
| gpl-3.0 |
philoniare/horizon | openstack_dashboard/test/integration_tests/tests/test_sahara_image_registry.py | 37 | 2145 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.tests import decorators
IMAGE_NAME = helpers.gen_random_resource_name("image")
@decorators.services_required("sahara")
class TestSaharaImageRegistry(helpers.TestCase):
def setUp(self):
super(TestSaharaImageRegistry, self).setUp()
image_pg = self.home_pg.go_to_compute_imagespage()
image_pg.create_image(IMAGE_NAME)
image_pg.wait_until_image_active(IMAGE_NAME)
def test_image_register_unregister(self):
"""Test the image registration in Sahara."""
image_reg_pg = self.home_pg.go_to_dataprocessing_imageregistrypage()
image_reg_pg.register_image(IMAGE_NAME, self.CONFIG.scenario.ssh_user,
"Test description")
image_reg_pg.wait_until_image_registered(IMAGE_NAME)
self.assertTrue(image_reg_pg.is_image_registered(IMAGE_NAME),
"Image was not registered.")
self.assertFalse(image_reg_pg.is_error_message_present(),
"Error message occurred during image creation.")
image_reg_pg.unregister_image(IMAGE_NAME)
self.assertFalse(image_reg_pg.is_error_message_present())
self.assertFalse(image_reg_pg.is_image_registered(IMAGE_NAME),
"Image was not unregistered.")
def tearDown(self):
image_pg = self.home_pg.go_to_compute_imagespage()
image_pg.delete_image(IMAGE_NAME)
super(TestSaharaImageRegistry, self).tearDown()
| apache-2.0 |
tensorflow/agents | tf_agents/replay_buffers/replay_buffer.py | 1 | 13019 | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF-Agents Replay Buffer API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf
from tf_agents.utils import common
from tensorflow.python.data.util import nest as data_nest # pylint:disable=g-direct-tensorflow-import # TF internal
from tensorflow.python.util import deprecation # pylint:disable=g-direct-tensorflow-import # TF internal
class ReplayBuffer(tf.Module):
"""Abstract base class for TF-Agents replay buffer.
In eager mode, methods modify the buffer or return values directly. In graph
mode, methods return ops that do so when executed.
"""
def __init__(self, data_spec, capacity, stateful_dataset=False):
"""Initializes the replay buffer.
Args:
data_spec: A spec or a list/tuple/nest of specs describing a single item
that can be stored in this buffer
capacity: number of elements that the replay buffer can hold.
stateful_dataset: whether the dataset contains stateful ops or not.
"""
super(ReplayBuffer, self).__init__()
common.check_tf1_allowed()
self._data_spec = data_spec
self._capacity = capacity
self._stateful_dataset = stateful_dataset
@property
def data_spec(self):
"""Returns the spec for items in the replay buffer."""
return self._data_spec
@property
def capacity(self):
"""Returns the capacity of the replay buffer."""
return self._capacity
@property
def stateful_dataset(self):
"""Returns whether the dataset of the replay buffer has stateful ops."""
return self._stateful_dataset
def num_frames(self):
"""Returns the number of frames in the replay buffer."""
return self._num_frames()
def add_batch(self, items):
"""Adds a batch of items to the replay buffer.
Args:
items: An item or list/tuple/nest of items to be added to the replay
buffer. `items` must match the data_spec of this class, with a
batch_size dimension added to the beginning of each tensor/array.
Returns:
Adds `items` to the replay buffer.
"""
return self._add_batch(items)
@deprecation.deprecated(
date=None,
instructions=(
'Use `as_dataset(..., single_deterministic_pass=False) instead.'
))
def get_next(self, sample_batch_size=None, num_steps=None, time_stacked=True):
"""Returns an item or batch of items from the buffer.
Args:
sample_batch_size: (Optional.) An optional batch_size to specify the
number of items to return. If None (default), a single item is returned
which matches the data_spec of this class (without a batch dimension).
Otherwise, a batch of sample_batch_size items is returned, where each
tensor in items will have its first dimension equal to sample_batch_size
and the rest of the dimensions match the corresponding data_spec. See
examples below.
num_steps: (Optional.) Optional way to specify that sub-episodes are
desired. If None (default), in non-episodic replay buffers, a batch of
single items is returned. In episodic buffers, full episodes are
returned (note that sample_batch_size must be None in that case).
Otherwise, a batch of sub-episodes is returned, where a sub-episode is a
sequence of consecutive items in the replay_buffer. The returned tensors
will have first dimension equal to sample_batch_size (if
sample_batch_size is not None), subsequent dimension equal to num_steps,
if time_stacked=True and remaining dimensions which match the data_spec
of this class. See examples below.
time_stacked: (Optional.) Boolean, when true and num_steps > 1 it returns
the items stacked on the time dimension. See examples below for details.
Examples of tensor shapes returned: (B = batch size, T = timestep, D =
data spec) get_next(sample_batch_size=None, num_steps=None,
time_stacked=True)
return shape (non-episodic): [D]
return shape (episodic): [T, D] (T = full length of the episode)
get_next(sample_batch_size=B, num_steps=None, time_stacked=True)
return shape (non-episodic): [B, D]
return shape (episodic): Not supported get_next(sample_batch_size=B,
num_steps=T, time_stacked=True)
return shape: [B, T, D] get_next(sample_batch_size=None, num_steps=T,
time_stacked=False)
return shape: ([D], [D], ..) T tensors in the tuple
get_next(sample_batch_size=B, num_steps=T, time_stacked=False)
return shape: ([B, D], [B, D], ..) T tensors in the tuple
Returns:
A 2-tuple containing:
- An item or sequence of (optionally batched and stacked) items.
- Auxiliary info for the items (i.e. ids, probs).
"""
return self._get_next(sample_batch_size, num_steps, time_stacked)
def as_dataset(self,
sample_batch_size=None,
num_steps=None,
num_parallel_calls=None,
sequence_preprocess_fn=None,
single_deterministic_pass=False):
"""Creates and returns a dataset that returns entries from the buffer.
A single entry from the dataset is the result of the following pipeline:
* Sample sequences from the underlying data store
* (optionally) Process them with `sequence_preprocess_fn`,
* (optionally) Split them into subsequences of length `num_steps`
* (optionally) Batch them into batches of size `sample_batch_size`.
In practice, this pipeline is executed in parallel as much as possible
if `num_parallel_calls != 1`.
Some additional notes:
If `num_steps is None`, different replay buffers will behave differently.
For example, `TFUniformReplayBuffer` will return single time steps without
a time dimension. In contrast, e.g., `EpisodicReplayBuffer` will return
full sequences (since each sequence may be an episode of unknown length,
the outermost shape dimension will be `None`).
If `sample_batch_size is None`, no batching is performed; and there is no
outer batch dimension in the returned Dataset entries. This setting
is useful with variable episode lengths using e.g. `EpisodicReplayBuffer`,
because it allows the user to get full episodes back, and use `tf.data`
to build padded or truncated batches themselves.
    If `single_deterministic_pass == True`, the replay buffer will make
every attempt to ensure every time step is visited once and exactly once
in a deterministic manner (though true determinism depends on the
underlying data store). Additional work may be done to ensure minibatches
do not have multiple rows from the same episode. In some cases, this
may mean arguments like `num_parallel_calls` are ignored.
Args:
sample_batch_size: (Optional.) An optional batch_size to specify the
number of items to return. If None (default), a single item is returned
which matches the data_spec of this class (without a batch dimension).
Otherwise, a batch of sample_batch_size items is returned, where each
tensor in items will have its first dimension equal to sample_batch_size
and the rest of the dimensions match the corresponding data_spec.
num_steps: (Optional.) Optional way to specify that sub-episodes are
desired. If None (default), a batch of single items is returned.
Otherwise, a batch of sub-episodes is returned, where a sub-episode is a
sequence of consecutive items in the replay_buffer. The returned tensors
will have first dimension equal to sample_batch_size (if
sample_batch_size is not None), subsequent dimension equal to num_steps,
and remaining dimensions which match the data_spec of this class.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number elements to process in parallel. If not
specified, elements will be processed sequentially.
sequence_preprocess_fn: (Optional) fn for preprocessing the collected
data before it is split into subsequences of length `num_steps`.
Defined in `TFAgent.preprocess_sequence`. Defaults to pass through.
single_deterministic_pass: Python boolean. If `True`, the dataset will
return a single deterministic pass through its underlying data.
**NOTE**: If the buffer is modified while a Dataset iterator is
iterating over this data, the iterator may miss any new data or
otherwise have subtly invalid data.
Returns:
A dataset of type tf.data.Dataset, elements of which are 2-tuples of:
- An item or sequence of items or batch thereof
- Auxiliary info for the items (i.e. ids, probs).
Raises:
NotImplementedError: If a non-default argument value is not supported.
ValueError: If the data spec contains lists that must be converted to
tuples.
"""
# data_tf.nest.flatten does not flatten python lists, nest.flatten does.
if tf.nest.flatten(self._data_spec) != data_nest.flatten(self._data_spec):
raise ValueError(
'Cannot perform gather; data spec contains lists and this conflicts '
'with gathering operator. Convert any lists to tuples. '
'For example, if your spec looks like [a, b, c], '
'change it to (a, b, c). Spec structure is:\n {}'.format(
tf.nest.map_structure(lambda spec: spec.dtype, self._data_spec)))
if single_deterministic_pass:
ds = self._single_deterministic_pass_dataset(
sample_batch_size=sample_batch_size,
num_steps=num_steps,
sequence_preprocess_fn=sequence_preprocess_fn,
num_parallel_calls=num_parallel_calls)
else:
ds = self._as_dataset(
sample_batch_size=sample_batch_size,
num_steps=num_steps,
sequence_preprocess_fn=sequence_preprocess_fn,
num_parallel_calls=num_parallel_calls)
if self._stateful_dataset:
options = tf.data.Options()
if hasattr(options, 'experimental_allow_stateful'):
options.experimental_allow_stateful = True
ds = ds.with_options(options)
return ds
@deprecation.deprecated(
date=None,
instructions=(
'Use `as_dataset(..., single_deterministic_pass=True)` instead.'
))
def gather_all(self):
"""Returns all the items in buffer.
Returns:
Returns all the items currently in the buffer. Returns a tensor
of shape [B, T, ...] where B = batch size, T = timesteps,
and the remaining shape is the shape spec of the items in the buffer.
"""
return self._gather_all()
def clear(self):
"""Resets the contents of replay buffer.
Returns:
Clears the replay buffer contents.
"""
return self._clear()
# Subclasses must implement these methods.
@abc.abstractmethod
def _num_frames(self):
"""Returns the number of frames in the replay buffer."""
raise NotImplementedError
@abc.abstractmethod
def _add_batch(self, items):
"""Adds a batch of items to the replay buffer."""
raise NotImplementedError
@abc.abstractmethod
def _get_next(self, sample_batch_size, num_steps, time_stacked):
"""Returns an item or batch of items from the buffer."""
raise NotImplementedError
@abc.abstractmethod
def _as_dataset(self,
sample_batch_size,
num_steps,
sequence_preprocess_fn,
num_parallel_calls):
"""Creates and returns a dataset that returns entries from the buffer."""
raise NotImplementedError
@abc.abstractmethod
def _single_deterministic_pass_dataset(self,
sample_batch_size,
num_steps,
sequence_preprocess_fn,
num_parallel_calls):
"""Creates and returns a dataset that returns entries from the buffer."""
raise NotImplementedError
@abc.abstractmethod
def _gather_all(self):
"""Returns all the items in buffer."""
raise NotImplementedError
@abc.abstractmethod
def _clear(self):
"""Clears the replay buffer."""
raise NotImplementedError
| apache-2.0 |
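A hedged usage sketch of the interface documented above, using the `TFUniformReplayBuffer` subclass that the docstrings mention; the data spec, batch size, and shapes are invented for illustration.
import tensorflow as tf
from tf_agents.replay_buffers import tf_uniform_replay_buffer
# Toy spec: a 3-vector observation and a scalar action per frame.
data_spec = (tf.TensorSpec([3], tf.float32, 'observation'),
             tf.TensorSpec([], tf.int32, 'action'))
rb = tf_uniform_replay_buffer.TFUniformReplayBuffer(
    data_spec, batch_size=4, max_length=1000)
# add_batch() takes one frame per parallel environment (outer dim == 4 here).
rb.add_batch((tf.zeros([4, 3]), tf.zeros([4], dtype=tf.int32)))
# Sample minibatches of 2 single frames, as described in as_dataset().
dataset = rb.as_dataset(sample_batch_size=2, num_steps=None)
item, buffer_info = next(iter(dataset))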
vitaly4uk/django | django/contrib/gis/db/backends/spatialite/base.py | 445 | 3615 | import sys
from ctypes.util import find_library
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.base import (
Database, DatabaseWrapper as SQLiteDatabaseWrapper, SQLiteCursorWrapper,
)
from django.utils import six
from .client import SpatiaLiteClient
from .features import DatabaseFeatures
from .introspection import SpatiaLiteIntrospection
from .operations import SpatiaLiteOperations
from .schema import SpatialiteSchemaEditor
class DatabaseWrapper(SQLiteDatabaseWrapper):
SchemaEditorClass = SpatialiteSchemaEditor
def __init__(self, *args, **kwargs):
# Before we get too far, make sure pysqlite 2.5+ is installed.
if Database.version_info < (2, 5, 0):
raise ImproperlyConfigured('Only versions of pysqlite 2.5+ are '
'compatible with SpatiaLite and GeoDjango.')
# Trying to find the location of the SpatiaLite library.
# Here we are figuring out the path to the SpatiaLite library
# (`libspatialite`). If it's not in the system library path (e.g., it
# cannot be found by `ctypes.util.find_library`), then it may be set
# manually in the settings via the `SPATIALITE_LIBRARY_PATH` setting.
self.spatialite_lib = getattr(settings, 'SPATIALITE_LIBRARY_PATH',
find_library('spatialite'))
if not self.spatialite_lib:
raise ImproperlyConfigured('Unable to locate the SpatiaLite library. '
'Make sure it is in your library path, or set '
'SPATIALITE_LIBRARY_PATH in your settings.'
)
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = SpatiaLiteOperations(self)
self.client = SpatiaLiteClient(self)
self.introspection = SpatiaLiteIntrospection(self)
def get_new_connection(self, conn_params):
conn = super(DatabaseWrapper, self).get_new_connection(conn_params)
# Enabling extension loading on the SQLite connection.
try:
conn.enable_load_extension(True)
except AttributeError:
raise ImproperlyConfigured(
'The pysqlite library does not support C extension loading. '
'Both SQLite and pysqlite must be configured to allow '
'the loading of extensions to use SpatiaLite.')
# Loading the SpatiaLite library extension on the connection, and returning
# the created cursor.
cur = conn.cursor(factory=SQLiteCursorWrapper)
try:
cur.execute("SELECT load_extension(%s)", (self.spatialite_lib,))
except Exception as msg:
new_msg = (
'Unable to load the SpatiaLite library extension '
'"%s" because: %s') % (self.spatialite_lib, msg)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg), sys.exc_info()[2])
cur.close()
return conn
def prepare_database(self):
super(DatabaseWrapper, self).prepare_database()
# Check if spatial metadata have been initialized in the database
with self.cursor() as cursor:
cursor.execute("PRAGMA table_info(geometry_columns);")
if cursor.fetchall() == []:
arg = "1" if self.features.supports_initspatialmetadata_in_one_transaction else ""
cursor.execute("SELECT InitSpatialMetaData(%s)" % arg)
| bsd-3-clause |
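For reference, a sketch of the settings this backend reads; the library path below is an assumption and varies by platform and SpatiaLite build.
# settings.py (illustrative values only)
SPATIALITE_LIBRARY_PATH = '/usr/lib/x86_64-linux-gnu/mod_spatialite.so'  # assumed path
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.spatialite',
        'NAME': 'geodata.sqlite3',
    },
}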
salguarnieri/intellij-community | python/lib/Lib/xml/sax/__init__.py | 117 | 3413 | """Simple API for XML (SAX) implementation for Python.
This module provides an implementation of the SAX 2 interface;
information about the Java version of the interface can be found at
http://www.megginson.com/SAX/. The Python version of the interface is
documented at <...>.
This package contains the following modules:
handler -- Base classes and constants which define the SAX 2 API for
the 'client-side' of SAX for Python.
saxutils -- Implementation of the convenience classes commonly used to
work with SAX.
xmlreader -- Base classes and constants which define the SAX 2 API for
the parsers used with SAX for Python.
drivers2 -- Contains the driver for that wraps a Java sax implementation in python
objects.
"""
from xmlreader import InputSource
from handler import ContentHandler, ErrorHandler
from _exceptions import SAXException, SAXNotRecognizedException, \
SAXParseException, SAXNotSupportedException, \
SAXReaderNotAvailable
def parse(source, handler, errorHandler=ErrorHandler()):
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
parser.parse(source)
def parseString(string, handler, errorHandler=ErrorHandler()):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
if errorHandler is None:
errorHandler = ErrorHandler()
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
inpsrc = InputSource()
inpsrc.setByteStream(StringIO(string))
parser.parse(inpsrc)
# this is the parser list used by the make_parser function if no
# alternatives are given as parameters to the function
default_parser_list = ["xml.sax.drivers2.drv_javasax"]
# tell modulefinder that importing sax potentially imports expatreader
_false = 0
if _false:
import xml.sax.drivers2.drv_javasax
import os, sys
if os.environ.has_key("PY_SAX_PARSER"):
default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
del os
_key = "python.xml.sax.parser"
if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
default_parser_list = sys.registry.getProperty(_key).split(",")
def make_parser(parser_list = []):
"""Creates and returns a SAX parser.
Creates the first parser it is able to instantiate of the ones
given in the list created by doing parser_list +
default_parser_list. The lists must contain the names of Python
modules containing both a SAX parser and a create_parser function."""
for parser_name in parser_list + default_parser_list:
try:
return _create_parser(parser_name)
except ImportError,e:
import sys
if sys.modules.has_key(parser_name):
# The parser module was found, but importing it
# failed unexpectedly, pass this exception through
raise
except SAXReaderNotAvailable:
# The parser module detected that it won't work properly,
# so try the next one
pass
raise SAXReaderNotAvailable("No parsers found", None)
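# Illustrative usage, not part of the original module: a caller can try its own
# driver module before the defaults listed above; the module name below is
# made up.
#
#     parser = make_parser(['xml.sax.drivers2.drv_exampledriver'])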
# --- Internal utility methods used by make_parser
def _create_parser(parser_name):
drv_module = __import__(parser_name,{},{},['create_parser'])
return drv_module.create_parser()
del sys
| apache-2.0 |
keisuke-umezawa/chainer | tests/chainer_tests/initializer_tests/test_constant.py | 4 | 3718 | import unittest
from chainer import backend
from chainer.backends import cuda
from chainer import initializers
from chainer import testing
from chainer.testing import attr
import numpy
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestIdentity(unittest.TestCase):
scale = 0.1
shape = (2, 2)
def setUp(self):
self.check_options = {}
if self.dtype == numpy.float16:
self.check_options = {'atol': 1e-4, 'rtol': 1e-3}
def check_initializer(self, w):
initializer = initializers.Identity(scale=self.scale)
initializer(w)
testing.assert_allclose(
w, self.scale * numpy.identity(len(self.shape)),
**self.check_options)
def test_initializer_cpu(self):
w = numpy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
@attr.gpu
def test_initializer_gpu(self):
w = cuda.cupy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
def check_shaped_initializer(self, xp):
initializer = initializers.Identity(
scale=self.scale, dtype=self.dtype)
w = initializers.generate_array(initializer, self.shape, xp)
self.assertIs(backend.get_array_module(w), xp)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
testing.assert_allclose(
w, self.scale * numpy.identity(len(self.shape)),
**self.check_options)
def test_shaped_initializer_cpu(self):
self.check_shaped_initializer(numpy)
@attr.gpu
def test_shaped_initializer_gpu(self):
self.check_shaped_initializer(cuda.cupy)
@testing.parameterize(
{'shape': (2, 3)},
{'shape': (2, 2, 4)},
{'shape': ()},
{'shape': 0})
class TestIdentityInvalid(unittest.TestCase):
def setUp(self):
self.initializer = initializers.Identity()
def test_invalid_shape(self):
w = numpy.empty(self.shape, dtype=numpy.float32)
with self.assertRaises(ValueError):
self.initializer(w)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestConstant(unittest.TestCase):
fill_value = 0.1
shape = (2, 3)
def setUp(self):
self.check_options = {}
if self.dtype == numpy.float16:
self.check_options = {'atol': 1e-4, 'rtol': 1e-3}
def check_initializer(self, w):
initializer = initializers.Constant(fill_value=self.fill_value)
initializer(w)
testing.assert_allclose(
w, numpy.full(self.shape, self.fill_value),
**self.check_options)
def test_initializer_cpu(self):
w = numpy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
@attr.gpu
def test_initializer_gpu(self):
w = cuda.cupy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
def check_shaped_initializer(self, xp):
initializer = initializers.Constant(
fill_value=self.fill_value, dtype=self.dtype)
w = initializers.generate_array(initializer, self.shape, xp)
self.assertIs(backend.get_array_module(w), xp)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
testing.assert_allclose(
w, numpy.full(self.shape, self.fill_value),
**self.check_options)
def test_shaped_initializer_cpu(self):
self.check_shaped_initializer(numpy)
@attr.gpu
def test_shaped_initializer_gpu(self):
self.check_shaped_initializer(cuda.cupy)
testing.run_module(__name__, __file__)
| mit |
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Lib/xml/dom/minicompat.py | 209 | 3330 | """Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
try:
unicode
except NameError:
StringTypes = type(''),
else:
StringTypes = type(''), type(unicode(''))
class NodeList(list):
__slots__ = ()
def item(self, index):
if 0 <= index < len(self):
return self[index]
def _get_length(self):
return len(self)
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def __getstate__(self):
return list(self)
def __setstate__(self, state):
self[:] = state
class EmptyNodeList(tuple):
__slots__ = ()
def __add__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def __radd__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def item(self, index):
return None
def _get_length(self):
return 0
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
get = getattr(klass, ("_get_" + name)).im_func
def set(self, value, name=name):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute " + repr(name))
assert not hasattr(klass, "_set_" + name), \
"expected not to find _set_" + name
prop = property(get, set, doc=doc)
setattr(klass, name, prop)
| gpl-3.0 |
zhjunlang/kbengine | kbe/res/scripts/common/Lib/distutils/core.py | 80 | 8909 | """distutils.core
The only module that needs to be imported to use the Distutils; provides
the 'setup' function (which is to be called from the setup script). Also
indirectly provides the Distribution and Command classes, although they are
really defined in distutils.dist and distutils.cmd.
"""
import os
import sys
from distutils.debug import DEBUG
from distutils.errors import *
# Mainly import these so setup scripts can "from distutils.core import" them.
from distutils.dist import Distribution
from distutils.cmd import Command
from distutils.config import PyPIRCCommand
from distutils.extension import Extension
# This is a barebones help message displayed when the user
# runs the setup script with no arguments at all. More useful help
# is generated with various --help options: global help, list commands,
# and per-command help.
USAGE = """\
usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...]
or: %(script)s --help [cmd1 cmd2 ...]
or: %(script)s --help-commands
or: %(script)s cmd --help
"""
def gen_usage (script_name):
script = os.path.basename(script_name)
return USAGE % vars()
# Some mild magic to control the behaviour of 'setup()' from 'run_setup()'.
_setup_stop_after = None
_setup_distribution = None
# Legal keyword arguments for the setup() function
setup_keywords = ('distclass', 'script_name', 'script_args', 'options',
'name', 'version', 'author', 'author_email',
'maintainer', 'maintainer_email', 'url', 'license',
'description', 'long_description', 'keywords',
'platforms', 'classifiers', 'download_url',
'requires', 'provides', 'obsoletes',
)
# Legal keyword arguments for the Extension constructor
extension_keywords = ('name', 'sources', 'include_dirs',
'define_macros', 'undef_macros',
'library_dirs', 'libraries', 'runtime_library_dirs',
'extra_objects', 'extra_compile_args', 'extra_link_args',
'swig_opts', 'export_symbols', 'depends', 'language')
def setup (**attrs):
"""The gateway to the Distutils: do everything your setup script needs
to do, in a highly flexible and user-driven way. Briefly: create a
Distribution instance; find and parse config files; parse the command
line; run each Distutils command found there, customized by the options
supplied to 'setup()' (as keyword arguments), in config files, and on
the command line.
The Distribution instance might be an instance of a class supplied via
the 'distclass' keyword argument to 'setup'; if no such class is
supplied, then the Distribution class (in dist.py) is instantiated.
All other arguments to 'setup' (except for 'cmdclass') are used to set
attributes of the Distribution instance.
The 'cmdclass' argument, if supplied, is a dictionary mapping command
names to command classes. Each command encountered on the command line
will be turned into a command class, which is in turn instantiated; any
class found in 'cmdclass' is used in place of the default, which is
(for command 'foo_bar') class 'foo_bar' in module
'distutils.command.foo_bar'. The command class must provide a
'user_options' attribute which is a list of option specifiers for
'distutils.fancy_getopt'. Any command-line options between the current
and the next command are used to set attributes of the current command
object.
When the entire command-line has been successfully parsed, calls the
'run()' method on each command object in turn. This method will be
driven entirely by the Distribution object (which each command object
has a reference to, thanks to its constructor), and the
command-specific options that became attributes of each command
object.
"""
global _setup_stop_after, _setup_distribution
# Determine the distribution class -- either caller-supplied or
# our Distribution (see below).
klass = attrs.get('distclass')
if klass:
del attrs['distclass']
else:
klass = Distribution
if 'script_name' not in attrs:
attrs['script_name'] = os.path.basename(sys.argv[0])
if 'script_args' not in attrs:
attrs['script_args'] = sys.argv[1:]
# Create the Distribution instance, using the remaining arguments
# (ie. everything except distclass) to initialize it
try:
_setup_distribution = dist = klass(attrs)
except DistutilsSetupError as msg:
if 'name' not in attrs:
raise SystemExit("error in setup command: %s" % msg)
else:
raise SystemExit("error in %s setup command: %s" % \
(attrs['name'], msg))
if _setup_stop_after == "init":
return dist
# Find and parse the config file(s): they will override options from
# the setup script, but be overridden by the command line.
dist.parse_config_files()
if DEBUG:
print("options (after parsing config files):")
dist.dump_option_dicts()
if _setup_stop_after == "config":
return dist
# Parse the command line and override config files; any
# command-line errors are the end user's fault, so turn them into
# SystemExit to suppress tracebacks.
try:
ok = dist.parse_command_line()
except DistutilsArgError as msg:
raise SystemExit(gen_usage(dist.script_name) + "\nerror: %s" % msg)
if DEBUG:
print("options (after parsing command line):")
dist.dump_option_dicts()
if _setup_stop_after == "commandline":
return dist
# And finally, run all the commands found on the command line.
if ok:
try:
dist.run_commands()
except KeyboardInterrupt:
raise SystemExit("interrupted")
except OSError as exc:
if DEBUG:
sys.stderr.write("error: %s\n" % (exc,))
raise
else:
raise SystemExit("error: %s" % (exc,))
except (DistutilsError,
CCompilerError) as msg:
if DEBUG:
raise
else:
raise SystemExit("error: " + str(msg))
return dist
# setup ()
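# Illustrative example, not part of distutils itself: a minimal setup script
# built around the setup() call described above. The package name, version
# and module list are made-up values.
#
#     from distutils.core import setup
#
#     setup(name='example',
#           version='1.0',
#           description='An example package',
#           py_modules=['example'])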
def run_setup (script_name, script_args=None, stop_after="run"):
"""Run a setup script in a somewhat controlled environment, and
return the Distribution instance that drives things. This is useful
if you need to find out the distribution meta-data (passed as
keyword args from 'script' to 'setup()', or the contents of the
config files or command-line.
'script_name' is a file that will be read and run with 'exec()';
'sys.argv[0]' will be replaced with 'script' for the duration of the
call. 'script_args' is a list of strings; if supplied,
'sys.argv[1:]' will be replaced by 'script_args' for the duration of
the call.
'stop_after' tells 'setup()' when to stop processing; possible
values:
init
stop after the Distribution instance has been created and
populated with the keyword arguments to 'setup()'
config
stop after config files have been parsed (and their data
stored in the Distribution instance)
commandline
stop after the command-line ('sys.argv[1:]' or 'script_args')
have been parsed (and the data stored in the Distribution)
run [default]
stop after all commands have been run (the same as if 'setup()'
      had been called in the usual way)
Returns the Distribution instance, which provides all information
used to drive the Distutils.
"""
if stop_after not in ('init', 'config', 'commandline', 'run'):
raise ValueError("invalid value for 'stop_after': %r" % (stop_after,))
global _setup_stop_after, _setup_distribution
_setup_stop_after = stop_after
save_argv = sys.argv
g = {'__file__': script_name}
l = {}
try:
try:
sys.argv[0] = script_name
if script_args is not None:
sys.argv[1:] = script_args
with open(script_name, 'rb') as f:
exec(f.read(), g, l)
finally:
sys.argv = save_argv
_setup_stop_after = None
except SystemExit:
# Hmm, should we do something if exiting with a non-zero code
# (ie. error)?
pass
except:
raise
if _setup_distribution is None:
raise RuntimeError(("'distutils.core.setup()' was never called -- "
"perhaps '%s' is not a Distutils setup script?") % \
script_name)
# I wonder if the setup script's namespace -- g and l -- would be of
# any interest to callers?
#print "_setup_distribution:", _setup_distribution
return _setup_distribution
# run_setup ()
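# Illustrative example, not part of distutils itself: using run_setup() to read
# a script's metadata without running any of its commands. It assumes a
# 'setup.py' exists in the current directory.
#
#     dist = run_setup('setup.py', stop_after='config')
#     print(dist.get_name(), dist.get_version())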
| lgpl-3.0 |
skuarch/namebench | nb_third_party/dns/tsigkeyring.py | 248 | 1658 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""A place to store TSIG keys."""
import base64
import dns.name
def from_text(textring):
"""Convert a dictionary containing (textual DNS name, base64 secret) pairs
into a binary keyring which has (dns.name.Name, binary secret) pairs.
@rtype: dict"""
keyring = {}
for keytext in textring:
keyname = dns.name.from_text(keytext)
secret = base64.decodestring(textring[keytext])
keyring[keyname] = secret
return keyring
def to_text(keyring):
"""Convert a dictionary containing (dns.name.Name, binary secret) pairs
into a text keyring which has (textual DNS name, base64 secret) pairs.
@rtype: dict"""
textring = {}
for keyname in keyring:
keytext = dns.name.to_text(keyname)
secret = base64.encodestring(keyring[keyname])
textring[keytext] = secret
return textring
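# Illustrative usage, not part of the original module; the key name and base64
# secret below are made-up values.
#
#     keyring = from_text({'keyname.example.': 'MTIzNDU2Nzg5MA=='})
#     # keyring now maps dns.name.Name objects to binary secrets and can be
#     # passed to dns.query or dns.update calls that accept a keyring.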
| apache-2.0 |
kemalakyol48/python-for-android | python-modules/twisted/twisted/news/database.py | 49 | 33743 | # -*- test-case-name: twisted.news.test -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
News server backend implementations.
"""
import getpass, pickle, time, socket
import os
import StringIO
from email.Message import Message
from email.Generator import Generator
from zope.interface import implements, Interface
from twisted.news.nntp import NNTPError
from twisted.mail import smtp
from twisted.internet import defer
from twisted.enterprise import adbapi
from twisted.persisted import dirdbm
from twisted.python.hashlib import md5
ERR_NOGROUP, ERR_NOARTICLE = range(2, 4) # XXX - put NNTP values here (I guess?)
OVERVIEW_FMT = [
'Subject', 'From', 'Date', 'Message-ID', 'References',
'Bytes', 'Lines', 'Xref'
]
def hexdigest(md5): #XXX: argh. 1.5.2 doesn't have this.
return ''.join(map(lambda x: hex(ord(x))[2:], md5.digest()))
class Article:
def __init__(self, head, body):
self.body = body
self.headers = {}
header = None
for line in head.split('\r\n'):
if line[0] in ' \t':
i = list(self.headers[header])
i[1] += '\r\n' + line
else:
i = line.split(': ', 1)
header = i[0].lower()
self.headers[header] = tuple(i)
if not self.getHeader('Message-ID'):
s = str(time.time()) + self.body
id = hexdigest(md5(s)) + '@' + socket.gethostname()
self.putHeader('Message-ID', '<%s>' % id)
if not self.getHeader('Bytes'):
self.putHeader('Bytes', str(len(self.body)))
if not self.getHeader('Lines'):
self.putHeader('Lines', str(self.body.count('\n')))
if not self.getHeader('Date'):
self.putHeader('Date', time.ctime(time.time()))
def getHeader(self, header):
h = header.lower()
if self.headers.has_key(h):
return self.headers[h][1]
else:
return ''
def putHeader(self, header, value):
self.headers[header.lower()] = (header, value)
def textHeaders(self):
headers = []
for i in self.headers.values():
headers.append('%s: %s' % i)
return '\r\n'.join(headers) + '\r\n'
def overview(self):
xover = []
for i in OVERVIEW_FMT:
xover.append(self.getHeader(i))
return xover
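# Illustrative sketch, not part of the original module: building an Article from
# a raw message split at the blank line, the way the postRequest()
# implementations below do. The header values are made up.
#
#     message = 'From: [email protected]\r\nNewsgroups: alt.test\r\n\r\nbody text'
#     cleave = message.find('\r\n\r\n')
#     article = Article(message[:cleave], message[cleave + 4:])
#     article.getHeader('Newsgroups')   # -> 'alt.test'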
class NewsServerError(Exception):
pass
class INewsStorage(Interface):
"""
An interface for storing and requesting news articles
"""
def listRequest():
"""
Returns a deferred whose callback will be passed a list of 4-tuples
containing (name, max index, min index, flags) for each news group
"""
def subscriptionRequest():
"""
Returns a deferred whose callback will be passed the list of
recommended subscription groups for new server users
"""
def postRequest(message):
"""
Returns a deferred whose callback will be invoked if 'message'
is successfully posted to one or more specified groups and
whose errback will be invoked otherwise.
"""
def overviewRequest():
"""
        Returns a deferred whose callback will be passed a list of
headers describing this server's overview format.
"""
def xoverRequest(group, low, high):
"""
Returns a deferred whose callback will be passed a list of xover
headers for the given group over the given range. If low is None,
the range starts at the first article. If high is None, the range
ends at the last article.
"""
def xhdrRequest(group, low, high, header):
"""
Returns a deferred whose callback will be passed a list of XHDR data
for the given group over the given range. If low is None,
the range starts at the first article. If high is None, the range
ends at the last article.
"""
def listGroupRequest(group):
"""
Returns a deferred whose callback will be passed a two-tuple of
(group name, [article indices])
"""
def groupRequest(group):
"""
Returns a deferred whose callback will be passed a five-tuple of
(group name, article count, highest index, lowest index, group flags)
"""
def articleExistsRequest(id):
"""
Returns a deferred whose callback will be passed with a true value
if a message with the specified Message-ID exists in the database
and with a false value otherwise.
"""
def articleRequest(group, index, id = None):
"""
Returns a deferred whose callback will be passed a file-like object
containing the full article text (headers and body) for the article
of the specified index in the specified group, and whose errback
will be invoked if the article or group does not exist. If id is
not None, index is ignored and the article with the given Message-ID
will be returned instead, along with its index in the specified
group.
"""
def headRequest(group, index):
"""
Returns a deferred whose callback will be passed the header for
the article of the specified index in the specified group, and
whose errback will be invoked if the article or group does not
exist.
"""
def bodyRequest(group, index):
"""
Returns a deferred whose callback will be passed the body for
the article of the specified index in the specified group, and
whose errback will be invoked if the article or group does not
exist.
"""
class NewsStorage:
"""
Backwards compatibility class -- There is no reason to inherit from this,
just implement INewsStorage instead.
"""
def listRequest(self):
raise NotImplementedError()
def subscriptionRequest(self):
raise NotImplementedError()
def postRequest(self, message):
raise NotImplementedError()
def overviewRequest(self):
return defer.succeed(OVERVIEW_FMT)
def xoverRequest(self, group, low, high):
raise NotImplementedError()
def xhdrRequest(self, group, low, high, header):
raise NotImplementedError()
def listGroupRequest(self, group):
raise NotImplementedError()
def groupRequest(self, group):
raise NotImplementedError()
def articleExistsRequest(self, id):
raise NotImplementedError()
def articleRequest(self, group, index, id = None):
raise NotImplementedError()
def headRequest(self, group, index):
raise NotImplementedError()
def bodyRequest(self, group, index):
raise NotImplementedError()
class _ModerationMixin:
"""
Storage implementations can inherit from this class to get the easy-to-use
C{notifyModerators} method which will take care of sending messages which
require moderation to a list of moderators.
"""
sendmail = staticmethod(smtp.sendmail)
def notifyModerators(self, moderators, article):
"""
Send an article to a list of group moderators to be moderated.
@param moderators: A C{list} of C{str} giving RFC 2821 addresses of
group moderators to notify.
@param article: The article requiring moderation.
@type article: L{Article}
@return: A L{Deferred} which fires with the result of sending the email.
"""
# Moderated postings go through as long as they have an Approved
# header, regardless of what the value is
group = article.getHeader('Newsgroups')
subject = article.getHeader('Subject')
if self._sender is None:
# This case should really go away. This isn't a good default.
sender = 'twisted-news@' + socket.gethostname()
else:
sender = self._sender
msg = Message()
msg['Message-ID'] = smtp.messageid()
msg['From'] = sender
msg['To'] = ', '.join(moderators)
msg['Subject'] = 'Moderate new %s message: %s' % (group, subject)
msg['Content-Type'] = 'message/rfc822'
payload = Message()
for header, value in article.headers.values():
payload.add_header(header, value)
payload.set_payload(article.body)
msg.attach(payload)
out = StringIO.StringIO()
gen = Generator(out, False)
gen.flatten(msg)
msg = out.getvalue()
return self.sendmail(self._mailhost, sender, moderators, msg)
class PickleStorage(_ModerationMixin):
"""
A trivial NewsStorage implementation using pickles
Contains numerous flaws and is generally unsuitable for any
real applications. Consider yourself warned!
"""
implements(INewsStorage)
sharedDBs = {}
def __init__(self, filename, groups=None, moderators=(),
mailhost=None, sender=None):
"""
@param mailhost: A C{str} giving the mail exchange host which will
accept moderation emails from this server. Must accept emails
destined for any address specified as a moderator.
@param sender: A C{str} giving the address which will be used as the
sender of any moderation email generated by this server.
"""
self.datafile = filename
self.load(filename, groups, moderators)
self._mailhost = mailhost
self._sender = sender
def getModerators(self, groups):
# first see if any groups are moderated. if so, nothing gets posted,
        # but the whole message gets forwarded to the moderator address
        moderators = []
        for group in groups:
            moderators.append(self.db['moderators'].get(group, None))
return filter(None, moderators)
def listRequest(self):
"Returns a list of 4-tuples: (name, max index, min index, flags)"
l = self.db['groups']
r = []
for i in l:
if len(self.db[i].keys()):
low = min(self.db[i].keys())
high = max(self.db[i].keys()) + 1
else:
low = high = 0
if self.db['moderators'].has_key(i):
flags = 'm'
else:
flags = 'y'
r.append((i, high, low, flags))
return defer.succeed(r)
def subscriptionRequest(self):
return defer.succeed(['alt.test'])
def postRequest(self, message):
cleave = message.find('\r\n\r\n')
headers, article = message[:cleave], message[cleave + 4:]
a = Article(headers, article)
groups = a.getHeader('Newsgroups').split()
xref = []
# Check moderated status
moderators = self.getModerators(groups)
if moderators and not a.getHeader('Approved'):
return self.notifyModerators(moderators, a)
for group in groups:
if self.db.has_key(group):
if len(self.db[group].keys()):
index = max(self.db[group].keys()) + 1
else:
index = 1
xref.append((group, str(index)))
self.db[group][index] = a
if len(xref) == 0:
return defer.fail(None)
a.putHeader('Xref', '%s %s' % (
socket.gethostname().split()[0],
''.join(map(lambda x: ':'.join(x), xref))
))
self.flush()
return defer.succeed(None)
def overviewRequest(self):
return defer.succeed(OVERVIEW_FMT)
def xoverRequest(self, group, low, high):
if not self.db.has_key(group):
return defer.succeed([])
r = []
for i in self.db[group].keys():
if (low is None or i >= low) and (high is None or i <= high):
r.append([str(i)] + self.db[group][i].overview())
return defer.succeed(r)
def xhdrRequest(self, group, low, high, header):
if not self.db.has_key(group):
return defer.succeed([])
r = []
for i in self.db[group].keys():
            if (low is None or i >= low) and (high is None or i <= high):
r.append((i, self.db[group][i].getHeader(header)))
return defer.succeed(r)
def listGroupRequest(self, group):
if self.db.has_key(group):
return defer.succeed((group, self.db[group].keys()))
else:
return defer.fail(None)
def groupRequest(self, group):
if self.db.has_key(group):
if len(self.db[group].keys()):
num = len(self.db[group].keys())
low = min(self.db[group].keys())
high = max(self.db[group].keys())
else:
num = low = high = 0
flags = 'y'
return defer.succeed((group, num, high, low, flags))
else:
return defer.fail(ERR_NOGROUP)
def articleExistsRequest(self, id):
for group in self.db['groups']:
for a in self.db[group].values():
if a.getHeader('Message-ID') == id:
return defer.succeed(1)
return defer.succeed(0)
def articleRequest(self, group, index, id = None):
if id is not None:
raise NotImplementedError
if self.db.has_key(group):
if self.db[group].has_key(index):
a = self.db[group][index]
return defer.succeed((
index,
a.getHeader('Message-ID'),
StringIO.StringIO(a.textHeaders() + '\r\n' + a.body)
))
else:
return defer.fail(ERR_NOARTICLE)
else:
return defer.fail(ERR_NOGROUP)
def headRequest(self, group, index):
if self.db.has_key(group):
if self.db[group].has_key(index):
a = self.db[group][index]
return defer.succeed((index, a.getHeader('Message-ID'), a.textHeaders()))
else:
return defer.fail(ERR_NOARTICLE)
else:
return defer.fail(ERR_NOGROUP)
def bodyRequest(self, group, index):
if self.db.has_key(group):
if self.db[group].has_key(index):
a = self.db[group][index]
return defer.succeed((index, a.getHeader('Message-ID'), StringIO.StringIO(a.body)))
else:
return defer.fail(ERR_NOARTICLE)
else:
return defer.fail(ERR_NOGROUP)
def flush(self):
f = open(self.datafile, 'w')
pickle.dump(self.db, f)
f.close()
def load(self, filename, groups = None, moderators = ()):
if PickleStorage.sharedDBs.has_key(filename):
self.db = PickleStorage.sharedDBs[filename]
else:
try:
self.db = pickle.load(open(filename))
PickleStorage.sharedDBs[filename] = self.db
except IOError:
self.db = PickleStorage.sharedDBs[filename] = {}
self.db['groups'] = groups
if groups is not None:
for i in groups:
self.db[i] = {}
self.db['moderators'] = dict(moderators)
self.flush()
class Group:
name = None
flags = ''
minArticle = 1
maxArticle = 0
articles = None
def __init__(self, name, flags = 'y'):
self.name = name
self.flags = flags
self.articles = {}
class NewsShelf(_ModerationMixin):
"""
    A NewsStorage implementation using Twisted's dirdbm persistence module.
"""
implements(INewsStorage)
def __init__(self, mailhost, path, sender=None):
"""
@param mailhost: A C{str} giving the mail exchange host which will
accept moderation emails from this server. Must accept emails
destined for any address specified as a moderator.
@param sender: A C{str} giving the address which will be used as the
sender of any moderation email generated by this server.
"""
self.path = path
self._mailhost = self.mailhost = mailhost
self._sender = sender
if not os.path.exists(path):
os.mkdir(path)
self.dbm = dirdbm.Shelf(os.path.join(path, "newsshelf"))
if not len(self.dbm.keys()):
self.initialize()
def initialize(self):
# A dictionary of group name/Group instance items
self.dbm['groups'] = dirdbm.Shelf(os.path.join(self.path, 'groups'))
# A dictionary of group name/email address
self.dbm['moderators'] = dirdbm.Shelf(os.path.join(self.path, 'moderators'))
# A list of group names
self.dbm['subscriptions'] = []
# A dictionary of MessageID strings/xref lists
self.dbm['Message-IDs'] = dirdbm.Shelf(os.path.join(self.path, 'Message-IDs'))
def addGroup(self, name, flags):
self.dbm['groups'][name] = Group(name, flags)
def addSubscription(self, name):
self.dbm['subscriptions'] = self.dbm['subscriptions'] + [name]
def addModerator(self, group, email):
self.dbm['moderators'][group] = email
def listRequest(self):
result = []
for g in self.dbm['groups'].values():
result.append((g.name, g.maxArticle, g.minArticle, g.flags))
return defer.succeed(result)
def subscriptionRequest(self):
return defer.succeed(self.dbm['subscriptions'])
def getModerator(self, groups):
# first see if any groups are moderated. if so, nothing gets posted,
        # but the whole message gets forwarded to the moderator address
for group in groups:
try:
return self.dbm['moderators'][group]
except KeyError:
pass
return None
def notifyModerator(self, moderator, article):
"""
Notify a single moderator about an article requiring moderation.
C{notifyModerators} should be preferred.
"""
return self.notifyModerators([moderator], article)
def postRequest(self, message):
cleave = message.find('\r\n\r\n')
headers, article = message[:cleave], message[cleave + 4:]
article = Article(headers, article)
groups = article.getHeader('Newsgroups').split()
xref = []
# Check for moderated status
moderator = self.getModerator(groups)
if moderator and not article.getHeader('Approved'):
return self.notifyModerators([moderator], article)
for group in groups:
try:
g = self.dbm['groups'][group]
except KeyError:
pass
else:
index = g.maxArticle + 1
g.maxArticle += 1
g.articles[index] = article
xref.append((group, str(index)))
self.dbm['groups'][group] = g
if not xref:
return defer.fail(NewsServerError("No groups carried: " + ' '.join(groups)))
article.putHeader('Xref', '%s %s' % (socket.gethostname().split()[0], ' '.join(map(lambda x: ':'.join(x), xref))))
self.dbm['Message-IDs'][article.getHeader('Message-ID')] = xref
return defer.succeed(None)
def overviewRequest(self):
return defer.succeed(OVERVIEW_FMT)
def xoverRequest(self, group, low, high):
if not self.dbm['groups'].has_key(group):
return defer.succeed([])
if low is None:
low = 0
if high is None:
high = self.dbm['groups'][group].maxArticle
r = []
for i in range(low, high + 1):
if self.dbm['groups'][group].articles.has_key(i):
r.append([str(i)] + self.dbm['groups'][group].articles[i].overview())
return defer.succeed(r)
def xhdrRequest(self, group, low, high, header):
if group not in self.dbm['groups']:
return defer.succeed([])
if low is None:
low = 0
if high is None:
high = self.dbm['groups'][group].maxArticle
r = []
for i in range(low, high + 1):
if self.dbm['groups'][group].articles.has_key(i):
r.append((i, self.dbm['groups'][group].articles[i].getHeader(header)))
return defer.succeed(r)
def listGroupRequest(self, group):
if self.dbm['groups'].has_key(group):
return defer.succeed((group, self.dbm['groups'][group].articles.keys()))
return defer.fail(NewsServerError("No such group: " + group))
def groupRequest(self, group):
try:
g = self.dbm['groups'][group]
except KeyError:
return defer.fail(NewsServerError("No such group: " + group))
else:
flags = g.flags
low = g.minArticle
high = g.maxArticle
num = high - low + 1
return defer.succeed((group, num, high, low, flags))
def articleExistsRequest(self, id):
return defer.succeed(id in self.dbm['Message-IDs'])
def articleRequest(self, group, index, id = None):
if id is not None:
try:
xref = self.dbm['Message-IDs'][id]
except KeyError:
return defer.fail(NewsServerError("No such article: " + id))
else:
group, index = xref[0]
index = int(index)
try:
a = self.dbm['groups'][group].articles[index]
except KeyError:
return defer.fail(NewsServerError("No such group: " + group))
else:
return defer.succeed((
index,
a.getHeader('Message-ID'),
StringIO.StringIO(a.textHeaders() + '\r\n' + a.body)
))
def headRequest(self, group, index, id = None):
if id is not None:
try:
xref = self.dbm['Message-IDs'][id]
except KeyError:
return defer.fail(NewsServerError("No such article: " + id))
else:
group, index = xref[0]
index = int(index)
try:
a = self.dbm['groups'][group].articles[index]
except KeyError:
return defer.fail(NewsServerError("No such group: " + group))
else:
return defer.succeed((index, a.getHeader('Message-ID'), a.textHeaders()))
def bodyRequest(self, group, index, id = None):
if id is not None:
try:
xref = self.dbm['Message-IDs'][id]
except KeyError:
return defer.fail(NewsServerError("No such article: " + id))
else:
group, index = xref[0]
index = int(index)
try:
a = self.dbm['groups'][group].articles[index]
except KeyError:
return defer.fail(NewsServerError("No such group: " + group))
else:
return defer.succeed((index, a.getHeader('Message-ID'), StringIO.StringIO(a.body)))
class NewsStorageAugmentation:
"""
A NewsStorage implementation using Twisted's asynchronous DB-API
"""
implements(INewsStorage)
schema = """
CREATE TABLE groups (
group_id SERIAL,
name VARCHAR(80) NOT NULL,
flags INTEGER DEFAULT 0 NOT NULL
);
CREATE UNIQUE INDEX group_id_index ON groups (group_id);
CREATE UNIQUE INDEX name_id_index ON groups (name);
CREATE TABLE articles (
article_id SERIAL,
message_id TEXT,
header TEXT,
body TEXT
);
CREATE UNIQUE INDEX article_id_index ON articles (article_id);
CREATE UNIQUE INDEX article_message_index ON articles (message_id);
CREATE TABLE postings (
group_id INTEGER,
article_id INTEGER,
article_index INTEGER NOT NULL
);
CREATE UNIQUE INDEX posting_article_index ON postings (article_id);
CREATE TABLE subscriptions (
group_id INTEGER
);
CREATE TABLE overview (
header TEXT
);
"""
def __init__(self, info):
self.info = info
self.dbpool = adbapi.ConnectionPool(**self.info)
def __setstate__(self, state):
self.__dict__ = state
self.info['password'] = getpass.getpass('Database password for %s: ' % (self.info['user'],))
self.dbpool = adbapi.ConnectionPool(**self.info)
del self.info['password']
def listRequest(self):
# COALESCE may not be totally portable
# it is shorthand for
# CASE WHEN (first parameter) IS NOT NULL then (first parameter) ELSE (second parameter) END
sql = """
SELECT groups.name,
COALESCE(MAX(postings.article_index), 0),
COALESCE(MIN(postings.article_index), 0),
groups.flags
FROM groups LEFT OUTER JOIN postings
ON postings.group_id = groups.group_id
GROUP BY groups.name, groups.flags
ORDER BY groups.name
"""
return self.dbpool.runQuery(sql)
def subscriptionRequest(self):
sql = """
SELECT groups.name FROM groups,subscriptions WHERE groups.group_id = subscriptions.group_id
"""
return self.dbpool.runQuery(sql)
def postRequest(self, message):
cleave = message.find('\r\n\r\n')
headers, article = message[:cleave], message[cleave + 4:]
article = Article(headers, article)
return self.dbpool.runInteraction(self._doPost, article)
def _doPost(self, transaction, article):
# Get the group ids
groups = article.getHeader('Newsgroups').split()
if not len(groups):
raise NNTPError('Missing Newsgroups header')
sql = """
SELECT name, group_id FROM groups
WHERE name IN (%s)
""" % (', '.join([("'%s'" % (adbapi.safe(group),)) for group in groups]),)
transaction.execute(sql)
result = transaction.fetchall()
# No relevant groups, bye bye!
if not len(result):
raise NNTPError('None of groups in Newsgroup header carried')
# Got some groups, now find the indices this article will have in each
sql = """
SELECT groups.group_id, COALESCE(MAX(postings.article_index), 0) + 1
FROM groups LEFT OUTER JOIN postings
ON postings.group_id = groups.group_id
WHERE groups.group_id IN (%s)
GROUP BY groups.group_id
""" % (', '.join([("%d" % (id,)) for (group, id) in result]),)
transaction.execute(sql)
indices = transaction.fetchall()
if not len(indices):
raise NNTPError('Internal server error - no indices found')
# Associate indices with group names
gidToName = dict([(b, a) for (a, b) in result])
gidToIndex = dict(indices)
nameIndex = []
for i in gidToName:
nameIndex.append((gidToName[i], gidToIndex[i]))
# Build xrefs
xrefs = socket.gethostname().split()[0]
xrefs = xrefs + ' ' + ' '.join([('%s:%d' % (group, id)) for (group, id) in nameIndex])
article.putHeader('Xref', xrefs)
# Hey! The article is ready to be posted! God damn f'in finally.
sql = """
INSERT INTO articles (message_id, header, body)
VALUES ('%s', '%s', '%s')
""" % (
adbapi.safe(article.getHeader('Message-ID')),
adbapi.safe(article.textHeaders()),
adbapi.safe(article.body)
)
transaction.execute(sql)
# Now update the posting to reflect the groups to which this belongs
for gid in gidToName:
sql = """
INSERT INTO postings (group_id, article_id, article_index)
VALUES (%d, (SELECT last_value FROM articles_article_id_seq), %d)
""" % (gid, gidToIndex[gid])
transaction.execute(sql)
return len(nameIndex)
def overviewRequest(self):
sql = """
SELECT header FROM overview
"""
return self.dbpool.runQuery(sql).addCallback(lambda result: [header[0] for header in result])
def xoverRequest(self, group, low, high):
sql = """
SELECT postings.article_index, articles.header
FROM articles,postings,groups
WHERE postings.group_id = groups.group_id
AND groups.name = '%s'
AND postings.article_id = articles.article_id
%s
%s
""" % (
adbapi.safe(group),
low is not None and "AND postings.article_index >= %d" % (low,) or "",
high is not None and "AND postings.article_index <= %d" % (high,) or ""
)
return self.dbpool.runQuery(sql).addCallback(
lambda results: [
[id] + Article(header, None).overview() for (id, header) in results
]
)
def xhdrRequest(self, group, low, high, header):
sql = """
            SELECT postings.article_index, articles.header
FROM groups,postings,articles
WHERE groups.name = '%s' AND postings.group_id = groups.group_id
AND postings.article_index >= %d
AND postings.article_index <= %d
""" % (adbapi.safe(group), low, high)
return self.dbpool.runQuery(sql).addCallback(
lambda results: [
                (i, Article(h, None).getHeader(header)) for (i, h) in results
]
)
def listGroupRequest(self, group):
sql = """
SELECT postings.article_index FROM postings,groups
WHERE postings.group_id = groups.group_id
AND groups.name = '%s'
""" % (adbapi.safe(group),)
return self.dbpool.runQuery(sql).addCallback(
lambda results, group = group: (group, [res[0] for res in results])
)
def groupRequest(self, group):
sql = """
SELECT groups.name,
COUNT(postings.article_index),
COALESCE(MAX(postings.article_index), 0),
COALESCE(MIN(postings.article_index), 0),
groups.flags
FROM groups LEFT OUTER JOIN postings
ON postings.group_id = groups.group_id
WHERE groups.name = '%s'
GROUP BY groups.name, groups.flags
""" % (adbapi.safe(group),)
return self.dbpool.runQuery(sql).addCallback(
lambda results: tuple(results[0])
)
def articleExistsRequest(self, id):
sql = """
SELECT COUNT(message_id) FROM articles
WHERE message_id = '%s'
""" % (adbapi.safe(id),)
return self.dbpool.runQuery(sql).addCallback(
lambda result: bool(result[0][0])
)
def articleRequest(self, group, index, id = None):
if id is not None:
sql = """
SELECT postings.article_index, articles.message_id, articles.header, articles.body
FROM groups,postings LEFT OUTER JOIN articles
ON articles.message_id = '%s'
WHERE groups.name = '%s'
AND groups.group_id = postings.group_id
""" % (adbapi.safe(id), adbapi.safe(group))
else:
sql = """
SELECT postings.article_index, articles.message_id, articles.header, articles.body
FROM groups,articles LEFT OUTER JOIN postings
ON postings.article_id = articles.article_id
WHERE postings.article_index = %d
AND postings.group_id = groups.group_id
AND groups.name = '%s'
""" % (index, adbapi.safe(group))
return self.dbpool.runQuery(sql).addCallback(
lambda result: (
result[0][0],
result[0][1],
StringIO.StringIO(result[0][2] + '\r\n' + result[0][3])
)
)
def headRequest(self, group, index):
sql = """
SELECT postings.article_index, articles.message_id, articles.header
FROM groups,articles LEFT OUTER JOIN postings
ON postings.article_id = articles.article_id
WHERE postings.article_index = %d
AND postings.group_id = groups.group_id
AND groups.name = '%s'
""" % (index, adbapi.safe(group))
return self.dbpool.runQuery(sql).addCallback(lambda result: result[0])
def bodyRequest(self, group, index):
sql = """
SELECT postings.article_index, articles.message_id, articles.body
FROM groups,articles LEFT OUTER JOIN postings
ON postings.article_id = articles.article_id
WHERE postings.article_index = %d
AND postings.group_id = groups.group_id
AND groups.name = '%s'
""" % (index, adbapi.safe(group))
return self.dbpool.runQuery(sql).addCallback(
lambda result: result[0]
).addCallback(
lambda (index, id, body): (index, id, StringIO.StringIO(body))
)
####
#### XXX - make these static methods some day
####
def makeGroupSQL(groups):
res = ''
for g in groups:
res = res + """\n INSERT INTO groups (name) VALUES ('%s');\n""" % (adbapi.safe(g),)
return res
def makeOverviewSQL():
res = ''
for o in OVERVIEW_FMT:
res = res + """\n INSERT INTO overview (header) VALUES ('%s');\n""" % (adbapi.safe(o),)
return res
| apache-2.0 |
Dellware78/mtasa-blue | vendor/google-breakpad/src/third_party/protobuf/protobuf/gtest/test/gtest_throw_on_failure_test.py | 2917 | 5766 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
env_var = env_var.upper()
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def Run(command):
"""Runs a command; returns True/False if its exit code is/isn't 0."""
print 'Running "%s". . .' % ' '.join(command)
p = gtest_test_utils.Subprocess(command)
return p.exited and p.exit_code == 0
# The tests. TODO([email protected]): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
"""Tests the throw-on-failure mode."""
def RunAndVerify(self, env_var_value, flag_value, should_fail):
"""Runs gtest_throw_on_failure_test_ and verifies that it does
(or does not) exit with a non-zero code.
Args:
      env_var_value:    value of the GTEST_THROW_ON_FAILURE environment
variable; None if the variable should be unset.
      flag_value:       value of the --gtest_throw_on_failure flag;
None if the flag should not be present.
should_fail: True iff the program is expected to fail.
"""
SetEnvVar(THROW_ON_FAILURE, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % THROW_ON_FAILURE
else:
flag = '--%s' % THROW_ON_FAILURE
command = [EXE_PATH]
if flag:
command.append(flag)
if should_fail:
should_or_not = 'should'
else:
should_or_not = 'should not'
failed = not Run(command)
SetEnvVar(THROW_ON_FAILURE, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
'exit code.' %
(THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(failed == should_fail, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
def testThrowOnFailureEnvVar(self):
"""Tests using the GTEST_THROW_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value=None,
should_fail=True)
def testThrowOnFailureFlag(self):
"""Tests using the --gtest_throw_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value=None,
flag_value='1',
should_fail=True)
def testThrowOnFailureFlagOverridesEnvVar(self):
"""Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='0',
flag_value='1',
should_fail=True)
self.RunAndVerify(env_var_value='1',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value='1',
should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-3.0 |
gladk/palabos | scons/scons-local-2.1.0/SCons/Tool/sgicc.py | 21 | 1878 | """SCons.Tool.sgicc
Tool-specific initialization for MIPSPro cc on SGI.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sgicc.py 5357 2011/09/09 21:31:03 bdeegan"
import cc
def generate(env):
"""Add Builders and construction variables for gcc to an Environment."""
cc.generate(env)
env['CXX'] = 'CC'
env['SHOBJSUFFIX'] = '.o'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
def exists(env):
return env.Detect('cc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| agpl-3.0 |
hujiajie/chromium-crosswalk | tools/telemetry/telemetry/internal/platform/power_monitor/cros_power_monitor.py | 17 | 6081 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import logging
import re
from telemetry import decorators
from telemetry.internal.platform.power_monitor import sysfs_power_monitor
class CrosPowerMonitor(sysfs_power_monitor.SysfsPowerMonitor):
"""PowerMonitor that relies on 'dump_power_status' to monitor power
consumption of a single ChromeOS application.
"""
def __init__(self, platform_backend):
"""Constructor.
Args:
platform_backend: A LinuxBasedPlatformBackend object.
Attributes:
_initial_power: The result of 'dump_power_status' before the test.
_start_time: The epoch time at which the test starts executing.
"""
super(CrosPowerMonitor, self).__init__(platform_backend)
self._initial_power = None
self._start_time = None
@decorators.Cache
def CanMonitorPower(self):
return super(CrosPowerMonitor, self).CanMonitorPower()
def StartMonitoringPower(self, browser):
super(CrosPowerMonitor, self).StartMonitoringPower(browser)
if self._IsOnBatteryPower():
sample = self._platform.RunCommand(['dump_power_status;', 'date', '+%s'])
self._initial_power, self._start_time = CrosPowerMonitor.SplitSample(
sample)
else:
logging.warning('Device not on battery power during power monitoring. '
'Results may be incorrect.')
def StopMonitoringPower(self):
# Don't need to call self._CheckStop here; it's called by the superclass
cpu_stats = super(CrosPowerMonitor, self).StopMonitoringPower()
power_stats = {}
if self._IsOnBatteryPower():
sample = self._platform.RunCommand(['dump_power_status;', 'date', '+%s'])
final_power, end_time = CrosPowerMonitor.SplitSample(sample)
# The length of the test is used to measure energy consumption.
length_h = (end_time - self._start_time) / 3600.0
power_stats = CrosPowerMonitor.ParsePower(self._initial_power,
final_power, length_h)
else:
logging.warning('Device not on battery power during power monitoring. '
'Results may be incorrect.')
return CrosPowerMonitor.CombineResults(cpu_stats, power_stats)
@staticmethod
def SplitSample(sample):
"""Splits a power and time sample into the two separate values.
Args:
sample: The result of calling 'dump_power_status; date +%s' on the
device.
Returns:
A tuple of power sample and epoch time of the sample.
"""
sample = sample.strip()
index = sample.rfind('\n')
power = sample[:index]
time = sample[index + 1:]
return power, int(time)
@staticmethod
def IsOnBatteryPower(status, board):
"""Determines if the devices is being charged.
Args:
status: The parsed result of 'dump_power_status'
board: The name of the board running the test.
Returns:
True if the device is on battery power; False otherwise.
"""
on_battery = status['line_power_connected'] == '0'
# Butterfly can incorrectly report AC online for some time after unplug.
# Check battery discharge state to confirm.
if board == 'butterfly':
on_battery |= status['battery_discharging'] == '1'
return on_battery
def _IsOnBatteryPower(self):
"""Determines if the device is being charged.
Returns:
True if the device is on battery power; False otherwise.
"""
status = CrosPowerMonitor.ParsePowerStatus(
self._platform.RunCommand(['dump_power_status']))
board_data = self._platform.RunCommand(['cat', '/etc/lsb-release'])
board = re.search('BOARD=(.*)', board_data).group(1)
return CrosPowerMonitor.IsOnBatteryPower(status, board)
@staticmethod
def ParsePowerStatus(sample):
"""Parses 'dump_power_status' command output.
Args:
sample: The output of 'dump_power_status'
Returns:
Dictionary containing all fields from 'dump_power_status'
"""
rv = collections.defaultdict(dict)
for ln in sample.splitlines():
words = ln.split()
assert len(words) == 2
rv[words[0]] = words[1]
return dict(rv)
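  # Illustrative sketch, not part of the original class: given sample
  # 'dump_power_status' output such as
  #     line_power_connected 0
  #     battery_energy_rate 5.43
  # ParsePowerStatus returns
  #     {'line_power_connected': '0', 'battery_energy_rate': '5.43'}.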
@staticmethod
def ParsePower(initial_stats, final_stats, length_h):
"""Parse output of 'dump_power_status'
Args:
initial_stats: The output of 'dump_power_status' before the test.
final_stats: The output of 'dump_power_status' after the test.
length_h: The length of the test in hours.
Returns:
Dictionary in the format returned by StopMonitoringPower().
"""
initial = CrosPowerMonitor.ParsePowerStatus(initial_stats)
final = CrosPowerMonitor.ParsePowerStatus(final_stats)
# The charge value reported by 'dump_power_status' is not precise enough to
# give meaningful results across shorter tests, so average energy rate and
# the length of the test are used.
initial_power_mw = float(initial['battery_energy_rate']) * 10 ** 3
final_power_mw = float(final['battery_energy_rate']) * 10 ** 3
average_power_mw = (initial_power_mw + final_power_mw) / 2.0
# Duplicating CrOS battery fields where applicable.
def CopyFinalState(field, key):
"""Copy fields from battery final state."""
if field in final:
battery[key] = float(final[field])
battery = {}
CopyFinalState('battery_charge_full', 'charge_full')
CopyFinalState('battery_charge_full_design', 'charge_full_design')
CopyFinalState('battery_charge', 'charge_now')
CopyFinalState('battery_current', 'current_now')
CopyFinalState('battery_energy', 'energy')
CopyFinalState('battery_energy_rate', 'energy_rate')
CopyFinalState('battery_voltage', 'voltage_now')
return {'identifier': 'dump_power_status',
'power_samples_mw': [initial_power_mw, final_power_mw],
'energy_consumption_mwh': average_power_mw * length_h,
'component_utilization': {'battery': battery}}
| bsd-3-clause |
dongritengfei/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/botinfo_unittest.py | 121 | 2047 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.tool.bot.botinfo import BotInfo
from webkitpy.tool.mocktool import MockTool
from webkitpy.common.net.statusserver_mock import MockStatusServer
from webkitpy.port.test import TestPort
class BotInfoTest(unittest.TestCase):
def test_summary_text(self):
tool = MockTool()
tool.status_server = MockStatusServer("MockBotId")
self.assertEqual(BotInfo(tool, 'port-name').summary_text(), "Bot: MockBotId Port: port-name Platform: MockPlatform 1.0")
| bsd-3-clause |
edbrannin/Robotframework-SQLAlchemy-Library | src/SQLAlchemyLibrary/__init__.py | 1 | 2769 | # Copyright (c) 2010 Franz Allan Valencia See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from SQLAlchemyLibrary.connection_manager import ConnectionManager
from SQLAlchemyLibrary.query import Query
from SQLAlchemyLibrary.assertion import Assertion
__version_file_path__ = os.path.join(os.path.dirname(__file__), 'VERSION')
__version__ = open(__version_file_path__, 'r').read().strip()
class SQLAlchemyLibrary(ConnectionManager, Query, Assertion):
"""
SQLAlchemy Library allows you to interact with your database in Robot Framework tests.
This allows you to query your database after an action has been taken, to verify the results.
This can use any database supported by SQLAlchemy, including Oracle, MySQL, Postgres, SQLite.
(Not yet tested on Oracle).
This should be a drop-in replacement for DatabaseLibrary in most situations.
Advantages over DatabaseLibrary
- Ability to provide named-parameter BIND values
== References: ==
- SQLAlchemy documentation - http://docs.sqlalchemy.org/en/latest/index.html
- List of SQLAlchemy Dialects - http://docs.sqlalchemy.org/en/latest/dialects/
- Python Database Programming - http://wiki.python.org/moin/DatabaseProgramming/
== Notes: ==
=== Example Usage: ===
| # Setup |
| Connect to Database |
| # Guard assertion (verify that test started in expected state). |
| Check if not exists in database | select id from person where first_name = :first_name and last_name = :last_name | first_name=Franz Allan | last_name=See |
| # Drive UI to do some action |
| Go To | http://localhost/person/form.html | | # From selenium library |
| Input Text | name=first_name | Franz Allan | # From selenium library |
| Input Text | name=last_name | See | # From selenium library |
| Click Button | Save | | # From selenium library |
| # Log results |
| @{queryResults} | Query | select * from person |
| Log Many | @{queryResults} |
| # Verify if persisted in the database |
| Check if exists in database | select id from person where first_name = 'Franz Allan' and last_name = 'See' |
| # Teardown |
| Disconnect from Database |
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
| apache-2.0 |
sv-dev1/odoo | addons/mrp_repair/__init__.py | 380 | 1087 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mrp_repair
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ibladesi/TF101-HighOC-3P2 | tools/perf/scripts/python/syscall-counts-by-pid.py | 944 | 1744 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by comm/pid and syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
pass
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
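# Note: autodict() (from perf's Core helpers) autovivifies nested keys, so the
# in-place += works once a (comm, pid, id) counter exists; the TypeError branch
# seeds the counter the first time a new triple is seen.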
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38d %10d\n" % (id, val),
| gpl-2.0 |
jakirkham/nanshe | nanshe/registerer.py | 3 | 5569 | """
The ``registerer`` module allows the registration algorithm to be run.
===============================================================================
Overview
===============================================================================
The ``main`` function actually starts the algorithm and can be called
externally. Configuration files for the registerer are provided in the
examples_ and are entitled registerer. Any attributes on the raw dataset are
copied to the registered dataset.
.. _examples: http://github.com/nanshe-org/nanshe/tree/master/examples
===============================================================================
API
===============================================================================
"""
__author__ = "John Kirkham <[email protected]>"
__date__ = "$Feb 20, 2015 13:00:51 EST$"
import itertools
import os
import h5py
from nanshe.util import iters, prof
from nanshe.io import hdf5, xjson
from nanshe.imp import registration
# Get the logger
trace_logger = prof.getTraceLogger(__name__)
@prof.log_call(trace_logger)
def main(*argv):
"""
Simple main function (like in C). Takes all arguments (as from
sys.argv) and returns an exit status.
Args:
argv(list): arguments (includes command line call).
Returns:
int: exit code (0 if success)
"""
# Only necessary if running main (normally if calling command line). No
# point in importing otherwise.
import argparse
argv = list(argv)
# Creates command line parser
parser = argparse.ArgumentParser(
description="Parses input from the command line " +
"for a registration job."
)
parser.add_argument("config_filename",
metavar="CONFIG_FILE",
type=str,
help="JSON file that provides configuration options " +
"for how to import TIFF(s)."
)
parser.add_argument("input_filenames",
metavar="INPUT_FILE",
type=str,
nargs=1,
help="HDF5 file to import (this should include a " +
"path to where the internal dataset should be " +
"stored)."
)
parser.add_argument("output_filenames",
metavar="OUTPUT_FILE",
type=str,
nargs=1,
help="HDF5 file to export (this should include a " +
"path to where the internal dataset should be " +
"stored)."
)
# Results of parsing arguments
# (ignore the first one as it is the command line call).
parsed_args = parser.parse_args(argv[1:])
# Go ahead and stuff in parameters with the other parsed_args
parsed_args.parameters = xjson.read_parameters(parsed_args.config_filename)
parsed_args.input_file_components = []
for each_input_filename in parsed_args.input_filenames:
parsed_args.input_file_components.append(
hdf5.serializers.split_hdf5_path(each_input_filename)
)
parsed_args.output_file_components = []
for each_output_filename in parsed_args.output_filenames:
parsed_args.output_file_components.append(
hdf5.serializers.split_hdf5_path(each_output_filename)
)
for each_input_filename_components, each_output_filename_components in iters.izip(
parsed_args.input_file_components, parsed_args.output_file_components):
with h5py.File(each_input_filename_components[0], "r") as input_file:
with h5py.File(each_output_filename_components[0], "a") as output_file:
data = input_file[each_input_filename_components[1]]
result_filename = registration.register_mean_offsets(
data, to_truncate=True, **parsed_args.parameters
)
with h5py.File(result_filename, "r") as result_file:
result_file.copy(
"reg_frames",
output_file[os.path.dirname(each_output_filename_components[1])],
name=each_output_filename_components[1]
)
if parsed_args.parameters.get("include_shift", False):
result_file.copy(
"space_shift",
output_file[os.path.dirname(each_output_filename_components[1])],
name=each_output_filename_components[1] + "_shift"
)
# Copy all attributes from raw data to the final result.
output = output_file[
each_output_filename_components[1]
]
for each_attr_name in data.attrs:
output.attrs[each_attr_name] = data.attrs[each_attr_name]
# Only remove the directory if our input or output files are
# not stored there.
os.remove(result_filename)
in_out_dirnames = set(
os.path.dirname(os.path.abspath(_.filename)) for _ in [
input_file, output_file
]
)
result_dirname = os.path.dirname(result_filename)
if result_dirname not in in_out_dirnames:
os.rmdir(result_dirname)
return(0)
| bsd-3-clause |
diegocortassa/TACTIC | 3rd_party/site-packages/pytz/__init__.py | 5 | 34206 | '''
datetime.tzinfo timezone definitions generated from the
Olson timezone database:
ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
See the datetime section of the Python Library Reference for information
on how to use these modules.
'''
import sys
import datetime
import os.path
from pytz.exceptions import AmbiguousTimeError
from pytz.exceptions import InvalidTimeError
from pytz.exceptions import NonExistentTimeError
from pytz.exceptions import UnknownTimeZoneError
from pytz.lazy import LazyDict, LazyList, LazySet
from pytz.tzinfo import unpickler
from pytz.tzfile import build_tzinfo
# The IANA (nee Olson) database is updated several times a year.
OLSON_VERSION = '2018c'
VERSION = '2018.3' # Switching to pip compatible version numbering.
__version__ = VERSION
OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling
__all__ = [
'timezone', 'utc', 'country_timezones', 'country_names',
'AmbiguousTimeError', 'InvalidTimeError',
'NonExistentTimeError', 'UnknownTimeZoneError',
'all_timezones', 'all_timezones_set',
'common_timezones', 'common_timezones_set',
]
try:
unicode
except NameError: # Python 3.x
# Python 3.x doesn't have unicode(), making writing code
# for Python 2.3 and Python 3.x a pain.
unicode = str
def ascii(s):
r"""
>>> ascii('Hello')
'Hello'
>>> ascii('\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnicodeEncodeError: ...
"""
if type(s) == bytes:
s = s.decode('ASCII')
else:
s.encode('ASCII') # Raise an exception if not ASCII
return s # But the string - not a byte string.
else: # Python 2.x
def ascii(s):
r"""
>>> ascii('Hello')
'Hello'
>>> ascii(u'Hello')
'Hello'
>>> ascii(u'\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnicodeEncodeError: ...
"""
return s.encode('ASCII')
def open_resource(name):
"""Open a resource from the zoneinfo subdir for reading.
Uses the pkg_resources module if available and no standard file is
found at the calculated location.
It is possible to specify a different location for the zoneinfo
subdir by using the PYTZ_TZDATADIR environment variable.
"""
name_parts = name.lstrip('/').split('/')
for part in name_parts:
if part == os.path.pardir or os.path.sep in part:
raise ValueError('Bad path segment: %r' % part)
zoneinfo_dir = os.environ.get('PYTZ_TZDATADIR', None)
if zoneinfo_dir is not None:
filename = os.path.join(zoneinfo_dir, *name_parts)
else:
filename = os.path.join(os.path.dirname(__file__),
'zoneinfo', *name_parts)
if not os.path.exists(filename):
# http://bugs.launchpad.net/bugs/383171 - we avoid using this
# unless absolutely necessary to help when a broken version of
# pkg_resources is installed.
try:
from pkg_resources import resource_stream
except ImportError:
resource_stream = None
if resource_stream is not None:
return resource_stream(__name__, 'zoneinfo/' + name)
return open(filename, 'rb')
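# For example, open_resource('zone.tab') returns a binary file object for the
# bundled zone.tab data, which is used below to build the country tables.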
def resource_exists(name):
"""Return true if the given resource exists"""
try:
open_resource(name).close()
return True
except IOError:
return False
_tzinfo_cache = {}
def timezone(zone):
r''' Return a datetime.tzinfo implementation for the given timezone
>>> from datetime import datetime, timedelta
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> eastern.zone
'US/Eastern'
>>> timezone(unicode('US/Eastern')) is eastern
True
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
>>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
>>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
>>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:10:00 EST (-0500)'
Raises UnknownTimeZoneError if passed an unknown zone.
>>> try:
... timezone('Asia/Shangri-La')
... except UnknownTimeZoneError:
... print('Unknown')
Unknown
>>> try:
... timezone(unicode('\N{TRADE MARK SIGN}'))
... except UnknownTimeZoneError:
... print('Unknown')
Unknown
'''
if zone.upper() == 'UTC':
return utc
try:
zone = ascii(zone)
except UnicodeEncodeError:
# All valid timezones are ASCII
raise UnknownTimeZoneError(zone)
zone = _unmunge_zone(zone)
if zone not in _tzinfo_cache:
if zone in all_timezones_set:
fp = open_resource(zone)
try:
_tzinfo_cache[zone] = build_tzinfo(zone, fp)
finally:
fp.close()
else:
raise UnknownTimeZoneError(zone)
return _tzinfo_cache[zone]
def _unmunge_zone(zone):
"""Undo the time zone name munging done by older versions of pytz."""
return zone.replace('_plus_', '+').replace('_minus_', '-')
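# e.g. _unmunge_zone('Etc/GMT_plus_5') -> 'Etc/GMT+5'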
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)
class UTC(datetime.tzinfo):
"""UTC
Optimized UTC implementation. It unpickles using the single module global
instance defined beneath this class declaration.
"""
zone = "UTC"
_utcoffset = ZERO
_dst = ZERO
_tzname = zone
def fromutc(self, dt):
if dt.tzinfo is None:
return self.localize(dt)
return super(utc.__class__, self).fromutc(dt)
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
def __reduce__(self):
return _UTC, ()
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def __repr__(self):
return "<UTC>"
def __str__(self):
return "UTC"
UTC = utc = UTC() # UTC is a singleton
def _UTC():
"""Factory function for utc unpickling.
Makes sure that unpickling a utc instance always returns the same
module global.
These examples belong in the UTC class above, but it is obscured; or in
the README.txt, but we are not depending on Python 2.4 so integrating
the README.txt examples with the unit tests is not trivial.
>>> import datetime, pickle
>>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc)
>>> naive = dt.replace(tzinfo=None)
>>> p = pickle.dumps(dt, 1)
>>> naive_p = pickle.dumps(naive, 1)
>>> len(p) - len(naive_p)
17
>>> new = pickle.loads(p)
>>> new == dt
True
>>> new is dt
False
>>> new.tzinfo is dt.tzinfo
True
>>> utc is UTC is timezone('UTC')
True
>>> utc is timezone('GMT')
False
"""
return utc
_UTC.__safe_for_unpickling__ = True
def _p(*args):
"""Factory function for unpickling pytz tzinfo instances.
Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle
by shortening the path.
"""
return unpickler(*args)
_p.__safe_for_unpickling__ = True
class _CountryTimezoneDict(LazyDict):
"""Map ISO 3166 country code to a list of timezone names commonly used
in that country.
iso3166_code is the two letter code used to identify the country.
>>> def print_list(list_of_strings):
... 'We use a helper so doctests work under Python 2.3 -> 3.x'
... for s in list_of_strings:
... print(s)
>>> print_list(country_timezones['nz'])
Pacific/Auckland
Pacific/Chatham
>>> print_list(country_timezones['ch'])
Europe/Zurich
>>> print_list(country_timezones['CH'])
Europe/Zurich
>>> print_list(country_timezones[unicode('ch')])
Europe/Zurich
>>> print_list(country_timezones['XXX'])
Traceback (most recent call last):
...
KeyError: 'XXX'
Previously, this information was exposed as a function rather than a
dictionary. This is still supported::
>>> print_list(country_timezones('nz'))
Pacific/Auckland
Pacific/Chatham
"""
def __call__(self, iso3166_code):
"""Backwards compatibility."""
return self[iso3166_code]
def _fill(self):
data = {}
zone_tab = open_resource('zone.tab')
try:
for line in zone_tab:
line = line.decode('UTF-8')
if line.startswith('#'):
continue
code, coordinates, zone = line.split(None, 4)[:3]
if zone not in all_timezones_set:
continue
try:
data[code].append(zone)
except KeyError:
data[code] = [zone]
self.data = data
finally:
zone_tab.close()
country_timezones = _CountryTimezoneDict()
class _CountryNameDict(LazyDict):
'''Dictionary providing ISO3166 code -> English name.
>>> print(country_names['au'])
Australia
'''
def _fill(self):
data = {}
zone_tab = open_resource('iso3166.tab')
try:
for line in zone_tab.readlines():
line = line.decode('UTF-8')
if line.startswith('#'):
continue
code, name = line.split(None, 1)
data[code] = name.strip()
self.data = data
finally:
zone_tab.close()
country_names = _CountryNameDict()
# Time-zone info based solely on fixed offsets
class _FixedOffset(datetime.tzinfo):
zone = None # to match the standard pytz API
def __init__(self, minutes):
if abs(minutes) >= 1440:
raise ValueError("absolute offset is too large", minutes)
self._minutes = minutes
self._offset = datetime.timedelta(minutes=minutes)
def utcoffset(self, dt):
return self._offset
def __reduce__(self):
return FixedOffset, (self._minutes, )
def dst(self, dt):
return ZERO
def tzname(self, dt):
return None
def __repr__(self):
return 'pytz.FixedOffset(%d)' % self._minutes
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def FixedOffset(offset, _tzinfos={}):
"""return a fixed-offset timezone based off a number of minutes.
>>> one = FixedOffset(-330)
>>> one
pytz.FixedOffset(-330)
>>> one.utcoffset(datetime.datetime.now())
datetime.timedelta(-1, 66600)
>>> one.dst(datetime.datetime.now())
datetime.timedelta(0)
>>> two = FixedOffset(1380)
>>> two
pytz.FixedOffset(1380)
>>> two.utcoffset(datetime.datetime.now())
datetime.timedelta(0, 82800)
>>> two.dst(datetime.datetime.now())
datetime.timedelta(0)
The datetime.timedelta must be strictly between -1 day and 1 day
(both limits exclusive).
>>> FixedOffset(1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', 1440)
>>> FixedOffset(-1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', -1440)
An offset of 0 is special-cased to return UTC.
>>> FixedOffset(0) is UTC
True
There should always be only one instance of a FixedOffset per timedelta.
This should be true for multiple creation calls.
>>> FixedOffset(-330) is one
True
>>> FixedOffset(1380) is two
True
It should also be true for pickling.
>>> import pickle
>>> pickle.loads(pickle.dumps(one)) is one
True
>>> pickle.loads(pickle.dumps(two)) is two
True
"""
if offset == 0:
return UTC
info = _tzinfos.get(offset)
if info is None:
# We haven't seen this one before. we need to save it.
# Use setdefault to avoid a race condition and make sure we have
# only one
info = _tzinfos.setdefault(offset, _FixedOffset(offset))
return info
FixedOffset.__safe_for_unpickling__ = True
def _test():
import doctest
sys.path.insert(0, os.pardir)
import pytz
return doctest.testmod(pytz)
if __name__ == '__main__':
_test()
all_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Asmera',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Timbuktu',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/ComodRivadavia',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Atka',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Buenos_Aires',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Catamarca',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Coral_Harbour',
'America/Cordoba',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Ensenada',
'America/Fort_Nelson',
'America/Fort_Wayne',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Indianapolis',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Jujuy',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Knox_IN',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Louisville',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Mendoza',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Acre',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Punta_Arenas',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Rosario',
'America/Santa_Isabel',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Shiprock',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Virgin',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/South_Pole',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Ashkhabad',
'Asia/Atyrau',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Barnaul',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Calcutta',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Chungking',
'Asia/Colombo',
'Asia/Dacca',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Famagusta',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Istanbul',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Kathmandu',
'Asia/Katmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macao',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Saigon',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Tel_Aviv',
'Asia/Thimbu',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Tomsk',
'Asia/Ujung_Pandang',
'Asia/Ulaanbaatar',
'Asia/Ulan_Bator',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yangon',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faeroe',
'Atlantic/Faroe',
'Atlantic/Jan_Mayen',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/ACT',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Canberra',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/LHI',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/NSW',
'Australia/North',
'Australia/Perth',
'Australia/Queensland',
'Australia/South',
'Australia/Sydney',
'Australia/Tasmania',
'Australia/Victoria',
'Australia/West',
'Australia/Yancowinna',
'Brazil/Acre',
'Brazil/DeNoronha',
'Brazil/East',
'Brazil/West',
'CET',
'CST6CDT',
'Canada/Atlantic',
'Canada/Central',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Canada/Saskatchewan',
'Canada/Yukon',
'Chile/Continental',
'Chile/EasterIsland',
'Cuba',
'EET',
'EST',
'EST5EDT',
'Egypt',
'Eire',
'Etc/GMT',
'Etc/GMT+0',
'Etc/GMT+1',
'Etc/GMT+10',
'Etc/GMT+11',
'Etc/GMT+12',
'Etc/GMT+2',
'Etc/GMT+3',
'Etc/GMT+4',
'Etc/GMT+5',
'Etc/GMT+6',
'Etc/GMT+7',
'Etc/GMT+8',
'Etc/GMT+9',
'Etc/GMT-0',
'Etc/GMT-1',
'Etc/GMT-10',
'Etc/GMT-11',
'Etc/GMT-12',
'Etc/GMT-13',
'Etc/GMT-14',
'Etc/GMT-2',
'Etc/GMT-3',
'Etc/GMT-4',
'Etc/GMT-5',
'Etc/GMT-6',
'Etc/GMT-7',
'Etc/GMT-8',
'Etc/GMT-9',
'Etc/GMT0',
'Etc/Greenwich',
'Etc/UCT',
'Etc/UTC',
'Etc/Universal',
'Etc/Zulu',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Astrakhan',
'Europe/Athens',
'Europe/Belfast',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Kirov',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Nicosia',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Saratov',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Tiraspol',
'Europe/Ulyanovsk',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GB',
'GB-Eire',
'GMT',
'GMT+0',
'GMT-0',
'GMT0',
'Greenwich',
'HST',
'Hongkong',
'Iceland',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Iran',
'Israel',
'Jamaica',
'Japan',
'Kwajalein',
'Libya',
'MET',
'MST',
'MST7MDT',
'Mexico/BajaNorte',
'Mexico/BajaSur',
'Mexico/General',
'NZ',
'NZ-CHAT',
'Navajo',
'PRC',
'PST8PDT',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Samoa',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'Pacific/Yap',
'Poland',
'Portugal',
'ROC',
'ROK',
'Singapore',
'Turkey',
'UCT',
'US/Alaska',
'US/Aleutian',
'US/Arizona',
'US/Central',
'US/East-Indiana',
'US/Eastern',
'US/Hawaii',
'US/Indiana-Starke',
'US/Michigan',
'US/Mountain',
'US/Pacific',
'US/Samoa',
'UTC',
'Universal',
'W-SU',
'WET',
'Zulu']
all_timezones = LazyList(
tz for tz in all_timezones if resource_exists(tz))
all_timezones_set = LazySet(all_timezones)
common_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Fort_Nelson',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Punta_Arenas',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Atyrau',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Barnaul',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Colombo',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Famagusta',
'Asia/Gaza',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kathmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Riyadh',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Tomsk',
'Asia/Ulaanbaatar',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yangon',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faroe',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/Perth',
'Australia/Sydney',
'Canada/Atlantic',
'Canada/Central',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Astrakhan',
'Europe/Athens',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Kirov',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Saratov',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Ulyanovsk',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GMT',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Wake',
'Pacific/Wallis',
'US/Alaska',
'US/Arizona',
'US/Central',
'US/Eastern',
'US/Hawaii',
'US/Mountain',
'US/Pacific',
'UTC']
common_timezones = LazyList(
tz for tz in common_timezones if tz in all_timezones)
common_timezones_set = LazySet(common_timezones)
| epl-1.0 |
EvanK/ansible | lib/ansible/modules/net_tools/nios/nios_naptr_record.py | 68 | 5884 | #!/usr/bin/python
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: nios_naptr_record
version_added: "2.7"
author: "Blair Rampling (@brampling)"
short_description: Configure Infoblox NIOS NAPTR records
description:
- Adds and/or removes instances of NAPTR record objects from
Infoblox NIOS servers. This module manages NIOS C(record:naptr) objects
using the Infoblox WAPI interface over REST.
requirements:
- infoblox_client
extends_documentation_fragment: nios
options:
name:
description:
- Specifies the fully qualified hostname to add or remove from
the system
required: true
view:
description:
- Sets the DNS view to associate this NAPTR record with. The DNS
view must already be configured on the system
required: true
default: default
aliases:
- dns_view
order:
description:
- Configures the order (0-65535) for this NAPTR record. This parameter
specifies the order in which the NAPTR rules are applied when
multiple rules are present.
required: true
preference:
description:
- Configures the preference (0-65535) for this NAPTR record. The
preference field determines the order NAPTR records are processed
when multiple records with the same order parameter are present.
required: true
replacement:
description:
- Configures the replacement field for this NAPTR record.
For nonterminal NAPTR records, this field specifies the
next domain name to look up.
required: true
services:
description:
- Configures the services field (128 characters maximum) for this
NAPTR record. The services field contains protocol and service
identifiers, such as "http+E2U" or "SIPS+D2T".
required: false
flags:
description:
- Configures the flags field for this NAPTR record. These control the
interpretation of the fields for an NAPTR record object. Supported
values for the flags field are "U", "S", "P" and "A".
required: false
regexp:
description:
- Configures the regexp field for this NAPTR record. This is the
regular expression-based rewriting rule of the NAPTR record. This
should be a POSIX compliant regular expression, including the
substitution rule and flags. Refer to RFC 2915 for the field syntax
details.
required: false
ttl:
description:
- Configures the TTL to be associated with this NAPTR record
extattrs:
description:
- Allows for the configuration of Extensible Attributes on the
instance of the object. This argument accepts a set of key / value
pairs for configuration.
comment:
description:
- Configures a text string comment to be associated with the instance
of this object. The provided text string will be configured on the
object instance.
state:
description:
- Configures the intended state of the instance of the object on
the NIOS server. When this value is set to C(present), the object
is configured on the device and when this value is set to C(absent)
the value is removed (if necessary) from the device.
default: present
choices:
- present
- absent
'''
EXAMPLES = '''
- name: configure a NAPTR record
nios_naptr_record:
name: '*.subscriber-100.ansiblezone.com'
order: 1000
preference: 10
replacement: replacement1.network.ansiblezone.com
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: add a comment to an existing NAPTR record
nios_naptr_record:
name: '*.subscriber-100.ansiblezone.com'
order: 1000
preference: 10
replacement: replacement1.network.ansiblezone.com
comment: this is a test comment
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: remove a NAPTR record from the system
nios_naptr_record:
name: '*.subscriber-100.ansiblezone.com'
order: 1000
preference: 10
replacement: replacement1.network.ansiblezone.com
state: absent
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.net_tools.nios.api import WapiModule
def main():
''' Main entry point for module execution
'''
ib_spec = dict(
name=dict(required=True, ib_req=True),
view=dict(default='default', aliases=['dns_view'], ib_req=True),
order=dict(type='int', ib_req=True),
preference=dict(type='int', ib_req=True),
replacement=dict(ib_req=True),
services=dict(),
flags=dict(),
regexp=dict(),
ttl=dict(type='int'),
extattrs=dict(type='dict'),
comment=dict(),
)
argument_spec = dict(
provider=dict(required=True),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(ib_spec)
argument_spec.update(WapiModule.provider_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
wapi = WapiModule(module)
result = wapi.run('record:naptr', ib_spec)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
elinebakker/paparazzi | sw/tools/calibration/report_imu_scaled.py | 24 | 4378 | #! /usr/bin/env python
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
import sys
import os
from optparse import OptionParser
import calibration_utils
def main():
usage = "usage: %prog [options] log_filename.data" + "\n" + "Run %prog --help to list the options."
parser = OptionParser(usage)
parser.add_option("-i", "--id", dest="ac_id",
action="store",
help="aircraft id to use")
parser.add_option("-p", "--plot",
help="Show sensor plots",
action="store_true", dest="plot")
parser.add_option("-s", "--start", dest="start",
action="store",
type=int, default=0,
help="start time in seconds")
parser.add_option("-e", "--end", dest="end",
action="store",
type=int, default=36000,
help="end time in seconds")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
else:
if os.path.isfile(args[0]):
filename = args[0]
else:
print(args[0] + " not found")
sys.exit(1)
ac_ids = calibration_utils.get_ids_in_log(filename)
if options.ac_id is None:
if len(ac_ids) == 1:
options.ac_id = ac_ids[0]
else:
parser.error("More than one aircraft id found in log file. Specify the id to use.")
if options.verbose:
print("Using aircraft id "+options.ac_id)
if not filename.endswith(".data"):
parser.error("Please specify a *.data log file")
if options.verbose:
print("reading file "+filename+" for aircraft "+options.ac_id+" and scaled sensors")
#Moved these checks to the command line parser above
#
#if options.start is None:
# options.start = 0
#if options.end is None:
# options.end = 36000
# read scaled sensor measurements from log file
# TBD: Eventually populate the sensor attributes/values with data found in the messages.xml file
sensor_names = [ "ACCEL", "GYRO", "MAG" ]
sensor_attrs = [ [0.0009766, "m/s2", "ax", "ay", "az"], [0.0139882, "deg/s", "gp", "gq", "gr"], [0.0004883, "unit", "mx", "my", "mz"] ]
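# Each sensor_attrs entry pairs an assumed scale factor and unit with the
# field names of that sensor's three axes, in the same order as sensor_names.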
for sensor_name in sensor_names:
measurements = calibration_utils.read_log_scaled(options.ac_id, filename, sensor_name, options.start, options.end)
if len(measurements) > 0:
if options.verbose:
print("found "+str(len(measurements))+" records")
calibration_utils.print_imu_scaled(sensor_name, measurements, sensor_attrs[sensor_names.index(sensor_name)])
if options.plot:
calibration_utils.plot_imu_scaled(sensor_name, measurements, sensor_attrs[sensor_names.index(sensor_name)])
calibration_utils.plot_imu_scaled_fft(sensor_name, measurements, sensor_attrs[sensor_names.index(sensor_name)])
else:
print("Warning: found zero IMU_"+sensor_name+"_SCALED measurements for aircraft with id "+options.ac_id+" in log file!")
#sys.exit(1)
print("")
# coefficient = calibration_utils.estimate_mag_current_relation(measurements)
# print("")
# print("<define name= \"MAG_X_CURRENT_COEF\" value=\""+str(coefficient[0])+"\"/>")
# print("<define name= \"MAG_Y_CURRENT_COEF\" value=\""+str(coefficient[1])+"\"/>")
# print("<define name= \"MAG_Z_CURRENT_COEF\" value=\""+str(coefficient[2])+"\"/>")
if __name__ == "__main__":
main()
| gpl-2.0 |
jaimahajan1997/sympy | sympy/functions/special/gamma_functions.py | 22 | 32460 | from __future__ import print_function, division
from sympy.core import Add, S, sympify, oo, pi, Dummy
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.numbers import Rational
from sympy.core.power import Pow
from sympy.core.compatibility import range
from .zeta_functions import zeta
from .error_functions import erf, erfc
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.elementary.integers import ceiling, floor
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.combinatorial.numbers import bernoulli, harmonic
from sympy.functions.combinatorial.factorials import factorial, rf, RisingFactorial
###############################################################################
############################ COMPLETE GAMMA FUNCTION ##########################
###############################################################################
class gamma(Function):
r"""
The gamma function
.. math::
\Gamma(x) := \int^{\infty}_{0} t^{x-1} e^{-t} \mathrm{d}t.
The ``gamma`` function implements the function which passes through the
values of the factorial function, i.e. `\Gamma(n) = (n - 1)!` when n is
a positive integer. More generally, `\Gamma(z)` is defined in the whole
complex plane except at the non-positive integers, where there are simple poles.
Examples
========
>>> from sympy import S, I, pi, oo, gamma
>>> from sympy.abc import x
Several special values are known:
>>> gamma(1)
1
>>> gamma(4)
6
>>> gamma(S(3)/2)
sqrt(pi)/2
The Gamma function obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(gamma(x))
gamma(conjugate(x))
Differentiation with respect to x is supported:
>>> from sympy import diff
>>> diff(gamma(x), x)
gamma(x)*polygamma(0, x)
Series expansion is also supported:
>>> from sympy import series
>>> series(gamma(x), x, 0, 3)
1/x - EulerGamma + x*(EulerGamma**2/2 + pi**2/12) + x**2*(-EulerGamma*pi**2/12 + polygamma(2, 1)/6 - EulerGamma**3/6) + O(x**3)
We can numerically evaluate the gamma function to arbitrary precision
on the whole complex plane:
>>> gamma(pi).evalf(40)
2.288037795340032417959588909060233922890
>>> gamma(1+I).evalf(20)
0.49801566811835604271 - 0.15494982830181068512*I
See Also
========
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Gamma_function
.. [2] http://dlmf.nist.gov/5
.. [3] http://mathworld.wolfram.com/GammaFunction.html
.. [4] http://functions.wolfram.com/GammaBetaErf/Gamma/
"""
unbranched = True
def fdiff(self, argindex=1):
if argindex == 1:
return self.func(self.args[0])*polygamma(0, self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg.is_Integer:
if arg.is_positive:
return factorial(arg - 1)
else:
return S.ComplexInfinity
elif arg.is_Rational:
if arg.q == 2:
n = abs(arg.p) // arg.q
if arg.is_positive:
k, coeff = n, S.One
else:
n = k = n + 1
if n & 1 == 0:
coeff = S.One
else:
coeff = S.NegativeOne
for i in range(3, 2*k, 2):
coeff *= i
if arg.is_positive:
return coeff*sqrt(S.Pi) / 2**n
else:
return 2**n*sqrt(S.Pi) / coeff
if arg.is_integer and arg.is_nonpositive:
return S.ComplexInfinity
def _eval_expand_func(self, **hints):
arg = self.args[0]
if arg.is_Rational:
if abs(arg.p) > arg.q:
x = Dummy('x')
n = arg.p // arg.q
p = arg.p - n*arg.q
return self.func(x + n)._eval_expand_func().subs(x, Rational(p, arg.q))
if arg.is_Add:
coeff, tail = arg.as_coeff_add()
if coeff and coeff.q != 1:
intpart = floor(coeff)
tail = (coeff - intpart,) + tail
coeff = intpart
tail = arg._new_rawargs(*tail, reeval=False)
return self.func(tail)*RisingFactorial(tail, coeff)
return self.func(*self.args)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _eval_is_real(self):
x = self.args[0]
if x.is_positive or x.is_noninteger:
return True
def _eval_is_positive(self):
x = self.args[0]
if x.is_positive:
return True
elif x.is_noninteger:
return floor(x).is_even
def _eval_rewrite_as_tractable(self, z):
return exp(loggamma(z))
def _eval_rewrite_as_factorial(self, z):
return factorial(z - 1)
def _eval_nseries(self, x, n, logx):
x0 = self.args[0].limit(x, 0)
if not (x0.is_Integer and x0 <= 0):
return super(gamma, self)._eval_nseries(x, n, logx)
t = self.args[0] - x0
return (self.func(t + 1)/rf(self.args[0], -x0 + 1))._eval_nseries(x, n, logx)
def _latex(self, printer, exp=None):
if len(self.args) != 1:
raise ValueError("Args length should be 1")
aa = printer._print(self.args[0])
if exp:
return r'\Gamma^{%s}{\left(%s \right)}' % (printer._print(exp), aa)
else:
return r'\Gamma{\left(%s \right)}' % aa
@staticmethod
def _latex_no_arg(printer):
return r'\Gamma'
###############################################################################
################## LOWER and UPPER INCOMPLETE GAMMA FUNCTIONS #################
###############################################################################
class lowergamma(Function):
r"""
The lower incomplete gamma function.
It can be defined as the meromorphic continuation of
.. math::
\gamma(s, x) := \int_0^x t^{s-1} e^{-t} \mathrm{d}t = \Gamma(s) - \Gamma(s, x).
This can be shown to be the same as
.. math::
\gamma(s, x) = \frac{x^s}{s} {}_1F_1\left({s \atop s+1} \middle| -x\right),
where :math:`{}_1F_1` is the (confluent) hypergeometric function.
Examples
========
>>> from sympy import lowergamma, S
>>> from sympy.abc import s, x
>>> lowergamma(s, x)
lowergamma(s, x)
>>> lowergamma(3, x)
-x**2*exp(-x) - 2*x*exp(-x) + 2 - 2*exp(-x)
>>> lowergamma(-S(1)/2, x)
-2*sqrt(pi)*erf(sqrt(x)) - 2*exp(-x)/sqrt(x)
See Also
========
gamma: Gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Incomplete_gamma_function#Lower_incomplete_Gamma_function
.. [2] Abramowitz, Milton; Stegun, Irene A., eds. (1965), Chapter 6, Section 5,
Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables
.. [3] http://dlmf.nist.gov/8
.. [4] http://functions.wolfram.com/GammaBetaErf/Gamma2/
.. [5] http://functions.wolfram.com/GammaBetaErf/Gamma3/
"""
def fdiff(self, argindex=2):
from sympy import meijerg, unpolarify
if argindex == 2:
a, z = self.args
return exp(-unpolarify(z))*z**(a - 1)
elif argindex == 1:
a, z = self.args
return gamma(a)*digamma(a) - log(z)*uppergamma(a, z) \
- meijerg([], [1, 1], [0, 0, a], [], z)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, a, x):
# For lack of a better place, we use this one to extract branching
        # information. The following can be
        # found in the literature (cf. references given above), albeit scattered:
# 1) For fixed x != 0, lowergamma(s, x) is an entire function of s
# 2) For fixed positive integers s, lowergamma(s, x) is an entire
# function of x.
# 3) For fixed non-positive integers s,
# lowergamma(s, exp(I*2*pi*n)*x) =
# 2*pi*I*n*(-1)**(-s)/factorial(-s) + lowergamma(s, x)
# (this follows from lowergamma(s, x).diff(x) = x**(s-1)*exp(-x)).
# 4) For fixed non-integral s,
# lowergamma(s, x) = x**s*gamma(s)*lowergamma_unbranched(s, x),
# where lowergamma_unbranched(s, x) is an entire function (in fact
# of both s and x), i.e.
# lowergamma(s, exp(2*I*pi*n)*x) = exp(2*pi*I*n*a)*lowergamma(a, x)
from sympy import unpolarify, I
nx, n = x.extract_branch_factor()
if a.is_integer and a.is_positive:
nx = unpolarify(x)
if nx != x:
return lowergamma(a, nx)
elif a.is_integer and a.is_nonpositive:
if n != 0:
return 2*pi*I*n*(-1)**(-a)/factorial(-a) + lowergamma(a, nx)
elif n != 0:
return exp(2*pi*I*n*a)*lowergamma(a, nx)
# Special values.
if a.is_Number:
# TODO this should be non-recursive
if a is S.One:
return S.One - exp(-x)
elif a is S.Half:
return sqrt(pi)*erf(sqrt(x))
elif a.is_Integer or (2*a).is_Integer:
b = a - 1
if b.is_positive:
return b*cls(b, x) - x**b * exp(-x)
if not a.is_Integer:
return (cls(a + 1, x) + x**a * exp(-x))/a
def _eval_evalf(self, prec):
from mpmath import mp, workprec
from sympy import Expr
a = self.args[0]._to_mpmath(prec)
z = self.args[1]._to_mpmath(prec)
with workprec(prec):
res = mp.gammainc(a, 0, z)
return Expr._from_mpmath(res, prec)
def _eval_conjugate(self):
z = self.args[1]
if not z in (S.Zero, S.NegativeInfinity):
return self.func(self.args[0].conjugate(), z.conjugate())
def _eval_rewrite_as_uppergamma(self, s, x):
return gamma(s) - uppergamma(s, x)
def _eval_rewrite_as_expint(self, s, x):
from sympy import expint
if s.is_integer and s.is_nonpositive:
return self
return self.rewrite(uppergamma).rewrite(expint)
@staticmethod
def _latex_no_arg(printer):
return r'\gamma'
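# The lower and upper incomplete gamma functions are complementary.  A minimal
# check of gamma(s) == lowergamma(s, x) + uppergamma(s, x) for a concrete
# order (sketch only; uses the evaluations shown in the docstring above):
#
#     >>> from sympy import gamma, lowergamma, uppergamma, simplify
#     >>> from sympy.abc import x
#     >>> simplify(lowergamma(3, x) + uppergamma(3, x) - gamma(3))
#     0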
class uppergamma(Function):
r"""
The upper incomplete gamma function.
It can be defined as the meromorphic continuation of
.. math::
\Gamma(s, x) := \int_x^\infty t^{s-1} e^{-t} \mathrm{d}t = \Gamma(s) - \gamma(s, x).
where `\gamma(s, x)` is the lower incomplete gamma function,
:class:`lowergamma`. This can be shown to be the same as
.. math::
\Gamma(s, x) = \Gamma(s) - \frac{x^s}{s} {}_1F_1\left({s \atop s+1} \middle| -x\right),
where :math:`{}_1F_1` is the (confluent) hypergeometric function.
The upper incomplete gamma function is also essentially equivalent to the
generalized exponential integral:
.. math::
\operatorname{E}_{n}(x) = \int_{1}^{\infty}{\frac{e^{-xt}}{t^n} \, dt} = x^{n-1}\Gamma(1-n,x).
Examples
========
>>> from sympy import uppergamma, S
>>> from sympy.abc import s, x
>>> uppergamma(s, x)
uppergamma(s, x)
>>> uppergamma(3, x)
x**2*exp(-x) + 2*x*exp(-x) + 2*exp(-x)
>>> uppergamma(-S(1)/2, x)
-2*sqrt(pi)*erfc(sqrt(x)) + 2*exp(-x)/sqrt(x)
>>> uppergamma(-2, x)
expint(3, x)/x**2
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Incomplete_gamma_function#Upper_incomplete_Gamma_function
.. [2] Abramowitz, Milton; Stegun, Irene A., eds. (1965), Chapter 6, Section 5,
Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables
.. [3] http://dlmf.nist.gov/8
.. [4] http://functions.wolfram.com/GammaBetaErf/Gamma2/
.. [5] http://functions.wolfram.com/GammaBetaErf/Gamma3/
.. [6] http://en.wikipedia.org/wiki/Exponential_integral#Relation_with_other_functions
"""
def fdiff(self, argindex=2):
from sympy import meijerg, unpolarify
if argindex == 2:
a, z = self.args
return -exp(-unpolarify(z))*z**(a - 1)
elif argindex == 1:
a, z = self.args
return uppergamma(a, z)*log(z) + meijerg([], [1, 1], [0, 0, a], [], z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_evalf(self, prec):
from mpmath import mp, workprec
from sympy import Expr
a = self.args[0]._to_mpmath(prec)
z = self.args[1]._to_mpmath(prec)
with workprec(prec):
res = mp.gammainc(a, z, mp.inf)
return Expr._from_mpmath(res, prec)
@classmethod
def eval(cls, a, z):
from sympy import unpolarify, I, expint
if z.is_Number:
if z is S.NaN:
return S.NaN
elif z is S.Infinity:
return S.Zero
elif z is S.Zero:
# TODO: Holds only for Re(a) > 0:
return gamma(a)
# We extract branching information here. C/f lowergamma.
nx, n = z.extract_branch_factor()
if a.is_integer and (a > 0) == True:
nx = unpolarify(z)
if z != nx:
return uppergamma(a, nx)
elif a.is_integer and (a <= 0) == True:
if n != 0:
return -2*pi*I*n*(-1)**(-a)/factorial(-a) + uppergamma(a, nx)
elif n != 0:
return gamma(a)*(1 - exp(2*pi*I*n*a)) + exp(2*pi*I*n*a)*uppergamma(a, nx)
# Special values.
if a.is_Number:
# TODO this should be non-recursive
if a is S.One:
return exp(-z)
elif a is S.Half:
return sqrt(pi)*erfc(sqrt(z))
elif a.is_Integer or (2*a).is_Integer:
b = a - 1
if b.is_positive:
return b*cls(b, z) + z**b * exp(-z)
elif b.is_Integer:
return expint(-b, z)*unpolarify(z)**(b + 1)
if not a.is_Integer:
return (cls(a + 1, z) - z**a * exp(-z))/a
def _eval_conjugate(self):
z = self.args[1]
if not z in (S.Zero, S.NegativeInfinity):
return self.func(self.args[0].conjugate(), z.conjugate())
def _eval_rewrite_as_lowergamma(self, s, x):
return gamma(s) - lowergamma(s, x)
def _eval_rewrite_as_expint(self, s, x):
from sympy import expint
return expint(1 - s, x)*x**s
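# Usage note (sketch): the rewrites implemented above give
# uppergamma(s, x).rewrite(lowergamma) == gamma(s) - lowergamma(s, x) and
# uppergamma(s, x).rewrite(expint) == expint(1 - s, x)*x**s, matching the
# relation with the generalized exponential integral quoted in the docstring.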
###############################################################################
###################### POLYGAMMA and LOGGAMMA FUNCTIONS #######################
###############################################################################
class polygamma(Function):
r"""
The function ``polygamma(n, z)`` returns ``log(gamma(z)).diff(n + 1)``.
It is a meromorphic function on `\mathbb{C}` and defined as the (n+1)-th
derivative of the logarithm of the gamma function:
.. math::
\psi^{(n)} (z) := \frac{\mathrm{d}^{n+1}}{\mathrm{d} z^{n+1}} \log\Gamma(z).
Examples
========
Several special values are known:
>>> from sympy import S, polygamma
>>> polygamma(0, 1)
-EulerGamma
>>> polygamma(0, 1/S(2))
-2*log(2) - EulerGamma
>>> polygamma(0, 1/S(3))
-3*log(3)/2 - sqrt(3)*pi/6 - EulerGamma
>>> polygamma(0, 1/S(4))
-3*log(2) - pi/2 - EulerGamma
>>> polygamma(0, 2)
-EulerGamma + 1
>>> polygamma(0, 23)
-EulerGamma + 19093197/5173168
>>> from sympy import oo, I
>>> polygamma(0, oo)
oo
>>> polygamma(0, -oo)
oo
>>> polygamma(0, I*oo)
oo
>>> polygamma(0, -I*oo)
oo
Differentiation with respect to x is supported:
>>> from sympy import Symbol, diff
>>> x = Symbol("x")
>>> diff(polygamma(0, x), x)
polygamma(1, x)
>>> diff(polygamma(0, x), x, 2)
polygamma(2, x)
>>> diff(polygamma(0, x), x, 3)
polygamma(3, x)
>>> diff(polygamma(1, x), x)
polygamma(2, x)
>>> diff(polygamma(1, x), x, 2)
polygamma(3, x)
>>> diff(polygamma(2, x), x)
polygamma(3, x)
>>> diff(polygamma(2, x), x, 2)
polygamma(4, x)
>>> n = Symbol("n")
>>> diff(polygamma(n, x), x)
polygamma(n + 1, x)
>>> diff(polygamma(n, x), x, 2)
polygamma(n + 2, x)
We can rewrite polygamma functions in terms of harmonic numbers:
>>> from sympy import harmonic
>>> polygamma(0, x).rewrite(harmonic)
harmonic(x - 1) - EulerGamma
>>> polygamma(2, x).rewrite(harmonic)
2*harmonic(x - 1, 3) - 2*zeta(3)
>>> ni = Symbol("n", integer=True)
>>> polygamma(ni, x).rewrite(harmonic)
(-1)**(n + 1)*(-harmonic(x - 1, n + 1) + zeta(n + 1))*factorial(n)
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Polygamma_function
.. [2] http://mathworld.wolfram.com/PolygammaFunction.html
.. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma/
.. [4] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
"""
def fdiff(self, argindex=2):
if argindex == 2:
n, z = self.args[:2]
return polygamma(n + 1, z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_is_positive(self):
if self.args[1].is_positive and (self.args[0] > 0) == True:
return self.args[0].is_odd
def _eval_is_negative(self):
if self.args[1].is_positive and (self.args[0] > 0) == True:
return self.args[0].is_even
def _eval_is_real(self):
return self.args[0].is_real
def _eval_aseries(self, n, args0, x, logx):
from sympy import Order
if args0[1] != oo or not \
(self.args[0].is_Integer and self.args[0].is_nonnegative):
return super(polygamma, self)._eval_aseries(n, args0, x, logx)
z = self.args[1]
N = self.args[0]
if N == 0:
# digamma function series
# Abramowitz & Stegun, p. 259, 6.3.18
r = log(z) - 1/(2*z)
o = None
if n < 2:
o = Order(1/z, x)
else:
m = ceiling((n + 1)//2)
l = [bernoulli(2*k) / (2*k*z**(2*k)) for k in range(1, m)]
r -= Add(*l)
o = Order(1/z**(2*m), x)
return r._eval_nseries(x, n, logx) + o
else:
# proper polygamma function
# Abramowitz & Stegun, p. 260, 6.4.10
# We return terms to order higher than O(x**n) on purpose
# -- otherwise we would not be able to return any terms for
# quite a long time!
fac = gamma(N)
e0 = fac + N*fac/(2*z)
m = ceiling((n + 1)//2)
for k in range(1, m):
fac = fac*(2*k + N - 1)*(2*k + N - 2) / ((2*k)*(2*k - 1))
e0 += bernoulli(2*k)*fac/z**(2*k)
o = Order(1/z**(2*m), x)
if n == 0:
o = Order(1/z, x)
elif n == 1:
o = Order(1/z**2, x)
r = e0._eval_nseries(z, n, logx) + o
return (-1 * (-1/z)**N * r)._eval_nseries(x, n, logx)
@classmethod
def eval(cls, n, z):
n, z = list(map(sympify, (n, z)))
from sympy import unpolarify
if n.is_integer:
if n.is_nonnegative:
nz = unpolarify(z)
if z != nz:
return polygamma(n, nz)
if n == -1:
return loggamma(z)
else:
if z.is_Number:
if z is S.NaN:
return S.NaN
elif z is S.Infinity:
if n.is_Number:
if n is S.Zero:
return S.Infinity
else:
return S.Zero
elif z.is_Integer:
if z.is_nonpositive:
return S.ComplexInfinity
else:
if n is S.Zero:
return -S.EulerGamma + harmonic(z - 1, 1)
elif n.is_odd:
return (-1)**(n + 1)*factorial(n)*zeta(n + 1, z)
if n == 0:
if z is S.NaN:
return S.NaN
elif z.is_Rational:
# TODO actually *any* n/m can be done, but that is messy
lookup = {S(1)/2: -2*log(2) - S.EulerGamma,
S(1)/3: -S.Pi/2/sqrt(3) - 3*log(3)/2 - S.EulerGamma,
S(1)/4: -S.Pi/2 - 3*log(2) - S.EulerGamma,
S(3)/4: -3*log(2) - S.EulerGamma + S.Pi/2,
S(2)/3: -3*log(3)/2 + S.Pi/2/sqrt(3) - S.EulerGamma}
if z > 0:
n = floor(z)
z0 = z - n
if z0 in lookup:
return lookup[z0] + Add(*[1/(z0 + k) for k in range(n)])
elif z < 0:
n = floor(1 - z)
z0 = z + n
if z0 in lookup:
return lookup[z0] - Add(*[1/(z0 - 1 - k) for k in range(n)])
elif z in (S.Infinity, S.NegativeInfinity):
return S.Infinity
else:
t = z.extract_multiplicatively(S.ImaginaryUnit)
if t in (S.Infinity, S.NegativeInfinity):
return S.Infinity
# TODO n == 1 also can do some rational z
def _eval_expand_func(self, **hints):
n, z = self.args
if n.is_Integer and n.is_nonnegative:
if z.is_Add:
coeff = z.args[0]
if coeff.is_Integer:
e = -(n + 1)
if coeff > 0:
tail = Add(*[Pow(
z - i, e) for i in range(1, int(coeff) + 1)])
else:
tail = -Add(*[Pow(
z + i, e) for i in range(0, int(-coeff))])
return polygamma(n, z - coeff) + (-1)**n*factorial(n)*tail
elif z.is_Mul:
coeff, z = z.as_two_terms()
if coeff.is_Integer and coeff.is_positive:
tail = [ polygamma(n, z + Rational(
i, coeff)) for i in range(0, int(coeff)) ]
if n == 0:
return Add(*tail)/coeff + log(coeff)
else:
return Add(*tail)/coeff**(n + 1)
z *= coeff
return polygamma(n, z)
def _eval_rewrite_as_zeta(self, n, z):
if n >= S.One:
return (-1)**(n + 1)*factorial(n)*zeta(n + 1, z)
else:
return self
def _eval_rewrite_as_harmonic(self, n, z):
if n.is_integer:
if n == S.Zero:
return harmonic(z - 1) - S.EulerGamma
else:
return S.NegativeOne**(n+1) * factorial(n) * (zeta(n+1) - harmonic(z-1, n+1))
def _eval_as_leading_term(self, x):
from sympy import Order
n, z = [a.as_leading_term(x) for a in self.args]
o = Order(z, x)
if n == 0 and o.contains(1/x):
return o.getn() * log(x)
else:
return self.func(n, z)
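# A short check of the zeta rewrite implemented above (sketch; printed form
# assumes the default str printer):
#
#     >>> from sympy import polygamma, zeta, Symbol
#     >>> x = Symbol("x")
#     >>> polygamma(2, x).rewrite(zeta)
#     -2*zeta(3, x)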
class loggamma(Function):
r"""
The ``loggamma`` function implements the logarithm of the
    gamma function, i.e., `\log\Gamma(x)`.
Examples
========
Several special values are known. For numerical integral
arguments we have:
>>> from sympy import loggamma
>>> loggamma(-2)
oo
>>> loggamma(0)
oo
>>> loggamma(1)
0
>>> loggamma(2)
0
>>> loggamma(3)
log(2)
and for symbolic values:
>>> from sympy import Symbol
>>> n = Symbol("n", integer=True, positive=True)
>>> loggamma(n)
log(gamma(n))
>>> loggamma(-n)
oo
for half-integral values:
>>> from sympy import S, pi
>>> loggamma(S(5)/2)
log(3*sqrt(pi)/4)
>>> loggamma(n/2)
log(2**(-n + 1)*sqrt(pi)*gamma(n)/gamma(n/2 + 1/2))
and general rational arguments:
>>> from sympy import expand_func
>>> L = loggamma(S(16)/3)
>>> expand_func(L).doit()
-5*log(3) + loggamma(1/3) + log(4) + log(7) + log(10) + log(13)
>>> L = loggamma(S(19)/4)
>>> expand_func(L).doit()
-4*log(4) + loggamma(3/4) + log(3) + log(7) + log(11) + log(15)
>>> L = loggamma(S(23)/7)
>>> expand_func(L).doit()
-3*log(7) + log(2) + loggamma(2/7) + log(9) + log(16)
The loggamma function has the following limits towards infinity:
>>> from sympy import oo
>>> loggamma(oo)
oo
>>> loggamma(-oo)
zoo
The loggamma function obeys the mirror symmetry
if `x \in \mathbb{C} \setminus \{-\infty, 0\}`:
>>> from sympy.abc import x
>>> from sympy import conjugate
>>> conjugate(loggamma(x))
loggamma(conjugate(x))
Differentiation with respect to x is supported:
>>> from sympy import diff
>>> diff(loggamma(x), x)
polygamma(0, x)
Series expansion is also supported:
>>> from sympy import series
>>> series(loggamma(x), x, 0, 4)
-log(x) - EulerGamma*x + pi**2*x**2/12 + x**3*polygamma(2, 1)/6 + O(x**4)
    We can numerically evaluate the loggamma function to arbitrary precision
on the whole complex plane:
>>> from sympy import I
>>> loggamma(5).evalf(30)
3.17805383034794561964694160130
>>> loggamma(I).evalf(20)
-0.65092319930185633889 - 1.8724366472624298171*I
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Gamma_function
.. [2] http://dlmf.nist.gov/5
.. [3] http://mathworld.wolfram.com/LogGammaFunction.html
.. [4] http://functions.wolfram.com/GammaBetaErf/LogGamma/
"""
@classmethod
def eval(cls, z):
z = sympify(z)
if z.is_integer:
if z.is_nonpositive:
return S.Infinity
elif z.is_positive:
return log(gamma(z))
elif z.is_rational:
p, q = z.as_numer_denom()
# Half-integral values:
if p.is_positive and q == 2:
return log(sqrt(S.Pi) * 2**(1 - p) * gamma(p) / gamma((p + 1)*S.Half))
if z is S.Infinity:
return S.Infinity
elif abs(z) is S.Infinity:
return S.ComplexInfinity
if z is S.NaN:
return S.NaN
def _eval_expand_func(self, **hints):
from sympy import Sum
z = self.args[0]
if z.is_Rational:
p, q = z.as_numer_denom()
# General rational arguments (u + p/q)
# Split z as n + p/q with p < q
n = p // q
p = p - n*q
if p.is_positive and q.is_positive and p < q:
k = Dummy("k")
if n.is_positive:
return loggamma(p / q) - n*log(q) + Sum(log((k - 1)*q + p), (k, 1, n))
elif n.is_negative:
return loggamma(p / q) - n*log(q) + S.Pi*S.ImaginaryUnit*n - Sum(log(k*q - p), (k, 1, -n))
elif n.is_zero:
return loggamma(p / q)
return self
def _eval_nseries(self, x, n, logx=None):
x0 = self.args[0].limit(x, 0)
if x0 is S.Zero:
f = self._eval_rewrite_as_intractable(*self.args)
return f._eval_nseries(x, n, logx)
return super(loggamma, self)._eval_nseries(x, n, logx)
def _eval_aseries(self, n, args0, x, logx):
from sympy import Order
if args0[0] != oo:
return super(loggamma, self)._eval_aseries(n, args0, x, logx)
z = self.args[0]
m = min(n, ceiling((n + S(1))/2))
r = log(z)*(z - S(1)/2) - z + log(2*pi)/2
l = [bernoulli(2*k) / (2*k*(2*k - 1)*z**(2*k - 1)) for k in range(1, m)]
o = None
if m == 0:
o = Order(1, x)
else:
o = Order(1/z**(2*m - 1), x)
# It is very inefficient to first add the order and then do the nseries
return (r + Add(*l))._eval_nseries(x, n, logx) + o
def _eval_rewrite_as_intractable(self, z):
return log(gamma(z))
def _eval_is_real(self):
return self.args[0].is_real
def _eval_conjugate(self):
z = self.args[0]
if not z in (S.Zero, S.NegativeInfinity):
return self.func(z.conjugate())
def fdiff(self, argindex=1):
if argindex == 1:
return polygamma(0, self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def _sage_(self):
import sage.all as sage
return sage.log_gamma(self.args[0]._sage_())
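# The asymptotic expansion used by loggamma._eval_aseries above is Stirling's
# series; schematically:
#
#     log(gamma(z)) ~ (z - 1/2)*log(z) - z + log(2*pi)/2
#                     + Sum(bernoulli(2*k)/(2*k*(2*k - 1)*z**(2*k - 1)), (k, 1, oo))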
def digamma(x):
r"""
    The digamma function is the first derivative of the loggamma function, i.e.,
    .. math::
        \psi(z) := \frac{\mathrm{d}}{\mathrm{d} z} \log\Gamma(z)
                = \frac{\Gamma'(z)}{\Gamma(z)}
In this case, ``digamma(z) = polygamma(0, z)``.
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Digamma_function
.. [2] http://mathworld.wolfram.com/DigammaFunction.html
.. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
"""
return polygamma(0, x)
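# Quick sanity checks for digamma (values follow from the polygamma
# evaluations implemented above; sketch only):
#
#     >>> from sympy import digamma, Symbol
#     >>> digamma(1)
#     -EulerGamma
#     >>> x = Symbol("x")
#     >>> digamma(x).diff(x)
#     polygamma(1, x)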
def trigamma(x):
r"""
    The trigamma function is the second derivative of the loggamma function, i.e.,
.. math::
\psi^{(1)}(z) := \frac{\mathrm{d}^{2}}{\mathrm{d} z^{2}} \log\Gamma(z).
In this case, ``trigamma(z) = polygamma(1, z)``.
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Trigamma_function
.. [2] http://mathworld.wolfram.com/TrigammaFunction.html
.. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
"""
return polygamma(1, x)
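# And likewise for trigamma (sketch only):
#
#     >>> from sympy import trigamma, Symbol
#     >>> x = Symbol("x")
#     >>> trigamma(x)
#     polygamma(1, x)
#     >>> trigamma(x).diff(x)
#     polygamma(2, x)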
| bsd-3-clause |
synctree/synctree-awsebcli | ebcli/operations/deployops.py | 1 | 1403 | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ..lib import elasticbeanstalk, aws
from ..core import io
from . import commonops
def deploy(app_name, env_name, version, label, message, staged=False,
timeout=5):
region_name = aws.get_region_name()
io.log_info('Deploying code to ' + env_name + " in region " + (region_name or 'default'))
if version:
app_version_label = version
else:
# Create app version
app_version_label = commonops.create_app_version(
app_name, label=label, message=message, staged=staged)
# swap env to new app version
request_id = elasticbeanstalk.update_env_application_version(
env_name, app_version_label)
commonops.wait_for_success_events(request_id,
timeout_in_minutes=timeout,
                                      can_abort=True)
| apache-2.0 |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/MySQLdb/times.py | 76 | 3488 | """times module
This module provides some Date and Time classes for dealing with MySQL data.
Use Python datetime module to handle date and time columns."""
import math
from time import localtime
from datetime import date, datetime, time, timedelta
from _mysql import string_literal
Date = date
Time = time
TimeDelta = timedelta
Timestamp = datetime
DateTimeDeltaType = timedelta
DateTimeType = datetime
def DateFromTicks(ticks):
"""Convert UNIX ticks into a date instance."""
return date(*localtime(ticks)[:3])
def TimeFromTicks(ticks):
"""Convert UNIX ticks into a time instance."""
return time(*localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
"""Convert UNIX ticks into a datetime instance."""
return datetime(*localtime(ticks)[:6])
format_TIME = format_DATE = str
def format_TIMEDELTA(v):
seconds = int(v.seconds) % 60
minutes = int(v.seconds / 60) % 60
hours = int(v.seconds / 3600) % 24
return '%d %d:%d:%d' % (v.days, hours, minutes, seconds)
def format_TIMESTAMP(d):
return d.isoformat(" ")
def DateTime_or_None(s):
if ' ' in s:
sep = ' '
elif 'T' in s:
sep = 'T'
else:
return Date_or_None(s)
try:
d, t = s.split(sep, 1)
if '.' in t:
t, ms = t.split('.',1)
ms = ms.ljust(6, '0')
else:
ms = 0
return datetime(*[ int(x) for x in d.split('-')+t.split(':')+[ms] ])
except (SystemExit, KeyboardInterrupt):
raise
except:
return Date_or_None(s)
def TimeDelta_or_None(s):
try:
h, m, s = s.split(':')
if '.' in s:
s, ms = s.split('.')
ms = ms.ljust(6, '0')
else:
ms = 0
h, m, s, ms = int(h), int(m), int(s), int(ms)
td = timedelta(hours=abs(h), minutes=m, seconds=s,
microseconds=ms)
if h < 0:
return -td
else:
return td
except ValueError:
# unpacking or int/float conversion failed
return None
def Time_or_None(s):
try:
h, m, s = s.split(':')
if '.' in s:
s, ms = s.split('.')
ms = ms.ljust(6, '0')
else:
ms = 0
h, m, s, ms = int(h), int(m), int(s), int(ms)
return time(hour=h, minute=m, second=s,
microsecond=ms)
except ValueError:
return None
def Date_or_None(s):
try:
return date(*[ int(x) for x in s.split('-',2)])
except (SystemExit, KeyboardInterrupt):
raise
except:
return None
def DateTime2literal(d, c):
"""Format a DateTime object as an ISO timestamp."""
return string_literal(format_TIMESTAMP(d),c)
def DateTimeDelta2literal(d, c):
"""Format a DateTimeDelta object as a time."""
return string_literal(format_TIMEDELTA(d),c)
def mysql_timestamp_converter(s):
"""Convert a MySQL TIMESTAMP to a Timestamp object."""
# MySQL>4.1 returns TIMESTAMP in the same format as DATETIME
if s[4] == '-': return DateTime_or_None(s)
s = s + "0"*(14-len(s)) # padding
parts = map(int, filter(None, (s[:4],s[4:6],s[6:8],
s[8:10],s[10:12],s[12:14])))
try:
return Timestamp(*parts)
except (SystemExit, KeyboardInterrupt):
raise
except:
return None
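# Illustrative round trips through the converters above (a sketch; the exact
# reprs assume CPython's datetime):
#
#     >>> mysql_timestamp_converter("20070203040506")
#     datetime.datetime(2007, 2, 3, 4, 5, 6)
#     >>> format_TIMEDELTA(TimeDelta_or_None("25:30:45"))
#     '1 1:30:45'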
| mit |
Esri/ArcREST | samples/update_user_password.py | 5 | 1203 | """
Update a user's password
version 3.5.x
Python 2/3
"""
from __future__ import print_function
from arcresthelper import securityhandlerhelper
import arcrest
if __name__ == "__main__":
username = ''# Username
proxy_port = None
proxy_url = None
securityinfo = {}
securityinfo['security_type'] = 'Portal'#LDAP, NTLM, OAuth, Portal, PKI, ArcGIS
securityinfo['username'] = "" #User Name
securityinfo['password'] = "" #password
securityinfo['org_url'] = "https://www.arcgis.com"
securityinfo['proxy_url'] = proxy_url
securityinfo['proxy_port'] = proxy_port
securityinfo['referer_url'] = None
securityinfo['token_url'] = None
securityinfo['certificatefile'] = None
securityinfo['keyfile'] = None
securityinfo['client_id'] = None
securityinfo['secret_id'] = None
shh = securityhandlerhelper.securityhandlerhelper(securityinfo=securityinfo)
if shh.valid == False:
print (shh.message)
else:
admin = arcrest.manageorg.Administration(securityHandler=shh.securityhandler, initialize=True)
user = admin.community.users.user(str(username).strip())
print (user.update(password="1234testtest"))
| apache-2.0 |
ghchinoy/tensorflow | tensorflow/contrib/timeseries/examples/known_anomaly.py | 24 | 7880 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of using an exogenous feature to ignore a known anomaly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from os import path
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
  # example, namely train_and_evaluate_exogenous.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/changepoints.csv")
def state_space_estimator(exogenous_feature_columns):
"""Constructs a StructuralEnsembleRegressor."""
def _exogenous_update_condition(times, features):
del times # unused
# Make exogenous updates sparse by setting an update condition. This in
# effect allows missing exogenous features: if the condition evaluates to
# False, no update is performed. Otherwise we sometimes end up with "leaky"
# updates which add unnecessary uncertainty to the model even when there is
# no changepoint.
return tf.equal(tf.squeeze(features["is_changepoint"], axis=-1), "yes")
return (
tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=12,
# Extract a smooth period by constraining the number of latent values
# being cycled between.
cycle_num_latent_values=3,
num_features=1,
exogenous_feature_columns=exogenous_feature_columns,
exogenous_update_condition=_exogenous_update_condition),
# Use truncated backpropagation with a window size of 64, batching
# together 4 of these windows (random offsets) per training step. Training
# with exogenous features often requires somewhat larger windows.
4, 64)
def autoregressive_estimator(exogenous_feature_columns):
input_window_size = 8
output_window_size = 2
return (
tf.contrib.timeseries.ARRegressor(
periodicities=12,
num_features=1,
input_window_size=input_window_size,
output_window_size=output_window_size,
exogenous_feature_columns=exogenous_feature_columns),
64, input_window_size + output_window_size)
def train_and_evaluate_exogenous(
estimator_fn, csv_file_name=_DATA_FILE, train_steps=300):
"""Training, evaluating, and predicting on a series with changepoints."""
# Indicate the format of our exogenous feature, in this case a string
# representing a boolean value.
string_feature = tf.feature_column.categorical_column_with_vocabulary_list(
key="is_changepoint", vocabulary_list=["no", "yes"])
# Specify the way this feature is presented to the model, here using a one-hot
# encoding.
one_hot_feature = tf.feature_column.indicator_column(
categorical_column=string_feature)
estimator, batch_size, window_size = estimator_fn(
exogenous_feature_columns=[one_hot_feature])
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
# Indicate the format of our CSV file. First we have two standard columns,
# one for times and one for values. The third column is a custom exogenous
# feature indicating whether each timestep is a changepoint. The
# changepoint feature name must match the string_feature column name
# above.
column_names=(tf.contrib.timeseries.TrainEvalFeatures.TIMES,
tf.contrib.timeseries.TrainEvalFeatures.VALUES,
"is_changepoint"),
# Indicate dtypes for our features.
column_dtypes=(tf.int64, tf.float32, tf.string),
# This CSV has a header line; here we just ignore it.
skip_header_lines=1)
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=batch_size, window_size=window_size)
estimator.train(input_fn=train_input_fn, steps=train_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Create an input_fn for prediction, with a simulated changepoint. Since all
# of the anomalies in the training data are explained by the exogenous
# feature, we should get relatively confident predictions before the indicated
# changepoint (since we are telling the model that no changepoint exists at
# those times) and relatively uncertain predictions after.
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features={
"is_changepoint": [["no"] * 49 + ["yes"] + ["no"] * 50]})))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
# Indicate the locations of the changepoints for plotting vertical lines.
anomaly_locations = []
with open(csv_file_name, "r") as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
if row["is_changepoint"] == "yes":
anomaly_locations.append(int(row["time"]))
anomaly_locations.append(predictions["times"][49])
return (times, observed, all_times, mean, upper_limit, lower_limit,
anomaly_locations)
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit, anomaly_locations):
"""Plot the time series and anomalies in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.axvline(anomaly_locations[0], linestyle="dotted", label="changepoints")
for anomaly_location in anomaly_locations[1:]:
pyplot.axvline(anomaly_location, linestyle="dotted")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
                      alpha=0.2)
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Ignoring a known anomaly (state space)",
*train_and_evaluate_exogenous(
estimator_fn=state_space_estimator))
make_plot("Ignoring a known anomaly (autoregressive)",
*train_and_evaluate_exogenous(
estimator_fn=autoregressive_estimator, train_steps=3000))
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
infoxchange/lettuce | tests/integration/lib/Django-1.3/django/utils/log.py | 152 | 3494 | import logging
import sys
from django.core import mail
# Make sure a NullHandler is available
# This was added in Python 2.7/3.2
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Make sure that dictConfig is available
# This was added in Python 2.7/3.2
try:
from logging.config import dictConfig
except ImportError:
from django.utils.dictconfig import dictConfig
if sys.version_info < (2, 5):
class LoggerCompat(object):
def __init__(self, logger):
self._logger = logger
def __getattr__(self, name):
val = getattr(self._logger, name)
if callable(val):
def _wrapper(*args, **kwargs):
# Python 2.4 logging module doesn't support 'extra' parameter to
# methods of Logger
kwargs.pop('extra', None)
return val(*args, **kwargs)
return _wrapper
else:
return val
def getLogger(name=None):
return LoggerCompat(logging.getLogger(name=name))
else:
getLogger = logging.getLogger
# Ensure the creation of the Django logger
# with a null handler. This ensures we don't get any
# 'No handlers could be found for logger "django"' messages
logger = getLogger('django')
if not logger.handlers:
logger.addHandler(NullHandler())
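# A minimal LOGGING dict wiring the handler below to request errors (a sketch
# only; adapt the levels and logger names to your project settings):
#
#     LOGGING = {
#         'version': 1,
#         'handlers': {
#             'mail_admins': {
#                 'level': 'ERROR',
#                 'class': 'django.utils.log.AdminEmailHandler',
#             },
#         },
#         'loggers': {
#             'django.request': {
#                 'handlers': ['mail_admins'],
#                 'level': 'ERROR',
#                 'propagate': True,
#             },
#         },
#     }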
class AdminEmailHandler(logging.Handler):
    """An exception log handler that e-mails log entries to site admins.
    If the request is passed as the first argument to the log record,
    request data will be provided in the email report.
    """
    def __init__(self, include_html=False):
        logging.Handler.__init__(self)
        self.include_html = include_html
    def emit(self, record):
def emit(self, record):
import traceback
from django.conf import settings
from django.views.debug import ExceptionReporter
try:
if sys.version_info < (2,5):
# A nasty workaround required because Python 2.4's logging
# module doesn't support passing in extra context.
# For this handler, the only extra data we need is the
# request, and that's in the top stack frame.
request = record.exc_info[2].tb_frame.f_locals['request']
else:
request = record.request
subject = '%s (%s IP): %s' % (
record.levelname,
(request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'),
record.msg
)
request_repr = repr(request)
except:
subject = '%s: %s' % (
record.levelname,
record.msg
)
request = None
request_repr = "Request repr() unavailable"
if record.exc_info:
exc_info = record.exc_info
stack_trace = '\n'.join(traceback.format_exception(*record.exc_info))
else:
exc_info = (None, record.msg, None)
stack_trace = 'No stack trace available'
message = "%s\n\n%s" % (stack_trace, request_repr)
reporter = ExceptionReporter(request, is_email=True, *exc_info)
html_message = self.include_html and reporter.get_traceback_html() or None
mail.mail_admins(subject, message, fail_silently=True,
html_message=html_message)
| gpl-3.0 |
haniehrajabi/ryu | ryu/app/simple_isolation.py | 22 | 14069 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011, 2012 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
MAC address based isolation logic.
"""
import logging
import struct
from ryu.app.rest_nw_id import NW_ID_UNKNOWN, NW_ID_EXTERNAL
from ryu.base import app_manager
from ryu.exception import MacAddressDuplicated
from ryu.exception import PortUnknown
from ryu.controller import dpset
from ryu.controller import mac_to_network
from ryu.controller import mac_to_port
from ryu.controller import network
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import nx_match
from ryu.lib.mac import haddr_to_str
from ryu.lib import mac
class SimpleIsolation(app_manager.RyuApp):
_CONTEXTS = {
'network': network.Network,
'dpset': dpset.DPSet,
}
def __init__(self, *args, **kwargs):
super(SimpleIsolation, self).__init__(*args, **kwargs)
self.nw = kwargs['network']
self.dpset = kwargs['dpset']
self.mac2port = mac_to_port.MacToPortTable()
self.mac2net = mac_to_network.MacToNetwork(self.nw)
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
datapath.send_delete_all_flows()
datapath.send_barrier()
self.mac2port.dpid_add(ev.msg.datapath_id)
self.nw.add_datapath(ev.msg)
@staticmethod
def _modflow_and_send_packet(msg, src, dst, actions):
datapath = msg.datapath
ofproto = datapath.ofproto
#
# install flow and then send packet
#
rule = nx_match.ClsRule()
rule.set_in_port(msg.in_port)
rule.set_dl_dst(dst)
rule.set_dl_src(src)
datapath.send_flow_mod(
rule=rule, cookie=0, command=datapath.ofproto.OFPFC_ADD,
idle_timeout=0, hard_timeout=0,
priority=ofproto.OFP_DEFAULT_PRIORITY,
buffer_id=ofproto.OFP_NO_BUFFER, out_port=ofproto.OFPP_NONE,
flags=ofproto.OFPFF_SEND_FLOW_REM, actions=actions)
datapath.send_packet_out(msg.buffer_id, msg.in_port, actions)
def _forward_to_nw_id(self, msg, src, dst, nw_id, out_port):
assert out_port is not None
datapath = msg.datapath
if not self.nw.same_network(datapath.id, nw_id, out_port,
NW_ID_EXTERNAL):
self.logger.debug('packet is blocked src %s dst %s '
'from %d to %d on datapath %d',
haddr_to_str(src), haddr_to_str(dst),
msg.in_port, out_port, datapath.id)
return
self.logger.debug("learned dpid %s in_port %d out_port "
"%d src %s dst %s",
datapath.id, msg.in_port, out_port,
haddr_to_str(src), haddr_to_str(dst))
actions = [datapath.ofproto_parser.OFPActionOutput(out_port)]
self._modflow_and_send_packet(msg, src, dst, actions)
def _flood_to_nw_id(self, msg, src, dst, nw_id):
datapath = msg.datapath
actions = []
self.logger.debug("dpid %s in_port %d src %s dst %s ports %s",
datapath.id, msg.in_port,
haddr_to_str(src), haddr_to_str(dst),
self.nw.dpids.get(datapath.id, {}).items())
for port_no in self.nw.filter_ports(datapath.id, msg.in_port,
nw_id, NW_ID_EXTERNAL):
self.logger.debug("port_no %s", port_no)
actions.append(datapath.ofproto_parser.OFPActionOutput(port_no))
self._modflow_and_send_packet(msg, src, dst, actions)
def _learned_mac_or_flood_to_nw_id(self, msg, src, dst,
dst_nw_id, out_port):
if out_port is not None:
self._forward_to_nw_id(msg, src, dst, dst_nw_id, out_port)
else:
self._flood_to_nw_id(msg, src, dst, dst_nw_id)
def _modflow_and_drop_packet(self, msg, src, dst):
self._modflow_and_send_packet(msg, src, dst, [])
def _drop_packet(self, msg):
datapath = msg.datapath
datapath.send_packet_out(msg.buffer_id, msg.in_port, [])
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
# self.logger.debug('packet in ev %s msg %s', ev, ev.msg)
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
dst, src, _eth_type = struct.unpack_from('!6s6sH', buffer(msg.data), 0)
try:
port_nw_id = self.nw.get_network(datapath.id, msg.in_port)
except PortUnknown:
port_nw_id = NW_ID_UNKNOWN
if port_nw_id != NW_ID_UNKNOWN:
# Here it is assumed that the
# (port <-> network id)/(mac <-> network id) relationship
# is stable once the port is created. The port will be destroyed
# before assigning new network id to the given port.
# This is correct nova-network/nova-compute.
try:
# allow external -> known nw id change
self.mac2net.add_mac(src, port_nw_id, NW_ID_EXTERNAL)
except MacAddressDuplicated:
self.logger.warn('mac address %s is already in use.'
' So (dpid %s, port %s) can not use it',
haddr_to_str(src), datapath.id, msg.in_port)
#
# should we install drop action pro-actively for future?
#
self._drop_packet(msg)
return
old_port = self.mac2port.port_add(datapath.id, msg.in_port, src)
if old_port is not None and old_port != msg.in_port:
# We really overwrite already learned mac address.
# So discard already installed stale flow entry which conflicts
# new port.
rule = nx_match.ClsRule()
rule.set_dl_dst(src)
datapath.send_flow_mod(rule=rule,
cookie=0,
command=ofproto.OFPFC_DELETE,
idle_timeout=0,
hard_timeout=0,
priority=ofproto.OFP_DEFAULT_PRIORITY,
out_port=old_port)
# to make sure the old flow entries are purged.
datapath.send_barrier()
src_nw_id = self.mac2net.get_network(src, NW_ID_UNKNOWN)
dst_nw_id = self.mac2net.get_network(dst, NW_ID_UNKNOWN)
# we handle multicast packet as same as broadcast
broadcast = (dst == mac.BROADCAST) or mac.is_multicast(dst)
out_port = self.mac2port.port_get(datapath.id, dst)
#
# there are several combinations:
# in_port: known nw_id, external, unknown nw,
# src mac: known nw_id, external, unknown nw,
# dst mac: known nw_id, external, unknown nw, and broadcast/multicast
# where known nw_id: is quantum network id
# external: means that these ports are connected to outside
# unknown nw: means that we don't know this port is bounded to
# specific nw_id or external
# broadcast: the destination mac address is broadcast address
# (or multicast address)
#
# Can the following logic be refined/shortened?
#
# When NW_ID_UNKNOWN is found, registering ports might be delayed.
        # So just drop this packet and do not install a flow entry.
        # It is expected that when the next packet arrives, the port will be
        # registered with some network id.
if port_nw_id != NW_ID_EXTERNAL and port_nw_id != NW_ID_UNKNOWN:
if broadcast:
# flood to all ports of external or src_nw_id
self._flood_to_nw_id(msg, src, dst, src_nw_id)
elif src_nw_id == NW_ID_EXTERNAL:
self._modflow_and_drop_packet(msg, src, dst)
return
elif src_nw_id == NW_ID_UNKNOWN:
self._drop_packet(msg)
return
else:
# src_nw_id != NW_ID_EXTERNAL and src_nw_id != NW_ID_UNKNOWN:
#
# try learned mac check if the port is net_id
# or
# flood to all ports of external or src_nw_id
self._learned_mac_or_flood_to_nw_id(msg, src, dst,
src_nw_id, out_port)
elif port_nw_id == NW_ID_EXTERNAL:
if src_nw_id != NW_ID_EXTERNAL and src_nw_id != NW_ID_UNKNOWN:
if broadcast:
# flood to all ports of external or src_nw_id
self._flood_to_nw_id(msg, src, dst, src_nw_id)
elif (dst_nw_id != NW_ID_EXTERNAL and
dst_nw_id != NW_ID_UNKNOWN):
if src_nw_id == dst_nw_id:
# try learned mac
# check if the port is external or same net_id
# or
# flood to all ports of external or src_nw_id
self._learned_mac_or_flood_to_nw_id(msg, src, dst,
src_nw_id,
out_port)
else:
# should not occur?
self.logger.debug("should this case happen?")
self._drop_packet(msg)
elif dst_nw_id == NW_ID_EXTERNAL:
# try learned mac
# or
# flood to all ports of external or src_nw_id
self._learned_mac_or_flood_to_nw_id(msg, src, dst,
src_nw_id, out_port)
else:
assert dst_nw_id == NW_ID_UNKNOWN
self.logger.debug("Unknown dst_nw_id")
self._drop_packet(msg)
elif src_nw_id == NW_ID_EXTERNAL:
self._modflow_and_drop_packet(msg, src, dst)
else:
# should not occur?
assert src_nw_id == NW_ID_UNKNOWN
self._drop_packet(msg)
else:
# drop packets
assert port_nw_id == NW_ID_UNKNOWN
self._drop_packet(msg)
# self.logger.debug("Unknown port_nw_id")
def _port_add(self, ev):
#
# delete flows entries that matches with
# dl_dst == broadcast/multicast
# and dl_src = network id if network id of this port is known
# to send broadcast packet to this newly added port.
#
# Openflow v1.0 doesn't support masked match of dl_dst,
# so delete all flow entries. It's inefficient, though.
#
msg = ev.msg
datapath = msg.datapath
datapath.send_delete_all_flows()
datapath.send_barrier()
self.nw.port_added(datapath, msg.desc.port_no)
def _port_del(self, ev):
# free mac addresses associated to this VM port,
# and delete related flow entries for later reuse of mac address
dps_needs_barrier = set()
msg = ev.msg
datapath = msg.datapath
datapath_id = datapath.id
port_no = msg.desc.port_no
rule = nx_match.ClsRule()
rule.set_in_port(port_no)
datapath.send_flow_del(rule=rule, cookie=0)
rule = nx_match.ClsRule()
datapath.send_flow_del(rule=rule, cookie=0, out_port=port_no)
dps_needs_barrier.add(datapath)
try:
port_nw_id = self.nw.get_network(datapath_id, port_no)
except PortUnknown:
# race condition between rest api delete port
# and openflow port deletion ofp_event
pass
else:
if port_nw_id in (NW_ID_UNKNOWN, NW_ID_EXTERNAL):
datapath.send_barrier()
return
for mac_ in self.mac2port.mac_list(datapath_id, port_no):
for (_dpid, dp) in self.dpset.get_all():
if self.mac2port.port_get(dp.id, mac_) is None:
continue
rule = nx_match.ClsRule()
rule.set_dl_src(mac_)
dp.send_flow_del(rule=rule, cookie=0)
rule = nx_match.ClsRule()
rule.set_dl_dst(mac_)
dp.send_flow_del(rule=rule, cookie=0)
dps_needs_barrier.add(dp)
self.mac2port.mac_del(dp.id, mac_)
self.mac2net.del_mac(mac_)
self.nw.port_deleted(datapath.id, port_no)
for dp in dps_needs_barrier:
dp.send_barrier()
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def port_status_handler(self, ev):
msg = ev.msg
reason = msg.reason
ofproto = msg.datapath.ofproto
if reason == ofproto.OFPPR_ADD:
self._port_add(ev)
elif reason == ofproto.OFPPR_DELETE:
self._port_del(ev)
else:
assert reason == ofproto.OFPPR_MODIFY
| apache-2.0 |
ezequielpereira/Time-Line | libs64/wx/lib/pubsub/core/topicexc.py | 9 | 3557 | '''
:copyright: Copyright 2006-2009 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE.txt for details.
'''
from topicutils import stringize
class ListenerNotValidatable(RuntimeError):
'''
Raised when an attempt is made to validate a listener relative to a
topic that doesn't have (yet) a Listener Protocol Specification.
'''
def __init__(self):
msg = 'Topics args not set yet, cannot validate listener'
RuntimeError.__init__(self, msg)
class UndefinedTopic(RuntimeError):
'''
Raised when an attempt is made to retrieve a Topic object
for a topic name that hasn't yet been created.
'''
def __init__(self, topicName, msgFormat=None):
if msgFormat is None:
msgFormat = 'Topic "%s" doesn\'t exist'
RuntimeError.__init__(self, msgFormat % topicName)
class UndefinedSubtopic(UndefinedTopic):
'''
Raised when an attempt is made to retrieve a Topic object
for a subtopic name that hasn't yet been created within
its parent topic.
'''
def __init__(self, parentName, subName):
msgFormat = 'Topic "%s" doesn\'t have "%%s" as subtopic' % parentName
UndefinedTopic.__init__(self, subName, msgFormat)
class ListenerSpecIncomplete(RuntimeError):
'''
Raised when an attempt is made to create a topic for which
a specification is not available, but pub.setTopicUnspecifiedFatal()
was called.
'''
def __init__(self, topicNameTuple):
msg = "No topic specification for topic '%s'." \
% stringize(topicNameTuple)
RuntimeError.__init__(self, msg +
" See pub.getOrCreateTopic(), pub.addTopicDefnProvider(), and/or pub.setTopicUnspecifiedFatal()")
class ListenerSpecInvalid(RuntimeError):
'''
Raised when an attempt is made to define a topic's Listener Protocol
Specification to something that is not valid.
The argument names that are invalid can be put in the 'args' list,
and the msg should say what is the problem and contain "%s" for the
args, such as ListenerSpecInvalid('duplicate args %s', ('arg1', 'arg2')).
'''
def __init__(self, msg, args):
argsMsg = msg % ','.join(args)
RuntimeError.__init__(self, 'Invalid listener spec: ' + argsMsg)
class ExcHandlerError(RuntimeError):
'''
When an exception gets raised within some listener during a
sendMessage(), the registered handler (see pub.setListenerExcHandler())
gets called (via its __call__ method) and the send operation can
resume on remaining listeners. However, if the handler itself
raises an exception while it is being called, the send operation
must be aborted: an ExcHandlerError exception gets raised.
'''
def __init__(self, badExcListenerID, topicObj, origExc=None):
'''The badExcListenerID is the name of the listener that raised
the original exception that handler was attempting to handle.
The topicObj is the pub.Topic object for the topic of the
sendMessage that had an exception raised.
The origExc is currently not used. '''
self.badExcListenerID = badExcListenerID
import traceback
self.exc = traceback.format_exc()
msg = 'The exception handler registered with pubsub raised an ' \
+ 'exception, *while* handling an exception raised by listener ' \
+ ' "%s" of topic "%s"):\n%s' \
% (self.badExcListenerID, topicObj.getName(), self.exc)
RuntimeError.__init__(self, msg)
| gpl-3.0 |
naliboff/dealii | contrib/python-bindings/tests/cell_accessor_wrapper.py | 17 | 3314 | # ---------------------------------------------------------------------
#
# Copyright (C) 2016 by the deal.II authors
#
# This file is part of the deal.II library.
#
# The deal.II library is free software; you can use it, redistribute
# it, and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# The full text of the license can be found in the file LICENSE at
# the top level of the deal.II distribution.
#
# ---------------------------------------------------------------------
import unittest
from PyDealII.Debug import *
class TestCellAccessorWrapper(unittest.TestCase):
def setUp(self):
self.triangulation = Triangulation('2D')
self.triangulation.generate_hyper_cube()
self.triangulation.refine_global(1)
def test_material_id(self):
material_id = 0
for cell in self.triangulation.active_cells():
cell.material_id = material_id
material_id += 1
material_id = 0
for cell in self.triangulation.active_cells():
self.assertEqual(cell.material_id, material_id)
material_id += 1
def test_manifold_id(self):
manifold_id = 0
for cell in self.triangulation.active_cells():
cell.manifold_id = manifold_id
manifold_id += 1
manifold_id = 0
for cell in self.triangulation.active_cells():
self.assertEqual(cell.manifold_id, manifold_id)
manifold_id += 1
def test_refine_flag(self):
index = 0
refine_flags = ['no_refinement', 'cut_x', 'cut_y', 'cut_xy']
for cell in self.triangulation.active_cells():
cell.refine_flag = refine_flags[index]
index += 1
index = 0
for cell in self.triangulation.active_cells():
self.assertEqual(cell.refine_flag, refine_flags[index])
index += 1
def test_coarsen_flag(self):
coarsen_flag = True
for cell in self.triangulation.active_cells():
cell.coarsen_flag = coarsen_flag
coarsen_flag = not coarsen_flag
coarsen_flag = True
for cell in self.triangulation.active_cells():
self.assertEqual(cell.coarsen_flag, coarsen_flag)
coarsen_flag = not coarsen_flag
def test_barycenter(self):
centers = [[0.25, 0.25], [0.75, 0.25], [0.25, 0.75], [0.75, 0.75]]
index = 0
for cell in self.triangulation.active_cells():
barycenter = cell.barycenter()
self.assertEqual(barycenter.x, centers[index][0])
self.assertEqual(barycenter.y, centers[index][1])
index += 1
def test_move_vertex(self):
point = Point([0.6, 0.6])
for cell in self.triangulation.active_cells():
cell.set_vertex(3, point)
vertex = cell.get_vertex(3)
break
vertices = [3, 2, 1, 0]
index = 0
for cell in self.triangulation.active_cells():
vertex = cell.get_vertex(vertices[index])
self.assertEqual(vertex.x, point.x)
self.assertEqual(vertex.y, point.y)
index += 1
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |
maciekcc/tensorflow | tensorflow/tools/test/system_info_lib.py | 101 | 4760 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for getting system information during TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import multiprocessing
import platform
import re
import socket
# pylint: disable=g-bad-import-order
# Note: cpuinfo and psutil are not installed for you in the TensorFlow
# OSS tree. They are installable via pip.
import cpuinfo
import psutil
# pylint: enable=g-bad-import-order
from tensorflow.core.util import test_log_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.framework import errors
from tensorflow.python.platform import gfile
from tensorflow.tools.test import gpu_info_lib
def gather_machine_configuration():
"""Gather Machine Configuration. This is the top level fn of this library."""
config = test_log_pb2.MachineConfiguration()
config.cpu_info.CopyFrom(gather_cpu_info())
config.platform_info.CopyFrom(gather_platform_info())
# gather_available_device_info must come before gather_gpu_devices
# because the latter may access libcudart directly, which confuses
# TensorFlow StreamExecutor.
for d in gather_available_device_info():
config.available_device_info.add().CopyFrom(d)
for gpu in gpu_info_lib.gather_gpu_devices():
config.device_info.add().Pack(gpu)
config.memory_info.CopyFrom(gather_memory_info())
config.hostname = gather_hostname()
return config
def gather_hostname():
return socket.gethostname()
def gather_memory_info():
"""Gather memory info."""
mem_info = test_log_pb2.MemoryInfo()
vmem = psutil.virtual_memory()
mem_info.total = vmem.total
mem_info.available = vmem.available
return mem_info
def gather_cpu_info():
"""Gather CPU Information. Assumes all CPUs are the same."""
cpu_info = test_log_pb2.CPUInfo()
cpu_info.num_cores = multiprocessing.cpu_count()
# Gather num_cores_allowed
try:
with gfile.GFile('/proc/self/status', 'rb') as fh:
nc = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', fh.read())
if nc: # e.g. 'ff' => 8, 'fff' => 12
cpu_info.num_cores_allowed = (
bin(int(nc.group(1).replace(',', ''), 16)).count('1'))
except errors.OpError:
pass
finally:
if cpu_info.num_cores_allowed == 0:
cpu_info.num_cores_allowed = cpu_info.num_cores
# Gather the rest
info = cpuinfo.get_cpu_info()
cpu_info.cpu_info = info['brand']
cpu_info.num_cores = info['count']
cpu_info.mhz_per_cpu = info['hz_advertised_raw'][0] / 1.0e6
l2_cache_size = re.match(r'(\d+)', str(info.get('l2_cache_size', '')))
if l2_cache_size:
# If a value is returned, it's in KB
cpu_info.cache_size['L2'] = int(l2_cache_size.group(0)) * 1024
# Try to get the CPU governor
try:
cpu_governors = set([
gfile.GFile(f, 'r').readline().rstrip()
for f in glob.glob(
'/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor')
])
if cpu_governors:
if len(cpu_governors) > 1:
cpu_info.cpu_governor = 'mixed'
else:
cpu_info.cpu_governor = list(cpu_governors)[0]
except errors.OpError:
pass
return cpu_info
def gather_available_device_info():
"""Gather list of devices available to TensorFlow.
Returns:
A list of test_log_pb2.AvailableDeviceInfo messages.
"""
device_info_list = []
devices = device_lib.list_local_devices()
for d in devices:
device_info = test_log_pb2.AvailableDeviceInfo()
device_info.name = d.name
device_info.type = d.device_type
device_info.memory_limit = d.memory_limit
device_info.physical_description = d.physical_device_desc
device_info_list.append(device_info)
return device_info_list
def gather_platform_info():
"""Gather platform info."""
platform_info = test_log_pb2.PlatformInfo()
(platform_info.bits, platform_info.linkage) = platform.architecture()
platform_info.machine = platform.machine()
platform_info.release = platform.release()
platform_info.system = platform.system()
platform_info.version = platform.version()
return platform_info
| apache-2.0 |
t794104/ansible | lib/ansible/modules/network/fortios/fortios_firewall_policy6.py | 24 | 37823 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_policy6
short_description: Configure IPv6 policies in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall feature and policy6 category.
        Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
firewall_policy6:
description:
- Configure IPv6 policies.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
action:
description:
- Policy action (allow/deny/ipsec).
choices:
- accept
- deny
- ipsec
app-category:
description:
- Application category ID list.
suboptions:
id:
description:
- Category IDs.
required: true
app-group:
description:
- Application group names.
suboptions:
name:
description:
- Application group names. Source application.group.name.
required: true
application:
description:
- Application ID list.
suboptions:
id:
description:
- Application IDs.
required: true
application-list:
description:
- Name of an existing Application list. Source application.list.name.
av-profile:
description:
- Name of an existing Antivirus profile. Source antivirus.profile.name.
comments:
description:
- Comment.
custom-log-fields:
description:
- Log field index numbers to append custom log fields to log messages for this policy.
suboptions:
field-id:
description:
- Custom log field. Source log.custom-field.id.
required: true
devices:
description:
- Names of devices or device groups that can be matched by the policy.
suboptions:
name:
description:
- Device or group name. Source user.device.alias user.device-group.name user.device-category.name.
required: true
diffserv-forward:
description:
- Enable to change packet's DiffServ values to the specified diffservcode-forward value.
choices:
- enable
- disable
diffserv-reverse:
description:
- Enable to change packet's reverse (reply) DiffServ values to the specified diffservcode-rev value.
choices:
- enable
- disable
diffservcode-forward:
description:
- Change packet's DiffServ to this value.
diffservcode-rev:
description:
- Change packet's reverse (reply) DiffServ to this value.
dlp-sensor:
description:
- Name of an existing DLP sensor. Source dlp.sensor.name.
dscp-match:
description:
- Enable DSCP check.
choices:
- enable
- disable
dscp-negate:
description:
- Enable negated DSCP match.
choices:
- enable
- disable
dscp-value:
description:
- DSCP value.
dsri:
description:
- Enable DSRI to ignore HTTP server responses.
choices:
- enable
- disable
dstaddr:
description:
- Destination address and address group names.
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name firewall.vip6.name firewall.vipgrp6.name.
required: true
dstaddr-negate:
description:
- When enabled dstaddr specifies what the destination address must NOT be.
choices:
- enable
- disable
dstintf:
description:
- Outgoing (egress) interface.
suboptions:
name:
description:
- Interface name. Source system.interface.name system.zone.name.
required: true
firewall-session-dirty:
description:
- How to handle sessions if the configuration of this firewall policy changes.
choices:
- check-all
- check-new
fixedport:
description:
- Enable to prevent source NAT from changing a session's source port.
choices:
- enable
- disable
global-label:
description:
- Label for the policy that appears when the GUI is in Global View mode.
groups:
description:
- Names of user groups that can authenticate with this policy.
suboptions:
name:
description:
- Group name. Source user.group.name.
required: true
icap-profile:
description:
- Name of an existing ICAP profile. Source icap.profile.name.
inbound:
description:
- "Policy-based IPsec VPN: only traffic from the remote network can initiate a VPN."
choices:
- enable
- disable
ippool:
description:
- Enable to use IP Pools for source NAT.
choices:
- enable
- disable
ips-sensor:
description:
- Name of an existing IPS sensor. Source ips.sensor.name.
label:
description:
- Label for the policy that appears when the GUI is in Section View mode.
logtraffic:
description:
- Enable or disable logging. Log all sessions or security profile sessions.
choices:
- all
- utm
- disable
logtraffic-start:
description:
- Record logs when a session starts and ends.
choices:
- enable
- disable
name:
description:
- Policy name.
nat:
description:
- Enable/disable source NAT.
choices:
- enable
- disable
natinbound:
description:
- "Policy-based IPsec VPN: apply destination NAT to inbound traffic."
choices:
- enable
- disable
natoutbound:
description:
- "Policy-based IPsec VPN: apply source NAT to outbound traffic."
choices:
- enable
- disable
outbound:
description:
- "Policy-based IPsec VPN: only traffic from the internal network can initiate a VPN."
choices:
- enable
- disable
per-ip-shaper:
description:
- Per-IP traffic shaper. Source firewall.shaper.per-ip-shaper.name.
policyid:
description:
- Policy ID.
required: true
poolname:
description:
- IP Pool names.
suboptions:
name:
description:
- IP pool name. Source firewall.ippool6.name.
required: true
profile-group:
description:
- Name of profile group. Source firewall.profile-group.name.
profile-protocol-options:
description:
- Name of an existing Protocol options profile. Source firewall.profile-protocol-options.name.
profile-type:
description:
- Determine whether the firewall policy allows security profile groups or single profiles only.
choices:
- single
- group
replacemsg-override-group:
description:
- Override the default replacement message group for this policy. Source system.replacemsg-group.name.
rsso:
description:
- Enable/disable RADIUS single sign-on (RSSO).
choices:
- enable
- disable
schedule:
description:
- Schedule name. Source firewall.schedule.onetime.name firewall.schedule.recurring.name firewall.schedule.group.name.
send-deny-packet:
description:
- Enable/disable return of deny-packet.
choices:
- enable
- disable
service:
description:
- Service and service group names.
suboptions:
name:
description:
                        - Service name. Source firewall.service.custom.name firewall.service.group.name.
required: true
service-negate:
description:
- When enabled service specifies what the service must NOT be.
choices:
- enable
- disable
session-ttl:
description:
- Session TTL in seconds for sessions accepted by this policy. 0 means use the system default session TTL.
spamfilter-profile:
description:
- Name of an existing Spam filter profile. Source spamfilter.profile.name.
srcaddr:
description:
- Source address and address group names.
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
srcaddr-negate:
description:
- When enabled srcaddr specifies what the source address must NOT be.
choices:
- enable
- disable
srcintf:
description:
- Incoming (ingress) interface.
suboptions:
name:
description:
- Interface name. Source system.zone.name system.interface.name.
required: true
ssh-filter-profile:
description:
- Name of an existing SSH filter profile. Source ssh-filter.profile.name.
ssl-mirror:
description:
- Enable to copy decrypted SSL traffic to a FortiGate interface (called SSL mirroring).
choices:
- enable
- disable
ssl-mirror-intf:
description:
- SSL mirror interface name.
suboptions:
name:
description:
- Interface name. Source system.zone.name system.interface.name.
required: true
ssl-ssh-profile:
description:
- Name of an existing SSL SSH profile. Source firewall.ssl-ssh-profile.name.
status:
description:
- Enable or disable this policy.
choices:
- enable
- disable
tcp-mss-receiver:
description:
- Receiver TCP maximum segment size (MSS).
tcp-mss-sender:
description:
- Sender TCP maximum segment size (MSS).
tcp-session-without-syn:
description:
- Enable/disable creation of TCP session without SYN flag.
choices:
- all
- data-only
- disable
timeout-send-rst:
description:
- Enable/disable sending RST packets when TCP sessions expire.
choices:
- enable
- disable
traffic-shaper:
description:
                - Traffic shaper. Source firewall.shaper.traffic-shaper.name.
traffic-shaper-reverse:
description:
- Reverse traffic shaper. Source firewall.shaper.traffic-shaper.name.
url-category:
description:
- URL category ID list.
suboptions:
id:
description:
- URL category ID.
required: true
users:
description:
- Names of individual users that can authenticate with this policy.
suboptions:
name:
description:
- Names of individual users that can authenticate with this policy. Source user.local.name.
required: true
utm-status:
description:
- Enable AV/web/ips protection profile.
choices:
- enable
- disable
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
vlan-cos-fwd:
description:
- "VLAN forward direction user priority: 255 passthrough, 0 lowest, 7 highest"
vlan-cos-rev:
description:
- "VLAN reverse direction user priority: 255 passthrough, 0 lowest, 7 highest"
vlan-filter:
description:
- Set VLAN filters.
voip-profile:
description:
- Name of an existing VoIP profile. Source voip.profile.name.
vpntunnel:
description:
- "Policy-based IPsec VPN: name of the IPsec VPN Phase 1. Source vpn.ipsec.phase1.name vpn.ipsec.manualkey.name."
webfilter-profile:
description:
- Name of an existing Web filter profile. Source webfilter.profile.name.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPv6 policies.
fortios_firewall_policy6:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
firewall_policy6:
state: "present"
action: "accept"
app-category:
-
id: "5"
app-group:
-
name: "default_name_7 (source application.group.name)"
application:
-
id: "9"
application-list: "<your_own_value> (source application.list.name)"
av-profile: "<your_own_value> (source antivirus.profile.name)"
comments: "<your_own_value>"
custom-log-fields:
-
field-id: "<your_own_value> (source log.custom-field.id)"
devices:
-
name: "default_name_16 (source user.device.alias user.device-group.name user.device-category.name)"
diffserv-forward: "enable"
diffserv-reverse: "enable"
diffservcode-forward: "<your_own_value>"
diffservcode-rev: "<your_own_value>"
dlp-sensor: "<your_own_value> (source dlp.sensor.name)"
dscp-match: "enable"
dscp-negate: "enable"
dscp-value: "<your_own_value>"
dsri: "enable"
dstaddr:
-
name: "default_name_27 (source firewall.address6.name firewall.addrgrp6.name firewall.vip6.name firewall.vipgrp6.name)"
dstaddr-negate: "enable"
dstintf:
-
name: "default_name_30 (source system.interface.name system.zone.name)"
firewall-session-dirty: "check-all"
fixedport: "enable"
global-label: "<your_own_value>"
groups:
-
name: "default_name_35 (source user.group.name)"
icap-profile: "<your_own_value> (source icap.profile.name)"
inbound: "enable"
ippool: "enable"
ips-sensor: "<your_own_value> (source ips.sensor.name)"
label: "<your_own_value>"
logtraffic: "all"
logtraffic-start: "enable"
name: "default_name_43"
nat: "enable"
natinbound: "enable"
natoutbound: "enable"
outbound: "enable"
per-ip-shaper: "<your_own_value> (source firewall.shaper.per-ip-shaper.name)"
policyid: "49"
poolname:
-
name: "default_name_51 (source firewall.ippool6.name)"
profile-group: "<your_own_value> (source firewall.profile-group.name)"
profile-protocol-options: "<your_own_value> (source firewall.profile-protocol-options.name)"
profile-type: "single"
replacemsg-override-group: "<your_own_value> (source system.replacemsg-group.name)"
rsso: "enable"
schedule: "<your_own_value> (source firewall.schedule.onetime.name firewall.schedule.recurring.name firewall.schedule.group.name)"
send-deny-packet: "enable"
service:
-
name: "default_name_60 (source firewall.service.custom.name firewall.service.group.name)"
service-negate: "enable"
session-ttl: "62"
spamfilter-profile: "<your_own_value> (source spamfilter.profile.name)"
srcaddr:
-
name: "default_name_65 (source firewall.address6.name firewall.addrgrp6.name)"
srcaddr-negate: "enable"
srcintf:
-
name: "default_name_68 (source system.zone.name system.interface.name)"
ssh-filter-profile: "<your_own_value> (source ssh-filter.profile.name)"
ssl-mirror: "enable"
ssl-mirror-intf:
-
name: "default_name_72 (source system.zone.name system.interface.name)"
ssl-ssh-profile: "<your_own_value> (source firewall.ssl-ssh-profile.name)"
status: "enable"
tcp-mss-receiver: "75"
tcp-mss-sender: "76"
tcp-session-without-syn: "all"
timeout-send-rst: "enable"
traffic-shaper: "<your_own_value> (source firewall.shaper.traffic-shaper.name)"
traffic-shaper-reverse: "<your_own_value> (source firewall.shaper.traffic-shaper.name)"
url-category:
-
id: "82"
users:
-
name: "default_name_84 (source user.local.name)"
utm-status: "enable"
uuid: "<your_own_value>"
vlan-cos-fwd: "87"
vlan-cos-rev: "88"
vlan-filter: "<your_own_value>"
voip-profile: "<your_own_value> (source voip.profile.name)"
vpntunnel: "<your_own_value> (source vpn.ipsec.phase1.name vpn.ipsec.manualkey.name)"
webfilter-profile: "<your_own_value> (source webfilter.profile.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_firewall_policy6_data(json):
option_list = ['action', 'app-category', 'app-group',
'application', 'application-list', 'av-profile',
'comments', 'custom-log-fields', 'devices',
'diffserv-forward', 'diffserv-reverse', 'diffservcode-forward',
'diffservcode-rev', 'dlp-sensor', 'dscp-match',
'dscp-negate', 'dscp-value', 'dsri',
'dstaddr', 'dstaddr-negate', 'dstintf',
'firewall-session-dirty', 'fixedport', 'global-label',
'groups', 'icap-profile', 'inbound',
'ippool', 'ips-sensor', 'label',
'logtraffic', 'logtraffic-start', 'name',
'nat', 'natinbound', 'natoutbound',
'outbound', 'per-ip-shaper', 'policyid',
'poolname', 'profile-group', 'profile-protocol-options',
'profile-type', 'replacemsg-override-group', 'rsso',
'schedule', 'send-deny-packet', 'service',
'service-negate', 'session-ttl', 'spamfilter-profile',
'srcaddr', 'srcaddr-negate', 'srcintf',
'ssh-filter-profile', 'ssl-mirror', 'ssl-mirror-intf',
'ssl-ssh-profile', 'status', 'tcp-mss-receiver',
'tcp-mss-sender', 'tcp-session-without-syn', 'timeout-send-rst',
'traffic-shaper', 'traffic-shaper-reverse', 'url-category',
'users', 'utm-status', 'uuid',
'vlan-cos-fwd', 'vlan-cos-rev', 'vlan-filter',
'voip-profile', 'vpntunnel', 'webfilter-profile']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
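# Note: filter_firewall_policy6_data keeps only the known option keys and drops
# keys whose value is None, e.g.
#   filter_firewall_policy6_data({'policyid': 7, 'action': None, 'extra': 1})
#   returns {'policyid': 7}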
def firewall_policy6(data, fos):
vdom = data['vdom']
firewall_policy6_data = data['firewall_policy6']
filtered_data = filter_firewall_policy6_data(firewall_policy6_data)
if firewall_policy6_data['state'] == "present":
return fos.set('firewall',
'policy6',
data=filtered_data,
vdom=vdom)
elif firewall_policy6_data['state'] == "absent":
return fos.delete('firewall',
'policy6',
mkey=filtered_data['policyid'],
vdom=vdom)
def fortios_firewall(data, fos):
login(data)
methodlist = ['firewall_policy6']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
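    # Note: the tuple below is (is_error, has_changed, resp); the FortiOS API
    # reports resp['status'] == "success" for both set and delete operations.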
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": "False"},
"firewall_policy6": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"action": {"required": False, "type": "str",
"choices": ["accept", "deny", "ipsec"]},
"app-category": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"}
}},
"app-group": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"application": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"}
}},
"application-list": {"required": False, "type": "str"},
"av-profile": {"required": False, "type": "str"},
"comments": {"required": False, "type": "str"},
"custom-log-fields": {"required": False, "type": "list",
"options": {
"field-id": {"required": True, "type": "str"}
}},
"devices": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"diffserv-forward": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"diffserv-reverse": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"diffservcode-forward": {"required": False, "type": "str"},
"diffservcode-rev": {"required": False, "type": "str"},
"dlp-sensor": {"required": False, "type": "str"},
"dscp-match": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dscp-negate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dscp-value": {"required": False, "type": "str"},
"dsri": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dstaddr": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"dstaddr-negate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dstintf": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"firewall-session-dirty": {"required": False, "type": "str",
"choices": ["check-all", "check-new"]},
"fixedport": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"global-label": {"required": False, "type": "str"},
"groups": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"icap-profile": {"required": False, "type": "str"},
"inbound": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ippool": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ips-sensor": {"required": False, "type": "str"},
"label": {"required": False, "type": "str"},
"logtraffic": {"required": False, "type": "str",
"choices": ["all", "utm", "disable"]},
"logtraffic-start": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"name": {"required": False, "type": "str"},
"nat": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"natinbound": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"natoutbound": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"outbound": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"per-ip-shaper": {"required": False, "type": "str"},
"policyid": {"required": True, "type": "int"},
"poolname": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"profile-group": {"required": False, "type": "str"},
"profile-protocol-options": {"required": False, "type": "str"},
"profile-type": {"required": False, "type": "str",
"choices": ["single", "group"]},
"replacemsg-override-group": {"required": False, "type": "str"},
"rsso": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"schedule": {"required": False, "type": "str"},
"send-deny-packet": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"service": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"service-negate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"session-ttl": {"required": False, "type": "int"},
"spamfilter-profile": {"required": False, "type": "str"},
"srcaddr": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"srcaddr-negate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"srcintf": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"ssh-filter-profile": {"required": False, "type": "str"},
"ssl-mirror": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ssl-mirror-intf": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"ssl-ssh-profile": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"tcp-mss-receiver": {"required": False, "type": "int"},
"tcp-mss-sender": {"required": False, "type": "int"},
"tcp-session-without-syn": {"required": False, "type": "str",
"choices": ["all", "data-only", "disable"]},
"timeout-send-rst": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"traffic-shaper": {"required": False, "type": "str"},
"traffic-shaper-reverse": {"required": False, "type": "str"},
"url-category": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"}
}},
"users": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"utm-status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"uuid": {"required": False, "type": "str"},
"vlan-cos-fwd": {"required": False, "type": "int"},
"vlan-cos-rev": {"required": False, "type": "int"},
"vlan-filter": {"required": False, "type": "str"},
"voip-profile": {"required": False, "type": "str"},
"vpntunnel": {"required": False, "type": "str"},
"webfilter-profile": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_firewall(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
skjena/Assemblyx86 | triMatMult/result.py | 9 | 1238 | class Result(object):
"""
a wrapper to contain the results of a test
@testName: the name of the test run
    @correct: True if the output matched the solution; False otherwise
@timeTaken is either
the number of seconds it took the program to run
'Timed Out' if the program took too long to complete
'Crashed' if the program encountered some fatal error
"""
def __init__(self, testName, correct, timeTaken):
"""
@testName: the name of the test run
        @correct: True if the output matched the solution; False otherwise
@timeTaken is either
the number of seconds it took the program to run
'Timed Out' if the program took too long to complete
'Crashed' if the program encountered some fatal error
"""
self.testName = testName
self.correct = correct
self.timeTaken = timeTaken
#end init
def __repr__(self):
if type(self.timeTaken) == str:
format_str = 'Test: {!s} | Correct: {!s} | Time Taken: {!s}'
else:
format_str = 'Test: {!s} | Correct: {!s} | Time Taken: {:.3f}'
s = format_str.format(self.testName, self.correct, self.timeTaken)
return s
def __str__(self):
return self.__repr__()
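# Minimal usage sketch (the test name and timing below are placeholders):
#   r = Result('triMatMult-small', True, 0.042)
#   print(r)    # Test: triMatMult-small | Correct: True | Time Taken: 0.042
#   print(Result('triMatMult-large', False, 'Timed Out'))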
| gpl-2.0 |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/scipy/io/matlab/tests/test_streams.py | 109 | 5442 | """ Testing
"""
from __future__ import division, print_function, absolute_import
import os
import sys
import zlib
from io import BytesIO
if sys.version_info[0] >= 3:
cStringIO = BytesIO
else:
from cStringIO import StringIO as cStringIO
from tempfile import mkstemp
import numpy as np
from numpy.testing import (assert_, assert_equal, assert_raises,
run_module_suite)
from scipy.io.matlab.streams import make_stream, \
GenericStream, cStringStream, FileStream, ZlibInputStream, \
_read_into, _read_string
fs = None
gs = None
cs = None
fname = None
def setup():
val = b'a\x00string'
global fs, gs, cs, fname
fd, fname = mkstemp()
fs = os.fdopen(fd, 'wb')
fs.write(val)
fs.close()
fs = open(fname, 'rb')
gs = BytesIO(val)
cs = cStringIO(val)
def teardown():
global fname, fs
fs.close()
del fs
os.unlink(fname)
def test_make_stream():
global fs, gs, cs
# test stream initialization
assert_(isinstance(make_stream(gs), GenericStream))
if sys.version_info[0] < 3:
assert_(isinstance(make_stream(cs), cStringStream))
assert_(isinstance(make_stream(fs), FileStream))
def test_tell_seek():
global fs, gs, cs
for s in (fs, gs, cs):
st = make_stream(s)
res = st.seek(0)
yield assert_equal, res, 0
yield assert_equal, st.tell(), 0
res = st.seek(5)
yield assert_equal, res, 0
yield assert_equal, st.tell(), 5
res = st.seek(2, 1)
yield assert_equal, res, 0
yield assert_equal, st.tell(), 7
res = st.seek(-2, 2)
yield assert_equal, res, 0
yield assert_equal, st.tell(), 6
def test_read():
global fs, gs, cs
for s in (fs, gs, cs):
st = make_stream(s)
st.seek(0)
res = st.read(-1)
yield assert_equal, res, b'a\x00string'
st.seek(0)
res = st.read(4)
yield assert_equal, res, b'a\x00st'
# read into
st.seek(0)
res = _read_into(st, 4)
yield assert_equal, res, b'a\x00st'
res = _read_into(st, 4)
yield assert_equal, res, b'ring'
yield assert_raises, IOError, _read_into, st, 2
# read alloc
st.seek(0)
res = _read_string(st, 4)
yield assert_equal, res, b'a\x00st'
res = _read_string(st, 4)
yield assert_equal, res, b'ring'
yield assert_raises, IOError, _read_string, st, 2
class TestZlibInputStream(object):
def _get_data(self, size):
data = np.random.randint(0, 256, size).astype(np.uint8).tostring()
compressed_data = zlib.compress(data)
stream = BytesIO(compressed_data)
return stream, len(compressed_data), data
def test_read(self):
block_size = 131072
SIZES = [0, 1, 10, block_size//2, block_size-1,
block_size, block_size+1, 2*block_size-1]
READ_SIZES = [block_size//2, block_size-1,
block_size, block_size+1]
def check(size, read_size):
compressed_stream, compressed_data_len, data = self._get_data(size)
stream = ZlibInputStream(compressed_stream, compressed_data_len)
data2 = b''
so_far = 0
while True:
block = stream.read(min(read_size,
size - so_far))
if not block:
break
so_far += len(block)
data2 += block
assert_equal(data, data2)
for size in SIZES:
for read_size in READ_SIZES:
yield check, size, read_size
def test_read_max_length(self):
size = 1234
data = np.random.randint(0, 256, size).astype(np.uint8).tostring()
compressed_data = zlib.compress(data)
compressed_stream = BytesIO(compressed_data + b"abbacaca")
stream = ZlibInputStream(compressed_stream, len(compressed_data))
stream.read(len(data))
assert_equal(compressed_stream.tell(), len(compressed_data))
assert_raises(IOError, stream.read, 1)
def test_seek(self):
compressed_stream, compressed_data_len, data = self._get_data(1024)
stream = ZlibInputStream(compressed_stream, compressed_data_len)
stream.seek(123)
p = 123
assert_equal(stream.tell(), p)
d1 = stream.read(11)
assert_equal(d1, data[p:p+11])
stream.seek(321, 1)
p = 123+11+321
assert_equal(stream.tell(), p)
d2 = stream.read(21)
assert_equal(d2, data[p:p+21])
stream.seek(641, 0)
p = 641
assert_equal(stream.tell(), p)
d3 = stream.read(11)
assert_equal(d3, data[p:p+11])
assert_raises(IOError, stream.seek, 10, 2)
assert_raises(IOError, stream.seek, -1, 1)
assert_raises(ValueError, stream.seek, 1, 123)
stream.seek(10000, 1)
assert_raises(IOError, stream.read, 12)
def test_all_data_read(self):
compressed_stream, compressed_data_len, data = self._get_data(1024)
stream = ZlibInputStream(compressed_stream, compressed_data_len)
assert_(not stream.all_data_read())
stream.seek(512)
assert_(not stream.all_data_read())
stream.seek(1024)
assert_(stream.all_data_read())
if __name__ == "__main__":
run_module_suite()
| mit |
ArvinDevel/incubator-pulsar | dashboard/django/stats/migrations/0001_initial.py | 13 | 11195 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-21 21:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ActiveBroker',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.BigIntegerField(db_index=True)),
],
),
migrations.CreateModel(
name='Broker',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.URLField(db_index=True)),
],
),
migrations.CreateModel(
name='Bundle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.BigIntegerField(db_index=True)),
('range', models.CharField(max_length=200)),
('broker', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.Broker')),
],
),
migrations.CreateModel(
name='Cluster',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('serviceUrl', models.URLField()),
],
),
migrations.CreateModel(
name='Consumer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.BigIntegerField(db_index=True)),
('address', models.CharField(max_length=64, null=True)),
('availablePermits', models.IntegerField(default=0)),
('connectedSince', models.DateTimeField(null=True)),
('consumerName', models.CharField(max_length=64, null=True)),
('msgRateOut', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('msgRateRedeliver', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('msgThroughputOut', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('unackedMessages', models.BigIntegerField(default=0)),
('blockedConsumerOnUnackedMsgs', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='LatestTimestamp',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10, unique=True)),
('timestamp', models.BigIntegerField(default=0)),
],
),
migrations.CreateModel(
name='Namespace',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('clusters', models.ManyToManyField(to='stats.Cluster')),
],
),
migrations.CreateModel(
name='Property',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
],
options={
'verbose_name_plural': 'properties',
},
),
migrations.CreateModel(
name='Replication',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.BigIntegerField(db_index=True)),
('msgRateIn', models.DecimalField(decimal_places=1, max_digits=12)),
('msgThroughputIn', models.DecimalField(decimal_places=1, max_digits=12)),
('msgRateOut', models.DecimalField(decimal_places=1, max_digits=12)),
('msgThroughputOut', models.DecimalField(decimal_places=1, max_digits=12)),
('msgRateExpired', models.DecimalField(decimal_places=1, max_digits=12)),
('replicationBacklog', models.BigIntegerField(default=0)),
('connected', models.BooleanField(default=False)),
('replicationDelayInSeconds', models.IntegerField(default=0)),
('inboundConnectedSince', models.DateTimeField(null=True)),
('outboundConnectedSince', models.DateTimeField(null=True)),
('local_cluster', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.Cluster')),
('remote_cluster', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='remote_cluster', to='stats.Cluster')),
],
),
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('timestamp', models.BigIntegerField(db_index=True)),
('msgBacklog', models.BigIntegerField(default=0)),
('msgRateExpired', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('msgRateOut', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('msgRateRedeliver', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('msgThroughputOut', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('subscriptionType', models.CharField(choices=[('N', 'Not connected'), ('E', 'Exclusive'), ('S', 'Shared'), ('F', 'Failover')], default='N', max_length=1)),
('unackedMessages', models.BigIntegerField(default=0)),
('namespace', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.Namespace')),
],
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=1024)),
('timestamp', models.BigIntegerField(db_index=True)),
('averageMsgSize', models.IntegerField(default=0)),
('msgRateIn', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('msgRateOut', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('msgThroughputIn', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('msgThroughputOut', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('pendingAddEntriesCount', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('producerCount', models.IntegerField(default=0)),
('subscriptionCount', models.IntegerField(default=0)),
('consumerCount', models.IntegerField(default=0)),
('storageSize', models.BigIntegerField(default=0)),
('backlog', models.BigIntegerField(default=0)),
('localRateIn', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('localRateOut', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('localThroughputIn', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('localThroughputOut', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('replicationRateIn', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('replicationRateOut', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('replicationThroughputIn', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('replicationThroughputOut', models.DecimalField(decimal_places=1, default=0, max_digits=12)),
('replicationBacklog', models.BigIntegerField(default=0)),
('active_broker', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.ActiveBroker')),
('broker', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.Broker')),
('bundle', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.Bundle')),
('cluster', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.Cluster')),
('namespace', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.Namespace')),
],
),
migrations.AddField(
model_name='subscription',
name='topic',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.Topic'),
),
migrations.AddField(
model_name='replication',
name='topic',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.Topic'),
),
migrations.AddField(
model_name='namespace',
name='property',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.Property'),
),
migrations.AddField(
model_name='consumer',
name='subscription',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.Subscription'),
),
migrations.AddField(
model_name='bundle',
name='cluster',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.Cluster'),
),
migrations.AddField(
model_name='bundle',
name='namespace',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.Namespace'),
),
migrations.AddField(
model_name='broker',
name='cluster',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.Cluster'),
),
migrations.AddField(
model_name='activebroker',
name='broker',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stats.Broker'),
),
migrations.AlterIndexTogether(
name='topic',
index_together=set([('name', 'cluster', 'timestamp')]),
),
]
| apache-2.0 |
lorisercole/thermocepstrum | thermocepstrum/i_o/read_lammps_dump.py | 1 | 19682 | # -*- coding: utf-8 -*-
################################################################################
###
### ReadLAMMPSDump - v0.1.8 - May 03, 2018
###
################################################################################
###
### a package to read LAMMPS Dump files
### (it assumes that the data column names and the number of atoms do not change)
###
################################################################################
## example:
## import read_lammps_dump as rd
## data = rd.LAMMPS_Dump(filename)
##
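## a slightly longer usage sketch (the file name and the selected columns are
## placeholders, not values taken from this module):
##   traj = rd.LAMMPS_Dump('dump.lammpstrj', preload=False)
##   traj.read_timesteps(10, start_step=0, select_ckeys=['id', 'xu', 'yu', 'zu'])
##   print(traj.data[0]['xu'])    # (NATOMS, 1) array for the first step read
##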
import numpy as np
from time import time
from thermocepstrum.utils import log
def is_string(string):
try:
float(string)
except ValueError:
return True
return False
def is_vector_variable(string):
bracket = string.rfind('[')
if (bracket == -1):
bracket = 0
return bracket
def file_length(filename):
i = -1
with open(filename) as f:
for i, l in enumerate(f, 1):
pass
return i
def get_volume(filename):
f = open(filename, 'r')
line = f.readline()
while (line):
if 'BOX BOUNDS' in line:
xlo, xhi = list(map(float, f.readline().split()))
ylo, yhi = list(map(float, f.readline().split()))
zlo, zhi = list(map(float, f.readline().split()))
break
line = f.readline()
f.close()
volume = (xhi - xlo) * (yhi - ylo) * (zhi - zlo)
return volume
def get_natoms(filename):
f = open(filename, 'r')
line = f.readline()
while (line):
if 'NUMBER OF ATOMS' in line:
natoms = int(f.readline())
break
line = f.readline()
f.close()
return natoms
class LAMMPS_Dump(object):
"""
A LAMMPS_Dump file that can be read in blocks.
example:
traj = LAMMPS_Dump(filename, preload=False) -->> do not preload list of steps (suggested if the file is big)
      traj.read_timesteps(10, start_step=0, select_ckeys=['id', 'xu', 'yu', 'vu'])  -->> Read first 10 timesteps, only the specified columns
      traj.read_timesteps(10, select_ckeys=['id', 'xu', 'yu', 'vu'])  -->> Read the next 10 timesteps, only the specified columns (DELTA_TIMESTEP is assumed)
traj.read_timesteps((10,30)) -->> Read from TIMESTEP 10 to 30
traj.read_timesteps((10,30,2)) -->> Read every 2 steps from TIMESTEP 10 to 30
print(traj.data)
"""
def __init__(self, *args, **kwargs):
#*******
if (len(args) > 0):
self.filename = args[0]
if (len(args) == 2):
self.select_ckeys = args[1]
else:
self.select_ckeys = None
else:
raise ValueError('No file given.')
group_vectors = kwargs.get('group_vectors', True)
preload_timesteps = kwargs.get('preload', True)
self._quiet = kwargs.get('quiet', False)
self._GUI = kwargs.get('GUI', False)
if self._GUI:
from ipywidgets import FloatProgress
from IPython.display import display
global FloatProgress, display
self._open_file()
self._read_ckeys(group_vectors, preload_timesteps)
self.ckey = None
#self.MAX_NSTEPS = data_length(self.filename)
#log.write_log("Data length = ", self.MAX_NSTEPS)
return
def __repr__(self):
msg = 'LAMMPS_Dump:\n' + \
' filename: {}\n'.format(self.filename) + \
' all_ckeys: {}\n'.format(self.all_ckeys) + \
' select_ckeys: {}\n'.format(self.select_ckeys) + \
' used ckey: {}\n'.format(self.ckey) + \
' all_timesteps: {}\n'.format(self.all_timesteps) + \
' select_timesteps: {}\n'.format(self.select_timesteps) + \
' used timesteps: {}\n'.format(self.timestep) + \
' start pos: {}\n'.format(self._start_byte) + \
' current pos: {}\n'.format(self.file.tell()) + \
' FIRST TIMESTEP: {}\n'.format(self.FIRST_TIMESTEP) + \
' LAST TIMESTEP: {}\n'.format(self.LAST_TIMESTEP) + \
' DELTA TIMESTEP: {}\n'.format(self.DELTA_TIMESTEP) + \
' current step: {}\n'.format(self.current_timestep)
return msg
def _open_file(self):
"""Open the file."""
try:
self.file = open(self.filename, 'r')
except:
raise ValueError('File does not exist.')
return
def _read_ckeys(self, group_vectors=True, preload_timesteps=True):
"""Read the column keys. If group_vectors=True the vector ckeys are grouped togheter"""
self._start_byte = self.file.tell()
self.all_ckeys = {}
self.all_timesteps = []
self.preload_timesteps = preload_timesteps
while True:
line = self.file.readline()
if len(line) == 0: # EOF
raise RuntimeError('Reached EOF, no ckeys found.')
values = np.array(line.split())
if (values[0] == 'ITEM:'):
if (values[1] == 'TIMESTEP'):
self.current_timestep = int(self.file.readline())
self.FIRST_TIMESTEP = self.current_timestep
self.all_timesteps.append(self.current_timestep)
                # optional:
elif ((values[1] == 'NUMBER') and values[2] == 'OF' and values[3] == 'ATOMS'):
self.NATOMS = int(self.file.readline())
elif ((values[1] == 'BOX') and values[2] == 'BOUNDS'):
self.BOX_BOUNDS_TYPE = values[3:6]
xbox = self.file.readline().split()
ybox = self.file.readline().split()
zbox = self.file.readline().split()
self.BOX_BOUNDS = np.array([xbox, ybox, zbox], dtype='float')
elif (values[1] == 'ATOMS'):
for i in range(2, len(values)):
if group_vectors:
bracket = is_vector_variable(values[i]) # get position of left square bracket
else:
bracket = 0
if (bracket == 0): # the variable is a scalar
key = values[i]
if (key[:2] == 'c_'): # remove 'c_' if present
key = key[2:]
self.all_ckeys[key] = [i - 2] # -2 offset
else: # the variable is a vector
key = values[i][:bracket] # name of vector
if (key[:2] == 'c_'): # remove 'c_' if present
key = key[2:]
vecidx = int(values[i][bracket + 1:-1]) # current index
if key in self.all_ckeys: # if this vector is already defined, add this component
if (vecidx > self.all_ckeys[key].size):
                                    self.all_ckeys[key] = np.resize(self.all_ckeys[key], vecidx)   # grow the vector's index array
self.all_ckeys[key][vecidx - 1] = i - 2 # -2 offset!
else: # if it is not, define a vector
self.all_ckeys[key] = np.array([0] * vecidx)
self.all_ckeys[key][-1] = i - 2 # -2 offset!
#self._start_byte = self.file.tell()
break
#else:
# self.header += line
if self.preload_timesteps:
# get the list of time steps
while True:
line = self.file.readline()
if len(line) == 0: # EOF
break
if (line == 'ITEM: TIMESTEP\n'):
self.current_timestep = int(self.file.readline())
self.all_timesteps.append(self.current_timestep)
self.LAST_TIMESTEP = self.all_timesteps[-1]
self.DELTA_TIMESTEP = self.all_timesteps[1] - self.FIRST_TIMESTEP
self.TOT_TIMESTEPS = len(self.all_timesteps)
self.all_timesteps = np.array(self.all_timesteps)
else:
log.write_log(' ** No timesteps pre-loaded. Be careful in the selection. **')
# get the first 2 timesteps
while (len(self.all_timesteps) < 2):
line = self.file.readline()
if len(line) == 0: # EOF
break
if (line == 'ITEM: TIMESTEP\n'):
self.current_timestep = int(self.file.readline())
self.all_timesteps.append(self.current_timestep)
self.LAST_TIMESTEP = None
self.DELTA_TIMESTEP = self.all_timesteps[1] - self.FIRST_TIMESTEP
self.TOT_TIMESTEPS = None
self.all_timesteps = None
# go back to the first timestep
self.gototimestep(0) # compute_first = True
self._start_byte = 0
log.write_log(' all_ckeys = ', self.all_ckeys)
log.write_log(' TOT_TIMESTEPS = ', self.TOT_TIMESTEPS)
log.write_log(' FIRST_TIMESTEP = ', self.FIRST_TIMESTEP)
log.write_log(' DELTA_TIMESTEP = ', self.DELTA_TIMESTEP)
log.write_log(' LAST_TIMESTEP = ', self.LAST_TIMESTEP)
log.write_log(' all_timesteps = ', self.all_timesteps)
return
def _set_ckey(self, select_ckeys=None):
"""
Set the ckeys to read from the selected, checking the available ones.
If select_ckeys is not passed, then use the already selected ones, or all the available ones if no selection
was previously made.
"""
if select_ckeys is not None:
self.select_ckeys = select_ckeys
self.ckey = {}
if self.select_ckeys is None: # take all ckeys
self.ckey = self.all_ckeys
else:
for key in self.select_ckeys: # take only the selected ckeys
value = self.all_ckeys.get(key, None)
if value is not None:
self.ckey[key] = value[:] # copy all indexes (up to max dimension for vectors)
else:
log.write_log('Warning: ', key, 'key not found.')
if (len(self.ckey) == 0):
raise KeyError('No ckey set. Check selected keys.')
else:
if not self._quiet:
log.write_log(' ckey = ', self.ckey)
return
def _set_timesteps(self, selection, start_step=-1):
"""Set the timesteps to read from the selected, checking the available ones.
INPUT: N --> Read the next N steps (DELTA_TIMESTEP is assumed)
N, start_step=30 --> Read N steps from the TIMESTEP 30
                if self._compute_current_step is True, the current step is read as well
(10,30) --> Read from TIMESTEP 10 to 30
(10,30,2) --> Read every 2 steps from TIMESTEP 10 to 30"""
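        # Worked example: selection=(10, 30) selects TIMESTEPs 10, 10+DELTA_TIMESTEP,
        # ... up to but excluding 30, keeping only those actually present;
        # selection=(10, 30, 2) uses 2 as the stride and warns if it is not a
        # multiple of the detected DELTA_TIMESTEP.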
if (start_step == -1):
if self._compute_current_step:
start_step = self.current_timestep
else:
start_step = self.current_timestep + self.DELTA_TIMESTEP
elif (start_step == 0):
start_step = self.FIRST_TIMESTEP
if np.isscalar(selection) or (len(selection) == 1): # select N steps from start one
first = start_step
last = self.DELTA_TIMESTEP * selection + start_step
step = None
elif (len(selection) == 2):
first = selection[0]
last = selection[1]
step = None
elif (len(selection) == 3):
first = selection[0]
last = selection[1]
step = selection[2]
if step is None:
step = self.DELTA_TIMESTEP
elif (step % self.DELTA_TIMESTEP != 0):
log.write_log('Warning: step is not a multiple of the detected DELTA_TIMESTEP. You may get errors.')
if (first % step != 0):
first += step - first % step # round first step to the next in the list
self.timestep = []
self.select_timesteps = np.arange(first, last, step) # selected timesteps
if self.preload_timesteps:
for step in self.select_timesteps:
if step in self.all_timesteps:
self.timestep.append(step) # make list of available selected-timesteps
else:
log.write_log('Warning: timestep # {:d} not found.'.format(step))
else:
self.timestep = self.select_timesteps # use all the selected (be careful)
self.nsteps = len(self.timestep) # number of available steps
if (self.nsteps == 0):
raise ValueError('No timestep set. Check selected timesteps.')
else:
if not self._quiet:
log.write_log(' nsteps = ', self.nsteps)
log.write_log(' timestep = ', self.timestep)
return
def _initialize_dic(self):
"""Initialize the data dictionary once the ckeys and timesteps have been set."""
if self.ckey is None:
raise ValueError('ckey not set.')
if self.timestep is None:
raise ValueError('timestep not set.')
self.data = [dict() for i in range(self.nsteps)]
for istep in range(self.nsteps):
for key, idx in self.ckey.items():
if (key == 'element'): # this should be improved
self.data[istep][key] = np.zeros((self.NATOMS, len(idx)), dtype='S8')
else:
self.data[istep][key] = np.zeros((self.NATOMS, len(idx)), dtype='float64')
return
def _gototimestep(self, start_step, fast_check=True):
"""
Go to the start_step-th line in the time series (assumes step=1).
start_step = -1 --> ignore, continue from current step
0 --> go to FIRST timestep
N --> go to N-th timestep
        fast_check = True --> assumes the TIMESTEPs are monotonically increasing.
           If start_step is passed over without being found, stop.
"""
if (start_step >= 0):
if (start_step <= self.current_timestep):
# or (self.current_timestep == -1): # if start_step is before/equal the current step
self.file.seek(self._start_byte) # --> start over
if (start_step == 0): # or (self.current_timestep == -1):
goto_step = self.FIRST_TIMESTEP
else:
goto_step = start_step
# search until start_step is found ***** MAY BE IMPROVED KNOWING THE N OF LINES TO SKIP ******
while True:
line = self.file.readline()
if len(line) == 0: # EOF
raise EOFError('Warning (gototimestep): reached EOF. Timestep {} NOT FOUND.'.format(goto_step))
if (line == 'ITEM: TIMESTEP\n'):
self.current_timestep = int(self.file.readline())
if (self.current_timestep == goto_step):
while (self.file.readline().find('ITEM: ATOMS') < 0): # jump to the data part
pass
break
if (fast_check) and (self.current_timestep > goto_step):
raise Warning(
'Warning (gototimestep): Timestep {} NOT FOUND up to current_step = {}. (To force check the whole trajectory set fast_check=False)'
.format(goto_step, self.current_timestep))
else:
pass
return
def gototimestep(self, start_step, fast_check=True):
"""
Go to the start_step-th line in the time series (assumes step=1).
start_step = -1 --> ignore, continue from current step
0 --> go to FIRST timestep
N --> go to N-th timestep
        fast_check = True --> assumes the TIMESTEPs are monotonically increasing.
           If start_step is passed over without being found, stop.
"""
## user-called function
self._compute_current_step = True
self._gototimestep(start_step, fast_check)
return
def read_timesteps(self, selection, start_step=-1, select_ckeys=None, fast_check=True):
"""
Read selected keys of file, within the provided range.
Examples:
            read_timesteps(10, start_step=0, select_ckeys=['id', 'xu', 'yu', 'vu'])  -->> Read first 10 timesteps, only the specified columns
            read_timesteps(10, select_ckeys=['id', 'xu', 'yu', 'vu'])  -->> Read the next 10 timesteps, only the specified columns (DELTA_TIMESTEP is assumed)
read_timesteps((10,30)) -->> Read from TIMESTEP 10 to 30
read_timesteps((10,30,2)) -->> Read every 2 steps from TIMESTEP 10 to 30
"""
if self._GUI:
progbar = FloatProgress(min=0, max=100)
display(progbar)
start_time = time()
self._set_ckey(select_ckeys) # set the ckeys to read --> ckey
self._set_timesteps(selection, start_step) # set the timesteps to read --> timestep
self._initialize_dic() # allocate dictionary --> data
# extract the steps from the file
progbar_step = max(1000, int(0.005 * self.nsteps))
atomid_col = self.all_ckeys['id'][0]
for istep, step in enumerate(self.timestep):
self._gototimestep(step, fast_check) # jump to the desired step,
self.data[istep]['TIMESTEP'] = step
for nat in range(self.NATOMS): # read data (may be unsorted)
line = self.file.readline()
if len(line) == 0: # EOF
raise EOFError('Warning: reached EOF.')
values = np.array(line.split())
for key, idx in self.ckey.items(): # save the selected columns
atomid = int(values[atomid_col]) - 1 # current atom index (in LAMMPS it starts from 1)
if (key == 'element'): # this should be improved
self.data[istep][key][atomid, :] = np.array(list(map(str, values[idx])))
else:
self.data[istep][key][atomid, :] = np.array(list(map(float, values[idx])))
if ((istep + 1) % progbar_step == 0):
if self._GUI:
progbar.value = float(istep + 1) / self.nsteps * 100.
progbar.description = '%g %%' % progbar.value
else:
log.write_log(' step = {:9d} - {:6.2f}% completed'.format(istep + 1,
float(istep + 1) / self.nsteps * 100.))
if self._GUI:
progbar.close()
# check number of steps read, keep an even number of steps
if (istep + 1 < self.nsteps): # (should never happen)
if (istep == 0):
log.write_log('WARNING: no step read.')
return
else:
log.write_log('Warning: less steps read.')
self.nsteps = istep + 1
if not self._quiet:
log.write_log(' ( %d ) steps read.' % (self.nsteps))
log.write_log('DONE. Elapsed time: ', time() - start_time, 'seconds')
self._compute_current_step = False # next time do not compute the current_step
return self.data
| gpl-3.0 |
coruus/pyasn1-modules | tools/pkcs10dump.py | 26 | 1109 | #!/usr/bin/python
#
# Read ASN.1/PEM X.509 certificate requests (PKCS#10 format) on stdin,
# parse each into plain text, then build substrate from it
#
from pyasn1.codec.der import decoder, encoder
from pyasn1_modules import rfc2314, pem
import sys
if len(sys.argv) != 1:
print("""Usage:
$ cat certificateRequest.pem | %s""" % sys.argv[0])
sys.exit(-1)
certType = rfc2314.CertificationRequest()
certCnt = 0
while 1:
idx, substrate = pem.readPemBlocksFromFile(
sys.stdin, ('-----BEGIN CERTIFICATE REQUEST-----',
'-----END CERTIFICATE REQUEST-----')
)
if not substrate:
break
cert, rest = decoder.decode(substrate, asn1Spec=certType)
if rest: substrate = substrate[:-len(rest)]
print(cert.prettyPrint())
assert encoder.encode(cert, defMode=False) == substrate or \
encoder.encode(cert, defMode=True) == substrate, \
'cert recode fails'
certCnt = certCnt + 1
print('*** %s PEM certificate request(s) de/serialized' % certCnt)
| bsd-2-clause |
petecummings/django-cms | cms/tests/test_publisher.py | 32 | 44420 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from djangocms_text_ckeditor.models import Text
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.management.base import CommandError
from django.core.management import call_command
from django.core.urlresolvers import reverse
from cms.api import create_page, add_plugin, create_title
from cms.constants import PUBLISHER_STATE_PENDING, PUBLISHER_STATE_DEFAULT, PUBLISHER_STATE_DIRTY
from cms.management.commands.subcommands.publisher_publish import PublishCommand
from cms.models import CMSPlugin, Title
from cms.models.pagemodel import Page
from cms.plugin_pool import plugin_pool
from cms.test_utils.testcases import CMSTestCase as TestCase
from cms.test_utils.util.context_managers import StdoutOverride
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import force_language
from cms.utils.urlutils import admin_reverse
class PublisherCommandTests(TestCase):
"""
Tests for the publish command
"""
def test_command_line_should_raise_without_superuser(self):
with self.assertRaises(CommandError):
com = PublishCommand()
com.handle_noargs()
def test_command_line_publishes_zero_pages_on_empty_db(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher_publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 0)
self.assertEqual(published_from_output, 0)
def test_command_line_ignores_draft_page(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
create_page("The page!", "nav_playground.html", "en", published=False)
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher_publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 0)
self.assertEqual(published_from_output, 0)
self.assertEqual(Page.objects.public().count(), 0)
def test_command_line_publishes_draft_page(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
create_page("The page!", "nav_playground.html", "en", published=False)
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher_publish', include_unpublished=True)
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
self.assertEqual(Page.objects.public().count(), 1)
def test_command_line_publishes_selected_language(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
page = create_page("en title", "nav_playground.html", "en")
title = create_title('de', 'de title', page)
title.published = True
title.save()
title = create_title('fr', 'fr title', page)
title.published = True
title.save()
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher_publish', language='de')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
self.assertEqual(Page.objects.public().count(), 1)
public = Page.objects.public()[0]
languages = sorted(public.title_set.values_list('language', flat=True))
self.assertEqual(languages, ['de'])
def test_command_line_publishes_selected_language_drafts(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
page = create_page("en title", "nav_playground.html", "en")
title = create_title('de', 'de title', page)
title.published = False
title.save()
title = create_title('fr', 'fr title', page)
title.published = False
title.save()
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher_publish', language='de', include_unpublished=True)
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
self.assertEqual(Page.objects.public().count(), 1)
public = Page.objects.public()[0]
languages = sorted(public.title_set.values_list('language', flat=True))
self.assertEqual(languages, ['de'])
def test_table_name_patching(self):
"""
This tests the patching of plugin models when publishing from the command line
"""
User = get_user_model()
User.objects.create_superuser('djangocms', '[email protected]', '123456')
create_page("The page!", "nav_playground.html", "en", published=True)
draft = Page.objects.drafts()[0]
draft.reverse_id = 'a_test' # we have to change *something*
draft.save()
add_plugin(draft.placeholders.get(slot=u"body"),
u"TextPlugin", u"en", body="Test content")
draft.publish('en')
add_plugin(draft.placeholders.get(slot=u"body"),
u"TextPlugin", u"en", body="Test content")
# Manually undoing table name patching
Text._meta.db_table = 'djangocms_text_ckeditor_text'
plugin_pool.patched = False
with StdoutOverride():
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher_publish')
not_drafts = len(Page.objects.filter(publisher_is_draft=False))
drafts = len(Page.objects.filter(publisher_is_draft=True))
self.assertEqual(not_drafts, 1)
self.assertEqual(drafts, 1)
def test_command_line_publishes_one_page(self):
"""
Publisher always creates two Page objects for every CMS page,
one is_draft and one is_public.
The public version of the page can be either published or not.
This test intentionally mixes manager methods and manual
filters (this helps exercise the managers).
"""
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
# Now, let's create a page. That actually creates 2 Page objects
create_page("The page!", "nav_playground.html", "en", published=True)
draft = Page.objects.drafts()[0]
draft.reverse_id = 'a_test' # we have to change *something*
draft.save()
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher_publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
# Sanity check the database (we should have one draft and one public)
not_drafts = len(Page.objects.filter(publisher_is_draft=False))
drafts = len(Page.objects.filter(publisher_is_draft=True))
self.assertEqual(not_drafts, 1)
self.assertEqual(drafts, 1)
# Now check that the non-draft has the attribute we set on the draft.
non_draft = Page.objects.public()[0]
self.assertEqual(non_draft.reverse_id, 'a_test')
def test_command_line_publish_multiple_languages(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
# Create a draft page with two published titles
page = create_page(u"The page!", "nav_playground.html", "en", published=False)
title = create_title('de', 'ja', page)
title.published = True
title.save()
title = create_title('fr', 'non', page)
title.published = True
title.save()
with StdoutOverride():
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher_publish')
public = Page.objects.public()[0]
languages = sorted(public.title_set.values_list('language', flat=True))
self.assertEqual(languages, ['de', 'fr'])
def test_command_line_publish_one_site(self):
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
siteA = Site.objects.create(domain='a.example.com', name='a.example.com')
siteB = Site.objects.create(domain='b.example.com', name='b.example.com')
#example.com
create_page(u"example.com homepage", "nav_playground.html", "en", published=True)
#a.example.com
create_page(u"a.example.com homepage", "nav_playground.html", "de", site=siteA, published=True)
#b.example.com
create_page(u"b.example.com homepage", "nav_playground.html", "de", site=siteB, published=True)
create_page(u"b.example.com about", "nav_playground.html", "nl", site=siteB, published=True)
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher_publish', site=siteB.id)
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 2)
self.assertEqual(published_from_output, 2)
def test_command_line_publish_multiple_languages_check_count(self):
"""
Publishing one page with multiple languages still counts
as one page. This test case checks whether it works
as expected.
"""
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', '[email protected]', '123456')
# Now, let's create a page with 2 languages.
page = create_page("en title", "nav_playground.html", "en", published=True)
create_title("de", "de title", page)
page.publish("de")
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher_publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
def tearDown(self):
plugin_pool.patched = False
plugin_pool.set_plugin_meta()
class PublishingTests(TestCase):
def create_page(self, title=None, **kwargs):
return create_page(title or self._testMethodName,
"nav_playground.html", "en", **kwargs)
def test_publish_home(self):
name = self._testMethodName
page = self.create_page(name, published=False)
self.assertFalse(page.publisher_public_id)
self.assertEqual(Page.objects.all().count(), 1)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.post(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], "http://testserver/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
def test_publish_single(self):
name = self._testMethodName
page = self.create_page(name, published=False)
self.assertFalse(page.is_published('en'))
drafts = Page.objects.drafts()
public = Page.objects.public()
published = Page.objects.public().published("en")
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectDoesNotExist(public, title_set__title=name)
self.assertObjectDoesNotExist(published, title_set__title=name)
page.publish("en")
drafts = Page.objects.drafts()
public = Page.objects.public()
published = Page.objects.public().published("en")
self.assertTrue(page.is_published('en'))
self.assertEqual(page.get_publisher_state("en"), PUBLISHER_STATE_DEFAULT)
self.assertIsNotNone(page.publisher_public)
self.assertTrue(page.publisher_public_id)
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectExist(public, title_set__title=name)
self.assertObjectExist(published, title_set__title=name)
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_publisher_state("en"), 0)
def test_publish_admin(self):
page = self.create_page("test_admin", published=False)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.post(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
self.assertEqual(response.status_code, 302)
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_publisher_state('en'), 0)
def test_publish_wrong_lang(self):
page = self.create_page("test_admin", published=False)
superuser = self.get_superuser()
with self.settings(
LANGUAGES=(('de', 'de'), ('en', 'en')),
CMS_LANGUAGES={1: [{'code': 'en', 'name': 'en', 'fallbacks': ['fr', 'de'], 'public': True}]}
):
with self.login_user_context(superuser):
with force_language('de'):
response = self.client.post(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
self.assertEqual(response.status_code, 302)
page = Page.objects.get(pk=page.pk)
def test_publish_child_first(self):
parent = self.create_page('parent', published=False)
child = self.create_page('child', published=False, parent=parent)
parent = parent.reload()
self.assertFalse(parent.is_published('en'))
self.assertFalse(child.is_published('en'))
drafts = Page.objects.drafts()
public = Page.objects.public()
published = Page.objects.public().published('en')
for name in ('parent', 'child'):
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectDoesNotExist(public, title_set__title=name)
self.assertObjectDoesNotExist(published, title_set__title=name)
child.publish("en")
child = child.reload()
self.assertTrue(child.is_published("en"))
self.assertEqual(child.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
self.assertIsNone(child.publisher_public)
# Since the parent is still unpublished, the public state is otherwise unchanged
for name in ('parent', 'child'):
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectDoesNotExist(public, title_set__title=name)
self.assertObjectDoesNotExist(published, title_set__title=name)
parent.publish("en")
drafts = Page.objects.drafts()
public = Page.objects.public()
published = Page.objects.public().published('en')
# Cascade publish for all pending descendants
for name in ('parent', 'child'):
self.assertObjectExist(drafts, title_set__title=name)
page = drafts.get(title_set__title=name)
self.assertTrue(page.is_published("en"), name)
self.assertEqual(page.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT, name)
self.assertIsNotNone(page.publisher_public, name)
self.assertTrue(page.publisher_public.is_published('en'), name)
self.assertObjectExist(public, title_set__title=name)
self.assertObjectExist(published, title_set__title=name)
def test_simple_publisher(self):
"""
Creates the stuff needed for these tests.
Please keep this up-to-date (the docstring!)
A
/ \
B C
"""
# Create a simple tree of 3 pages
pageA = create_page("Page A", "nav_playground.html", "en",
published=True)
pageB = create_page("Page B", "nav_playground.html", "en", parent=pageA,
published=True)
pageC = create_page("Page C", "nav_playground.html", "en", parent=pageA,
published=False)
# Assert A and B are published, C unpublished
self.assertTrue(pageA.publisher_public_id)
self.assertTrue(pageB.publisher_public_id)
self.assertTrue(not pageC.publisher_public_id)
self.assertEqual(len(Page.objects.public().published("en")), 2)
# Let's publish C now.
pageC.publish("en")
# Assert all are published
self.assertTrue(pageA.publisher_public_id)
self.assertTrue(pageB.publisher_public_id)
self.assertTrue(pageC.publisher_public_id)
self.assertEqual(len(Page.objects.public().published("en")), 3)
def test_i18n_publishing(self):
page = self.create_page('parent', published=True)
self.assertEqual(Title.objects.all().count(), 2)
create_title("de", "vater", page)
self.assertEqual(Title.objects.all().count(), 3)
self.assertEqual(Title.objects.filter(published=True).count(), 2)
page.publish('de')
self.assertEqual(Title.objects.all().count(), 4)
self.assertEqual(Title.objects.filter(published=True).count(), 4)
def test_publish_ordering(self):
page = self.create_page('parent', published=True)
pageA = self.create_page('pageA', parent=page, published=True)
pageC = self.create_page('pageC', parent=page, published=True)
pageB = self.create_page('pageB', parent=page, published=True)
page = page.reload()
pageB.move_page(pageA, 'right')
pageB.publish("en")
# pageC needs reload since B has swapped places with it
pageC.reload().publish("en")
pageA.publish('en')
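# Editor's note: the assertions below compare django-treebeard materialized
# paths, where each 4-character segment encodes one tree level, so "00010002"
# denotes the second child of the root node "0001".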
drafts = Page.objects.drafts().order_by('path')
draft_titles = [(p.get_title('en'), p.path) for p in drafts]
self.assertEqual([('parent', "0001"),
('pageA', "00010001"),
('pageB', "00010002"),
('pageC', "00010003")], draft_titles)
public = Page.objects.public().order_by('path')
public_titles = [(p.get_title('en'), p.path) for p in public]
self.assertEqual([('parent', "0002"),
('pageA', "00020001"),
('pageB', "00020002"),
('pageC', "00020003")], public_titles)
page.publish('en')
drafts = Page.objects.drafts().order_by('path')
draft_titles = [(p.get_title('en'), p.path) for p in drafts]
self.assertEqual([('parent', "0001"),
('pageA', "00010001"),
('pageB', "00010002"),
('pageC', "00010003")], draft_titles)
public = Page.objects.public().order_by('path')
public_titles = [(p.get_title('en'), p.path) for p in public]
self.assertEqual([('parent', "0002"),
('pageA', "00020001"),
('pageB', "00020002"),
('pageC', "00020003")], public_titles)
def test_publish_ordering2(self):
page = self.create_page('parent', published=False)
pageA = self.create_page('pageA', published=False)
pageC = self.create_page('pageC', published=False, parent=pageA)
pageB = self.create_page('pageB', published=False, parent=pageA)
page = page.reload()
pageA.publish('en')
pageB.publish('en')
pageC.publish('en')
page.publish('en')
drafts = Page.objects.filter(publisher_is_draft=True).order_by('path')
publics = Page.objects.filter(publisher_is_draft=False).order_by('path')
x = 0
for draft in drafts:
self.assertEqual(draft.publisher_public_id, publics[x].pk)
x += 1
def test_unpublish_unpublish(self):
name = self._testMethodName
page = self.create_page(name, published=True)
drafts = Page.objects.drafts()
published = Page.objects.public().published("en")
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectExist(published, title_set__title=name)
page.unpublish('en')
self.assertFalse(page.is_published('en'))
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectDoesNotExist(published, title_set__title=name)
page.publish('en')
self.assertTrue(page.publisher_public_id)
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectExist(published, title_set__title=name)
def test_delete_title_unpublish(self):
page = self.create_page('test', published=True)
sub_page = self.create_page('test2', published=True, parent=page)
self.assertTrue(sub_page.publisher_public.is_published('en'))
page.title_set.all().delete()
self.assertFalse(sub_page.publisher_public.is_published('en', force_reload=True))
def test_modify_child_while_pending(self):
home = self.create_page("Home", published=True, in_navigation=True)
child = self.create_page("Child", published=True, parent=home,
in_navigation=False)
home = home.reload()
home.unpublish('en')
self.assertEqual(Title.objects.count(), 4)
child = child.reload()
self.assertFalse(child.publisher_public.is_published('en'))
self.assertFalse(child.in_navigation)
self.assertFalse(child.publisher_public.in_navigation)
child.in_navigation = True
child.save()
child.publish('en')
child = self.reload(child)
self.assertEqual(Title.objects.count(), 4)
self.assertTrue(child.is_published('en'))
self.assertFalse(child.publisher_public.is_published('en'))
self.assertTrue(child.in_navigation)
self.assertTrue(child.publisher_public.in_navigation)
self.assertEqual(child.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
home.publish('en')
child = self.reload(child)
self.assertTrue(child.is_published('en'))
self.assertTrue(child.publisher_public_id)
self.assertTrue(child.publisher_public.in_navigation)
self.assertEqual(child.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT)
def test_republish_with_descendants(self):
home = self.create_page("Home", published=True)
child = self.create_page("Child", published=True, parent=home)
gc = self.create_page("GC", published=True, parent=child)
self.assertTrue(child.is_published("en"))
self.assertTrue(gc.is_published('en'))
home = home.reload()
home.unpublish('en')
child = self.reload(child)
gc = self.reload(gc)
self.assertTrue(child.is_published("en"))
self.assertTrue(gc.is_published("en"))
self.assertFalse(child.publisher_public.is_published("en"))
self.assertFalse(gc.publisher_public.is_published('en'))
self.assertEqual(child.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
self.assertEqual(gc.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
home.publish('en')
child = self.reload(child)
gc = self.reload(gc)
self.assertTrue(child.publisher_public_id)
self.assertTrue(gc.is_published('en'))
self.assertTrue(child.is_published('en'))
self.assertTrue(gc.publisher_public_id)
self.assertEqual(child.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT)
self.assertEqual(gc.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT)
def test_republish_with_dirty_children(self):
home = self.create_page("Home", published=True)
dirty1 = self.create_page("Dirty1", published=True, parent=home)
dirty2 = self.create_page("Dirty2", published=True, parent=home)
home = self.reload(home)
dirty1 = self.reload(dirty1)
dirty2 = self.reload(dirty2)
dirty1.in_navigation = True
dirty1.save()
home.unpublish('en')
dirty2.in_navigation = True
dirty2.save()
dirty1 = self.reload(dirty1)
dirty2 = self.reload(dirty2)
self.assertTrue(dirty1.is_published)
self.assertTrue(dirty2.publisher_public_id)
self.assertEqual(dirty1.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
self.assertEqual(dirty2.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
home = self.reload(home)
with self.assertNumQueries(FuzzyInt(0, 100)):
home.publish('en')
dirty1 = self.reload(dirty1)
dirty2 = self.reload(dirty2)
self.assertTrue(dirty1.is_published("en"))
self.assertTrue(dirty2.is_published("en"))
self.assertTrue(dirty1.publisher_public.is_published("en"))
self.assertTrue(dirty2.publisher_public.is_published("en"))
self.assertEqual(dirty1.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
self.assertEqual(dirty2.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
def test_republish_with_unpublished_child(self):
"""
Unpub1 was never published, and unpub2 has been unpublished after the
fact. None of the grandchildren should become published.
"""
home = self.create_page("Home", published=True)
unpub1 = self.create_page("Unpub1", published=False, parent=home)
unpub2 = self.create_page("Unpub2", published=True, parent=home)
gc1 = self.create_page("GC1", published=True, parent=unpub1)
gc2 = self.create_page("GC2", published=True, parent=unpub2)
self.assertFalse(gc1.publisher_public_id)
self.assertTrue(gc2.publisher_public_id)
self.assertTrue(gc1.is_published('en'))
self.assertTrue(gc2.is_published('en'))
home.unpublish('en')
unpub1 = self.reload(unpub1)
unpub2.unpublish('en') # Just marks this as not published
for page in (unpub1, unpub2):
self.assertFalse(page.is_published('en'), page)
self.assertEqual(page.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
self.assertIsNone(unpub1.publisher_public)
self.assertIsNotNone(unpub2.publisher_public)
self.assertFalse(unpub2.publisher_public.is_published('en'))
gc1 = self.reload(gc1)
gc2 = self.reload(gc2)
for page in (gc1, gc2):
self.assertTrue(page.is_published('en'))
self.assertEqual(page.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
self.assertIsNone(gc1.publisher_public)
self.assertIsNotNone(gc2.publisher_public)
self.assertFalse(gc2.publisher_public.is_published('en'))
def test_unpublish_with_descendants(self):
page = self.create_page("Page", published=True)
child = self.create_page("Child", parent=page, published=True)
self.create_page("Grandchild", parent=child, published=True)
page = page.reload()
child.reload()
drafts = Page.objects.drafts()
public = Page.objects.public()
published = Page.objects.public().published("en")
self.assertEqual(published.count(), 3)
self.assertEqual(page.get_descendant_count(), 2)
base = reverse('pages-root')
for url in (base, base + 'child/', base + 'child/grandchild/'):
response = self.client.get(url)
self.assertEqual(response.status_code, 200, url)
for title in ('Page', 'Child', 'Grandchild'):
self.assertObjectExist(drafts, title_set__title=title)
self.assertObjectExist(public, title_set__title=title)
self.assertObjectExist(published, title_set__title=title)
item = drafts.get(title_set__title=title)
self.assertTrue(item.publisher_public_id)
self.assertEqual(item.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT)
self.assertTrue(page.unpublish('en'), 'Unpublish was not successful')
self.assertFalse(page.is_published('en'))
cache.clear()
for url in (base, base + 'child/', base + 'child/grandchild/'):
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
for title in ('Page', 'Child', 'Grandchild'):
self.assertObjectExist(drafts, title_set__title=title)
self.assertObjectExist(public, title_set__title=title)
self.assertObjectDoesNotExist(published, title_set__title=title)
item = drafts.get(title_set__title=title)
if title == 'Page':
self.assertFalse(item.is_published("en"))
self.assertFalse(item.publisher_public.is_published("en"))
# Not sure what the proper state of these is after unpublish
#self.assertEqual(page.publisher_state, PUBLISHER_STATE_DEFAULT)
self.assertTrue(page.is_dirty('en'))
else:
# The changes to the published subpages are simply that the
# published flag of the PUBLIC instance goes to false, and the
# publisher state is set to mark waiting for parent
self.assertTrue(item.is_published('en'), title)
self.assertFalse(item.publisher_public.is_published('en'), title)
self.assertEqual(item.get_publisher_state('en'), PUBLISHER_STATE_PENDING,
title)
self.assertTrue(item.is_dirty('en'), title)
def test_unpublish_with_dirty_descendants(self):
page = self.create_page("Page", published=True)
child = self.create_page("Child", parent=page, published=True)
gchild = self.create_page("Grandchild", parent=child, published=True)
child.in_navigation = True
child.save()
self.assertTrue(child.is_dirty("en"))
self.assertFalse(gchild.is_dirty('en'))
self.assertTrue(child.publisher_public.is_published('en'))
self.assertTrue(gchild.publisher_public.is_published('en'))
page.unpublish('en')
child = self.reload(child)
gchild = self.reload(gchild)
# Descendants become dirty after unpublish
self.assertTrue(child.is_dirty('en'))
self.assertTrue(gchild.is_dirty('en'))
# However, their public version is still removed no matter what
self.assertFalse(child.publisher_public.is_published('en'))
self.assertFalse(gchild.publisher_public.is_published('en'))
def test_prepublish_descendants(self):
page = self.create_page("Page", published=True)
child = self.create_page("Child", parent=page, published=False)
gchild2 = self.create_page("Grandchild2", parent=child, published=False)
self.create_page("Grandchild3", parent=child, published=True)
gchild = self.create_page("Grandchild", published=True)
gchild = gchild.reload()
child = child.reload()
gchild.move_page(target=child, position='last-child')
gchild.reload()
gchild.publish('en')
self.assertFalse(child.is_published('en'))
self.assertTrue(gchild.is_published('en'))
self.assertEqual(gchild.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
child = child.reload()
child.publish('en')
gchild2 = gchild2.reload()
gchild2.publish('en')
self.assertTrue(child.is_published("en"))
self.assertTrue(gchild.is_published("en"))
self.assertEqual(gchild.get_publisher_state('en', force_reload=True), PUBLISHER_STATE_DEFAULT)
gchild = gchild.reload()
gchild2 = gchild2.reload()
self.assertEqual(gchild.path[4:], gchild.publisher_public.path[4:])
self.assertEqual(gchild.depth, gchild.publisher_public.depth)
def test_republish_multiple_root(self):
# TODO: The paths do not match expected behaviour
home = self.create_page("Page", published=True)
other = self.create_page("Another Page", published=True)
child = self.create_page("Child", published=True, parent=home)
child2 = self.create_page("Child", published=True, parent=other)
self.assertTrue(Page.objects.filter(is_home=True).count(), 2)
self.assertTrue(home.is_home)
home = home.reload()
self.assertTrue(home.publisher_public.is_home)
root = reverse('pages-root')
self.assertEqual(home.get_absolute_url(), root)
self.assertEqual(home.get_public_object().get_absolute_url(), root)
self.assertEqual(child.get_absolute_url(), root + 'child/')
self.assertEqual(child.get_public_object().get_absolute_url(), root + 'child/')
self.assertEqual(other.get_absolute_url(), root + 'another-page/')
self.assertEqual(other.get_public_object().get_absolute_url(), root + 'another-page/')
self.assertEqual(child2.get_absolute_url(), root + 'another-page/child/')
self.assertEqual(child2.get_public_object().get_absolute_url(), root + 'another-page/child/')
home = self.reload(home)
home.unpublish('en')
home = self.reload(home)
other = self.reload(other)
child = self.reload(child)
child2 = self.reload(child2)
self.assertFalse(home.is_home)
self.assertFalse(home.publisher_public.is_home)
self.assertTrue(other.is_home)
self.assertTrue(other.publisher_public.is_home)
self.assertEqual(other.get_absolute_url(), root)
self.assertEqual(other.get_public_object().get_absolute_url(), root)
self.assertEqual(home.get_absolute_url(), root + 'page/')
self.assertEqual(home.get_public_object().get_absolute_url(), root + 'page/')
self.assertEqual(child.get_absolute_url(), root + 'page/child/')
self.assertEqual(child.get_public_object().get_absolute_url(), root + 'page/child/')
self.assertEqual(child2.get_absolute_url(), root + 'child/')
self.assertEqual(child2.get_public_object().get_absolute_url(), root + 'child/')
home.publish('en')
home = self.reload(home)
other = self.reload(other)
child = self.reload(child)
child2 = self.reload(child2)
self.assertTrue(home.is_home)
self.assertTrue(home.publisher_public.is_home)
self.assertEqual(home.get_absolute_url(), root)
self.assertEqual(home.get_public_object().get_absolute_url(), root)
self.assertEqual(child.get_absolute_url(), root + 'child/')
self.assertEqual(child.get_public_object().get_absolute_url(), root + 'child/')
self.assertEqual(other.get_absolute_url(), root + 'another-page/')
self.assertEqual(other.get_public_object().get_absolute_url(), root + 'another-page/')
self.assertEqual(child2.get_absolute_url(), root + 'another-page/child/')
self.assertEqual(child2.get_public_object().get_absolute_url(), root + 'another-page/child/')
def test_revert_contents(self):
user = self.get_superuser()
page = create_page("Page", "nav_playground.html", "en", published=True,
created_by=user)
placeholder = page.placeholders.get(slot=u"body")
deleted_plugin = add_plugin(placeholder, u"TextPlugin", u"en", body="Deleted content")
text_plugin = add_plugin(placeholder, u"TextPlugin", u"en", body="Public content")
page.publish('en')
# Modify and delete plugins
text_plugin.body = "<p>Draft content</p>"
text_plugin.save()
deleted_plugin.delete()
self.assertEqual(CMSPlugin.objects.count(), 3)
# Now let's revert and restore
page.revert('en')
self.assertEqual(page.get_publisher_state("en"), PUBLISHER_STATE_DEFAULT)
self.assertEqual(CMSPlugin.objects.count(), 4)
plugins = CMSPlugin.objects.filter(placeholder__page=page)
self.assertEqual(plugins.count(), 2)
plugins = [plugin.get_plugin_instance()[0] for plugin in plugins]
self.assertEqual(plugins[0].body, "Deleted content")
self.assertEqual(plugins[1].body, "Public content")
def test_revert_move(self):
parent = create_page("Parent", "nav_playground.html", "en", published=True)
parent_url = parent.get_absolute_url()
page = create_page("Page", "nav_playground.html", "en", published=True,
parent=parent)
other = create_page("Other", "nav_playground.html", "en", published=True)
other_url = other.get_absolute_url()
child = create_page("Child", "nav_playground.html", "en", published=True,
parent=page)
parent = parent.reload()
page = page.reload()
self.assertEqual(page.get_absolute_url(), parent_url + "page/")
self.assertEqual(child.get_absolute_url(), parent_url + "page/child/")
# Now let's move it (and the child)
page.move_page(other)
page = self.reload(page)
child = self.reload(child)
self.assertEqual(page.get_absolute_url(), other_url + "page/")
self.assertEqual(child.get_absolute_url(), other_url + "page/child/")
# Public version changed the url as well
self.assertEqual(page.publisher_public.get_absolute_url(), other_url + "page/")
self.assertEqual(child.publisher_public.get_absolute_url(), other_url + "page/child/")
def test_publish_works_with_descendants(self):
"""
For help understanding what this tests for, see:
http://articles.sitepoint.com/print/hierarchical-data-database
Creates this published structure:
home
/ \
item1 item2
/ \
subitem1 subitem2
"""
home_page = create_page("home", "nav_playground.html", "en",
published=True, in_navigation=False)
create_page("item1", "nav_playground.html", "en", parent=home_page,
published=True)
item2 = create_page("item2", "nav_playground.html", "en", parent=home_page,
published=True)
create_page("subitem1", "nav_playground.html", "en", parent=item2,
published=True)
create_page("subitem2", "nav_playground.html", "en", parent=item2,
published=True)
item2 = item2.reload()
not_drafts = list(Page.objects.filter(publisher_is_draft=False).order_by('path'))
drafts = list(Page.objects.filter(publisher_is_draft=True).order_by('path'))
self.assertEqual(len(not_drafts), 5)
self.assertEqual(len(drafts), 5)
for idx, draft in enumerate(drafts):
public = not_drafts[idx]
# Check that a node doesn't become a root node magically
self.assertEqual(bool(public.parent_id), bool(draft.parent_id))
if public.parent:
self.assertEqual(public.path[0:4], public.parent.path[0:4])
self.assertTrue(public.parent in public.get_ancestors())
self.assertTrue(public in public.parent.get_descendants())
self.assertTrue(public in public.parent.get_children())
if draft.parent:
# Same principle for the draft tree
self.assertEqual(draft.path[0:4], draft.parent.path[0:4])
self.assertTrue(draft.parent in draft.get_ancestors())
self.assertTrue(draft in draft.parent.get_descendants())
self.assertTrue(draft in draft.parent.get_children())
# Now call publish again. The structure should not change.
item2.publish('en')
not_drafts = list(Page.objects.filter(publisher_is_draft=False).order_by('path'))
drafts = list(Page.objects.filter(publisher_is_draft=True).order_by('path'))
self.assertEqual(len(not_drafts), 5)
self.assertEqual(len(drafts), 5)
for idx, draft in enumerate(drafts):
public = not_drafts[idx]
# Check that a node doesn't become a root node magically
self.assertEqual(bool(public.parent_id), bool(draft.parent_id))
self.assertEqual(public.numchild, draft.numchild)
if public.parent:
self.assertEqual(public.path[0:4], public.parent.path[0:4])
self.assertTrue(public.parent in public.get_ancestors())
self.assertTrue(public in public.parent.get_descendants())
self.assertTrue(public in public.parent.get_children())
if draft.parent:
self.assertEqual(draft.path[0:4], draft.parent.path[0:4])
self.assertTrue(draft.parent in draft.get_ancestors())
self.assertTrue(draft in draft.parent.get_descendants())
self.assertTrue(draft in draft.parent.get_children())
| bsd-3-clause |
KamranMackey/CloudBot | plugins/wordnik.py | 4 | 6404 | import re
import random
import requests
import urllib.parse
from cloudbot import hook
from cloudbot.util import web
API_URL = 'http://api.wordnik.com/v4/'
WEB_URL = 'https://www.wordnik.com/words/{}'
ATTRIB_NAMES = {
'ahd-legacy': 'AHD/Wordnik',
'century': 'Century/Wordnik',
'wiktionary': 'Wiktionary/Wordnik',
'gcide': 'GCIDE/Wordnik',
'wordnet': 'Wordnet/Wordnik'
}
def sanitize(text):
return urllib.parse.quote(text.translate({ord('\\'):None, ord('/'):None}))
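# Illustrative behaviour (editor's note, not part of the original plugin):
# sanitize("foo/bar baz") drops the slash and percent-encodes the space,
# returning "foobar%20baz", so the word is safe to embed in the API URL path.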
@hook.on_start()
def load_key(bot):
global api_key
api_key = bot.config.get("api_keys", {}).get("wordnik", None)
@hook.command("define", "dictionary")
def define(text):
"""<word> -- Returns a dictionary definition from Wordnik for <word>."""
if not api_key:
return "This command requires an API key from wordnik.com."
word = sanitize(text)
url = API_URL + "word.json/{}/definitions".format(word)
params = {
'api_key': api_key,
'limit': 1
}
json = requests.get(url, params=params).json()
if json:
data = json[0]
data['url'] = web.try_shorten(WEB_URL.format(data['word']))
data['attrib'] = ATTRIB_NAMES[data['sourceDictionary']]
return "\x02{word}\x02: {text} - {url} ({attrib})".format(**data)
else:
return "I could not find a definition for \x02{}\x02.".format(word)
@hook.command("wordusage", "wordexample", "usage")
def word_usage(text):
"""<word> -- Returns an example sentence showing the usage of <word>."""
if not api_key:
return "This command requires an API key from wordnik.com."
word = sanitize(text)
url = API_URL + "word.json/{}/examples".format(word)
params = {
'api_key': api_key,
'limit': 10
}
json = requests.get(url, params=params).json()
if json:
out = "\x02{}\x02: ".format(word)
example = random.choice(json['examples'])
out += "{} ".format(example['text'])
return out
else:
return "I could not find any usage examples for \x02{}\x02.".format(word)
@hook.command("pronounce", "sounditout")
def pronounce(text):
"""<word> -- Returns instructions on how to pronounce <word> with an audio example."""
if not api_key:
return "This command requires an API key from wordnik.com."
word = sanitize(text)
url = API_URL + "word.json/{}/pronunciations".format(word)
params = {
'api_key': api_key,
'limit': 5
}
json = requests.get(url, params=params).json()
if json:
out = "\x02{}\x02: ".format(word)
out += " • ".join([i['raw'] for i in json])
else:
return "Sorry, I don't know how to pronounce \x02{}\x02.".format(word)
url = API_URL + "word.json/{}/audio".format(word)
params = {
'api_key': api_key,
'limit': 1,
'useCanonical': 'false'
}
json = requests.get(url, params=params).json()
if json:
url = web.try_shorten(json[0]['fileUrl'])
out += " - {}".format(url)
return out
@hook.command()
def synonym(text):
"""<word> -- Returns a list of synonyms for <word>."""
if not api_key:
return "This command requires an API key from wordnik.com."
word = sanitize(text)
url = API_URL + "word.json/{}/relatedWords".format(word)
params = {
'api_key': api_key,
'relationshipTypes': 'synonym',
'limitPerRelationshipType': 5
}
json = requests.get(url, params=params).json()
if json:
out = "\x02{}\x02: ".format(word)
out += " • ".join(json[0]['words'])
return out
else:
return "Sorry, I couldn't find any synonyms for \x02{}\x02.".format(word)
@hook.command()
def antonym(text):
"""<word> -- Returns a list of antonyms for <word>."""
if not api_key:
return "This command requires an API key from wordnik.com."
word = sanitize(text)
url = API_URL + "word.json/{}/relatedWords".format(word)
params = {
'api_key': api_key,
'relationshipTypes': 'antonym',
'limitPerRelationshipType': 5,
'useCanonical': 'false'
}
json = requests.get(url, params=params).json()
if json:
out = "\x02{}\x02: ".format(word)
out += " • ".join(json[0]['words'])
return out
else:
return "Sorry, I couldn't find any antonyms for \x02{}\x02.".format(word)
# word of the day
@hook.command("word", "wordoftheday", autohelp=False)
def wordoftheday(text, conn):
"""returns the word of the day. To see past word of the day enter use the format yyyy-MM-dd. The specified date must be after 2009-08-10."""
if not api_key:
return "This command requires an API key from wordnik.com."
match = re.search(r'(\d\d\d\d-\d\d-\d\d)', text)
date = ""
if match:
date = match.group(1)
url = API_URL + "words.json/wordOfTheDay"
if date:
params = {
'api_key': api_key,
'date': date
}
day = date
else:
params = {
'api_key': api_key,
}
day = "today"
json = requests.get(url, params=params).json()
if json:
word = json['word']
note = json['note']
pos = json['definitions'][0]['partOfSpeech']
definition = json['definitions'][0]['text']
out = "The word for \x02{}\x02 is \x02{}\x02: ".format(day, word)
out += "\x0305({})\x0305 ".format(pos)
out += "\x0310{}\x0310 ".format(note)
out += "\x02Definition:\x02 \x0303{}\x0303".format(definition)
return out
else:
return "Sorry I couldn't find the word of the day, check out this awesome otter instead {}".format(
"http://i.imgur.com/pkuWlWx.gif")
# random word
@hook.command("wordrandom", "randomword", autohelp=False)
def random_word(conn):
"""Grabs a random word from wordnik.com"""
if not api_key:
return "This command requires an API key from wordnik.com."
url = API_URL + "words.json/randomWord"
params = {
'api_key': api_key,
'hasDictionarydef': 'true',
'vulgar': 'true'
}
json = requests.get(url, params=params).json()
if json:
word = json['word']
return "Your random word is \x02{}\x02.".format(word)
else:
return "There was a problem contacting the Wordnik API."
| gpl-3.0 |
sserrot/champion_relationships | venv/Lib/site-packages/pip/_vendor/ipaddress.py | 30 | 79875 | # Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
from __future__ import unicode_literals
import itertools
import struct
__version__ = '1.0.23'
# Compatibility functions
_compat_int_types = (int,)
try:
_compat_int_types = (int, long)
except NameError:
pass
try:
_compat_str = unicode
except NameError:
_compat_str = str
assert bytes != str
if b'\0'[0] == 0: # Python 3 semantics
def _compat_bytes_to_byte_vals(byt):
return byt
else:
def _compat_bytes_to_byte_vals(byt):
return [struct.unpack(b'!B', b)[0] for b in byt]
try:
_compat_int_from_byte_vals = int.from_bytes
except AttributeError:
def _compat_int_from_byte_vals(bytvals, endianess):
assert endianess == 'big'
res = 0
for bv in bytvals:
assert isinstance(bv, _compat_int_types)
res = (res << 8) + bv
return res
def _compat_to_bytes(intval, length, endianess):
assert isinstance(intval, _compat_int_types)
assert endianess == 'big'
if length == 4:
if intval < 0 or intval >= 2 ** 32:
raise struct.error("integer out of range for 'I' format code")
return struct.pack(b'!I', intval)
elif length == 16:
if intval < 0 or intval >= 2 ** 128:
raise struct.error("integer out of range for 'QQ' format code")
return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff)
else:
raise NotImplementedError()
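# For example (editor's note), _compat_to_bytes(3232235777, 4, 'big') packs the
# integer form of 192.168.1.1 into b'\xc0\xa8\x01\x01', which is what
# v4_int_to_packed() relies on further below.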
if hasattr(int, 'bit_length'):
# Not int.bit_length , since that won't work in 2.7 where long exists
def _compat_bit_length(i):
return i.bit_length()
else:
def _compat_bit_length(i):
for res in itertools.count():
if i >> res == 0:
return res
def _compat_range(start, end, step=1):
assert step > 0
i = start
while i < end:
yield i
i += step
class _TotalOrderingMixin(object):
__slots__ = ()
# Helper that derives the other comparison operations from
# __lt__ and __eq__
# We avoid functools.total_ordering because it doesn't handle
# NotImplemented correctly yet (http://bugs.python.org/issue10042)
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
def __lt__(self, other):
raise NotImplementedError
def __le__(self, other):
less = self.__lt__(other)
if less is NotImplemented or not less:
return self.__eq__(other)
return less
def __gt__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not (less or equal)
def __ge__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
return not less
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
"""A Value Error related to the address."""
class NetmaskValueError(ValueError):
"""A Value Error related to the netmask."""
def ip_address(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the *address* passed isn't either a v4 or a v6
address
"""
try:
return IPv4Address(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Address(address)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
'%r does not appear to be an IPv4 or IPv6 address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?' % address)
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
address)
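# Editor's illustration: ip_address(u'192.0.2.1') returns an IPv4Address and
# ip_address(u'2001:db8::1') returns an IPv6Address; a small integer such as 1
# is treated as IPv4 because it is below 2**32.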
def ip_network(address, strict=True):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP network. Either IPv4 or
IPv6 networks may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if the network has host bits set.
"""
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
'%r does not appear to be an IPv4 or IPv6 network. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?' % address)
raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
address)
def ip_interface(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Interface or IPv6Interface object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
Notes:
The IPv?Interface classes describe an Address on a particular
Network, so they're basically a combination of both the Address
and Network classes.
"""
try:
return IPv4Interface(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Interface(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
address)
def v4_int_to_packed(address):
"""Represent an address as 4 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv4 IP address.
Returns:
The integer address packed as 4 bytes in network (big-endian) order.
Raises:
ValueError: If the integer is negative or too large to be an
IPv4 IP address.
"""
try:
return _compat_to_bytes(address, 4, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv4")
def v6_int_to_packed(address):
"""Represent an address as 16 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv6 IP address.
Returns:
The integer address packed as 16 bytes in network (big-endian) order.
"""
try:
return _compat_to_bytes(address, 16, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv6")
def _split_optional_netmask(address):
"""Helper to split the netmask and raise AddressValueError if needed"""
addr = _compat_str(address).split('/')
if len(addr) > 2:
raise AddressValueError("Only one '/' permitted in %r" % address)
return addr
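# Editor's example: _split_optional_netmask(u'192.0.2.0/24') returns
# ['192.0.2.0', '24'], while a string containing more than one '/' raises
# AddressValueError.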
def _find_address_range(addresses):
"""Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
"""
it = iter(addresses)
first = last = next(it)
for ip in it:
if ip._ip != last._ip + 1:
yield first, last
first = ip
last = ip
yield first, last
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
return min(bits, _compat_bit_length(~number & (number - 1)))
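# Editor's example: _count_righthand_zero_bits(0b10100000, 8) returns 5, the
# number of trailing zero bits, which is how prefix lengths are recovered from
# integer netmasks below.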
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> list(summarize_address_range(IPv4Address('192.0.2.0'),
... IPv4Address('192.0.2.130')))
... #doctest: +NORMALIZE_WHITESPACE
[IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
IPv4Network('192.0.2.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
An iterator of the summarized IPv(4|6) network objects.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version of the first address is not 4 or 6.
"""
if (not (isinstance(first, _BaseAddress) and
isinstance(last, _BaseAddress))):
raise TypeError('first and last must be IP addresses, not networks')
if first.version != last.version:
raise TypeError("%s and %s are not of the same version" % (
first, last))
if first > last:
raise ValueError('last IP address must be greater than first')
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError('unknown IP version')
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
_compat_bit_length(last_int - first_int + 1) - 1)
net = ip((first_int, ip_bits - nbits))
yield net
first_int += 1 << nbits
if first_int - 1 == ip._ALL_ONES:
break
def _collapse_addresses_internal(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('192.0.2.0/26')
ip2 = IPv4Network('192.0.2.64/26')
ip3 = IPv4Network('192.0.2.128/26')
ip4 = IPv4Network('192.0.2.192/26')
_collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
[IPv4Network('192.0.2.0/24')]
This shouldn't be called directly; it is called via
collapse_addresses([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
# First merge
to_merge = list(addresses)
subnets = {}
while to_merge:
net = to_merge.pop()
supernet = net.supernet()
existing = subnets.get(supernet)
if existing is None:
subnets[supernet] = net
elif existing != net:
# Merge consecutive subnets
del subnets[supernet]
to_merge.append(supernet)
# Then iterate over resulting networks, skipping subsumed subnets
last = None
for net in sorted(subnets.values()):
if last is not None:
# Since they are sorted,
# last.network_address <= net.network_address is a given.
if last.broadcast_address >= net.broadcast_address:
continue
yield net
last = net
def collapse_addresses(addresses):
"""Collapse a list of IP objects.
Example:
collapse_addresses([IPv4Network('192.0.2.0/25'),
IPv4Network('192.0.2.128/25')]) ->
[IPv4Network('192.0.2.0/24')]
Args:
addresses: An iterator of IPv4Network or IPv6Network objects.
Returns:
An iterator of the collapsed IPv(4|6)Network objects.
Raises:
TypeError: If passed a list of mixed version objects.
"""
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseAddress):
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
try:
ips.append(ip.ip)
except AttributeError:
ips.append(ip.network_address)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, nets[-1]))
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
# find consecutive address ranges in the sorted sequence and summarize them
if ips:
for first, last in _find_address_range(ips):
addrs.extend(summarize_address_range(first, last))
return _collapse_addresses_internal(addrs + nets)
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
doesn't make any sense. There are some times however, where you may wish
to have ipaddress sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNetwork):
return obj._get_networks_key()
elif isinstance(obj, _BaseAddress):
return obj._get_address_key()
return NotImplemented
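# Editor's sketch of the intended use: passing key=get_mixed_type_key to
# sorted() lets a mixed list such as [ip_network(u'192.0.2.0/28'),
# ip_address(u'192.0.2.1')] be sorted without the TypeError a bare comparison
# would raise.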
class _IPAddressBase(_TotalOrderingMixin):
"""The mother class."""
__slots__ = ()
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return _compat_str(self)
@property
def reverse_pointer(self):
"""The name of the reverse DNS pointer for the IP address, e.g.:
>>> ipaddress.ip_address("127.0.0.1").reverse_pointer
'1.0.0.127.in-addr.arpa'
>>> ipaddress.ip_address("2001:db8::1").reverse_pointer
'1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
"""
return self._reverse_pointer()
@property
def version(self):
msg = '%200s has no version specified' % (type(self),)
raise NotImplementedError(msg)
def _check_int_address(self, address):
if address < 0:
msg = "%d (< 0) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._version))
if address > self._ALL_ONES:
msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._max_prefixlen,
self._version))
def _check_packed_address(self, address, expected_len):
address_len = len(address)
if address_len != expected_len:
msg = (
'%r (len %d != %d) is not permitted as an IPv%d address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?')
raise AddressValueError(msg % (address, address_len,
expected_len, self._version))
@classmethod
def _ip_int_from_prefix(cls, prefixlen):
"""Turn the prefix length into a bitwise netmask
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
@classmethod
def _prefix_from_ip_int(cls, ip_int):
"""Return prefix length from the bitwise netmask.
Args:
ip_int: An integer, the netmask in expanded bitwise format
Returns:
An integer, the prefix length.
Raises:
ValueError: If the input intermingles zeroes & ones
"""
trailing_zeroes = _count_righthand_zero_bits(ip_int,
cls._max_prefixlen)
prefixlen = cls._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixlen) - 1
if leading_ones != all_ones:
byteslen = cls._max_prefixlen // 8
details = _compat_to_bytes(ip_int, byteslen, 'big')
msg = 'Netmask pattern %r mixes zeroes & ones'
raise ValueError(msg % details)
return prefixlen
@classmethod
def _report_invalid_netmask(cls, netmask_str):
msg = '%r is not a valid netmask' % netmask_str
raise NetmaskValueError(msg)
@classmethod
def _prefix_from_prefix_string(cls, prefixlen_str):
"""Return prefix length from a numeric string
Args:
prefixlen_str: The string to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask
"""
# int allows a leading +/- as well as surrounding whitespace,
# so we ensure that isn't the case
if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
cls._report_invalid_netmask(prefixlen_str)
try:
prefixlen = int(prefixlen_str)
except ValueError:
cls._report_invalid_netmask(prefixlen_str)
if not (0 <= prefixlen <= cls._max_prefixlen):
cls._report_invalid_netmask(prefixlen_str)
return prefixlen
@classmethod
def _prefix_from_ip_string(cls, ip_str):
"""Turn a netmask/hostmask string into a prefix length
Args:
ip_str: The netmask/hostmask to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask/hostmask
"""
# Parse the netmask/hostmask like an IP address.
try:
ip_int = cls._ip_int_from_string(ip_str)
except AddressValueError:
cls._report_invalid_netmask(ip_str)
# Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
# Note that the two ambiguous cases (all-ones and all-zeroes) are
# treated as netmasks.
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
pass
# Invert the bits, and try matching a /0+1+/ hostmask instead.
ip_int ^= cls._ALL_ONES
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
cls._report_invalid_netmask(ip_str)
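# For instance (editor's note), both the netmask u'255.255.255.0' and the
# hostmask u'0.0.0.255' are converted to a prefix length of 24 by this helper.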
def __reduce__(self):
return self.__class__, (_compat_str(self),)
class _BaseAddress(_IPAddressBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by single IP addresses.
"""
__slots__ = ()
def __int__(self):
return self._ip
def __eq__(self, other):
try:
return (self._ip == other._ip and
self._version == other._version)
except AttributeError:
return NotImplemented
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseAddress):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self._ip != other._ip:
return self._ip < other._ip
return False
# Shorthand for Integer addition and subtraction. This is not
# meant to ever support addition/subtraction of addresses.
def __add__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) + other)
def __sub__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) - other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return _compat_str(self._string_from_ip_int(self._ip))
def __hash__(self):
return hash(hex(int(self._ip)))
def _get_address_key(self):
return (self._version, self)
def __reduce__(self):
return self.__class__, (self._ip,)
class _BaseNetwork(_IPAddressBase):
"""A generic IP network object.
This IP class contains the version independent methods which are
used by networks.
"""
def __init__(self, address):
self._cache = {}
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return '%s/%d' % (self.network_address, self.prefixlen)
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the network
or broadcast addresses.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast):
yield self._address_class(x)
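# Editor's note -- illustrative example (not in the original source): hosts()
# skips the network and broadcast addresses, so a /29 yields six usable hosts:
#   >>> list(IPv4Network('192.0.2.0/29').hosts())
#   [IPv4Address('192.0.2.1'), IPv4Address('192.0.2.2'), IPv4Address('192.0.2.3'),
#    IPv4Address('192.0.2.4'), IPv4Address('192.0.2.5'), IPv4Address('192.0.2.6')]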
def __iter__(self):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network, broadcast + 1):
yield self._address_class(x)
def __getitem__(self, n):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
if n >= 0:
if network + n > broadcast:
raise IndexError('address out of range')
return self._address_class(network + n)
else:
n += 1
if broadcast + n < network:
raise IndexError('address out of range')
return self._address_class(broadcast + n)
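# Editor's note -- illustrative example (not in the original source): indexing
# counts forward from the network address and backward from the broadcast:
#   >>> IPv4Network('192.0.2.0/28')[1]
#   IPv4Address('192.0.2.1')
#   >>> IPv4Network('192.0.2.0/28')[-1]
#   IPv4Address('192.0.2.15')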
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseNetwork):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self.network_address != other.network_address:
return self.network_address < other.network_address
if self.netmask != other.netmask:
return self.netmask < other.netmask
return False
def __eq__(self, other):
try:
return (self._version == other._version and
self.network_address == other.network_address and
int(self.netmask) == int(other.netmask))
except AttributeError:
return NotImplemented
def __hash__(self):
return hash(int(self.network_address) ^ int(self.netmask))
def __contains__(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if isinstance(other, _BaseNetwork):
return False
# dealing with another address
else:
# address
return (int(self.network_address) <= int(other._ip) <=
int(self.broadcast_address))
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network_address in other or (
self.broadcast_address in other or (
other.network_address in self or (
other.broadcast_address in self)))
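# Editor's note -- illustrative example (not in the original source):
#   >>> IPv4Network('192.0.2.0/28').overlaps(IPv4Network('192.0.2.8/29'))
#   True
#   >>> IPv4Network('192.0.2.0/28').overlaps(IPv4Network('192.0.3.0/24'))
#   False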
@property
def broadcast_address(self):
x = self._cache.get('broadcast_address')
if x is None:
x = self._address_class(int(self.network_address) |
int(self.hostmask))
self._cache['broadcast_address'] = x
return x
@property
def hostmask(self):
x = self._cache.get('hostmask')
if x is None:
x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
self._cache['hostmask'] = x
return x
@property
def with_prefixlen(self):
return '%s/%d' % (self.network_address, self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self.network_address, self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self.network_address, self.hostmask)
@property
def num_addresses(self):
"""Number of hosts in the current subnet."""
return int(self.broadcast_address) - int(self.network_address) + 1
@property
def _address_class(self):
# Returning bare address objects (rather than interfaces) allows for
# more consistent behaviour across the network address, broadcast
# address and individual host addresses.
msg = '%200s has no associated address class' % (type(self),)
raise NotImplementedError(msg)
@property
def prefixlen(self):
return self._prefixlen
def address_exclude(self, other):
"""Remove an address from a larger block.
For example:
addr1 = ip_network('192.0.2.0/28')
addr2 = ip_network('192.0.2.1/32')
list(addr1.address_exclude(addr2)) =
[IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
or IPv6:
addr1 = ip_network('2001:db8::/32')
addr2 = ip_network('2001:db8::1/128')
list(addr1.address_exclude(addr2)) =
[ip_network('2001:db8::/128'),
ip_network('2001:db8::2/127'),
ip_network('2001:db8::4/126'),
ip_network('2001:db8::8/125'),
...
ip_network('2001:db8:8000::/33')]
Args:
other: An IPv4Network or IPv6Network object of the same type.
Returns:
An iterator of the IPv(4|6)Network objects which is self
minus other.
Raises:
TypeError: If self and other are of differing address
versions, or if other is not a network object.
ValueError: If other is not completely contained by self.
"""
if not self._version == other._version:
raise TypeError("%s and %s are not of the same version" % (
self, other))
if not isinstance(other, _BaseNetwork):
raise TypeError("%s is not a network object" % other)
if not other.subnet_of(self):
raise ValueError('%s not contained in %s' % (other, self))
if other == self:
return
# Make sure we're comparing the network of other.
other = other.__class__('%s/%s' % (other.network_address,
other.prefixlen))
s1, s2 = self.subnets()
while s1 != other and s2 != other:
if other.subnet_of(s1):
yield s2
s1, s2 = s1.subnets()
elif other.subnet_of(s2):
yield s1
s1, s2 = s2.subnets()
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
if s1 == other:
yield s2
elif s2 == other:
yield s1
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
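# Editor's note -- a worked example of address_exclude (not in the original
# source); sorted() is used because the generator does not yield the pieces in
# address order:
#   >>> a = ip_network('192.0.2.0/28')
#   >>> b = ip_network('192.0.2.1/32')
#   >>> sorted(a.address_exclude(b))
#   [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
#    IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]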
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
IPv6Network('2001:db8::1000/124') <
IPv6Network('2001:db8::2000/124')
0 if self == other
eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
IPv6Network('2001:db8::1000/124') ==
IPv6Network('2001:db8::1000/124')
1 if self > other
eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
IPv6Network('2001:db8::2000/124') >
IPv6Network('2001:db8::1000/124')
Raises:
TypeError if the IP versions are different.
"""
# does this need to raise a ValueError?
if self._version != other._version:
raise TypeError('%s and %s are not of the same type' % (
self, other))
# self._version == other._version below here:
if self.network_address < other.network_address:
return -1
if self.network_address > other.network_address:
return 1
# self.network_address == other.network_address below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
return 0
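# Editor's note -- illustrative example (not in the original source):
#   >>> IPv4Network('192.0.2.0/25').compare_networks(IPv4Network('192.0.2.128/25'))
#   -1
#   >>> IPv4Network('192.0.2.0/24').compare_networks(IPv4Network('192.0.2.0/24'))
#   0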
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network_address, self.netmask)
def subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), yield an iterator with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError('new prefix must be longer')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError('prefix length diff must be > 0')
new_prefixlen = self._prefixlen + prefixlen_diff
if new_prefixlen > self._max_prefixlen:
raise ValueError(
'prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, self))
start = int(self.network_address)
end = int(self.broadcast_address) + 1
step = (int(self.hostmask) + 1) >> prefixlen_diff
for new_addr in _compat_range(start, end, step):
current = self.__class__((new_addr, new_prefixlen))
yield current
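# Editor's note -- illustrative example (not in the original source): splitting
# a /24 two bits deeper, via prefixlen_diff or the equivalent new_prefix=26:
#   >>> list(IPv4Network('192.0.2.0/24').subnets(prefixlen_diff=2))
#   [IPv4Network('192.0.2.0/26'), IPv4Network('192.0.2.64/26'),
#    IPv4Network('192.0.2.128/26'), IPv4Network('192.0.2.192/26')]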
def supernet(self, prefixlen_diff=1, new_prefix=None):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of
the network should be decreased by. For example, given a
/24 network and a prefixlen_diff of 3, a supernet with a
/21 netmask is returned.
Returns:
An IPv4 network object.
Raises:
ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
a negative prefix length.
OR
If prefixlen_diff and new_prefix are both set or new_prefix is a
larger number than the current prefix (larger number means a
smaller network)
"""
if self._prefixlen == 0:
return self
if new_prefix is not None:
if new_prefix > self._prefixlen:
raise ValueError('new prefix must be shorter')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = self._prefixlen - new_prefix
new_prefixlen = self.prefixlen - prefixlen_diff
if new_prefixlen < 0:
raise ValueError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
(self.prefixlen, prefixlen_diff))
return self.__class__((
int(self.network_address) & (int(self.netmask) << prefixlen_diff),
new_prefixlen))
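# Editor's note -- illustrative example (not in the original source):
#   >>> IPv4Network('192.0.2.0/24').supernet(prefixlen_diff=3)
#   IPv4Network('192.0.0.0/21')
#   >>> IPv4Network('192.0.2.0/24').supernet(new_prefix=20)
#   IPv4Network('192.0.0.0/20')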
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return (self.network_address.is_multicast and
self.broadcast_address.is_multicast)
@staticmethod
def _is_subnet_of(a, b):
try:
# Always false if one is v4 and the other is v6.
if a._version != b._version:
raise TypeError(
"%s and %s are not of the same version" % (a, b))
return (b.network_address <= a.network_address and
b.broadcast_address >= a.broadcast_address)
except AttributeError:
raise TypeError("Unable to test subnet containment "
"between %s and %s" % (a, b))
def subnet_of(self, other):
"""Return True if this network is a subnet of other."""
return self._is_subnet_of(self, other)
def supernet_of(self, other):
"""Return True if this network is a supernet of other."""
return self._is_subnet_of(other, self)
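# Editor's note -- illustrative example (not in the original source):
#   >>> IPv4Network('192.0.2.0/28').subnet_of(IPv4Network('192.0.2.0/24'))
#   True
#   >>> IPv4Network('192.0.2.0/24').supernet_of(IPv4Network('192.0.2.0/28'))
#   True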
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return (self.network_address.is_reserved and
self.broadcast_address.is_reserved)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return (self.network_address.is_link_local and
self.broadcast_address.is_link_local)
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return (self.network_address.is_private and
self.broadcast_address.is_private)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return (self.network_address.is_unspecified and
self.broadcast_address.is_unspecified)
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return (self.network_address.is_loopback and
self.broadcast_address.is_loopback)
class _BaseV4(object):
"""Base IPv4 object.
The following methods are used by IPv4 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 4
# Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2 ** IPV4LENGTH) - 1
_DECIMAL_DIGITS = frozenset('0123456789')
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
_max_prefixlen = IPV4LENGTH
# There are only a handful of valid v4 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
def _explode_shorthand_ip_string(self):
return _compat_str(self)
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
- a string representing the prefix netmask (e.g. "255.255.255.0")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
try:
# Check for a netmask in prefix length form
prefixlen = cls._prefix_from_prefix_string(arg)
except NetmaskValueError:
# Check for a netmask or hostmask in dotted-quad form.
# This may raise NetmaskValueError.
prefixlen = cls._prefix_from_ip_string(arg)
netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
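# Editor's note -- illustrative example (not in the original source): all three
# argument forms produce the same cached (netmask, prefix length) tuple:
#   >>> IPv4Network._make_netmask(24)
#   (IPv4Address('255.255.255.0'), 24)
#   >>> IPv4Network._make_netmask('24') == IPv4Network._make_netmask('255.255.255.0')
#   True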
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError("Expected 4 octets in %r" % ip_str)
try:
return _compat_int_from_byte_vals(
map(cls._parse_octet, octets), 'big')
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_octet(cls, octet_str):
"""Convert a decimal octet into an integer.
Args:
octet_str: A string, the number to parse.
Returns:
The octet as an integer.
Raises:
ValueError: if the octet isn't strictly a decimal from [0..255].
"""
if not octet_str:
raise ValueError("Empty octet not permitted")
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._DECIMAL_DIGITS.issuperset(octet_str):
msg = "Only decimal digits permitted in %r"
raise ValueError(msg % octet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(octet_str) > 3:
msg = "At most 3 characters permitted in %r"
raise ValueError(msg % octet_str)
# Convert to integer (we know digits are legal)
octet_int = int(octet_str, 10)
# Any octets that look like they *might* be written in octal,
# and which don't look exactly the same in both octal and
# decimal are rejected as ambiguous
if octet_int > 7 and octet_str[0] == '0':
msg = "Ambiguous (octal/decimal) value in %r not permitted"
raise ValueError(msg % octet_str)
if octet_int > 255:
raise ValueError("Octet %d (> 255) not permitted" % octet_int)
return octet_int
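# Editor's note -- illustrative example (not in the original source): octets
# with a leading zero whose value differs between octal and decimal are rejected:
#   >>> IPv4Address._parse_octet('89')
#   89
#   >>> IPv4Address._parse_octet('089')      # leading zero and value > 7
#   Traceback (most recent call last):
#       ...
#   ValueError: Ambiguous (octal/decimal) value in '089' not permitted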
@classmethod
def _string_from_ip_int(cls, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
if isinstance(b, bytes)
else b)
for b in _compat_to_bytes(ip_int, 4, 'big'))
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split('.')
try:
parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv4 address.
This implements the method described in RFC1035 3.5.
"""
reverse_octets = _compat_str(self).split('.')[::-1]
return '.'.join(reverse_octets) + '.in-addr.arpa'
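# Editor's note -- illustrative example (not in the original source):
#   >>> IPv4Address('192.0.2.1')._reverse_pointer()
#   '1.2.0.192.in-addr.arpa'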
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv4Address(_BaseV4, _BaseAddress):
"""Represent and manipulate single IPv4 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv4Address('192.0.2.1') == IPv4Address(3221225985).
or, more generally
IPv4Address(int(IPv4Address('192.0.2.1'))) ==
IPv4Address('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 4)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v4_int_to_packed(self._ip)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within the
reserved IPv4 Network range.
"""
return self in self._constants._reserved_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
return (
self not in self._constants._public_network and
not self.is_private)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is multicast.
See RFC 3171 for details.
"""
return self in self._constants._multicast_network
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 5735 3.
"""
return self == self._constants._unspecified_address
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
return self in self._constants._loopback_network
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is link-local per RFC 3927.
"""
return self in self._constants._linklocal_network
class IPv4Interface(IPv4Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv4Address.__init__(self, address)
self.network = IPv4Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv4Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv4Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv4Address.__init__(self, addr[0])
self.network = IPv4Network(address, strict=False)
self._prefixlen = self.network._prefixlen
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv4Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv4Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return (self.network < other.network or
self.network == other.network and address_less)
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv4Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
class IPv4Network(_BaseV4, _BaseNetwork):
"""This class represents and manipulates 32-bit IPv4 network + addresses..
Attributes: [examples for IPv4Network('192.0.2.0/27')]
.network_address: IPv4Address('192.0.2.0')
.hostmask: IPv4Address('0.0.0.31')
.broadcast_address: IPv4Address('192.0.2.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# Class to use when creating address objects
_address_class = IPv4Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.0.2.0/24'
'192.0.2.0/255.255.255.0'
'192.0.0.2/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.0.2.1'
'192.0.2.1/255.255.255.255'
'192.0.2.1/32'
are also functionally equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.0.2.1') == IPv4Network(3221225985)
or, more generally
IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
IPv4Interface('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict is True and a network address is not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Constructing from a packed address or integer
if isinstance(address, (_compat_int_types, bytes)):
self.network_address = IPv4Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen)
# fixme: address/network test here.
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
# We weren't given an address[1]
arg = self._max_prefixlen
self.network_address = IPv4Address(address[0])
self.netmask, self._prefixlen = self._make_netmask(arg)
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv4Address(packed &
int(self.netmask))
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv4Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry.
"""
return (not (self.network_address in IPv4Network('100.64.0.0/10') and
self.broadcast_address in IPv4Network('100.64.0.0/10')) and
not self.is_private)
class _IPv4Constants(object):
_linklocal_network = IPv4Network('169.254.0.0/16')
_loopback_network = IPv4Network('127.0.0.0/8')
_multicast_network = IPv4Network('224.0.0.0/4')
_public_network = IPv4Network('100.64.0.0/10')
_private_networks = [
IPv4Network('0.0.0.0/8'),
IPv4Network('10.0.0.0/8'),
IPv4Network('127.0.0.0/8'),
IPv4Network('169.254.0.0/16'),
IPv4Network('172.16.0.0/12'),
IPv4Network('192.0.0.0/29'),
IPv4Network('192.0.0.170/31'),
IPv4Network('192.0.2.0/24'),
IPv4Network('192.168.0.0/16'),
IPv4Network('198.18.0.0/15'),
IPv4Network('198.51.100.0/24'),
IPv4Network('203.0.113.0/24'),
IPv4Network('240.0.0.0/4'),
IPv4Network('255.255.255.255/32'),
]
_reserved_network = IPv4Network('240.0.0.0/4')
_unspecified_address = IPv4Address('0.0.0.0')
IPv4Address._constants = _IPv4Constants
class _BaseV6(object):
"""Base IPv6 object.
The following methods are used by IPv6 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 6
_ALL_ONES = (2 ** IPV6LENGTH) - 1
_HEXTET_COUNT = 8
_HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
_max_prefixlen = IPV6LENGTH
# There are only a bunch of valid v6 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
- a string representing the prefix netmask (e.g. "255.255.255.0")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
prefixlen = cls._prefix_from_prefix_string(arg)
netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
An int, the IPv6 address
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
parts = ip_str.split(':')
# An IPv6 address needs at least 2 colons (3 parts).
_min_parts = 3
if len(parts) < _min_parts:
msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
raise AddressValueError(msg)
# If the address has an IPv4-style suffix, convert it to hexadecimal.
if '.' in parts[-1]:
try:
ipv4_int = IPv4Address(parts.pop())._ip
except AddressValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
parts.append('%x' % (ipv4_int & 0xFFFF))
# An IPv6 address can't have more than 8 colons (9 parts).
# The extra colon comes from using the "::" notation for a single
# leading or trailing zero part.
_max_parts = cls._HEXTET_COUNT + 1
if len(parts) > _max_parts:
msg = "At most %d colons permitted in %r" % (
_max_parts - 1, ip_str)
raise AddressValueError(msg)
# Disregarding the endpoints, find '::' with nothing in between.
# This indicates that a run of zeroes has been skipped.
skip_index = None
for i in _compat_range(1, len(parts) - 1):
if not parts[i]:
if skip_index is not None:
# Can't have more than one '::'
msg = "At most one '::' permitted in %r" % ip_str
raise AddressValueError(msg)
skip_index = i
# parts_hi is the number of parts to copy from above/before the '::'
# parts_lo is the number of parts to copy from below/after the '::'
if skip_index is not None:
# If we found a '::', then check if it also covers the endpoints.
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
parts_lo -= 1
if parts_lo:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
msg = "Expected at most %d other parts with '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
else:
# Otherwise, allocate the entire address to parts_hi. The
# endpoints could still be empty, but _parse_hextet() will check
# for that.
if len(parts) != cls._HEXTET_COUNT:
msg = "Exactly %d parts expected without '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
if not parts[0]:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
# Now, parse the hextets into a 128-bit integer.
ip_int = 0
for i in range(parts_hi):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in range(-parts_lo, 0):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
return ip_int
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_hextet(cls, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from
[0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._HEX_DIGITS.issuperset(hextet_str):
raise ValueError("Only hex digits permitted in %r" % hextet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(hextet_str) > 4:
msg = "At most 4 characters permitted in %r"
raise ValueError(msg % hextet_str)
# Length check means we can skip checking the integer value
return int(hextet_str, 16)
@classmethod
def _compress_hextets(cls, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index, hextet in enumerate(hextets):
if hextet == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
return hextets
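# Editor's note -- illustrative example (not in the original source): the
# longest run of zero hextets is collapsed when the address is rendered:
#   >>> str(IPv6Address('2001:0db8:0000:0000:0000:0000:0000:0001'))
#   '2001:db8::1'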
@classmethod
def _string_from_ip_int(cls, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if ip_int is None:
ip_int = int(cls._ip)
if ip_int > cls._ALL_ONES:
raise ValueError('IPv6 address is too large')
hex_str = '%032x' % ip_int
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
hextets = cls._compress_hextets(hextets)
return ':'.join(hextets)
def _explode_shorthand_ip_string(self):
"""Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if isinstance(self, IPv6Network):
ip_str = _compat_str(self.network_address)
elif isinstance(self, IPv6Interface):
ip_str = _compat_str(self.ip)
else:
ip_str = _compat_str(self)
ip_int = self._ip_int_from_string(ip_str)
hex_str = '%032x' % ip_int
parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
if isinstance(self, (_BaseNetwork, IPv6Interface)):
return '%s/%d' % (':'.join(parts), self._prefixlen)
return ':'.join(parts)
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv6 address.
This implements the method described in RFC3596 2.5.
"""
reverse_chars = self.exploded[::-1].replace(':', '')
return '.'.join(reverse_chars) + '.ip6.arpa'
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv6Address(_BaseV6, _BaseAddress):
"""Represent and manipulate single IPv6 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""Instantiate a new IPv6 address object.
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv6Address('2001:db8::') ==
IPv6Address(42540766411282592856903984951653826560)
or, more generally
IPv6Address(int(IPv6Address('2001:db8::'))) ==
IPv6Address('2001:db8::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 16)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v6_int_to_packed(self._ip)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return self in self._constants._multicast_network
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return any(self in x for x in self._constants._reserved_networks)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return self in self._constants._linklocal_network
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return self in self._constants._sitelocal_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv6-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, true if the address is not reserved per
iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self._ip == 0
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self._ip == 1
@property
def ipv4_mapped(self):
"""Return the IPv4 mapped address.
Returns:
If the IPv6 address is a v4 mapped address, return the
IPv4 mapped address. Return None otherwise.
"""
if (self._ip >> 32) != 0xFFFF:
return None
return IPv4Address(self._ip & 0xFFFFFFFF)
@property
def teredo(self):
"""Tuple of embedded teredo IPs.
Returns:
Tuple of the (server, client) IPs or None if the address
doesn't appear to be a teredo address (doesn't start with
2001::/32)
"""
if (self._ip >> 96) != 0x20010000:
return None
return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
IPv4Address(~self._ip & 0xFFFFFFFF))
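# Editor's note -- illustrative example (not in the original source); the Teredo
# server address comes from bits 32..64 and the client from the inverted low
# 32 bits:
#   >>> IPv6Address('2001:0:4136:e378:8000:63bf:3fff:fdd2').teredo
#   (IPv4Address('65.54.227.120'), IPv4Address('192.0.2.45'))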
@property
def sixtofour(self):
"""Return the IPv4 6to4 embedded address.
Returns:
The IPv4 6to4-embedded address if present or None if the
address doesn't appear to contain a 6to4 embedded address.
"""
if (self._ip >> 112) != 0x2002:
return None
return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
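# Editor's note -- illustrative example (not in the original source):
#   >>> IPv6Address('2002:c000:0201::1').sixtofour
#   IPv4Address('192.0.2.1')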
class IPv6Interface(IPv6Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv6Address.__init__(self, address)
self.network = IPv6Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv6Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv6Address.__init__(self, addr[0])
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self._prefixlen = self.network._prefixlen
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv6Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv6Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return (self.network < other.network or
self.network == other.network and address_less)
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv6Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
@property
def is_unspecified(self):
return self._ip == 0 and self.network.is_unspecified
@property
def is_loopback(self):
return self._ip == 1 and self.network.is_loopback
class IPv6Network(_BaseV6, _BaseNetwork):
"""This class represents and manipulates 128-bit IPv6 networks.
Attributes: [examples for IPv6Network('2001:db8::1000/124')]
.network_address: IPv6Address('2001:db8::1000')
.hostmask: IPv6Address('::f')
.broadcast_address: IPv6Address('2001:db8::100f')
.netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
.prefixlen: 124
"""
# Class to use when creating address objects
_address_class = IPv6Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv6 Network object.
Args:
address: A string or integer representing the IPv6 network or the
IP and prefix/netmask.
'2001:db8::/128'
'2001:db8:0000:0000:0000:0000:0000:0000/128'
'2001:db8::'
are all functionally the same in IPv6. That is to say,
failing to provide a subnetmask will create an object with
a mask of /128.
Additionally, an integer can be passed, so
IPv6Network('2001:db8::') ==
IPv6Network(42540766411282592856903984951653826560)
or, more generally
IPv6Network(int(IPv6Network('2001:db8::'))) ==
IPv6Network('2001:db8::')
strict: A boolean. If true, ensure that we have been passed
A true network address, eg, 2001:db8::1000/124 and not an
IP address on a network, eg, 2001:db8::1/124.
Raises:
AddressValueError: If address isn't a valid IPv6 address.
NetmaskValueError: If the netmask isn't valid for
an IPv6 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Efficient constructor from integer or packed address
if isinstance(address, (bytes, _compat_int_types)):
self.network_address = IPv6Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen)
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
self.network_address = IPv6Address(address[0])
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv6Address(packed &
int(self.netmask))
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv6Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the
Subnet-Router anycast address.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast + 1):
yield self._address_class(x)
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return (self.network_address.is_site_local and
self.broadcast_address.is_site_local)
class _IPv6Constants(object):
_linklocal_network = IPv6Network('fe80::/10')
_multicast_network = IPv6Network('ff00::/8')
_private_networks = [
IPv6Network('::1/128'),
IPv6Network('::/128'),
IPv6Network('::ffff:0:0/96'),
IPv6Network('100::/64'),
IPv6Network('2001::/23'),
IPv6Network('2001:2::/48'),
IPv6Network('2001:db8::/32'),
IPv6Network('2001:10::/28'),
IPv6Network('fc00::/7'),
IPv6Network('fe80::/10'),
]
_reserved_networks = [
IPv6Network('::/8'), IPv6Network('100::/8'),
IPv6Network('200::/7'), IPv6Network('400::/6'),
IPv6Network('800::/5'), IPv6Network('1000::/4'),
IPv6Network('4000::/3'), IPv6Network('6000::/3'),
IPv6Network('8000::/3'), IPv6Network('A000::/3'),
IPv6Network('C000::/3'), IPv6Network('E000::/4'),
IPv6Network('F000::/5'), IPv6Network('F800::/6'),
IPv6Network('FE00::/9'),
]
_sitelocal_network = IPv6Network('fec0::/10')
IPv6Address._constants = _IPv6Constants
| mit |
glatard/nipype | nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py | 9 | 1577 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.maths import BinaryMaths
def test_BinaryMaths_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=2,
),
internal_datatype=dict(argstr='-dt %s',
position=1,
),
nan2zeros=dict(argstr='-nan',
position=3,
),
operand_file=dict(argstr='%s',
mandatory=True,
position=5,
xor=['operand_value'],
),
operand_value=dict(argstr='%.8f',
mandatory=True,
position=5,
xor=['operand_file'],
),
operation=dict(argstr='-%s',
mandatory=True,
position=4,
),
out_file=dict(argstr='%s',
genfile=True,
hash_files=False,
position=-2,
),
output_datatype=dict(argstr='-odt %s',
position=-1,
),
output_type=dict(),
terminal_output=dict(nohash=True,
),
)
inputs = BinaryMaths.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_BinaryMaths_outputs():
output_map = dict(out_file=dict(),
)
outputs = BinaryMaths.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
zooba/PTVS | Python/Templates/Django/ProjectTemplates/Python/Web/StarterDjangoProject/project-wsgi.py | 10 | 1121 | """
WSGI config for $safeprojectname$ project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
For more information, visit
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault(
'DJANGO_SETTINGS_MODULE',
'$safeprojectname$.settings')
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
| apache-2.0 |
leesavide/pythonista-docs | Documentation/matplotlib/mpl_examples/api/custom_scale_example.py | 9 | 6401 | from __future__ import unicode_literals
import numpy as np
from numpy import ma
from matplotlib import scale as mscale
from matplotlib import transforms as mtransforms
from matplotlib.ticker import Formatter, FixedLocator
class MercatorLatitudeScale(mscale.ScaleBase):
"""
Scales data in range -pi/2 to pi/2 (-90 to 90 degrees) using
the system used to scale latitudes in a Mercator projection.
The scale function:
ln(tan(y) + sec(y))
The inverse scale function:
atan(sinh(y))
Since the Mercator scale tends to infinity at +/- 90 degrees,
there is a user-defined threshold, above and below which nothing
will be plotted. This defaults to +/- 85 degrees.
source:
http://en.wikipedia.org/wiki/Mercator_projection
"""
# The scale class must have a member ``name`` that defines the
# string used to select the scale. For example,
# ``gca().set_yscale("mercator")`` would be used to select this
# scale.
name = 'mercator'
def __init__(self, axis, **kwargs):
"""
Any keyword arguments passed to ``set_xscale`` and
``set_yscale`` will be passed along to the scale's
constructor.
thresh: The degree above which to crop the data.
"""
mscale.ScaleBase.__init__(self)
thresh = kwargs.pop("thresh", (85 / 180.0) * np.pi)
if thresh >= np.pi / 2.0:
raise ValueError("thresh must be less than pi/2")
self.thresh = thresh
def get_transform(self):
"""
Override this method to return a new instance that does the
actual transformation of the data.
The MercatorLatitudeTransform class is defined below as a
nested class of this one.
"""
return self.MercatorLatitudeTransform(self.thresh)
def set_default_locators_and_formatters(self, axis):
"""
Override to set up the locators and formatters to use with the
scale. This is only required if the scale requires custom
locators and formatters. Writing custom locators and
formatters is rather outside the scope of this example, but
there are many helpful examples in ``ticker.py``.
In our case, the Mercator example uses a fixed locator from
-90 to 90 degrees and a custom formatter class to convert
the radians to degrees and put a degree symbol after the
value::
"""
class DegreeFormatter(Formatter):
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
return "%d\u00b0" % ((x / np.pi) * 180.0)
deg2rad = np.pi / 180.0
axis.set_major_locator(FixedLocator(
np.arange(-90, 90, 10) * deg2rad))
axis.set_major_formatter(DegreeFormatter())
axis.set_minor_formatter(DegreeFormatter())
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Override to limit the bounds of the axis to the domain of the
transform. In the case of Mercator, the bounds should be
limited to the threshold that was passed in. Unlike the
autoscaling provided by the tick locators, this range limiting
will always be adhered to, whether the axis range is set
manually, determined automatically or changed through panning
and zooming.
"""
return max(vmin, -self.thresh), min(vmax, self.thresh)
class MercatorLatitudeTransform(mtransforms.Transform):
# There are two value members that must be defined.
# ``input_dims`` and ``output_dims`` specify number of input
# dimensions and output dimensions to the transformation.
# These are used by the transformation framework to do some
# error checking and prevent incompatible transformations from
# being connected together. When defining transforms for a
# scale, which are, by definition, separable and have only one
# dimension, these members should always be set to 1.
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, thresh):
mtransforms.Transform.__init__(self)
self.thresh = thresh
def transform_non_affine(self, a):
"""
This transform takes an Nx1 ``numpy`` array and returns a
transformed copy. Since the range of the Mercator scale
is limited by the user-specified threshold, the input
array must be masked to contain only valid values.
``matplotlib`` will handle masked arrays and remove the
out-of-range data from the plot. Importantly, the
``transform`` method *must* return an array that is the
same shape as the input array, since these values need to
remain synchronized with values in the other dimension.
"""
masked = ma.masked_where((a < -self.thresh) | (a > self.thresh), a)
if masked.mask.any():
return ma.log(np.abs(ma.tan(masked) + 1.0 / ma.cos(masked)))
else:
return np.log(np.abs(np.tan(a) + 1.0 / np.cos(a)))
def inverted(self):
"""
Override this method so matplotlib knows how to get the
inverse transform for this transform.
"""
return MercatorLatitudeScale.InvertedMercatorLatitudeTransform(self.thresh)
class InvertedMercatorLatitudeTransform(mtransforms.Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, thresh):
mtransforms.Transform.__init__(self)
self.thresh = thresh
def transform_non_affine(self, a):
return np.arctan(np.sinh(a))
def inverted(self):
return MercatorLatitudeScale.MercatorLatitudeTransform(self.thresh)
# Now that the Scale class has been defined, it must be registered so
# that ``matplotlib`` can find it.
mscale.register_scale(MercatorLatitudeScale)
if __name__ == '__main__':
import matplotlib.pyplot as plt
t = np.arange(-180.0, 180.0, 0.1)
s = t / 360.0 * np.pi
plt.plot(t, s, '-', lw=2)
plt.gca().set_yscale('mercator')
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.title('Mercator: Projection of the Oppressor')
plt.grid(True)
plt.show()
| apache-2.0 |
strahlex/machinekit | src/emc/usr_intf/gscreen/keybindings.py | 28 | 3632 | # Gscreen is Copyright (c) 20013 Chris Morley
#
# Gscreen is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Gscreen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# This holds/converts the generic function keyword to the actual function call name.
# It returns this name so Gscreen can call the function to actually do something.
# You can add or change these.
class Keycalls:
def __init__(self):
self.ESTOP = 'on_keycall_ESTOP'
self.POWER = 'on_keycall_POWER'
self.ABORT = 'on_keycall_ABORT'
self.XPOS = 'on_keycall_XPOS'
self.XNEG = 'on_keycall_XNEG'
self.YPOS = 'on_keycall_YPOS'
self.YNEG = 'on_keycall_YNEG'
self.ZPOS = 'on_keycall_ZPOS'
self.ZNEG = 'on_keycall_ZNEG'
self.APOS = 'on_keycall_APOS'
self.ANEG = 'on_keycall_ANEG'
self.INCREMENTS = 'on_keycall_INCREMENTS'
self.TEST = 'on_keycall_INCREMENTS'
def __getitem__(self, item):
return getattr(self, item)
def __setitem__(self, item, value):
return setattr(self, item, value)
# This holds/converts the actual keypress (keyname = gtk.gdk.keyval_name(event.keyval))
# to a generic function keyword
# you can add or change these.
class Keybinding:
def __init__(self):
self.F1 = 'ESTOP'
self.F2 = 'POWER'
self.Escape = 'ABORT'
self.Up = 'YPOS'
self.Down = 'YNEG'
self.Right = 'XPOS'
self.Left = 'XNEG'
self.Page_Up = 'ZPOS'
self.Page_Down = 'ZNEG'
self.bracketleft = 'APOS'
self.bracketright = 'ANEG'
self.i = 'INCREMENTS'
self.I = 'INCREMENTS'
def __getitem__(self, item):
return getattr(self, item)
def __setitem__(self, item, value):
return setattr(self, item, value)
# These are the public methods for key conversion to function call name.
# get_call and get_binding are for confirmation of a call or binding entry.
# convert() takes a key string (from gtk.gdk.keyval_name(event.keyval)) and converts it to a function call string or returns None
# add_call and add_binding allow adding or changing calls or bindings
# add_conversion() does both at the same time
class Keylookup:
def __init__(self):
self.keycall = Keycalls()
self.keybinding = Keybinding()
def get_call(self,binding):
try:
return self.keycall[binding]
except:
print "No key function call"
return None
def get_binding(self,key):
try:
return self.keybinding[key]
except:
print "No key binding"
return None
def convert(self,key):
try:
b = self.keybinding[key]
return self.keycall[b]
except:
return None
def add_binding(self,key,binding):
try:
self.keybinding[key] = binding
except:
print "Binding for key %s could not be added"% key
def add_call(self,binding,function):
try:
self.keycall[binding] = function
except:
print "Binding %s could not be added"% binding
def add_conversion(self,key,binding,function):
self.add_binding(key,binding)
self.add_call(binding,function)
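# Editor's illustration (not part of the original module): typical use by
# Gscreen-style code -- convert a GDK key name to the handler name to call:
#
#   keylookup = Keylookup()
#   keylookup.convert('F1')        # -> 'on_keycall_ESTOP'
#   keylookup.convert('Page_Up')   # -> 'on_keycall_ZPOS'
#   # register a new key 't' bound to a TEST action:
#   keylookup.add_conversion('t', 'TEST', 'on_keycall_TEST')
#   keylookup.convert('t')         # -> 'on_keycall_TEST'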
| lgpl-2.1 |
LEXmono/q | urllib3/contrib/ntlmpool.py | 312 | 4478 | """
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
from __future__ import absolute_import
from logging import getLogger
from ntlm import ntlm
from .. import HTTPSConnectionPool
from ..packages.six.moves.http_client import HTTPSConnection
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
self.num_connections, self.host, self.authurl)
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s', headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s', res.status, res.reason)
log.debug('Response headers: %s', reshdr)
log.debug('Response data: %s [...]', res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s', headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s', res.status, res.reason)
log.debug('Response headers: %s', dict(res.getheaders()))
log.debug('Response data: %s [...]', res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
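# Hedged usage sketch (not part of the original file): the host, credentials and
# authurl values are placeholders. It shows how the pool described in the class
# docstring might be constructed and queried.
#
#     pool = NTLMConnectionPool('DOMAIN\\username', 'secret',
#                               authurl='/ntlm-protected/',
#                               host='server.example.com')
#     response = pool.urlopen('GET', '/ntlm-protected/some/page')
#     print(response.status)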
| apache-2.0 |
MAECProject/MAECProject.github.io | documentation/idioms/av_classification/maec_av_classification.py | 1 | 1818 | # Code for MAEC AV Classification Idiom
from maec.package.package import Package
from maec.package.malware_subject import MalwareSubject
from maec.package.analysis import Analysis
from maec.bundle.bundle import Bundle
from maec.bundle.av_classification import AVClassification
from cybox.core import Object
from cybox.objects.win_executable_file_object import WinExecutableFile
# Set up the necessary Package, Malware Subject, Analysis Bundle Instances
p = Package()
ms = MalwareSubject()
b = Bundle()
a = Analysis()
# Set the Malware_Instance_Object_Attributes on the Malware Subject
ms.malware_instance_object_attributes = Object()
ms.malware_instance_object_attributes.properties = WinExecutableFile()
ms.malware_instance_object_attributes.properties.add_hash("076e5b2bae0b4b3a3d81c85610b95cd4")
ms.malware_instance_object_attributes.properties.add_hash("4484e08903744ceeaedd8f5e1bfc06b2c4688e76")
# Populate the Analysis with the metadata relating to the Analysis that was performed
a.method = "static"
a.type_ = "triage"
a.set_findings_bundle(b.id_)
# Set the requisite attributes on the Bundle
b.defined_subject = False
b.content_type = "static analysis tool output"
# Create the AV Classifications
av1 = AVClassification()
av1.name = "Microsoft"
av1.classification_name = "PWS:Win32/Zbot.gen!B"
av2 = AVClassification()
av2.name = "Symantec"
av2.classification_name = "Backdoor.Paproxy"
av3 = AVClassification()
av3.name = "TrendMicro"
av3.classification_name = "TSPY_ZBOT.TD"
# Add the AV classifications to the Bundle
b.add_av_classification(av1)
b.add_av_classification(av2)
b.add_av_classification(av3)
# Build up the full Package/Malware Subject/Analysis/Bundle hierarchy
p.add_malware_subject(ms)
ms.add_analysis(a)
ms.add_findings_bundle(b)
# Output the built up Package to XML
print p.to_xml()
| bsd-3-clause |
borosnborea/SwordGO_app | example/kivymap/.buildozer/venv/lib/python2.7/site-packages/pip/_vendor/distlib/version.py | 132 | 23711 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Implementation of a flexible versioning scheme providing support for PEP-440,
setuptools-compatible and semantic versioning.
"""
import logging
import re
from .compat import string_types
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
'LegacyVersion', 'LegacyMatcher',
'SemanticVersion', 'SemanticMatcher',
'UnsupportedVersionError', 'get_scheme']
logger = logging.getLogger(__name__)
class UnsupportedVersionError(ValueError):
"""This is an unsupported version."""
pass
class Version(object):
def __init__(self, s):
self._string = s = s.strip()
self._parts = parts = self.parse(s)
assert isinstance(parts, tuple)
assert len(parts) > 0
def parse(self, s):
raise NotImplementedError('please implement in a subclass')
def _check_compatible(self, other):
if type(self) != type(other):
raise TypeError('cannot compare %r and %r' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
self._check_compatible(other)
return self._parts < other._parts
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self._parts)
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
@property
def is_prerelease(self):
raise NotImplementedError('Please implement in subclasses.')
class Matcher(object):
version_class = None
dist_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?")
comp_re = re.compile(r'^(<=|>=|<|>|!=|={2,3}|~=)?\s*([^\s,]+)$')
num_re = re.compile(r'^\d+(\.\d+)*$')
# value is either a callable or the name of a method
_operators = {
'<': lambda v, c, p: v < c,
'>': lambda v, c, p: v > c,
'<=': lambda v, c, p: v == c or v < c,
'>=': lambda v, c, p: v == c or v > c,
'==': lambda v, c, p: v == c,
'===': lambda v, c, p: v == c,
# by default, compatible => >=.
'~=': lambda v, c, p: v == c or v > c,
'!=': lambda v, c, p: v != c,
}
def __init__(self, s):
if self.version_class is None:
raise ValueError('Please specify a version class')
self._string = s = s.strip()
m = self.dist_re.match(s)
if not m:
raise ValueError('Not valid: %r' % s)
groups = m.groups('')
self.name = groups[0].strip()
self.key = self.name.lower() # for case-insensitive comparisons
clist = []
if groups[2]:
constraints = [c.strip() for c in groups[2].split(',')]
for c in constraints:
m = self.comp_re.match(c)
if not m:
raise ValueError('Invalid %r in %r' % (c, s))
groups = m.groups()
op = groups[0] or '~='
s = groups[1]
if s.endswith('.*'):
if op not in ('==', '!='):
raise ValueError('\'.*\' not allowed for '
'%r constraints' % op)
# Could be a partial version (e.g. for '2.*') which
# won't parse as a version, so keep it as a string
vn, prefix = s[:-2], True
if not self.num_re.match(vn):
# Just to check that vn is a valid version
self.version_class(vn)
else:
# Should parse as a version, so we can create an
# instance for the comparison
vn, prefix = self.version_class(s), False
clist.append((op, vn, prefix))
self._parts = tuple(clist)
def match(self, version):
"""
Check if the provided version matches the constraints.
:param version: The version to match against this instance.
        :type version: String or :class:`Version` instance.
"""
if isinstance(version, string_types):
version = self.version_class(version)
for operator, constraint, prefix in self._parts:
f = self._operators.get(operator)
if isinstance(f, string_types):
f = getattr(self, f)
if not f:
msg = ('%r not implemented '
'for %s' % (operator, self.__class__.__name__))
raise NotImplementedError(msg)
if not f(version, constraint, prefix):
return False
return True
@property
def exact_version(self):
result = None
if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
result = self._parts[0][1]
return result
def _check_compatible(self, other):
if type(self) != type(other) or self.name != other.name:
raise TypeError('cannot compare %s and %s' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self.key == other.key and self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self.key) + hash(self._parts)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
r'(\.(post)(\d+))?(\.(dev)(\d+))?'
r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
def _pep_440_key(s):
s = s.strip()
m = PEP440_VERSION_RE.match(s)
if not m:
raise UnsupportedVersionError('Not a valid version: %s' % s)
groups = m.groups()
nums = tuple(int(v) for v in groups[1].split('.'))
while len(nums) > 1 and nums[-1] == 0:
nums = nums[:-1]
if not groups[0]:
epoch = 0
else:
epoch = int(groups[0])
pre = groups[4:6]
post = groups[7:9]
dev = groups[10:12]
local = groups[13]
if pre == (None, None):
pre = ()
else:
pre = pre[0], int(pre[1])
if post == (None, None):
post = ()
else:
post = post[0], int(post[1])
if dev == (None, None):
dev = ()
else:
dev = dev[0], int(dev[1])
if local is None:
local = ()
else:
parts = []
for part in local.split('.'):
# to ensure that numeric compares as > lexicographic, avoid
# comparing them directly, but encode a tuple which ensures
# correct sorting
if part.isdigit():
part = (1, int(part))
else:
part = (0, part)
parts.append(part)
local = tuple(parts)
if not pre:
# either before pre-release, or final release and after
if not post and dev:
# before pre-release
pre = ('a', -1) # to sort before a0
else:
pre = ('z',) # to sort after all pre-releases
# now look at the state of post and dev.
if not post:
post = ('_',) # sort before 'a'
if not dev:
dev = ('final',)
#print('%s -> %s' % (s, m.groups()))
return epoch, nums, pre, post, dev, local
_normalized_key = _pep_440_key
class NormalizedVersion(Version):
"""A rational version.
Good:
1.2 # equivalent to "1.2.0"
1.2.0
1.2a1
1.2.3a2
1.2.3b1
1.2.3c1
1.2.3.4
TODO: fill this out
Bad:
        1           # minimum two numbers
1.2a # release level must have a release serial
1.2.3b
"""
def parse(self, s):
result = _normalized_key(s)
# _normalized_key loses trailing zeroes in the release
# clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
# However, PEP 440 prefix matching needs it: for example,
# (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
m = PEP440_VERSION_RE.match(s) # must succeed
groups = m.groups()
self._release_clause = tuple(int(v) for v in groups[1].split('.'))
return result
PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
@property
def is_prerelease(self):
return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
def _match_prefix(x, y):
x = str(x)
y = str(y)
if x == y:
return True
if not x.startswith(y):
return False
n = len(y)
return x[n] == '.'
class NormalizedMatcher(Matcher):
version_class = NormalizedVersion
# value is either a callable or the name of a method
_operators = {
'~=': '_match_compatible',
'<': '_match_lt',
'>': '_match_gt',
'<=': '_match_le',
'>=': '_match_ge',
'==': '_match_eq',
'===': '_match_arbitrary',
'!=': '_match_ne',
}
def _adjust_local(self, version, constraint, prefix):
if prefix:
strip_local = '+' not in constraint and version._parts[-1]
else:
# both constraint and version are
# NormalizedVersion instances.
# If constraint does not have a local component,
# ensure the version doesn't, either.
strip_local = not constraint._parts[-1] and version._parts[-1]
if strip_local:
s = version._string.split('+', 1)[0]
version = self.version_class(s)
return version, constraint
def _match_lt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version >= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_gt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version <= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_le(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version <= constraint
def _match_ge(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version >= constraint
def _match_eq(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version == constraint)
else:
result = _match_prefix(version, constraint)
return result
def _match_arbitrary(self, version, constraint, prefix):
return str(version) == str(constraint)
def _match_ne(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version != constraint)
else:
result = not _match_prefix(version, constraint)
return result
def _match_compatible(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version == constraint:
return True
if version < constraint:
return False
# if not prefix:
# return True
release_clause = constraint._release_clause
if len(release_clause) > 1:
release_clause = release_clause[:-1]
pfx = '.'.join([str(i) for i in release_clause])
return _match_prefix(version, pfx)
_REPLACEMENTS = (
(re.compile('[.+-]$'), ''), # remove trailing puncts
(re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start
(re.compile('^[.-]'), ''), # remove leading puncts
(re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses
(re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
(re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
(re.compile(r'\b(pre-alpha|prealpha)\b'),
'pre.alpha'), # standardise
(re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
)
_SUFFIX_REPLACEMENTS = (
(re.compile('^[:~._+-]+'), ''), # remove leading puncts
(re.compile('[,*")([\]]'), ''), # remove unwanted chars
(re.compile('[~:+_ -]'), '.'), # replace illegal chars
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\.$'), ''), # trailing '.'
)
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
def _suggest_semantic_version(s):
"""
Try to suggest a semantic form for a version for which
_suggest_normalized_version couldn't come up with anything.
"""
result = s.strip().lower()
for pat, repl in _REPLACEMENTS:
result = pat.sub(repl, result)
if not result:
result = '0.0.0'
# Now look for numeric prefix, and separate it out from
# the rest.
#import pdb; pdb.set_trace()
m = _NUMERIC_PREFIX.match(result)
if not m:
prefix = '0.0.0'
suffix = result
else:
prefix = m.groups()[0].split('.')
prefix = [int(i) for i in prefix]
while len(prefix) < 3:
prefix.append(0)
if len(prefix) == 3:
suffix = result[m.end():]
else:
suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
prefix = prefix[:3]
prefix = '.'.join([str(i) for i in prefix])
suffix = suffix.strip()
if suffix:
#import pdb; pdb.set_trace()
# massage the suffix.
for pat, repl in _SUFFIX_REPLACEMENTS:
suffix = pat.sub(repl, suffix)
if not suffix:
result = prefix
else:
sep = '-' if 'dev' in suffix else '+'
result = prefix + sep + suffix
if not is_semver(result):
result = None
return result
def _suggest_normalized_version(s):
"""Suggest a normalized version close to the given version string.
If you have a version string that isn't rational (i.e. NormalizedVersion
doesn't like it) then you might be able to get an equivalent (or close)
rational version from this function.
This does a number of simple normalizations to the given string, based
on observation of versions currently in use on PyPI. Given a dump of
    those versions during PyCon 2009, 4287 of them:
- 2312 (53.93%) match NormalizedVersion without change
with the automatic suggestion
- 3474 (81.04%) match when using this suggestion method
@param s {str} An irrational version string.
@returns A rational version string, or None, if couldn't determine one.
"""
try:
_normalized_key(s)
return s # already rational
except UnsupportedVersionError:
pass
rs = s.lower()
# part of this could use maketrans
for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
('beta', 'b'), ('rc', 'c'), ('-final', ''),
('-pre', 'c'),
('-release', ''), ('.release', ''), ('-stable', ''),
('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
('final', '')):
rs = rs.replace(orig, repl)
# if something ends with dev or pre, we add a 0
rs = re.sub(r"pre$", r"pre0", rs)
rs = re.sub(r"dev$", r"dev0", rs)
# if we have something like "b-2" or "a.2" at the end of the
    # version, that is probably beta, alpha, etc
# let's remove the dash or dot
rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
# 1.0-dev-r371 -> 1.0.dev371
# 0.1-dev-r79 -> 0.1.dev79
rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
# Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
# Clean: v0.3, v1.0
if rs.startswith('v'):
rs = rs[1:]
# Clean leading '0's on numbers.
#TODO: unintended side-effect on, e.g., "2003.05.09"
# PyPI stats: 77 (~2%) better
rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
# Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
# zero.
# PyPI stats: 245 (7.56%) better
rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
# the 'dev-rNNN' tag is a dev tag
rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
# clean the - when used as a pre delimiter
rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
# a terminal "dev" or "devel" can be changed into ".dev0"
rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
# a terminal "dev" can be changed into ".dev0"
rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
# a terminal "final" or "stable" can be removed
rs = re.sub(r"(final|stable)$", "", rs)
# The 'r' and the '-' tags are post release tags
# 0.4a1.r10 -> 0.4a1.post10
# 0.9.33-17222 -> 0.9.33.post17222
# 0.9.33-r17222 -> 0.9.33.post17222
rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
# Clean 'r' instead of 'dev' usage:
# 0.9.33+r17222 -> 0.9.33.dev17222
# 1.0dev123 -> 1.0.dev123
# 1.0.git123 -> 1.0.dev123
# 1.0.bzr123 -> 1.0.dev123
# 0.1a0dev.123 -> 0.1a0.dev123
# PyPI stats: ~150 (~4%) better
rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
# Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
# 0.2.pre1 -> 0.2c1
# 0.2-c1 -> 0.2c1
# 1.0preview123 -> 1.0c123
# PyPI stats: ~21 (0.62%) better
rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
# Tcl/Tk uses "px" for their post release markers
rs = re.sub(r"p(\d+)$", r".post\1", rs)
try:
_normalized_key(rs)
except UnsupportedVersionError:
rs = None
return rs
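# Illustrative sketch (an assumption added for clarity, not present in the original
# source): a few inputs and the normalized forms the heuristics above should suggest.
#
#     _suggest_normalized_version('1.0-alpha-2')    # -> '1.0a2'
#     _suggest_normalized_version('1.0-dev-r371')   # -> '1.0.dev371'
#     _suggest_normalized_version('v0.3')           # -> '0.3'
#     _suggest_normalized_version('nonsense')       # -> None (no rational form found)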
#
# Legacy version processing (distribute-compatible)
#
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
_VERSION_REPLACE = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
'': None,
'.': None,
}
def _legacy_key(s):
def get_parts(s):
result = []
for p in _VERSION_PART.split(s.lower()):
p = _VERSION_REPLACE.get(p, p)
if p:
if '0' <= p[:1] <= '9':
p = p.zfill(8)
else:
p = '*' + p
result.append(p)
result.append('*final')
return result
result = []
for p in get_parts(s):
if p.startswith('*'):
if p < '*final':
while result and result[-1] == '*final-':
result.pop()
while result and result[-1] == '00000000':
result.pop()
result.append(p)
return tuple(result)
class LegacyVersion(Version):
def parse(self, s):
return _legacy_key(s)
@property
def is_prerelease(self):
result = False
for x in self._parts:
if (isinstance(x, string_types) and x.startswith('*') and
x < '*final'):
result = True
break
return result
class LegacyMatcher(Matcher):
version_class = LegacyVersion
_operators = dict(Matcher._operators)
_operators['~='] = '_match_compatible'
numeric_re = re.compile('^(\d+(\.\d+)*)')
def _match_compatible(self, version, constraint, prefix):
if version < constraint:
return False
m = self.numeric_re.match(str(constraint))
if not m:
logger.warning('Cannot compute compatible match for version %s '
' and constraint %s', version, constraint)
return True
s = m.groups()[0]
if '.' in s:
s = s.rsplit('.', 1)[0]
return _match_prefix(version, s)
#
# Semantic versioning
#
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
def is_semver(s):
return _SEMVER_RE.match(s)
def _semantic_key(s):
def make_tuple(s, absent):
if s is None:
result = (absent,)
else:
parts = s[1:].split('.')
# We can't compare ints and strings on Python 3, so fudge it
            # by zero-filling numeric values to simulate a numeric comparison
result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
return result
m = is_semver(s)
if not m:
raise UnsupportedVersionError(s)
groups = m.groups()
major, minor, patch = [int(i) for i in groups[:3]]
# choose the '|' and '*' so that versions sort correctly
pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
return (major, minor, patch), pre, build
class SemanticVersion(Version):
def parse(self, s):
return _semantic_key(s)
@property
def is_prerelease(self):
return self._parts[1][0] != '|'
class SemanticMatcher(Matcher):
version_class = SemanticVersion
class VersionScheme(object):
def __init__(self, key, matcher, suggester=None):
self.key = key
self.matcher = matcher
self.suggester = suggester
def is_valid_version(self, s):
try:
self.matcher.version_class(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_matcher(self, s):
try:
self.matcher(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_constraint_list(self, s):
"""
Used for processing some metadata fields
"""
return self.is_valid_matcher('dummy_name (%s)' % s)
def suggest(self, s):
if self.suggester is None:
result = None
else:
result = self.suggester(s)
return result
_SCHEMES = {
'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
_suggest_normalized_version),
'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s),
'semantic': VersionScheme(_semantic_key, SemanticMatcher,
_suggest_semantic_version),
}
_SCHEMES['default'] = _SCHEMES['normalized']
def get_scheme(name):
if name not in _SCHEMES:
raise ValueError('unknown scheme name: %r' % name)
return _SCHEMES[name]
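# Hedged usage sketch (not part of the original module): the version strings and
# constraints below are arbitrary examples. It shows the normalized scheme being used
# to parse, compare and match versions via the public get_scheme() entry point.
#
#     scheme = get_scheme('normalized')
#     scheme.is_valid_version('1.2.3b1')            # True
#     v = NormalizedVersion('1.2.3b1')
#     v.is_prerelease                               # True
#     v < NormalizedVersion('1.2.3')                # True
#     m = NormalizedMatcher('requests (>= 2.0, < 3.0)')
#     m.match('2.7.0')                              # True
#     scheme.suggest('1.0-alpha-2')                 # '1.0a2'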
| gpl-3.0 |
halvertoluke/edx-platform | common/djangoapps/student/tests/test_reset_password.py | 34 | 11864 | """
Test the various password reset flows
"""
import json
import re
import unittest
from django.core.cache import cache
from django.conf import settings
from django.test import TestCase
from django.test.client import RequestFactory
from django.contrib.auth.models import User
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX
from django.contrib.auth.tokens import default_token_generator
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, base36_to_int, int_to_base36
from mock import Mock, patch
import ddt
from student.views import password_reset, password_reset_confirm_wrapper, SETTING_CHANGE_INITIATED
from student.tests.factories import UserFactory
from student.tests.test_email import mock_render_to_string
from util.testing import EventTestMixin
from .test_microsite import fake_microsite_get_value
@ddt.ddt
class ResetPasswordTests(EventTestMixin, TestCase):
""" Tests that clicking reset password sends email, and doesn't activate the user
"""
request_factory = RequestFactory()
def setUp(self):
super(ResetPasswordTests, self).setUp('student.views.tracker')
self.user = UserFactory.create()
self.user.is_active = False
self.user.save()
self.token = default_token_generator.make_token(self.user)
self.uidb36 = int_to_base36(self.user.id)
self.user_bad_passwd = UserFactory.create()
self.user_bad_passwd.is_active = False
self.user_bad_passwd.password = UNUSABLE_PASSWORD_PREFIX
self.user_bad_passwd.save()
def uidb36_to_uidb64(self, uidb36=None):
""" Converts uidb36 into uidb64 """
return force_text(urlsafe_base64_encode(force_bytes(base36_to_int(uidb36 or self.uidb36))))
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_user_bad_password_reset(self):
"""Tests password reset behavior for user with password marked UNUSABLE_PASSWORD_PREFIX"""
bad_pwd_req = self.request_factory.post('/password_reset/', {'email': self.user_bad_passwd.email})
bad_pwd_resp = password_reset(bad_pwd_req)
# If they've got an unusable password, we return a successful response code
self.assertEquals(bad_pwd_resp.status_code, 200)
obj = json.loads(bad_pwd_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
self.assert_no_events_were_emitted()
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_nonexist_email_password_reset(self):
"""Now test the exception cases with of reset_password called with invalid email."""
bad_email_req = self.request_factory.post('/password_reset/', {'email': self.user.email + "makeItFail"})
bad_email_resp = password_reset(bad_email_req)
# Note: even if the email is bad, we return a successful response code
# This prevents someone potentially trying to "brute-force" find out which
# emails are and aren't registered with edX
self.assertEquals(bad_email_resp.status_code, 200)
obj = json.loads(bad_email_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
self.assert_no_events_were_emitted()
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_password_reset_ratelimited(self):
""" Try (and fail) resetting password 30 times in a row on an non-existant email address """
cache.clear()
for i in xrange(30):
good_req = self.request_factory.post('/password_reset/', {
'email': 'thisdoesnotexist{0}@foo.com'.format(i)
})
good_resp = password_reset(good_req)
self.assertEquals(good_resp.status_code, 200)
# then the rate limiter should kick in and give a HttpForbidden response
bad_req = self.request_factory.post('/password_reset/', {'email': '[email protected]'})
bad_resp = password_reset(bad_req)
self.assertEquals(bad_resp.status_code, 403)
self.assert_no_events_were_emitted()
cache.clear()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_reset_password_email(self, send_email):
"""Tests contents of reset password email, and that user is not active"""
good_req = self.request_factory.post('/password_reset/', {'email': self.user.email})
good_req.user = self.user
good_resp = password_reset(good_req)
self.assertEquals(good_resp.status_code, 200)
obj = json.loads(good_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
(subject, msg, from_addr, to_addrs) = send_email.call_args[0]
self.assertIn("Password reset", subject)
self.assertIn("You're receiving this e-mail because you requested a password reset", msg)
self.assertEquals(from_addr, settings.DEFAULT_FROM_EMAIL)
self.assertEquals(len(to_addrs), 1)
self.assertIn(self.user.email, to_addrs)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None,
)
#test that the user is not active
self.user = User.objects.get(pk=self.user.pk)
self.assertFalse(self.user.is_active)
re.search(r'password_reset_confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/', msg).groupdict()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data((False, 'http://'), (True, 'https://'))
@ddt.unpack
def test_reset_password_email_https(self, is_secure, protocol, send_email):
"""
Tests that the right url protocol is included in the reset password link
"""
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.is_secure = Mock(return_value=is_secure)
req.user = self.user
password_reset(req)
_, msg, _, _ = send_email.call_args[0]
expected_msg = "Please go to the following page and choose a new password:\n\n" + protocol
self.assertIn(expected_msg, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data(('Crazy Awesome Site', 'Crazy Awesome Site'), (None, 'edX'))
@ddt.unpack
def test_reset_password_email_domain(self, domain_override, platform_name, send_email):
"""
Tests that the right url domain and platform name is included in
the reset password email
"""
with patch("django.conf.settings.PLATFORM_NAME", platform_name):
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.get_host = Mock(return_value=domain_override)
req.user = self.user
password_reset(req)
_, msg, _, _ = send_email.call_args[0]
reset_msg = "you requested a password reset for your user account at {}"
if domain_override:
reset_msg = reset_msg.format(domain_override)
else:
reset_msg = reset_msg.format(settings.SITE_NAME)
self.assertIn(reset_msg, msg)
sign_off = "The {} Team".format(platform_name)
self.assertIn(sign_off, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch("microsite_configuration.microsite.get_value", fake_microsite_get_value)
@patch('django.core.mail.send_mail')
def test_reset_password_email_microsite(self, send_email):
"""
Tests that the right url domain and platform name is included in
the reset password email
"""
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.get_host = Mock(return_value=None)
req.user = self.user
password_reset(req)
_, msg, from_addr, _ = send_email.call_args[0]
reset_msg = "you requested a password reset for your user account at openedx.localhost"
self.assertIn(reset_msg, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
self.assertEqual(from_addr, "[email protected]")
@patch('student.views.password_reset_confirm')
def test_reset_password_bad_token(self, reset_confirm):
"""Tests bad token and uidb36 in password reset"""
bad_reset_req = self.request_factory.get('/password_reset_confirm/NO-OP/')
password_reset_confirm_wrapper(bad_reset_req, 'NO', 'OP')
confirm_kwargs = reset_confirm.call_args[1]
self.assertEquals(confirm_kwargs['uidb64'], self.uidb36_to_uidb64('NO'))
self.assertEquals(confirm_kwargs['token'], 'OP')
self.user = User.objects.get(pk=self.user.pk)
self.assertFalse(self.user.is_active)
@patch('student.views.password_reset_confirm')
def test_reset_password_good_token(self, reset_confirm):
"""Tests good token and uidb36 in password reset"""
good_reset_req = self.request_factory.get('/password_reset_confirm/{0}-{1}/'.format(self.uidb36, self.token))
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
confirm_kwargs = reset_confirm.call_args[1]
self.assertEquals(confirm_kwargs['uidb64'], self.uidb36_to_uidb64())
self.assertEquals(confirm_kwargs['token'], self.token)
self.user = User.objects.get(pk=self.user.pk)
self.assertTrue(self.user.is_active)
@patch('student.views.password_reset_confirm')
@patch("microsite_configuration.microsite.get_value", fake_microsite_get_value)
def test_reset_password_good_token_microsite(self, reset_confirm):
"""Tests password reset confirmation page for micro site"""
good_reset_req = self.request_factory.get('/password_reset_confirm/{0}-{1}/'.format(self.uidb36, self.token))
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
confirm_kwargs = reset_confirm.call_args[1]
self.assertEquals(confirm_kwargs['extra_context']['platform_name'], 'Fake University')
@patch('student.views.password_reset_confirm')
def test_reset_password_with_reused_password(self, reset_confirm):
"""Tests good token and uidb36 in password reset"""
good_reset_req = self.request_factory.get('/password_reset_confirm/{0}-{1}/'.format(self.uidb36, self.token))
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
confirm_kwargs = reset_confirm.call_args[1]
self.assertEquals(confirm_kwargs['uidb64'], self.uidb36_to_uidb64())
self.assertEquals(confirm_kwargs['token'], self.token)
self.user = User.objects.get(pk=self.user.pk)
self.assertTrue(self.user.is_active)
| agpl-3.0 |
christiandev/l10n-brazil | __unported__/l10n_br_account_product/sped/nfe/validator/txt.py | 2 | 12103 | # -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2012 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU Affero General Public License for more details. #
# #
#You should have received a copy of the GNU Affero General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from openerp import pooler
from openerp.osv import orm
from openerp.tools.translate import _
def validate(cr, uid, ids, context=None):
strErro = u''
pool = pooler.get_pool(cr.dbname)
if context is None:
context = {}
for inv in pool.get('account.invoice').browse(cr, uid, ids):
#Nota fiscal
if inv.issuer == '1' or inv.fiscal_type == 'service' or \
not inv.fiscal_document_electronic:
continue
if not inv.document_serie_id:
strErro = u'Nota Fiscal - Série da nota fiscal\n'
if not inv.fiscal_document_id:
strErro += u'Nota Fiscal - Tipo de documento fiscal\n'
if not inv.document_serie_id.internal_sequence_id:
strErro += u'Nota Fiscal - Número da nota fiscal, a série deve ter uma sequencia interna\n'
#Emitente
if not inv.company_id.partner_id.legal_name:
strErro += u'Emitente - Razão Social\n'
if not inv.company_id.partner_id.name:
strErro += u'Emitente - Fantasia\n'
if not inv.company_id.partner_id.cnpj_cpf:
strErro += u'Emitente - CNPJ/CPF\n'
if not inv.company_id.partner_id.street:
strErro += u'Emitente / Endereço - Logradouro\n'
if not inv.company_id.partner_id.number:
strErro += u'Emitente / Endereço - Número\n'
if not inv.company_id.partner_id.zip:
strErro += u'Emitente / Endereço - CEP\n'
if not inv.company_id.cnae_main_id:
strErro += u'Emitente / CNAE Principal\n'
if not inv.company_id.partner_id.inscr_est:
strErro += u'Emitente / Inscrição Estadual\n'
if not inv.company_id.partner_id.state_id:
strErro += u'Emitente / Endereço - Estado\n'
else:
if not inv.company_id.partner_id.state_id.ibge_code:
strErro += u'Emitente / Endereço - Código do IBGE do estado\n'
if not inv.company_id.partner_id.state_id.name:
strErro += u'Emitente / Endereço - Nome do estado\n'
if not inv.company_id.partner_id.l10n_br_city_id:
strErro += u'Emitente / Endereço - município\n'
else:
if not inv.company_id.partner_id.l10n_br_city_id.name:
strErro += u'Emitente / Endereço - Nome do município\n'
if not inv.company_id.partner_id.l10n_br_city_id.ibge_code:
strErro += u'Emitente / Endereço - Código do IBGE do município\n'
if not inv.company_id.partner_id.country_id:
strErro += u'Emitente / Endereço - país\n'
else:
if not inv.company_id.partner_id.country_id.name:
strErro += u'Emitente / Endereço - Nome do país\n'
if not inv.company_id.partner_id.country_id.bc_code:
strErro += u'Emitente / Endereço - Código do BC do país\n'
#Destinatário
if inv.partner_id.is_company and not inv.partner_id.legal_name:
strErro += u'Destinatário - Razão Social\n'
if inv.partner_id.country_id.id == inv.company_id.partner_id.country_id.id:
if not inv.partner_id.cnpj_cpf:
strErro += u'Destinatário - CNPJ/CPF\n'
if not inv.partner_id.street:
strErro += u'Destinatário / Endereço - Logradouro\n'
if not inv.partner_id.number:
strErro += u'Destinatário / Endereço - Número\n'
if inv.partner_id.country_id.id == inv.company_id.partner_id.country_id.id:
if not inv.partner_id.zip:
strErro += u'Destinatário / Endereço - CEP\n'
if inv.partner_id.country_id.id == inv.company_id.partner_id.country_id.id:
if not inv.partner_id.state_id:
strErro += u'Destinatário / Endereço - Estado\n'
else:
if not inv.partner_id.state_id.ibge_code:
strErro += u'Destinatário / Endereço - Código do IBGE do estado\n'
if not inv.partner_id.state_id.name:
strErro += u'Destinatário / Endereço - Nome do estado\n'
if inv.partner_id.country_id.id == inv.company_id.partner_id.country_id.id:
if not inv.partner_id.l10n_br_city_id:
strErro += u'Destinatário / Endereço - Município\n'
else:
if not inv.partner_id.l10n_br_city_id.name:
strErro += u'Destinatário / Endereço - Nome do município\n'
if not inv.partner_id.l10n_br_city_id.ibge_code:
strErro += u'Destinatário / Endereço - Código do IBGE do município\n'
if not inv.partner_id.country_id:
strErro += u'Destinatário / Endereço - País\n'
else:
if not inv.partner_id.country_id.name:
strErro += u'Destinatário / Endereço - Nome do país\n'
if not inv.partner_id.country_id.bc_code:
strErro += u'Destinatário / Endereço - Código do BC do país\n'
#endereco de entrega
if inv.partner_shipping_id:
if inv.partner_id.id != inv.partner_shipping_id.id:
if not inv.partner_shipping_id.street:
strErro += u'Destinatário / Endereço de Entrega - Logradouro\n'
if not inv.partner_shipping_id.number:
strErro += u'Destinatário / Endereço de Entrega - Número\n'
if not inv.partner_shipping_id.zip:
strErro += u'Destinatário / Endereço de Entrega - CEP\n'
if not inv.partner_shipping_id.state_id:
strErro += u'Destinatário / Endereço de Entrega - Estado\n'
else:
if not inv.partner_shipping_id.state_id.ibge_code:
strErro += u'Destinatário / Endereço de Entrega - Código do IBGE do estado\n'
if not inv.partner_shipping_id.state_id.name:
strErro += u'Destinatário / Endereço de Entrega - Nome do estado\n'
if not inv.partner_shipping_id.l10n_br_city_id:
strErro += u'Destinatário / Endereço - Município\n'
else:
if not inv.partner_shipping_id.l10n_br_city_id.name:
strErro += u'Destinatário / Endereço de Entrega - Nome do município\n'
if not inv.partner_shipping_id.l10n_br_city_id.ibge_code:
strErro += u'Destinatário / Endereço de Entrega - Código do IBGE do município\n'
if not inv.partner_shipping_id.country_id:
strErro += u'Destinatário / Endereço de Entrega - País\n'
else:
if not inv.partner_shipping_id.country_id.name:
strErro += u'Destinatário / Endereço de Entrega - Nome do país\n'
if not inv.partner_shipping_id.country_id.bc_code:
strErro += u'Destinatário / Endereço de Entrega - Código do BC do país\n'
#produtos
for inv_line in inv.invoice_line:
if inv_line.product_id:
if not inv_line.product_id.default_code:
strErro += u'Produtos e Serviços: %s, Qtde: %s - Referência/Código do produto\n' % (inv_line.product_id.name, inv_line.quantity)
if not inv_line.product_id.name:
strErro += u'Produtos e Serviços: %s - %s, Qtde: %s - Nome do produto\n' % (inv_line.product_id.default_code, inv_line.product_id.name, inv_line.quantity)
if not inv_line.cfop_id:
strErro += u'Produtos e Serviços: %s - %s, Qtde: %s - CFOP\n' % (inv_line.product_id.default_code, inv_line.product_id.name, inv_line.quantity)
else:
if not inv_line.cfop_id.code:
strErro += u'Produtos e Serviços: %s - %s, Qtde: %s - Código do CFOP\n' % (inv_line.product_id.default_code, inv_line.product_id.name, inv_line.quantity)
if not inv_line.uos_id:
strErro += u'Produtos e Serviços: %s - %s, Qtde: %s - Unidade de medida\n' % (inv_line.product_id.default_code, inv_line.product_id.name, inv_line.quantity)
if not inv_line.quantity:
strErro += u'Produtos e Serviços: %s - %s, Qtde: %s - Quantidade\n' % (inv_line.product_id.default_code, inv_line.product_id.name, inv_line.quantity)
#Se for Documento Fiscal de Produto
if inv.fiscal_type == 'product':
if not inv_line.fiscal_classification_id:
strErro += u'Produtos e Serviços: %s - %s, Qtde: %s - Classificação Fiscal(NCM)\n' % (inv_line.product_id.default_code, inv_line.product_id.name, inv_line.quantity)
if not inv_line.price_unit:
strErro += u'Produtos e Serviços: %s - %s, Qtde: %s - Preco unitario\n' % (inv_line.product_id.default_code, inv_line.product_id.name, inv_line.quantity)
if inv_line.product_type == 'product':
if not inv_line.icms_cst_id:
strErro += u'Produtos e Serviços: %s - %s, Qtde: %s - CST do ICMS\n' % (inv_line.product_id.default_code, inv_line.product_id.name, inv_line.quantity)
if not inv_line.ipi_cst_id:
strErro += u'Produtos e Serviços: %s - %s, Qtde: %s - CST do IPI\n' % (inv_line.product_id.default_code, inv_line.product_id.name, inv_line.quantity)
if inv_line.product_type == 'service':
if not inv_line.issqn_type:
strErro += u'Produtos e Serviços: %s - %s, Qtde: %s - Tipo do ISSQN\n' % (inv_line.product_id.default_code, inv_line.product_id.name, inv_line.quantity)
if not inv_line.service_type_id:
strErro += u'Produtos e Serviços: %s - %s, Qtde: %s - Tipo do Serviço\n' % (inv_line.product_id.default_code, inv_line.product_id.name, inv_line.quantity)
if not inv_line.pis_cst_id:
strErro += u'Produtos e Serviços: %s - %s, Qtde: %s - CST do PIS\n' % (inv_line.product_id.default_code, inv_line.product_id.name, inv_line.quantity)
if not inv_line.cofins_cst_id:
strErro += u'Produtos e Serviços: %s - %s, Qtde: %s - CST do COFINS\n' % (inv_line.product_id.default_code, inv_line.product_id.name, inv_line.quantity)
if strErro:
raise orm.except_orm(
_('Error !'), ("Error Validating NFE:\n '%s'") % (strErro, ))
return True
| agpl-3.0 |
arthurdarcet/riverrun | riverrun/http/utils.py | 1 | 1593 | import bson
import cherrypy
import functools
import json
import logging
import traceback
logger = logging.getLogger(__name__)
def json_exposed(fn):
@cherrypy.expose
@functools.wraps(fn)
def wrapper(*args, **kwargs):
try:
code = 200
value = fn(*args, **kwargs)
except cherrypy.HTTPError as e:
code = e.code
value = {'status': e.code, 'error': e.reason}
except Exception as e:
msg = '{}: {}'.format(e.__class__.__qualname__, e)
logger.error(msg)
logger.debug(traceback.format_exc())
code = 500
value = {'status': 500, 'error': msg}
cherrypy.response.headers['Content-Type'] = 'application/json'
cherrypy.response.status = code
return json.dumps(value).encode('utf-8')
return wrapper
def paginated(fn):
@functools.wraps(fn)
def wrapper(*args, page=0, **kwargs):
try:
page = int(page)
        except (TypeError, ValueError):
raise cherrypy.NotFound()
else:
return fn(*args, **kwargs).skip(page * 30).limit(30)
return wrapper
class _LogManager(cherrypy._cplogging.LogManager):
def __init__(self):
self.error_log = logging.getLogger('cherrypy.error')
self.access_log = logging.getLogger('cherrypy.access')
self.access_log_format = '{h}, {s} "{r}"'
class BaseApp:
def mount(self):
app = cherrypy.Application(self, self.mount_to, getattr(self, 'config', {'/': {}}))
app.log = _LogManager()
cherrypy.tree.mount(app)
| mit |
tow/dnspython | tests/test_rrset.py | 59 | 2282 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
import dns.rrset
class RRsetTestCase(unittest.TestCase):
def testEqual1(self):
r1 = dns.rrset.from_text('foo', 300, 'in', 'a', '10.0.0.1', '10.0.0.2')
r2 = dns.rrset.from_text('FOO', 300, 'in', 'a', '10.0.0.2', '10.0.0.1')
self.failUnless(r1 == r2)
def testEqual2(self):
r1 = dns.rrset.from_text('foo', 300, 'in', 'a', '10.0.0.1', '10.0.0.2')
r2 = dns.rrset.from_text('FOO', 600, 'in', 'a', '10.0.0.2', '10.0.0.1')
self.failUnless(r1 == r2)
def testNotEqual1(self):
r1 = dns.rrset.from_text('fooa', 30, 'in', 'a', '10.0.0.1', '10.0.0.2')
r2 = dns.rrset.from_text('FOO', 30, 'in', 'a', '10.0.0.2', '10.0.0.1')
self.failUnless(r1 != r2)
def testNotEqual2(self):
r1 = dns.rrset.from_text('foo', 30, 'in', 'a', '10.0.0.1', '10.0.0.3')
r2 = dns.rrset.from_text('FOO', 30, 'in', 'a', '10.0.0.2', '10.0.0.1')
self.failUnless(r1 != r2)
def testNotEqual3(self):
r1 = dns.rrset.from_text('foo', 30, 'in', 'a', '10.0.0.1', '10.0.0.2',
'10.0.0.3')
r2 = dns.rrset.from_text('FOO', 30, 'in', 'a', '10.0.0.2', '10.0.0.1')
self.failUnless(r1 != r2)
def testNotEqual4(self):
r1 = dns.rrset.from_text('foo', 30, 'in', 'a', '10.0.0.1')
r2 = dns.rrset.from_text('FOO', 30, 'in', 'a', '10.0.0.2', '10.0.0.1')
self.failUnless(r1 != r2)
if __name__ == '__main__':
unittest.main()
| isc |
tudorvio/tempest | tempest/api/volume/test_volumes_list.py | 10 | 9131 | # Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from oslo_log import log as logging
from testtools import matchers
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest import test
LOG = logging.getLogger(__name__)
class VolumesV2ListTestJSON(base.BaseVolumeTest):
"""
This test creates a number of 1G volumes. To run successfully,
ensure that the backing file for the volume group that Nova uses
has space for at least 3 1G volumes!
If you are running a Devstack environment, ensure that the
VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
"""
VOLUME_FIELDS = ('id', 'name')
def assertVolumesIn(self, fetched_list, expected_list, fields=None):
if fields:
expected_list = map(operator.itemgetter(*fields), expected_list)
fetched_list = map(operator.itemgetter(*fields), fetched_list)
missing_vols = [v for v in expected_list if v not in fetched_list]
if len(missing_vols) == 0:
return
def str_vol(vol):
return "%s:%s" % (vol['id'], vol[self.name])
raw_msg = "Could not find volumes %s in expected list %s; fetched %s"
self.fail(raw_msg % ([str_vol(v) for v in missing_vols],
[str_vol(v) for v in expected_list],
[str_vol(v) for v in fetched_list]))
@classmethod
def setup_clients(cls):
super(VolumesV2ListTestJSON, cls).setup_clients()
cls.client = cls.volumes_client
@classmethod
def resource_setup(cls):
super(VolumesV2ListTestJSON, cls).resource_setup()
cls.name = cls.VOLUME_FIELDS[1]
# Create 3 test volumes
cls.volume_list = []
cls.volume_id_list = []
cls.metadata = {'Type': 'work'}
for i in range(3):
volume = cls.create_volume(metadata=cls.metadata)
volume = cls.client.show_volume(volume['id'])
cls.volume_list.append(volume)
cls.volume_id_list.append(volume['id'])
@classmethod
def resource_cleanup(cls):
# Delete the created volumes
for volid in cls.volume_id_list:
cls.client.delete_volume(volid)
cls.client.wait_for_resource_deletion(volid)
super(VolumesV2ListTestJSON, cls).resource_cleanup()
def _list_by_param_value_and_assert(self, params, with_detail=False):
"""
Perform list or list_details action with given params
and validates result.
"""
if with_detail:
fetched_vol_list = \
self.client.list_volumes(detail=True, params=params)
else:
fetched_vol_list = self.client.list_volumes(params=params)
# Validating params of fetched volumes
# In v2, only list detail view includes items in params.
# In v1, list view and list detail view are same. So the
# following check should be run when 'with_detail' is True
# or v1 tests.
if with_detail or self._api_version == 1:
for volume in fetched_vol_list:
for key in params:
msg = "Failed to list volumes %s by %s" % \
('details' if with_detail else '', key)
if key == 'metadata':
self.assertThat(
volume[key].items(),
matchers.ContainsAll(params[key].items()),
msg)
else:
self.assertEqual(params[key], volume[key], msg)
@test.attr(type='smoke')
@test.idempotent_id('0b6ddd39-b948-471f-8038-4787978747c4')
def test_volume_list(self):
# Get a list of Volumes
# Fetch all volumes
fetched_list = self.client.list_volumes()
self.assertVolumesIn(fetched_list, self.volume_list,
fields=self.VOLUME_FIELDS)
@test.idempotent_id('adcbb5a7-5ad8-4b61-bd10-5380e111a877')
def test_volume_list_with_details(self):
# Get a list of Volumes with details
# Fetch all Volumes
fetched_list = self.client.list_volumes(detail=True)
self.assertVolumesIn(fetched_list, self.volume_list)
@test.idempotent_id('a28e8da4-0b56-472f-87a8-0f4d3f819c02')
def test_volume_list_by_name(self):
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
params = {self.name: volume[self.name]}
fetched_vol = self.client.list_volumes(params=params)
self.assertEqual(1, len(fetched_vol), str(fetched_vol))
self.assertEqual(fetched_vol[0][self.name],
volume[self.name])
@test.idempotent_id('2de3a6d4-12aa-403b-a8f2-fdeb42a89623')
def test_volume_list_details_by_name(self):
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
params = {self.name: volume[self.name]}
fetched_vol = self.client.list_volumes(detail=True, params=params)
self.assertEqual(1, len(fetched_vol), str(fetched_vol))
self.assertEqual(fetched_vol[0][self.name],
volume[self.name])
@test.idempotent_id('39654e13-734c-4dab-95ce-7613bf8407ce')
def test_volumes_list_by_status(self):
params = {'status': 'available'}
fetched_list = self.client.list_volumes(params=params)
self._list_by_param_value_and_assert(params)
self.assertVolumesIn(fetched_list, self.volume_list,
fields=self.VOLUME_FIELDS)
@test.idempotent_id('2943f712-71ec-482a-bf49-d5ca06216b9f')
def test_volumes_list_details_by_status(self):
params = {'status': 'available'}
fetched_list = self.client.list_volumes(detail=True, params=params)
for volume in fetched_list:
self.assertEqual('available', volume['status'])
self.assertVolumesIn(fetched_list, self.volume_list)
@test.idempotent_id('c0cfa863-3020-40d7-b587-e35f597d5d87')
def test_volumes_list_by_availability_zone(self):
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
zone = volume['availability_zone']
params = {'availability_zone': zone}
fetched_list = self.client.list_volumes(params=params)
self._list_by_param_value_and_assert(params)
self.assertVolumesIn(fetched_list, self.volume_list,
fields=self.VOLUME_FIELDS)
@test.idempotent_id('e1b80d13-94f0-4ba2-a40e-386af29f8db1')
def test_volumes_list_details_by_availability_zone(self):
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
zone = volume['availability_zone']
params = {'availability_zone': zone}
fetched_list = self.client.list_volumes(detail=True, params=params)
for volume in fetched_list:
self.assertEqual(zone, volume['availability_zone'])
self.assertVolumesIn(fetched_list, self.volume_list)
@test.idempotent_id('b5ebea1b-0603-40a0-bb41-15fcd0a53214')
def test_volume_list_with_param_metadata(self):
# Test to list volumes when metadata param is given
params = {'metadata': self.metadata}
self._list_by_param_value_and_assert(params)
@test.idempotent_id('1ca92d3c-4a8e-4b43-93f5-e4c7fb3b291d')
def test_volume_list_with_detail_param_metadata(self):
# Test to list volumes details when metadata param is given
params = {'metadata': self.metadata}
self._list_by_param_value_and_assert(params, with_detail=True)
@test.idempotent_id('777c87c1-2fc4-4883-8b8e-5c0b951d1ec8')
def test_volume_list_param_display_name_and_status(self):
# Test to list volume when display name and status param is given
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
params = {self.name: volume[self.name],
'status': 'available'}
self._list_by_param_value_and_assert(params)
@test.idempotent_id('856ab8ca-6009-4c37-b691-be1065528ad4')
def test_volume_list_with_detail_param_display_name_and_status(self):
# Test to list volume when name and status param is given
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
params = {self.name: volume[self.name],
'status': 'available'}
self._list_by_param_value_and_assert(params, with_detail=True)
class VolumesV1ListTestJSON(VolumesV2ListTestJSON):
_api_version = 1
VOLUME_FIELDS = ('id', 'display_name')
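# Hedged configuration sketch (an assumption, not part of the original file): the
# devstack localrc setting referenced in the class docstring above, sized to hold
# the three 1G test volumes this suite creates.
#
#     # in devstack's localrc (or the [[local|localrc]] section of local.conf)
#     VOLUME_BACKING_FILE_SIZE=4G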
| apache-2.0 |