repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
SerCeMan/intellij-community | python/lib/Lib/site-packages/django/conf/locale/nn/formats.py | 685 | 1657 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
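# Illustrative rendering (added example, not part of the original file): for the
# date 2006-10-25, DATE_FORMAT would give roughly "25. oktober 2006" (assuming the
# nn translation of the month name), and SHORT_DATE_FORMAT gives "25.10.2006".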
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
'%Y-%m-%d', # '2006-10-25',
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
| apache-2.0 | -7,663,121,929,892,323,000 | 37.534884 | 81 | 0.490646 | false |
slabanja/ase | ase/test/__init__.py | 1 | 3984 | import sys
import unittest
from glob import glob
import numpy as np
class NotAvailable(SystemExit):
def __init__(self, msg, code=0):
SystemExit.__init__(self, (msg,code,))
self.msg = msg
self.code = code
# -------------------------------------------------------------------
# Custom test case/suite for embedding unittests in the test scripts
if sys.version_info < (2, 4, 0, 'final', 0):
class CustomTestCase(unittest.TestCase):
assertTrue = unittest.TestCase.failUnless
assertFalse = unittest.TestCase.failIf
else:
from unittest import TestCase as CustomTestCase
from ase.parallel import paropen
class CustomTextTestRunner(unittest.TextTestRunner):
def __init__(self, logname, descriptions=1, verbosity=1):
self.f = paropen(logname, 'w')
unittest.TextTestRunner.__init__(self, self.f, descriptions, verbosity)
def run(self, test):
stderr_old = sys.stderr
try:
sys.stderr = self.f
testresult = unittest.TextTestRunner.run(self, test)
finally:
sys.stderr = stderr_old
return testresult
# -------------------------------------------------------------------
class ScriptTestCase(unittest.TestCase):
def __init__(self, methodname='testfile', filename=None, display=True):
unittest.TestCase.__init__(self, methodname)
self.filename = filename
self.display = display
def testfile(self):
try:
execfile(self.filename, {'display': self.display})
except KeyboardInterrupt:
raise RuntimeError('Keyboard interrupt')
except NotAvailable, err:
# Only non-zero error codes are failures
if err.code:
raise
def id(self):
return self.filename
def __str__(self):
return '%s (ScriptTestCase)' % self.filename.split('/')[-1]
def __repr__(self):
return "ScriptTestCase(filename='%s')" % self.filename
def test(verbosity=1, dir=None, display=True, stream=sys.stdout):
ts = unittest.TestSuite()
if dir is None:
dir = __path__[0]
tests = glob(dir + '/*.py')
tests.sort()
for test in tests:
if test.endswith('__init__.py'):
continue
if test.endswith('COCu111.py'):
lasttest = test
continue
ts.addTest(ScriptTestCase(filename=test, display=display))
ts.addTest(ScriptTestCase(filename=lasttest, display=display))
from ase.utils import devnull
sys.stdout = devnull
ttr = unittest.TextTestRunner(verbosity=verbosity, stream=stream)
results = ttr.run(ts)
sys.stdout = sys.__stdout__
return results
class World:
"""Class for testing parallelization with MPI"""
def __init__(self, size):
self.size = size
self.data = {}
def get_rank(self, rank):
return CPU(self, rank)
class CPU:
def __init__(self, world, rank):
self.world = world
self.rank = rank
self.size = world.size
def send(self, x, rank):
while (self.rank, rank) in self.world.data:
pass
self.world.data[(self.rank, rank)] = x
def receive(self, x, rank):
while (rank, self.rank) not in self.world.data:
pass
x[:] = self.world.data.pop((rank, self.rank))
def sum(self, x):
if not isinstance(x, np.ndarray):
x = np.array([x])
self.sum(x)
return x[0]
if self.rank == 0:
y = np.empty_like(x)
for rank in range(1, self.size):
self.receive(y, rank)
x += y
else:
self.send(x, 0)
self.broadcast(x, 0)
def broadcast(self, x, root):
if self.rank == root:
for rank in range(self.size):
if rank != root:
self.send(x, rank)
else:
self.receive(x, root)
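# Illustrative note (added, not part of the original module): send()/receive()
# busy-wait on the shared `data` dict, so each fake rank must run concurrently,
# e.g. in its own thread. A minimal sketch:
#
#   world = World(2)
#   c0, c1 = world.get_rank(0), world.get_rank(1)
#   # running c0.sum(3) and c1.sum(4) concurrently returns 7 on both ranks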
| gpl-2.0 | 4,836,849,556,218,759,000 | 27.056338 | 79 | 0.557229 | false |
octavioturra/aritial | google_appengine/lib/django/django/contrib/admin/utils.py | 33 | 3621 | "Misc. utility functions/classes for admin documentation generator."
import re
from email.Parser import HeaderParser
from email.Errors import HeaderParseError
try:
import docutils.core
import docutils.nodes
import docutils.parsers.rst.roles
except ImportError:
docutils_is_available = False
else:
docutils_is_available = True
def trim_docstring(docstring):
"""
Uniformly trims leading/trailing whitespace from docstrings.
Based on http://www.python.org/peps/pep-0257.html#handling-docstring-indentation
"""
if not docstring or not docstring.strip():
return ''
# Convert tabs to spaces and split into lines
lines = docstring.expandtabs().splitlines()
indent = min([len(line) - len(line.lstrip()) for line in lines if line.lstrip()])
trimmed = [lines[0].lstrip()] + [line[indent:].rstrip() for line in lines[1:]]
return "\n".join(trimmed).strip()
def parse_docstring(docstring):
"""
Parse out the parts of a docstring. Returns (title, body, metadata).
"""
docstring = trim_docstring(docstring)
parts = re.split(r'\n{2,}', docstring)
title = parts[0]
if len(parts) == 1:
body = ''
metadata = {}
else:
parser = HeaderParser()
try:
metadata = parser.parsestr(parts[-1])
except HeaderParseError:
metadata = {}
body = "\n\n".join(parts[1:])
else:
metadata = dict(metadata.items())
if metadata:
body = "\n\n".join(parts[1:-1])
else:
body = "\n\n".join(parts[1:])
return title, body, metadata
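# Illustrative example (added, not part of the original module):
#   parse_docstring("Do a thing.\n\nLonger help text.\n\nTemplates: admin/index.html")
#   returns ('Do a thing.', 'Longer help text.', {'Templates': 'admin/index.html'})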
def parse_rst(text, default_reference_context, thing_being_parsed=None, link_base='../..'):
"""
Convert the string from reST to an XHTML fragment.
"""
overrides = {
'doctitle_xform' : True,
'inital_header_level' : 3,
"default_reference_context" : default_reference_context,
"link_base" : link_base,
}
if thing_being_parsed:
thing_being_parsed = "<%s>" % thing_being_parsed
parts = docutils.core.publish_parts(text, source_path=thing_being_parsed,
destination_path=None, writer_name='html',
settings_overrides=overrides)
return parts['fragment']
#
# reST roles
#
ROLES = {
'model' : '%s/models/%s/',
'view' : '%s/views/%s/',
'template' : '%s/templates/%s/',
'filter' : '%s/filters/#%s',
'tag' : '%s/tags/#%s',
}
def create_reference_role(rolename, urlbase):
def _role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None: options = {}
if content is None: content = []
node = docutils.nodes.reference(rawtext, text, refuri=(urlbase % (inliner.document.settings.link_base, text.lower())), **options)
return [node], []
docutils.parsers.rst.roles.register_canonical_role(rolename, _role)
def default_reference_role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None: options = {}
if content is None: content = []
context = inliner.document.settings.default_reference_context
node = docutils.nodes.reference(rawtext, text, refuri=(ROLES[context] % (inliner.document.settings.link_base, text.lower())), **options)
return [node], []
if docutils_is_available:
docutils.parsers.rst.roles.register_canonical_role('cmsreference', default_reference_role)
docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE = 'cmsreference'
for name, urlbase in ROLES.items():
create_reference_role(name, urlbase)
| apache-2.0 | 162,816,134,839,681,600 | 34.5 | 140 | 0.63325 | false |
lpeska/BRDTI | netlaprls.py | 1 | 2811 | '''
The NetLapRLS implementation is based on the one from the PyDTI project (https://github.com/stephenliu0423/PyDTI); changes were made to the evaluation procedure.
[1] Xia, Zheng, et al. "Semi-supervised drug-protein interaction prediction from heterogeneous biological spaces." BMC systems biology 4.Suppl 2 (2010): S6.
Default parameters:
gamma_d = 0.01, gamma_d=gamma_d2/gamma_d1
gamma_t = 0.01, gamma_t=gamma_p2/gamma_p1
beta_d = 0.3
beta_t = 0.3
'''
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
from functions import normalized_discounted_cummulative_gain
class NetLapRLS:
def __init__(self, gamma_d=0.01, gamma_t=0.01, beta_d=0.3, beta_t=0.3):
self.gamma_d = float(gamma_d)
self.gamma_t = float(gamma_t)
self.beta_d = float(beta_d)
self.beta_t = float(beta_t)
    def fix_model(self, W, intMat, drugMat, targetMat, seed=None):
        # Mask the interaction matrix so that only training pairs contribute.
        R = W*intMat
        m, n = R.shape
        # Symmetrize the similarity matrices.
        drugMat = (drugMat+drugMat.T)/2
        targetMat = (targetMat+targetMat.T)/2
        # Blend the given similarities with network-derived similarity (R*R^T / R^T*R).
        Wd = (drugMat+self.gamma_d*np.dot(R, R.T))/(1.0+self.gamma_d)
        Wt = (targetMat+self.gamma_t*np.dot(R.T, R))/(1.0+self.gamma_t)
        Wd = Wd-np.diag(np.diag(Wd))
        Wt = Wt-np.diag(np.diag(Wt))
        # Normalized graph Laplacians for the drug and target graphs.
        D = np.diag(np.sqrt(1.0/np.sum(Wd, axis=1)))
        Ld = np.eye(m) - np.dot(np.dot(D, Wd), D)
        D = np.diag(np.sqrt(1.0/np.sum(Wt, axis=1)))
        Lt = np.eye(n) - np.dot(np.dot(D, Wt), D)
        # Closed-form solutions of the Laplacian-regularized RLS problems on each side.
        X = np.linalg.inv(Wd+self.beta_d*np.dot(Ld, Wd))
        Fd = np.dot(np.dot(Wd, X), R)
        X = np.linalg.inv(Wt+self.beta_t*np.dot(Lt, Wt))
        Ft = np.dot(np.dot(Wt, X), R.T)
        # Final prediction: average of the drug-side and target-side estimates.
        self.predictR = 0.5*(Fd+Ft.T)
def predict_scores(self, test_data, N):
inx = np.array(test_data)
return self.predictR[inx[:, 0], inx[:, 1]]
def evaluation(self, test_data, test_label):
scores = self.predictR[test_data[:, 0], test_data[:, 1]]
self.scores = scores
x, y = test_data[:, 0], test_data[:, 1]
test_data_T = np.column_stack((y,x))
ndcg = normalized_discounted_cummulative_gain(test_data, test_label, np.array(scores))
ndcg_inv = normalized_discounted_cummulative_gain(test_data_T, test_label, np.array(scores))
prec, rec, thr = precision_recall_curve(test_label, scores)
aupr_val = auc(rec, prec)
fpr, tpr, thr = roc_curve(test_label, scores)
auc_val = auc(fpr, tpr)
        # NOTE: we should distinguish here between the inverted and non-inverted methods' nDCGs
return aupr_val, auc_val, ndcg, ndcg_inv
def __str__(self):
return "Model: NetLapRLS, gamma_d:%s, gamma_t:%s, beta_d:%s, beta_t:%s" % (self.gamma_d, self.gamma_t, self.beta_d, self.beta_t)
| gpl-2.0 | -5,001,422,388,090,338,000 | 40.955224 | 156 | 0.608324 | false |
albertodonato/toolrack | toolrack/tests/test_config.py | 1 | 7449 | from operator import attrgetter
import pytest
from ..config import (
Config,
ConfigKey,
ConfigKeyTypes,
InvalidConfigValue,
MissingConfigKey,
)
class TestConfigKeyTypes:
def test_get_converter_unknown_type(self):
"""An error is raised if type is unknown."""
with pytest.raises(TypeError):
ConfigKeyTypes().get_converter("unknown")
@pytest.mark.parametrize(
"conv_type,value,result",
[("int", "10", 10), ("float", "20.30", 20.30), ("str", 10, "10")],
)
def test_types(self, conv_type, value, result):
"""Values can be converted."""
converter = ConfigKeyTypes().get_converter(conv_type)
assert converter(value) == result
@pytest.mark.parametrize(
"value,result",
[
# true values
(3, True),
(["foo"], True),
("true", True),
("True", True),
("yes", True),
("Yes", True),
# false values
(0, False),
([], False),
("false", False),
("no", False),
("foo", False),
("", False),
],
)
def test_bool(self, value, result):
"""Bool values cna be converted."""
converter = ConfigKeyTypes().get_converter("bool")
assert converter(value) == result
@pytest.mark.parametrize("value", [("a", "b"), ["a", "b"], "a b"])
def test_list(self, value):
"""List values are converted to lists."""
converter = ConfigKeyTypes().get_converter("str[]")
assert converter(value) == ["a", "b"]
def test_list_of_ints(self):
"""List values are converted to the propert list type."""
converter = ConfigKeyTypes().get_converter("int[]")
assert converter("1 2") == [1, 2]
def test_list_of_unknown(self):
"""An error is raised if a list of unknown type is requested."""
with pytest.raises(TypeError):
ConfigKeyTypes().get_converter("unknown[]")
class TestConfigKey:
def test_instantiate(self):
"""A ConfigKey has a name."""
config_key = ConfigKey("key", "str")
assert config_key.name == "key"
assert config_key.description == ""
assert config_key.default is None
assert config_key.validator is None
assert not config_key.required
def test_instantiate_with_description(self):
"""A ConfigKey can have a description."""
config_key = ConfigKey("key", "str", description="a config key")
assert config_key.description == "a config key"
def test_instantiate_with_required(self):
"""A ConfigKey can be marked as required."""
config_key = ConfigKey("key", "str", required=True)
assert config_key.required
def test_instantiate_with_default(self):
"""A ConfigKey can have a default value."""
config_key = ConfigKey("key", "str", default=9)
assert config_key.default == 9
def test_instantiate_with_validator(self):
"""A ConfigKey can have a validator."""
validator = object() # just a marker
config_key = ConfigKey("key", "str", validator=validator)
assert config_key.validator is validator
@pytest.mark.parametrize(
"conv_type,value,result",
[
("str", "message", "message"),
("str", 9, "9"),
("int", "100", 100),
("float", "100.3", 100.3),
],
)
def test_parse(self, conv_type, value, result):
"""ConfigKey.parse parses values based on type."""
config_key = ConfigKey("key", conv_type)
assert config_key.parse(value) == result
def test_parse_invalid_value(self):
"""If the type conversion fails, an error is raised."""
config_key = ConfigKey("key", "int")
with pytest.raises(InvalidConfigValue):
config_key.parse("not an int")
def test_parse_with_validator(self):
"""If the validator fails, an error is raised."""
def validator(value):
raise ValueError("Wrong!")
config_key = ConfigKey("key", "str", validator=validator)
with pytest.raises(InvalidConfigValue):
config_key.parse("value")
def test_parse_with_validate(self):
"""If the ConfigKey.validate method fails, an error is raised."""
class ValidatedConfigKey(ConfigKey):
def validate(self, value):
raise ValueError("Wrong!")
config_key = ValidatedConfigKey("key", "str")
with pytest.raises(InvalidConfigValue):
config_key.parse("value")
class TestConfig:
def test_keys(self):
"""Config.keys return a sorted list of ConfigKeys."""
keys = [ConfigKey("foo", "str"), ConfigKey("bar", "str")]
config = Config(*keys)
assert config.keys() == sorted(keys, key=attrgetter("name"))
def test_extend(self):
"""Config.extend returns a new Config with additional keys."""
keys = [ConfigKey("foo", "str"), ConfigKey("bar", "str")]
config = Config(*keys)
new_keys = [ConfigKey("baz", "str"), ConfigKey("bza", "str")]
new_config = config.extend(*new_keys)
assert new_config is not config
all_keys = sorted(keys + new_keys, key=attrgetter("name"))
assert new_config.keys() == all_keys
def test_extend_overwrite(self):
"""Config.extend overwrites configuration keys with the same name."""
config = Config(ConfigKey("foo", "str"))
new_config = config.extend(ConfigKey("foo", "int"))
parsed = new_config.parse({"foo": "4"})
assert parsed == {"foo": 4}
def test_parse_empty(self):
"""If not config options are present, an empty dict is returned."""
config = Config()
assert config.parse({}) == {}
def test_parse_none(self):
"""If None is passed as config, an empty dict is returned."""
config = Config()
assert config.parse(None) == {}
def test_parse_converts_values(self):
"""Config.parse convert key values to their types."""
config = Config(ConfigKey("foo", "int"), ConfigKey("bar", "float"))
parsed = config.parse({"foo": "33", "bar": "20.1"})
assert parsed == {"foo": 33, "bar": 20.1}
def test_parse_unknown_key(self):
"""Config.parse ignores unknown keys."""
config = Config(ConfigKey("foo", "str"), ConfigKey("bar", "str"))
parsed = config.parse({"foo": "Foo", "bar": "Bar", "baz": "9"})
assert parsed == {"foo": "Foo", "bar": "Bar"}
def test_parse_missing_key(self):
"""If a required key is missing, an error is raised."""
config = Config(ConfigKey("foo", "str", required=True))
with pytest.raises(MissingConfigKey):
config.parse({})
def test_parse_invalid_value(self):
"""Config.parse raises an error if a value is invalid."""
config = Config(ConfigKey("foo", "int"), ConfigKey("bar", "float"))
with pytest.raises(InvalidConfigValue):
config.parse({"foo": "33", "bar": "invalid!"})
def test_parse_includes_defaults(self):
"""If a config key is missing, the default value is returned."""
config = Config(ConfigKey("foo", "str"), ConfigKey("bar", "str", default=10))
parsed = config.parse({"foo": "Foo"})
assert parsed == {"foo": "Foo", "bar": 10}
| lgpl-3.0 | -941,328,867,736,283,300 | 35.336585 | 85 | 0.57444 | false |
mick-d/nipype | tools/make_examples.py | 10 | 3014 | #!/usr/bin/env python
"""Run the py->rst conversion and run all examples.
This also creates the index.rst file appropriately, makes figures, etc.
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import open
from past.builtins import execfile
# -----------------------------------------------------------------------------
# Library imports
# -----------------------------------------------------------------------------
# Stdlib imports
import os
import sys
from glob import glob
# Third-party imports
# We must configure the mpl backend before making any further mpl imports
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib._pylab_helpers import Gcf
# Local tools
from toollib import *
# -----------------------------------------------------------------------------
# Globals
# -----------------------------------------------------------------------------
examples_header = """
.. _examples:
Examples
========
.. note_about_examples
"""
# -----------------------------------------------------------------------------
# Function defintions
# -----------------------------------------------------------------------------
# These global variables let show() be called by the scripts in the usual
# manner, but when generating examples, we override it to write the figures to
# files with a known name (derived from the script name) plus a counter
figure_basename = None
# We must change the show command to save instead
def show():
allfm = Gcf.get_all_fig_managers()
for fcount, fm in enumerate(allfm):
fm.canvas.figure.savefig('%s_%02i.png' %
(figure_basename, fcount + 1))
_mpl_show = plt.show
plt.show = show
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------
# Work in examples directory
cd('users/examples')
if not os.getcwd().endswith('users/examples'):
raise OSError('This must be run from doc/examples directory')
# Run the conversion from .py to rst file
sh('../../../tools/ex2rst --project Nipype --outdir . ../../../examples')
sh('../../../tools/ex2rst --project Nipype --outdir . ../../../examples/frontiers_paper')
# Make the index.rst file
"""
index = open('index.rst', 'w')
index.write(examples_header)
for name in [os.path.splitext(f)[0] for f in glob('*.rst')]:
#Don't add the index in there to avoid sphinx errors and don't add the
#note_about examples again (because it was added at the top):
if name not in(['index','note_about_examples']):
index.write(' %s\n' % name)
index.close()
"""
# Execute each python script in the directory.
if '--no-exec' in sys.argv:
pass
else:
if not os.path.isdir('fig'):
os.mkdir('fig')
for script in glob('*.py'):
figure_basename = pjoin('fig', os.path.splitext(script)[0])
execfile(script)
plt.close('all')
| bsd-3-clause | -8,356,474,151,126,147,000 | 29.14 | 89 | 0.536165 | false |
lief-project/LIEF | examples/python/authenticode/api_example.py | 1 | 2223 | #!/usr/bin/env python
import lief
import sys
import os
# Parse PE file
pe = lief.parse(sys.argv[1])
sep = (":") if sys.version_info.minor > 7 else ()
# Get authenticode
print(pe.authentihash_md5.hex(*sep)) # 1c:a0:91:53:dc:9a:3a:5f:34:1d:7f:9b:b9:56:69:4d
print(pe.authentihash(lief.PE.ALGORITHMS.SHA_1).hex(*sep)) # 1e:ad:dc:29:1e:db:41:a2:69:c2:ba:ae:4b:fb:9d:31:e7:bb:ab:59
# Check signature according to PKCS #7 and Microsoft documentation
print(pe.verify_signature()) # Return VERIFICATION_FLAGS.OK
bin_ca = None
# Look for the root CA in the PE file
for crt in pe.signatures[0].certificates:
if crt.issuer == "C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Assured ID Root CA":
bin_ca = crt
# Verify CA chain
bundle_path = os.getenv("LIEF_CA_BUNDLE", None) # Path to CA bundle (one can use those from signify:
# signify/certs/authenticode-bundle.pem)
if bundle_path is not None:
# Parse cert bundle and return a list of lief.PE.x509 objects
bundle = lief.PE.x509.parse(bundle_path)
print(bin_ca.is_trusted_by(bundle)) # VERIFICATION_FLAGS.OK
# Get the certificate used by the signer
cert_signer = pe.signatures[0].signers[0].cert
print(cert_signer)
bin_ca.verify(cert_signer) # Verify that cert_signer is signed by the CA
# running with:
# LIEF_CA_BUNDLE=signify/signify/certs/authenticode-bundle.pem python ./authenticode.py avast_free_antivirus_setup_online.exe
#
# 1c:a0:91:53:dc:9a:3a:5f:34:1d:7f:9b:b9:56:69:4d
# 1e:ad:dc:29:1e:db:41:a2:69:c2:ba:ae:4b:fb:9d:31:e7:bb:ab:59
# VERIFICATION_FLAGS.OK
# cert. version : 3
# serial number : 04:09:18:1B:5F:D5:BB:66:75:53:43:B5:6F:95:50:08
# issuer name : C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Assured ID Root CA
# subject name : C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert SHA2 Assured ID Code Signing CA
# issued on : 2013-10-22 12:00:00
# expires on : 2028-10-22 12:00:00
# signed using : RSA with SHA-256
# RSA key size : 2048 bits
# basic constraints : CA=true, max_pathlen=0
# key usage : Digital Signature, Key Cert Sign, CRL Sign
# ext key usage : Code Signing
#
# VERIFICATION_FLAGS.OK
| apache-2.0 | -2,141,882,747,915,137,500 | 38.696429 | 125 | 0.683761 | false |
robclark/chromium | chrome/test/webdriver/test/run_webdriver_tests.py | 9 | 9476 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import optparse
import os
import sys
import types
import unittest
from chromedriver_launcher import ChromeDriverLauncher
import py_unittest_util
import test_paths
# Add the PYTHON_BINDINGS first so that our 'test' module is found instead of
# Python's.
sys.path = [test_paths.PYTHON_BINDINGS] + sys.path
from selenium.webdriver.remote.webdriver import WebDriver
# Implementation inspired from unittest.main()
class Main(object):
"""Main program for running WebDriver tests."""
_options, _args = None, None
TESTS_FILENAME = 'WEBDRIVER_TESTS'
_platform_map = {
'win32': 'win',
'darwin': 'mac',
'linux2': 'linux',
'linux3': 'linux',
}
TEST_PREFIX = 'selenium.test.selenium.webdriver.common.'
def __init__(self):
self._ParseArgs()
self._Run()
def _ParseArgs(self):
"""Parse command line args."""
parser = optparse.OptionParser()
parser.add_option(
'-v', '--verbose', action='store_true', default=False,
help='Output verbosely.')
parser.add_option(
'', '--log-file', type='string', default=None,
help='Provide a path to a file to which the logger will log')
parser.add_option(
'', '--filter', type='string', default='*',
help='Filter for specifying what tests to run, google test style.')
parser.add_option(
'', '--driver-exe', type='string', default=None,
help='Path to the default ChromeDriver executable to use.')
parser.add_option(
'', '--chrome-exe', type='string', default=None,
help='Path to the default Chrome executable to use.')
parser.add_option(
'', '--list', action='store_true', default=False,
help='List tests instead of running them.')
self._options, self._args = parser.parse_args()
# Setup logging - start with defaults
level = logging.WARNING
format = None
if self._options.verbose:
level=logging.DEBUG
format='%(asctime)s %(levelname)-8s %(message)s'
logging.basicConfig(level=level, format=format,
filename=self._options.log_file)
@staticmethod
def _IsTestClass(obj):
"""Returns whether |obj| is a unittest.TestCase."""
return isinstance(obj, (type, types.ClassType)) and \
issubclass(obj, unittest.TestCase)
@staticmethod
def _GetModuleFromName(test_name):
"""Return the module from the given test name.
Args:
test_name: dot-separated string for a module, a test case or a test
method
Examples: omnibox (a module)
omnibox.OmniboxTest (a test case)
omnibox.OmniboxTest.testA (a test method)
Returns:
tuple with first item corresponding to the module and second item
corresponding to the parts of the name that did not specify the module
Example: _GetModuleFromName('my_module.MyClass.testThis') returns
(my_module, ['MyClass', 'testThis'])
"""
parts = test_name.split('.')
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy: raise
for comp in parts[1:]:
if type(getattr(module, comp)) is not types.ModuleType:
break
module = getattr(module, comp)
return (module, parts[len(parts_copy):])
@staticmethod
def _GetTestsFromName(name):
"""Get a list of all test names from the given string.
Args:
name: dot-separated string for a module, a test case or a test method.
Examples: omnibox (a module)
omnibox.OmniboxTest (a test case)
omnibox.OmniboxTest.testA (a test method)
Returns:
[omnibox.OmniboxTest.testA, omnibox.OmniboxTest.testB, ...]
"""
def _GetTestsFromTestCase(class_obj):
"""Return all test method names from given class object."""
return [class_obj.__name__ + '.' + x for x in dir(class_obj) if
x.startswith('test')]
def _GetTestsFromModule(module):
"""Return all test method names from the given module object."""
tests = []
for name in dir(module):
obj = getattr(module, name)
if Main._IsTestClass(obj):
tests.extend([module.__name__ + '.' + x for x in
_GetTestsFromTestCase(obj)])
return tests
(obj, parts) = Main._GetModuleFromName(name)
for comp in parts:
obj = getattr(obj, comp)
if type(obj) == types.ModuleType:
return _GetTestsFromModule(obj)
elif Main._IsTestClass(obj):
      return [obj.__module__ + '.' + x for x in _GetTestsFromTestCase(obj)]
elif type(obj) == types.UnboundMethodType:
return [name]
else:
logging.warn('No tests in "%s"' % name)
return []
@staticmethod
def _EvalDataFrom(filename):
"""Return eval of python code from given file.
The datastructure used in the file will be preserved.
"""
data_file = os.path.join(filename)
contents = open(data_file).read()
try:
ret = eval(contents, {'__builtins__': None}, None)
except:
print >>sys.stderr, '%s is an invalid data file.' % data_file
raise
return ret
def _GetTestNamesFrom(self, filename):
modules = self._EvalDataFrom(filename)
all_names = modules.get('all', []) + \
modules.get(self._platform_map[sys.platform], [])
args = []
excluded = []
# Find all excluded tests. Excluded tests begin with '-'.
for name in all_names:
if name.startswith('-'): # Exclude
excluded.extend(self._GetTestsFromName(self.TEST_PREFIX + name[1:]))
else:
args.extend(self._GetTestsFromName(self.TEST_PREFIX + name))
for name in excluded:
args.remove(name)
if excluded:
logging.debug('Excluded %d test(s): %s' % (len(excluded), excluded))
return args
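  # Illustrative WEBDRIVER_TESTS layout (an assumption inferred from the parsing
  # above; the test names are placeholders, not real entries):
  #   {
  #     'all': ['alerts_tests', '-alerts_tests.AlertsTests.testSomethingFlaky'],
  #     'win': [], 'mac': [], 'linux': [],
  #   }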
def _FakePytestHack(self):
"""Adds a fake 'pytest' module to the system modules.
A single test in text_handling_tests.py depends on the pytest module for
its test skipping capabilities. Without pytest, we can not run any tests
in the text_handling_tests.py module.
We are not sure we want to add pytest to chrome's third party dependencies,
so for now create a fake pytest module so that we can at least import and
run all the tests that do not depend on it. Those depending on it are
disabled.
"""
import imp
sys.modules['pytest'] = imp.new_module('pytest')
sys.modules['pytest'].mark = imp.new_module('mark')
sys.modules['pytest'].mark.ignore_chrome = lambda x: x
def _Run(self):
"""Run the tests."""
# TODO(kkania): Remove this hack.
self._FakePytestHack()
# In the webdriver tree, the python 'test' module is moved under the root
# 'selenium' one for testing. Here we mimic that by setting the 'selenium'
# module's 'test' attribute and adding 'selenium.test' to the system
# modules.
import selenium
import test
selenium.test = test
sys.modules['selenium.test'] = test
# Load and decide which tests to run.
test_names = self._GetTestNamesFrom(
os.path.join(os.path.dirname(__file__), self.TESTS_FILENAME))
all_tests_suite = unittest.defaultTestLoader.loadTestsFromNames(test_names)
filtered_suite = py_unittest_util.FilterTestSuite(
all_tests_suite, self._options.filter)
if self._options.list is True:
print '\n'.join(py_unittest_util.GetTestNamesFromSuite(filtered_suite))
sys.exit(0)
# The tests expect to run with preset 'driver' and 'webserver' class
# properties.
driver_exe = self._options.driver_exe or test_paths.CHROMEDRIVER_EXE
chrome_exe = self._options.chrome_exe or test_paths.CHROME_EXE
if driver_exe is None or not os.path.exists(os.path.expanduser(driver_exe)):
raise RuntimeError('ChromeDriver could not be found')
if chrome_exe is None or not os.path.exists(os.path.expanduser(chrome_exe)):
raise RuntimeError('Chrome could not be found')
driver_exe = os.path.expanduser(driver_exe)
chrome_exe = os.path.expanduser(chrome_exe)
# Increase number of http client threads to 10 to prevent hangs.
# The hang seems to occur because Chrome keeps too many multiple
# simultaneous connections open to our webserver.
server = ChromeDriverLauncher(
os.path.expanduser(driver_exe), test_paths.WEBDRIVER_TEST_DATA,
http_threads=10).Launch()
driver = WebDriver(server.GetUrl(),
{'chrome.binary': os.path.expanduser(chrome_exe)})
# The tests expect a webserver. Since ChromeDriver also operates as one,
# just pass this dummy class with the right info.
class DummyWebserver:
pass
webserver = DummyWebserver()
webserver.port = server.GetPort()
for test in py_unittest_util.GetTestsFromSuite(filtered_suite):
test.__class__.driver = driver
test.__class__.webserver = webserver
verbosity = 1
if self._options.verbose:
verbosity = 2
result = py_unittest_util.GTestTextTestRunner(verbosity=verbosity).run(
filtered_suite)
server.Kill()
sys.exit(not result.wasSuccessful())
if __name__ == '__main__':
Main()
| bsd-3-clause | 8,846,056,834,166,375,000 | 34.096296 | 80 | 0.647847 | false |
the-engine-room/replication-sprint-02 | crowdataapp/migrations/0009_auto__chg_field_document_stored_validity_rate.py | 1 | 11069 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Document.stored_validity_rate'
db.alter_column(u'crowdataapp_document', 'stored_validity_rate', self.gf('django.db.models.fields.DecimalField')(max_digits=3, decimal_places=2))
def backwards(self, orm):
# Changing field 'Document.stored_validity_rate'
db.alter_column(u'crowdataapp_document', 'stored_validity_rate', self.gf('django.db.models.fields.IntegerField')())
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'crowdataapp.document': {
'Meta': {'object_name': 'Document'},
'document_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'to': u"orm['crowdataapp.DocumentSet']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
'stored_validity_rate': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '3', 'decimal_places': '2'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': "'512'"})
},
u'crowdataapp.documentset': {
'Meta': {'object_name': 'DocumentSet'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'entries_threshold': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'head_html': ('django.db.models.fields.TextField', [], {'default': '\'<!-- <script> or <link rel="stylesheet"> tags go here -->\'', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'128'"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'template_function': ('django.db.models.fields.TextField', [], {'default': "'// Javascript function to insert the document into the DOM.\\n// Receives the URL of the document as its only parameter.\\n// Must be called insertDocument\\n// JQuery is available\\n// resulting element should be inserted into div#document-viewer-container\\nfunction insertDocument(document_url) {\\n}\\n'"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'crowdataapp.documentsetfieldentry': {
'Meta': {'object_name': 'DocumentSetFieldEntry'},
'entry': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['crowdataapp.DocumentSetFormEntry']"}),
'field_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True'})
},
u'crowdataapp.documentsetform': {
'Meta': {'object_name': 'DocumentSetForm'},
'button_text': ('django.db.models.fields.CharField', [], {'default': "u'Submit'", 'max_length': '50'}),
'document_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'form'", 'unique': 'True', 'to': u"orm['crowdataapp.DocumentSet']"}),
'email_copies': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'email_from': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_subject': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[1]', 'to': u"orm['sites.Site']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'crowdataapp.documentsetformentry': {
'Meta': {'object_name': 'DocumentSetFormEntry'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'form_entries'", 'null': 'True', 'to': u"orm['crowdataapp.Document']"}),
'entry_time': ('django.db.models.fields.DateTimeField', [], {}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': u"orm['crowdataapp.DocumentSetForm']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'crowdataapp.documentsetformfield': {
'Meta': {'object_name': 'DocumentSetFormField'},
'autocomplete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'choices': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'field_type': ('django.db.models.fields.IntegerField', [], {}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['crowdataapp.DocumentSetForm']"}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'placeholder_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['crowdataapp'] | mit | -5,602,460,521,301,749,000 | 79.80292 | 400 | 0.564279 | false |
VanirAOSP/external_chromium_org | tools/deep_memory_profiler/tests/mock_gsutil.py | 131 | 1558 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
import zipfile
def main():
ZIP_PATTERN = re.compile('dmprof......\.zip')
assert len(sys.argv) == 6
assert sys.argv[1] == 'cp'
assert sys.argv[2] == '-a'
assert sys.argv[3] == 'public-read'
assert ZIP_PATTERN.match(os.path.basename(sys.argv[4]))
assert sys.argv[5] == 'gs://test-storage/'
zip_file = zipfile.ZipFile(sys.argv[4], 'r')
expected_nameset = set(['heap.01234.0001.heap',
'heap.01234.0002.heap',
'heap.01234.0001.buckets',
'heap.01234.0002.buckets',
'heap.01234.symmap/maps',
'heap.01234.symmap/chrome.uvwxyz.readelf-e',
'heap.01234.symmap/chrome.abcdef.nm',
'heap.01234.symmap/files.json'])
assert set(zip_file.namelist()) == expected_nameset
heap_1 = zip_file.getinfo('heap.01234.0001.heap')
assert heap_1.CRC == 763099253
assert heap_1.file_size == 1107
buckets_1 = zip_file.getinfo('heap.01234.0001.buckets')
assert buckets_1.CRC == 2632528901
assert buckets_1.file_size == 2146
nm_chrome = zip_file.getinfo('heap.01234.symmap/chrome.abcdef.nm')
assert nm_chrome.CRC == 2717882373
assert nm_chrome.file_size == 131049
zip_file.close()
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 5,369,982,917,557,783,000 | 29.54902 | 72 | 0.607831 | false |
kevinlondon/youtube-dl | youtube_dl/extractor/jeuxvideo.py | 85 | 1990 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class JeuxVideoIE(InfoExtractor):
_VALID_URL = r'http://.*?\.jeuxvideo\.com/.*/(.*?)\.htm'
_TESTS = [{
'url': 'http://www.jeuxvideo.com/reportages-videos-jeux/0004/00046170/tearaway-playstation-vita-gc-2013-tearaway-nous-presente-ses-papiers-d-identite-00115182.htm',
'md5': '046e491afb32a8aaac1f44dd4ddd54ee',
'info_dict': {
'id': '114765',
'ext': 'mp4',
'title': 'Tearaway : GC 2013 : Tearaway nous présente ses papiers d\'identité',
'description': 'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.',
},
}, {
'url': 'http://www.jeuxvideo.com/videos/chroniques/434220/l-histoire-du-jeu-video-la-saturn.htm',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title = mobj.group(1)
webpage = self._download_webpage(url, title)
title = self._html_search_meta('name', webpage)
config_url = self._html_search_regex(
r'data-src="(/contenu/medias/video.php.*?)"',
webpage, 'config URL')
config_url = 'http://www.jeuxvideo.com' + config_url
video_id = self._search_regex(
r'id=(\d+)',
config_url, 'video ID')
config = self._download_json(
config_url, title, 'Downloading JSON config')
formats = [{
'url': source['file'],
'format_id': source['label'],
'resolution': source['label'],
} for source in reversed(config['sources'])]
return {
'id': video_id,
'title': title,
'formats': formats,
'description': self._og_search_description(webpage),
'thumbnail': config.get('image'),
}
| unlicense | -6,721,072,622,530,988,000 | 34.446429 | 172 | 0.573804 | false |
sacharya/nova | nova/openstack/common/rpc/serializer.py | 72 | 1600 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides the definition of an RPC serialization handler"""
import abc
class Serializer(object):
"""Generic (de-)serialization definition base class."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def serialize_entity(self, context, entity):
"""Serialize something to primitive form.
:param context: Security context
:param entity: Entity to be serialized
:returns: Serialized form of entity
"""
pass
@abc.abstractmethod
def deserialize_entity(self, context, entity):
"""Deserialize something from primitive form.
:param context: Security context
:param entity: Primitive to be deserialized
:returns: Deserialized form of entity
"""
pass
class NoOpSerializer(Serializer):
"""A serializer that does nothing."""
def serialize_entity(self, context, entity):
return entity
def deserialize_entity(self, context, entity):
return entity
| apache-2.0 | -2,485,294,463,701,161,000 | 29.769231 | 78 | 0.68125 | false |
RyanSkraba/beam | sdks/python/apache_beam/io/gcp/bigquery_io_read_it_test.py | 7 | 2252 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Dataflow job that counts the number of rows in a BQ table.
Can be configured to simulate slow reading for a given number of rows.
"""
from __future__ import absolute_import
import logging
import unittest
from hamcrest.core.core.allof import all_of
from nose.plugins.attrib import attr
from apache_beam.io.gcp import bigquery_io_read_pipeline
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
class BigqueryIOReadIT(unittest.TestCase):
DEFAULT_DATASET = "big_query_import_export"
DEFAULT_TABLE_PREFIX = "export_"
NUM_RECORDS = {"empty": 0,
"1M": 10592,
"1G": 11110839,
"1T": 11110839000,}
def run_bigquery_io_read_pipeline(self, input_size):
test_pipeline = TestPipeline(is_integration_test=True)
pipeline_verifiers = [PipelineStateMatcher(),]
extra_opts = {'input_table': self.DEFAULT_DATASET + "." +
self.DEFAULT_TABLE_PREFIX + input_size,
'num_records': self.NUM_RECORDS[input_size],
'on_success_matcher': all_of(*pipeline_verifiers)}
bigquery_io_read_pipeline.run(test_pipeline.get_full_options_as_args(
**extra_opts))
@attr('IT')
def test_bigquery_read_1M_python(self):
self.run_bigquery_io_read_pipeline('1M')
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| apache-2.0 | 6,066,251,425,929,830,000 | 34.746032 | 74 | 0.706483 | false |
elthariel/dff | ui/console/console.py | 1 | 4149 | # DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2010 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Christophe Malinge <[email protected]>
# Frederic Baguelin <[email protected]>
#
import sys,string, os, traceback, types, completion, signal
import line_to_arguments
from cmd import *
#from api.vfs import *
#from api.taskmanager.taskmanager import TaskManager
from api.manager.manager import ApiManager
from ui.console.complete_raw_input import complete_raw_input
from ui.history import history
PROMPT = "dff / > "
INTRO = "\nWelcome to the Digital Forensic Framework\n"
IDENTCHARS = string.ascii_letters + string.digits + '\ _='
class console(Cmd):
def __init__(self, completekey='tab', stdin=None, stdout=None):
Cmd.__init__(self, completekey, stdin, stdout)
self.history = history()
self.api = ApiManager()
self.vfs = self.api.vfs()
self.taskmanager = self.api.TaskManager()
self.line_to_arguments = line_to_arguments.Line_to_arguments()
self.old_completer = ""
self.prompt = "dff / > "
self.intro = "\n##########################################\n\
# Welcome on Digital Forensics Framework #\n\
##########################################\n"
self.stdin = self
self.completekey = '\t'
self.comp_raw = complete_raw_input(self)
self.completion = completion.Completion(self.comp_raw)
if os.name == 'posix':
signal.signal(signal.SIGTSTP, self.bg)
def bg(self, signum, trace):
if self.taskmanager.current_proc:
proc = self.taskmanager.current_proc
proc.exec_flags += ["thread"]
print "\n\n[" + str(proc.pid) + "]" + " background " + proc.name
self.taskmanager.current_proc = None
self.cmdloop()
def precmd(self, line):
return line
def postcmd(self, stop, line):
self.prompt = "dff " + self.vfs.getcwd().path + "/" + self.vfs.getcwd().name + " > "
return stop
def preloop(self):
return
def postloop(self):
print "Exiting..."
def onecmd(self, line):
try:
if line == 'exit' or line == 'quit':
return 'stop'
exc_list = self.line_to_arguments.generate(line)
if exc_list != None and len(exc_list) > 0:
for exc in exc_list:
exec_type = ["console"]
if line[-1:] == "&":
exec_type += ["thread"]
for cmd, args in exc.iteritems():
if cmd != None:
self.history.add(line.strip())
self.taskmanager.add(cmd, args,exec_type)
else:
return self.emptyline()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, None, sys.stdout)
def emptyline(self):
pass
def default(self, line):
try:
exec(line) in self._locals, self._globals
except Exception, e:
print e.__class__, ":", e
def cmdloop(self, intro=None):
self.preloop()
if self.intro:
print self.intro
self.intro = None
else:
print ''
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
line = self.comp_raw.raw_input()
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
def complete(self, line, begidx):
line = str(line).strip('\n')
self.completion_matches = self.completion.complete(line, begidx)
try:
return self.completion_matches
except IndexError:
return None
| gpl-2.0 | 5,961,799,795,319,090,000 | 31.928571 | 92 | 0.590986 | false |
kwurst/grading-scripts | assignmentconvert.py | 1 | 1952 | # Copyright (C) 2014 Karl R. Wurst
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
import argparse
import os
from assignment import Assignment
from command import Command
class LabConvert(object):
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('config', help='JSON configuration file')
parser.add_argument(
'-v', '--verbose',
help='increase output verbosity',
action='store_true'
)
args = parser.parse_args()
Command.set_default_verbosity(args.verbose)
self._a2pdf = Command(
'a2pdf --noperl-syntax --noline-numbers "{ins}" -o "{ins}.pdf"')
self._pdfcat = Command('pdftk "{ins}" cat output "{outs}"')
self._create_log = Command('git log > log.txt')
self._rm = Command('rm "{ins}"')
Assignment(args.config).accept(self.process_submission)
def process_submission(self, directory, files):
self._create_log()
self._a2pdf.each(files + ['log.txt'])
outpdf = directory.name + '.pdf'
pdfs = [str(f) + '.pdf' for f in files] + [directory/'log.txt.pdf']
self._pdfcat(pdfs, outpdf)
self._rm(pdfs)
self._rm(directory/'log.txt')
if __name__ == '__main__':
LabConvert()
| gpl-3.0 | -1,461,167,914,750,726,700 | 36.538462 | 76 | 0.650102 | false |
anisridhar/AudioShop | analysisClass.py | 1 | 5377 | import pyaudio
import wave
import sys
import time
import cv2
import numpy as np
import os
from Tkinter import *
from pydubtest import play, make_chunks
from pydub import AudioSegment
from threading import Thread
from vidAnalysis import vid2SoundFile
from eventBasedAnimationClass import EventBasedAnimationClass
import imagesDemo1
from buttonClass import button
from TITLEclass import TITLE
from barePageClass import barePage
from audioEditClass import AUDIOEDIT
from fingerTrackingClass import FINGERTRACKING
class ANALYSIS(barePage):
    # allows the user to compare the original recording with the audio recovered from video analysis
def __init__(self,width,height):
super(ANALYSIS,self).__init__(width,height)
self.started = False
def initAnalysis(self):
self.next = None
self.song = song1 = AudioSegment.from_wav("originalMusic.wav")
song1 = self.song[1000*self.start:1000*self.end]
self.song2 = song2 = vid2SoundFile(self.start,self.end,self.fingerData)
#initializing audio trackBars
self.bar1 = trackBar(song1,self.width,self.height/3)
self.bar2 = trackBar(song2,self.width,self.height*2/3)
#getting new timerDelay
self.timerDelay = int(round(float(len(song1)/(self.bar1.rightEdge-self.bar1.leftEdge))))
def draw(self,canvas):
canvas.create_rectangle(0,0,self.width,self.height,fill="black")
text1 = "Music from original audio file"
text2 = "Music from Video Analysis"
canvas.create_text(self.width/2,self.height/3-50, text = text1,fill="white")
canvas.create_text(self.width/2,self.height*2/3-50,text=text2,fill="white")
self.bar1.draw(canvas)
self.bar2.draw(canvas)
def onMousePressed(self,event):
self.bar1.onMousePressed(event)
self.bar2.onMousePressed(event)
def onTimerFired(self):
if self.started:
self.bar1.onTimerFired()
self.bar2.onTimerFired()
def onKeyPressed(self,event):
self.bar2.onKeyPressed(event)
if event.keysym == "Right":
self.song = self.song[:1000*self.start] + self.bar2.song + self.song[1000*self.end:]
self.next = 1
class trackBar(object):
#creates a trackbar
def __init__(self,song,width,cy):
self.song = song
self.width = width
self.cy = cy
self.leftEdge = self.width/4
self.rightEdge = 3*self.width/4
self.trackHeight = 30
self.lineHeight = self.trackHeight*2
self.controlWidth = self.trackHeight
self.control = "play"
#self.timerDelay = int(round(float(len(self.song)/(self.rightEdge-self.leftEdge))))
self.trackX = self.leftEdge
self.recordingStart = 0
def onMousePressed(self,event):
if (self.leftEdge-self.controlWidth-5 <= event.x <= self.leftEdge-5 and
self.cy-self.trackHeight/2 <= event.y <= self.cy+self.trackHeight/2):
self.control = "pause" if self.control == "play" else "play"
if self.control == "pause": self.getAudioThread()
elif self.control == "play" and self.trackX == self.rightEdge:
self.recordingStart = 0
self.trackX = self.leftEdge
def getAudioThread(self):
self.t = Thread(target = self.playAudio)
self.t.start()
def playAudio(self):
#taken from source and modified: http://people.csail.mit.edu/hubert/pyaudio/
song = self.song[self.recordingStart:]
#below is taken from a module
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(song.sample_width),
channels=song.channels,
rate=song.frame_rate,
output=True)
# break audio into half-second chunks (to allows keyboard interrupts)
startTime = time.time()
for chunk in make_chunks(song, 500):
#modified the area below to suit purposes of the program
if self.control == "play":
self.recordingStart += int(round(1000*(time.time() - startTime)))
stream.stop_stream()
stream.close()
p.terminate()
return
stream.write(chunk._data)
self.recordingStart = 0
stream.stop_stream()
stream.close()
p.terminate()
def drawStatusLine(self,canvas):
(x0,y0) = (self.trackX,self.cy-self.lineHeight/2)
(x1,y1) = (self.trackX,self.cy+self.lineHeight/2)
canvas.create_line(x0,y0,x1,y1,fill="white")
def onTimerFired(self):
if self.control == "pause": self.trackX += 1
if self.trackX >= self.rightEdge:
self.trackX = self.rightEdge
def draw(self,canvas):
self.drawBar(canvas)
self.drawStatusLine(canvas)
if self.control == "play": self.drawPlay(canvas)
elif self.control == "pause": self.drawPause(canvas)
def drawBar(self,canvas):
(x0,y0) = (self.leftEdge,self.cy-self.trackHeight/2)
(x1,y1) = (self.rightEdge,self.cy+self.trackHeight/2)
canvas.create_rectangle(x0,y0,x1,y1,fill="blue")
def drawPlay(self,canvas):
v1 = (self.leftEdge-self.controlWidth - 5,self.cy-self.trackHeight/2)
v2 = (self.leftEdge-self.controlWidth-5,self.cy+self.trackHeight/2)
v3 = (self.leftEdge-5,self.cy)
canvas.create_polygon(v1,v2,v3,fill="purple")
def drawPause(self,canvas):
rectangleWidth = self.controlWidth/3
#creating first rectangle
r01 = (x01,y01) = (self.leftEdge-self.controlWidth - 5,self.cy-self.trackHeight/2)
r02 = (x02,y02) = (x01+rectangleWidth,self.cy+self.trackHeight/2)
canvas.create_rectangle(r01,r02,fill="purple")
# creating second rectangle
r11 = (x11,y11) = (x01+2*rectangleWidth-5,y01)
r12 = (x11+rectangleWidth,y02)
canvas.create_rectangle(r11,r12,fill="purple")
def onKeyPressed(self,event):
if event.keysym == "Up":
self.song += 1
elif event.keysym == "Down":
self.song -= 1
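# --- Illustrative sketch (not part of the original program) ---
# A minimal, standalone version of the chunked-playback idea used in
# trackBar.playAudio(): stream a pydub AudioSegment through pyaudio in small
# chunks so playback can be interrupted between chunks. The file name and
# chunk size below are assumptions for illustration only.
def _exampleChunkedPlayback(path="originalMusic.wav", chunkMillis=500):
    segment = AudioSegment.from_wav(path)
    p = pyaudio.PyAudio()
    stream = p.open(format=p.get_format_from_width(segment.sample_width),
                    channels=segment.channels,
                    rate=segment.frame_rate,
                    output=True)
    try:
        # writing one chunk at a time gives the caller a chance to stop playback
        for chunk in make_chunks(segment, chunkMillis):
            stream.write(chunk._data)
    finally:
        stream.stop_stream()
        stream.close()
        p.terminate()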
| mit | -1,557,240,659,029,682,200 | 30.444444 | 90 | 0.7175 | false |
Cinntax/home-assistant | homeassistant/helpers/state.py | 1 | 8248 | """Helpers that help with state related things."""
import asyncio
import datetime as dt
import json
import logging
from collections import defaultdict
from types import ModuleType, TracebackType
from typing import Awaitable, Dict, Iterable, List, Optional, Tuple, Type, Union
from homeassistant.loader import bind_hass, async_get_integration, IntegrationNotFound
import homeassistant.util.dt as dt_util
from homeassistant.components.notify import ATTR_MESSAGE, SERVICE_NOTIFY
from homeassistant.components.sun import STATE_ABOVE_HORIZON, STATE_BELOW_HORIZON
from homeassistant.components.mysensors.switch import ATTR_IR_CODE, SERVICE_SEND_IR_CODE
from homeassistant.components.cover import ATTR_POSITION, ATTR_TILT_POSITION
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_OPTION,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
SERVICE_LOCK,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_UNLOCK,
SERVICE_OPEN_COVER,
SERVICE_CLOSE_COVER,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
STATE_CLOSED,
STATE_HOME,
STATE_LOCKED,
STATE_NOT_HOME,
STATE_OFF,
STATE_ON,
STATE_OPEN,
STATE_UNKNOWN,
STATE_UNLOCKED,
SERVICE_SELECT_OPTION,
)
from homeassistant.core import Context, State, DOMAIN as HASS_DOMAIN
from homeassistant.util.async_ import run_coroutine_threadsafe
from .typing import HomeAssistantType
_LOGGER = logging.getLogger(__name__)
GROUP_DOMAIN = "group"
# Update this dict of lists when new services are added to HA.
# Each item is a service with a list of required attributes.
SERVICE_ATTRIBUTES = {
SERVICE_NOTIFY: [ATTR_MESSAGE],
SERVICE_SEND_IR_CODE: [ATTR_IR_CODE],
SERVICE_SELECT_OPTION: [ATTR_OPTION],
SERVICE_SET_COVER_POSITION: [ATTR_POSITION],
SERVICE_SET_COVER_TILT_POSITION: [ATTR_TILT_POSITION],
}
# Update this dict when new services are added to HA.
# Each item is a service with a corresponding state.
SERVICE_TO_STATE = {
SERVICE_TURN_ON: STATE_ON,
SERVICE_TURN_OFF: STATE_OFF,
SERVICE_ALARM_ARM_AWAY: STATE_ALARM_ARMED_AWAY,
SERVICE_ALARM_ARM_HOME: STATE_ALARM_ARMED_HOME,
SERVICE_ALARM_DISARM: STATE_ALARM_DISARMED,
SERVICE_ALARM_TRIGGER: STATE_ALARM_TRIGGERED,
SERVICE_LOCK: STATE_LOCKED,
SERVICE_UNLOCK: STATE_UNLOCKED,
SERVICE_OPEN_COVER: STATE_OPEN,
SERVICE_CLOSE_COVER: STATE_CLOSED,
}
class AsyncTrackStates:
"""
Record the time when the with-block is entered.
Add all states that have changed since the start time to the return list
when the with-block is exited.
Must be run within the event loop.
"""
def __init__(self, hass: HomeAssistantType) -> None:
"""Initialize a TrackStates block."""
self.hass = hass
self.states: List[State] = []
# pylint: disable=attribute-defined-outside-init
def __enter__(self) -> List[State]:
"""Record time from which to track changes."""
self.now = dt_util.utcnow()
return self.states
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
"""Add changes states to changes list."""
self.states.extend(get_changed_since(self.hass.states.async_all(), self.now))
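# --- Illustrative usage sketch (not part of the original helpers) ---
# A minimal example of how AsyncTrackStates is meant to be used inside the
# event loop: collect every state that changed while a service call ran.
# The service and entity id below are assumptions for illustration only.
async def _example_track_changed_states(hass: HomeAssistantType) -> List[State]:
    """Return the states that changed while a sample service call ran."""
    with AsyncTrackStates(hass) as changed_states:
        # Any state change recorded between here and the end of the block
        # ends up in changed_states.
        await hass.services.async_call(
            "homeassistant", "update_entity", {"entity_id": "light.example"}, blocking=True
        )
    return changed_states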
def get_changed_since(
states: Iterable[State], utc_point_in_time: dt.datetime
) -> List[State]:
"""Return list of states that have been changed since utc_point_in_time."""
return [state for state in states if state.last_updated >= utc_point_in_time]
@bind_hass
def reproduce_state(
hass: HomeAssistantType,
states: Union[State, Iterable[State]],
blocking: bool = False,
) -> None:
"""Reproduce given state."""
return run_coroutine_threadsafe( # type: ignore
async_reproduce_state(hass, states, blocking), hass.loop
).result()
@bind_hass
async def async_reproduce_state(
hass: HomeAssistantType,
states: Union[State, Iterable[State]],
blocking: bool = False,
context: Optional[Context] = None,
) -> None:
"""Reproduce a list of states on multiple domains."""
if isinstance(states, State):
states = [states]
to_call: Dict[str, List[State]] = defaultdict(list)
for state in states:
to_call[state.domain].append(state)
async def worker(domain: str, states_by_domain: List[State]) -> None:
try:
integration = await async_get_integration(hass, domain)
except IntegrationNotFound:
_LOGGER.warning(
"Trying to reproduce state for unknown integration: %s", domain
)
return
try:
platform: Optional[ModuleType] = integration.get_platform("reproduce_state")
except ImportError:
platform = None
if platform:
await platform.async_reproduce_states( # type: ignore
hass, states_by_domain, context=context
)
else:
await async_reproduce_state_legacy(
hass, domain, states_by_domain, blocking=blocking, context=context
)
if to_call:
# run all domains in parallel
await asyncio.gather(
*(worker(domain, data) for domain, data in to_call.items())
)
@bind_hass
async def async_reproduce_state_legacy(
hass: HomeAssistantType,
domain: str,
states: Iterable[State],
blocking: bool = False,
context: Optional[Context] = None,
) -> None:
"""Reproduce given state."""
to_call: Dict[Tuple[str, str], List[str]] = defaultdict(list)
if domain == GROUP_DOMAIN:
service_domain = HASS_DOMAIN
else:
service_domain = domain
for state in states:
if hass.states.get(state.entity_id) is None:
_LOGGER.warning(
"reproduce_state: Unable to find entity %s", state.entity_id
)
continue
domain_services = hass.services.async_services().get(service_domain)
if not domain_services:
_LOGGER.warning("reproduce_state: Unable to reproduce state %s (1)", state)
continue
service = None
for _service in domain_services.keys():
if (
_service in SERVICE_ATTRIBUTES
and all(
attr in state.attributes for attr in SERVICE_ATTRIBUTES[_service]
)
or _service in SERVICE_TO_STATE
and SERVICE_TO_STATE[_service] == state.state
):
service = _service
if (
_service in SERVICE_TO_STATE
and SERVICE_TO_STATE[_service] == state.state
):
break
if not service:
_LOGGER.warning("reproduce_state: Unable to reproduce state %s (2)", state)
continue
# We group service calls for entities by service call
# json used to create a hashable version of dict with maybe lists in it
key = (service, json.dumps(dict(state.attributes), sort_keys=True))
to_call[key].append(state.entity_id)
domain_tasks: List[Awaitable[Optional[bool]]] = []
for (service, service_data), entity_ids in to_call.items():
data = json.loads(service_data)
data[ATTR_ENTITY_ID] = entity_ids
domain_tasks.append(
hass.services.async_call(service_domain, service, data, blocking, context)
)
if domain_tasks:
await asyncio.wait(domain_tasks)
def state_as_number(state: State) -> float:
"""
Try to coerce our state to a number.
Raises ValueError if this is not possible.
"""
if state.state in (
STATE_ON,
STATE_LOCKED,
STATE_ABOVE_HORIZON,
STATE_OPEN,
STATE_HOME,
):
return 1
if state.state in (
STATE_OFF,
STATE_UNLOCKED,
STATE_UNKNOWN,
STATE_BELOW_HORIZON,
STATE_CLOSED,
STATE_NOT_HOME,
):
return 0
return float(state.state)
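# --- Illustrative sketch (not part of the original helpers) ---
# state_as_number() maps a handful of symbolic states onto 0/1 and otherwise
# falls back to float(); the entity ids below are assumptions for illustration.
def _example_state_as_number() -> Tuple[float, float]:
    """Return the numeric coercion of a locked lock and a numeric sensor."""
    locked = state_as_number(State("lock.front_door", STATE_LOCKED))  # -> 1
    temperature = state_as_number(State("sensor.outside_temp", "21.5"))  # -> 21.5
    return locked, temperature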
| apache-2.0 | 6,869,102,920,101,998,000 | 29.66171 | 88 | 0.639185 | false |
fxa90id/mozillians | mozillians/users/tests/test_tasks.py | 1 | 11847 | from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.test.utils import override_settings
from basket.base import BasketException
from celery.exceptions import Retry
from mock import patch
from nose.tools import eq_, ok_
from mozillians.common.tests import TestCase
from mozillians.users.tasks import (lookup_user_task, remove_incomplete_accounts,
subscribe_user_task, subscribe_user_to_basket,
unsubscribe_from_basket_task,
unsubscribe_user_task, update_email_in_basket)
from mozillians.users.tests import UserFactory
class IncompleteAccountsTests(TestCase):
"""Incomplete accounts removal tests."""
@patch('mozillians.users.tasks.datetime')
def test_remove_incomplete_accounts(self, datetime_mock):
"""Test remove incomplete accounts."""
complete_user = UserFactory.create(vouched=False,
date_joined=datetime(2012, 01, 01))
complete_vouched_user = UserFactory.create(date_joined=datetime(2013, 01, 01))
incomplete_user_not_old = UserFactory.create(date_joined=datetime(2013, 01, 01),
userprofile={'full_name': ''})
incomplete_user_old = UserFactory.create(date_joined=datetime(2012, 01, 01),
userprofile={'full_name': ''})
datetime_mock.now.return_value = datetime(2013, 01, 01)
remove_incomplete_accounts(days=0)
ok_(User.objects.filter(id=complete_user.id).exists())
ok_(User.objects.filter(id=complete_vouched_user.id).exists())
ok_(User.objects.filter(id=incomplete_user_not_old.id).exists())
ok_(not User.objects.filter(id=incomplete_user_old.id).exists())
class BasketTests(TestCase):
@override_settings(CELERY_ALWAYS_EAGER=True)
@patch('mozillians.users.tasks.BASKET_ENABLED', True)
@patch('mozillians.users.tasks.waffle.switch_is_active')
@patch('mozillians.users.tasks.unsubscribe_user_task')
@patch('mozillians.users.tasks.subscribe_user_task')
@patch('mozillians.users.tasks.lookup_user_task')
@patch('mozillians.users.tasks.basket')
def test_change_email(self, basket_mock, lookup_mock, subscribe_mock, unsubscribe_mock,
switch_is_active_mock):
# Create a new user
old_email = '[email protected]'
# We need vouched=False in order to avoid triggering a basket_update through signals.
user = UserFactory.create(email=old_email, vouched=False)
new_email = '[email protected]'
# Enable basket.
switch_is_active_mock.return_value = True
# Mock all the calls to basket.
basket_mock.lookup_user.return_value = {
'email': old_email, # the old value
'newsletters': ['foo', 'bar']
}
basket_mock.unsubscribe.return_value = {
'result': 'ok',
}
basket_mock.subscribe.return_value = {
'token': 'new token',
}
lookup_mock.reset_mock()
subscribe_mock.reset_mock()
unsubscribe_mock.reset_mock()
# When a user's email is changed, their old email is unsubscribed
# from all newsletters related to mozillians.org and their new email is subscribed to them.
update_email_in_basket(user.email, new_email)
# Verify subtask calls and call count
ok_(lookup_mock.subtask.called)
eq_(lookup_mock.subtask.call_count, 1)
ok_(subscribe_mock.subtask.called)
eq_(subscribe_mock.subtask.call_count, 1)
ok_(unsubscribe_mock.subtask.called)
eq_(unsubscribe_mock.subtask.call_count, 1)
# Verify call arguments
lookup_mock.subtask.assert_called_with((user.email,))
unsubscribe_mock.subtask.called_with(({'token': 'new token',
'email': '[email protected]',
'newsletters': ['foo', 'bar']},))
subscribe_mock.subtask.called_with(('[email protected]',))
@patch('mozillians.users.tasks.waffle.switch_is_active')
@patch('mozillians.users.tasks.unsubscribe_user_task')
@patch('mozillians.users.tasks.lookup_user_task')
@patch('mozillians.users.tasks.basket')
def test_unsubscribe_from_basket_task(self, basket_mock, lookup_mock, unsubscribe_mock,
switch_is_active_mock):
switch_is_active_mock.return_value = True
user = UserFactory.create(email='[email protected]')
basket_mock.lookup_user.return_value = {
'email': user.email, # the old value
'token': 'token',
'newsletters': ['foo', 'bar']
}
lookup_mock.reset_mock()
unsubscribe_mock.reset_mock()
with patch('mozillians.users.tasks.BASKET_ENABLED', True):
unsubscribe_from_basket_task(user.email, ['foo'])
eq_(lookup_mock.subtask.call_count, 1)
eq_(unsubscribe_mock.subtask.call_count, 1)
lookup_mock.subtask.assert_called_with((user.email,))
unsubscribe_mock.subtask.called_with((['foo'],))
@override_settings(CELERY_ALWAYS_EAGER=True)
@patch('mozillians.users.tasks.BASKET_ENABLED', True)
@patch('mozillians.users.tasks.waffle.switch_is_active')
@patch('mozillians.users.tasks.subscribe_user_task.subtask')
@patch('mozillians.users.tasks.lookup_user_task.subtask')
def test_subscribe_no_newsletters(self, lookup_mock, subscribe_mock, switch_is_active_mock):
switch_is_active_mock.return_value = True
user = UserFactory.create(vouched=False)
result = subscribe_user_to_basket.delay(user.userprofile.pk)
ok_(lookup_mock.called)
ok_(not subscribe_mock.called)
ok_(not result.get())
@patch('mozillians.users.tasks.basket.lookup_user')
def test_lookup_task_user_not_found(self, lookup_mock):
lookup_mock.side_effect = BasketException(u'User not found')
result = lookup_user_task(email='[email protected]')
eq_(result, {})
@patch('mozillians.users.tasks.lookup_user_task.retry')
@patch('mozillians.users.tasks.basket.lookup_user')
def test_lookup_task_basket_error(self, lookup_mock, retry_mock):
exc = BasketException(u'Error error error')
lookup_mock.side_effect = [exc, None]
retry_mock.side_effect = Retry
with self.assertRaises(Retry):
lookup_user_task(email='[email protected]')
retry_mock.called_with(exc)
def test_subscribe_user_task_no_result_no_email(self):
ok_(not subscribe_user_task(result={}, email=''))
@patch('mozillians.users.tasks.basket.subscribe')
def test_subscribe_user_task_no_email_no_newsletters(self, subscribe_mock):
result = {
'status': 'ok',
'newsletters': ['foo', 'bar', 'mozilla-phone'],
'email': '[email protected]'
}
subscribe_user_task(result=result, email=None)
subscribe_mock.assert_called_with('[email protected]', ['mozilla-phone'],
sync='N', trigger_welcome='N',
source_url=settings.SITE_URL,
api_key='basket_api_key')
@patch('mozillians.users.tasks.basket.subscribe')
def test_subscribe_user_task_no_newsletters(self, subscribe_mock):
result = {
'status': 'ok',
'newsletters': ['foo', 'bar'],
'email': '[email protected]'
}
subscribe_user_task(result=result, email='[email protected]')
subscribe_mock.assert_not_called()
@patch('mozillians.users.tasks.basket.subscribe')
def test_subscribe_user_task(self, subscribe_mock):
result = {
'status': 'ok',
'newsletters': ['foo', 'bar'],
'email': '[email protected]'
}
kwargs = {
'result': result,
'email': '[email protected]',
'newsletters': ['foobar', 'foo']
}
subscribe_user_task(**kwargs)
subscribe_mock.assert_called_with('[email protected]', ['foobar'],
sync='N', trigger_welcome='N',
source_url=settings.SITE_URL,
api_key='basket_api_key')
@patch('mozillians.users.tasks.basket.subscribe')
def test_subscribe_user_task_no_result(self, subscribe_mock):
kwargs = {
'result': {'status': 'error',
'desc': u'User not found'},
'email': '[email protected]',
'newsletters': ['mozilla-phone']
}
subscribe_user_task(**kwargs)
subscribe_mock.assert_called_with('[email protected]', ['mozilla-phone'],
sync='N', trigger_welcome='N',
source_url=settings.SITE_URL,
api_key='basket_api_key')
@patch('mozillians.users.tasks.subscribe_user_task.retry')
@patch('mozillians.users.tasks.basket.subscribe')
def test_subscribe_user_basket_error(self, subscribe_mock, retry_mock):
result = {
'status': 'ok',
'newsletters': ['foo', 'bar'],
'email': '[email protected]'
}
kwargs = {
'result': result,
'email': '[email protected]',
'newsletters': ['foobar', 'foo']
}
exc = BasketException(u'Error error error')
subscribe_mock.side_effect = [exc, None]
retry_mock.side_effect = Retry
with self.assertRaises(Retry):
subscribe_user_task(**kwargs)
retry_mock.called_with(exc)
def test_unsubscribe_user_no_result(self):
ok_(not unsubscribe_user_task(result={}))
@patch('mozillians.users.tasks.basket.unsubscribe')
def test_unsubscribe_user_task_success_no_newsletters(self, unsubscribe_mock):
result = {
'status': 'ok',
'newsletters': ['foo', 'bar', 'mozilla-phone'],
'email': '[email protected]',
'token': 'token'
}
unsubscribe_user_task(result)
unsubscribe_mock.assert_called_with(token='token', email='[email protected]',
newsletters=['mozilla-phone'], optout=False)
@patch('mozillians.users.tasks.basket.unsubscribe')
def test_unsubscribe_user_task_success(self, unsubscribe_mock):
result = {
'status': 'ok',
'newsletters': ['foo', 'bar', 'foobar'],
'email': '[email protected]',
'token': 'token'
}
unsubscribe_user_task(result, newsletters=['foo', 'bar'])
unsubscribe_mock.assert_called_with(token='token', email='[email protected]',
newsletters=['foo', 'bar'], optout=False)
@patch('mozillians.users.tasks.unsubscribe_user_task.retry')
@patch('mozillians.users.tasks.basket.unsubscribe')
def test_unsubscribe_user_basket_error(self, unsubscribe_mock, retry_mock):
result = {
'status': 'ok',
'newsletters': ['foo', 'bar'],
'email': '[email protected]',
'token': 'token'
}
exc = BasketException(u'Error error error')
unsubscribe_mock.side_effect = [exc, None]
retry_mock.side_effect = Retry
with self.assertRaises(Retry):
unsubscribe_user_task(result, newsletters=['foo', 'bar'])
retry_mock.called_with(exc)
| bsd-3-clause | 1,835,935,476,286,033,000 | 41.462366 | 99 | 0.589854 | false |
aladagemre/django-guardian | guardian/core.py | 9 | 5191 | from __future__ import unicode_literals
from itertools import chain
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from guardian.utils import get_identity
from guardian.utils import get_user_obj_perms_model
from guardian.utils import get_group_obj_perms_model
from guardian.compat import get_user_model
class ObjectPermissionChecker(object):
"""
Generic object permissions checker class being the heart of
``django-guardian``.
.. note::
Once checked for a single object, permissions are stored and we don't hit
the database again if another check is called for this object. This is great
for templates, views or other request-based checks (assuming we don't
have hundreds of permissions on a single object, as we fetch all
permissions for the checked object).
On the other hand, if we call ``has_perm`` for perm1/object1, then
change the permission state and call ``has_perm`` again for the same
perm1/object1 on the same instance of ObjectPermissionChecker, we won't see a
difference, as the permissions are already fetched and stored in the cache
dictionary.
"""
def __init__(self, user_or_group=None):
"""
:param user_or_group: should be an ``User``, ``AnonymousUser`` or
``Group`` instance
"""
self.user, self.group = get_identity(user_or_group)
self._obj_perms_cache = {}
def has_perm(self, perm, obj):
"""
Checks if user/group has given permission for object.
:param perm: permission as string, may or may not contain app_label
prefix (if not prefixed, we grab app_label from ``obj``)
:param obj: Django model instance for which permission should be checked
"""
perm = perm.split('.')[-1]
if self.user and not self.user.is_active:
return False
elif self.user and self.user.is_superuser:
return True
return perm in self.get_perms(obj)
def get_perms(self, obj):
"""
Returns list of ``codename``'s of all permissions for given ``obj``.
:param obj: Django model instance for which permission should be checked
"""
User = get_user_model()
ctype = ContentType.objects.get_for_model(obj)
key = self.get_local_cache_key(obj)
if not key in self._obj_perms_cache:
group_model = get_group_obj_perms_model(obj)
group_rel_name = group_model.permission.field.related_query_name()
if self.user:
fieldname = '%s__group__%s' % (
group_rel_name,
User.groups.field.related_query_name(),
)
group_filters = {fieldname: self.user}
else:
group_filters = {'%s__group' % group_rel_name: self.group}
if group_model.objects.is_generic():
group_filters.update({
'%s__content_type' % group_rel_name: ctype,
'%s__object_pk' % group_rel_name: obj.pk,
})
else:
group_filters['%s__content_object' % group_rel_name] = obj
if self.user and not self.user.is_active:
return []
elif self.user and self.user.is_superuser:
perms = list(chain(*Permission.objects
.filter(content_type=ctype)
.values_list("codename")))
elif self.user:
model = get_user_obj_perms_model(obj)
related_name = model.permission.field.related_query_name()
user_filters = {'%s__user' % related_name: self.user}
if model.objects.is_generic():
user_filters.update({
'%s__content_type' % related_name: ctype,
'%s__object_pk' % related_name: obj.pk,
})
else:
user_filters['%s__content_object' % related_name] = obj
perms_qs = Permission.objects.filter(content_type=ctype)
# Query user and group permissions separately and then combine
# the results to avoid a slow query
user_perms_qs = perms_qs.filter(**user_filters)
user_perms = user_perms_qs.values_list("codename", flat=True)
group_perms_qs = perms_qs.filter(**group_filters)
group_perms = group_perms_qs.values_list("codename", flat=True)
perms = list(set(chain(user_perms, group_perms)))
else:
perms = list(set(chain(*Permission.objects
.filter(content_type=ctype)
.filter(**group_filters)
.values_list("codename"))))
self._obj_perms_cache[key] = perms
return self._obj_perms_cache[key]
def get_local_cache_key(self, obj):
"""
Returns cache key for ``_obj_perms_cache`` dict.
"""
ctype = ContentType.objects.get_for_model(obj)
return (ctype.id, obj.pk)
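# --- Illustrative usage sketch (not part of the original module) ---
# Intended flow: build one checker per user and reuse it, so repeated
# has_perm()/get_perms() calls for the same object hit the in-memory cache
# instead of the database. The model and permission names are assumptions
# used purely for illustration.
def _example_checker_usage(user, obj):
    checker = ObjectPermissionChecker(user)
    can_change = checker.has_perm('change_example', obj)
    # The app_label prefix is also accepted and stripped internally.
    can_delete = checker.has_perm('exampleapp.delete_example', obj)
    # This call reuses the permissions cached by the has_perm() calls above.
    return can_change, can_delete, checker.get_perms(obj)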
| bsd-2-clause | 4,431,847,641,564,421,600 | 40.198413 | 80 | 0.577731 | false |
yograterol/django | tests/auth_tests/test_basic.py | 328 | 4643 | from __future__ import unicode_literals
from django.apps import apps
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.auth.tests.custom_user import CustomUser
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import receiver
from django.test import TestCase, override_settings
from django.test.signals import setting_changed
from django.utils import translation
@receiver(setting_changed)
def user_model_swapped(**kwargs):
if kwargs['setting'] == 'AUTH_USER_MODEL':
from django.db.models.manager import ensure_default_manager
# Reset User manager
setattr(User, 'objects', User._default_manager)
ensure_default_manager(User)
apps.clear_cache()
class BasicTestCase(TestCase):
def test_user(self):
"Check that users can be created and can set their password"
u = User.objects.create_user('testuser', '[email protected]', 'testpw')
self.assertTrue(u.has_usable_password())
self.assertFalse(u.check_password('bad'))
self.assertTrue(u.check_password('testpw'))
# Check we can manually set an unusable password
u.set_unusable_password()
u.save()
self.assertFalse(u.check_password('testpw'))
self.assertFalse(u.has_usable_password())
u.set_password('testpw')
self.assertTrue(u.check_password('testpw'))
u.set_password(None)
self.assertFalse(u.has_usable_password())
# Check username getter
self.assertEqual(u.get_username(), 'testuser')
# Check authentication/permissions
self.assertTrue(u.is_authenticated())
self.assertFalse(u.is_staff)
self.assertTrue(u.is_active)
self.assertFalse(u.is_superuser)
# Check API-based user creation with no password
u2 = User.objects.create_user('testuser2', '[email protected]')
self.assertFalse(u2.has_usable_password())
def test_user_no_email(self):
"Check that users can be created without an email"
u = User.objects.create_user('testuser1')
self.assertEqual(u.email, '')
u2 = User.objects.create_user('testuser2', email='')
self.assertEqual(u2.email, '')
u3 = User.objects.create_user('testuser3', email=None)
self.assertEqual(u3.email, '')
def test_anonymous_user(self):
"Check the properties of the anonymous user"
a = AnonymousUser()
self.assertEqual(a.pk, None)
self.assertEqual(a.username, '')
self.assertEqual(a.get_username(), '')
self.assertFalse(a.is_authenticated())
self.assertFalse(a.is_staff)
self.assertFalse(a.is_active)
self.assertFalse(a.is_superuser)
self.assertEqual(a.groups.all().count(), 0)
self.assertEqual(a.user_permissions.all().count(), 0)
def test_superuser(self):
"Check the creation and properties of a superuser"
super = User.objects.create_superuser('super', '[email protected]', 'super')
self.assertTrue(super.is_superuser)
self.assertTrue(super.is_active)
self.assertTrue(super.is_staff)
def test_get_user_model(self):
"The current user model can be retrieved"
self.assertEqual(get_user_model(), User)
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
def test_swappable_user(self):
"The current user model can be swapped out for another"
self.assertEqual(get_user_model(), CustomUser)
with self.assertRaises(AttributeError):
User.objects.all()
@override_settings(AUTH_USER_MODEL='badsetting')
def test_swappable_user_bad_setting(self):
"The alternate user setting must point to something in the format app.model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
@override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
def test_swappable_user_nonexistent_model(self):
"The current user model must point to an installed model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
def test_user_verbose_names_translatable(self):
"Default User model verbose names are translatable (#19945)"
with translation.override('en'):
self.assertEqual(User._meta.verbose_name, 'user')
self.assertEqual(User._meta.verbose_name_plural, 'users')
with translation.override('es'):
self.assertEqual(User._meta.verbose_name, 'usuario')
self.assertEqual(User._meta.verbose_name_plural, 'usuarios')
| bsd-3-clause | -881,401,330,870,592,800 | 39.025862 | 84 | 0.670472 | false |
dhermes/google-cloud-python | tasks/noxfile.py | 34 | 4095 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core"))
@nox.session(python="3.7")
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", "black", *LOCAL_DEPS)
session.run(
"black",
"--check",
"google",
"tests",
"docs",
)
session.run("flake8", "google", "tests")
@nox.session(python="3.6")
def blacken(session):
"""Run black.
Format code to uniform standard.
"""
session.install("black")
session.run(
"black",
"google",
"tests",
"docs",
)
@nox.session(python="3.7")
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
# Install all test dependencies, then install this package in-place.
session.install("mock", "pytest", "pytest-cov")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", ".")
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=97",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=["2.7", "3.5", "3.6", "3.7"])
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=["2.7", "3.7"])
def system(session):
"""Run the system test suite."""
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", "../test_utils/")
session.install("-e", ".")
# Run py.test against the system tests.
if system_test_exists:
session.run("py.test", "--quiet", system_test_path, *session.posargs)
if system_test_folder_exists:
session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
@nox.session(python="3.7")
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=100")
session.run("coverage", "erase")
| apache-2.0 | -4,940,406,954,807,149,000 | 29.110294 | 84 | 0.63663 | false |
yestech/gae-django-template | django/contrib/localflavor/fr/forms.py | 309 | 1747 | """
FR-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
import re
phone_digits_re = re.compile(r'^0\d(\s|\.)?(\d{2}(\s|\.)?){3}\d{2}$')
class FRZipCodeField(RegexField):
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXXX.'),
}
def __init__(self, *args, **kwargs):
super(FRZipCodeField, self).__init__(r'^\d{5}$',
max_length=None, min_length=None, *args, **kwargs)
class FRPhoneNumberField(Field):
"""
Validate local French phone numbers (not international ones).
The correct format is '0X XX XX XX XX'.
'0X.XX.XX.XX.XX' and '0XXXXXXXXX' validate but are corrected to
'0X XX XX XX XX'.
"""
default_error_messages = {
'invalid': _('Phone numbers must be in 0X XX XX XX XX format.'),
}
def clean(self, value):
super(FRPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = re.sub('(\.|\s)', '', smart_unicode(value))
m = phone_digits_re.search(value)
if m:
return u'%s %s %s %s %s' % (value[0:2], value[2:4], value[4:6], value[6:8], value[8:10])
raise ValidationError(self.error_messages['invalid'])
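# --- Illustrative sketch (not part of the original module) ---
# Quick check of how the field normalises its accepted formats; the sample
# numbers are assumptions used purely for illustration.
def _example_fr_phone_cleaning():
    field = FRPhoneNumberField()
    # Dotted and compact inputs both validate and are reformatted with spaces.
    assert field.clean('01.23.45.67.89') == u'01 23 45 67 89'
    assert field.clean('0123456789') == u'01 23 45 67 89'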
class FRDepartmentSelect(Select):
"""
A Select widget that uses a list of FR departments as its choices.
"""
def __init__(self, attrs=None):
from fr_department import DEPARTMENT_ASCII_CHOICES
super(FRDepartmentSelect, self).__init__(attrs, choices=DEPARTMENT_ASCII_CHOICES)
| bsd-3-clause | -1,115,112,162,775,680,900 | 33.254902 | 100 | 0.63652 | false |
msabramo/ansible | lib/ansible/modules/packaging/os/pacman.py | 5 | 15028 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2012, Afterburn <http://github.com/afterburn>
# (c) 2013, Aaron Bull Schaefer <[email protected]>
# (c) 2015, Indrajit Raychaudhuri <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pacman
short_description: Manage packages with I(pacman)
description:
- Manage packages with the I(pacman) package manager, which is used by
Arch Linux and its variants.
version_added: "1.0"
author:
- "Indrajit Raychaudhuri (@indrajitr)"
- "'Aaron Bull Schaefer (@elasticdog)' <[email protected]>"
- "Afterburn"
notes: []
requirements: []
options:
name:
description:
- Name of the package to install, upgrade, or remove.
required: false
default: null
aliases: [ 'pkg', 'package' ]
state:
description:
- Desired state of the package.
required: false
default: "present"
choices: ["present", "absent", "latest"]
recurse:
description:
- When removing a package, also remove its dependencies, provided
that they are not required by other packages and were not
explicitly installed by a user.
required: false
default: no
choices: ["yes", "no"]
version_added: "1.3"
force:
description:
- When removing a package, force removal without any
checks. When used with update_cache, force a redownload
of the repo databases.
required: false
default: no
choices: ["yes", "no"]
version_added: "2.0"
update_cache:
description:
- Whether or not to refresh the master package lists. This can be
run as part of a package installation or as a separate step.
required: false
default: no
choices: ["yes", "no"]
aliases: [ 'update-cache' ]
upgrade:
description:
- Whether or not to upgrade whole system
required: false
default: no
choices: ["yes", "no"]
version_added: "2.0"
'''
RETURN = '''
packages:
description: a list of packages that have been changed
returned: when upgrade is set to yes
type: list of strings
sample: ['package', 'other-package']
'''
EXAMPLES = '''
# Install package foo
- pacman:
name: foo
state: present
# Upgrade package foo
- pacman:
name: foo
state: latest
update_cache: yes
# Remove packages foo and bar
- pacman:
name: foo,bar
state: absent
# Recursively remove package baz
- pacman:
name: baz
state: absent
recurse: yes
# Run the equivalent of "pacman -Sy" as a separate step
- pacman:
update_cache: yes
# Run the equivalent of "pacman -Su" as a separate step
- pacman:
upgrade: yes
# Run the equivalent of "pacman -Syu" as a separate step
- pacman:
update_cache: yes
upgrade: yes
# Run the equivalent of "pacman -Rdd", force remove package baz
- pacman:
name: baz
state: absent
force: yes
'''
import shlex
import os
import re
import sys
def get_version(pacman_output):
"""Take pacman -Qi or pacman -Si output and get the Version"""
lines = pacman_output.split('\n')
for line in lines:
if 'Version' in line:
return line.split(':')[1].strip()
return None
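# --- Illustrative sketch (not part of the original module) ---
# get_version() simply scans for the "Version : ..." line of `pacman -Qi` or
# `pacman -Si` output; the trimmed sample output below is an assumption for
# illustration only. get_version(_SAMPLE_PACMAN_QI_OUTPUT) would return '1.2.3-1'.
_SAMPLE_PACMAN_QI_OUTPUT = """\
Name           : foo
Version        : 1.2.3-1
Architecture   : x86_64
"""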
def query_package(module, pacman_path, name, state="present"):
"""Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, a second boolean to indicate if the package is up-to-date and a third boolean to indicate whether online information were available"""
if state == "present":
lcmd = "%s -Qi %s" % (pacman_path, name)
lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
if lrc != 0:
# package is not installed locally
return False, False, False
# get the version installed locally (if any)
lversion = get_version(lstdout)
rcmd = "%s -Si %s" % (pacman_path, name)
rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
# get the version in the repository
rversion = get_version(rstdout)
if rrc == 0:
# Return True to indicate that the package is installed locally, and the result of the version number comparison
# to determine if the package is up-to-date.
return True, (lversion == rversion), False
# package is installed locally but the remote version could not be fetched; the last True flags the error
return True, True, True
def update_package_db(module, pacman_path):
if module.params["force"]:
args = "Syy"
else:
args = "Sy"
cmd = "%s -%s" % (pacman_path, args)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
module.fail_json(msg="could not update package db")
def upgrade(module, pacman_path):
cmdupgrade = "%s -Suq --noconfirm" % (pacman_path)
cmdneedrefresh = "%s -Qu" % (pacman_path)
rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False)
data = stdout.split('\n')
data.remove('')
packages = []
diff = {
'before': '',
'after': '',
}
if rc == 0:
regex = re.compile('(\w+) ((?:\S+)-(?:\S+)) -> ((?:\S+)-(?:\S+))')
b = []
a = []
for p in data:
m = regex.search(p)
packages.append(m.group(1))
if module._diff:
diff['before'] += "%s-%s\n" % (m.group(1), m.group(2))
diff['after'] += "%s-%s\n" % (m.group(1), m.group(3))
if module.check_mode:
module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff)
rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
if rc == 0:
module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff)
else:
module.fail_json(msg="Could not upgrade")
else:
module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages)
def remove_packages(module, pacman_path, packages):
data = []
diff = {
'before': '',
'after': '',
}
if module.params["recurse"] or module.params["force"]:
if module.params["recurse"]:
args = "Rs"
if module.params["force"]:
args = "Rdd"
if module.params["recurse"] and module.params["force"]:
args = "Rdds"
else:
args = "R"
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
installed, updated, unknown = query_package(module, pacman_path, package)
if not installed:
continue
cmd = "%s -%s %s --noconfirm --noprogressbar" % (pacman_path, args, package)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if module._diff:
d = stdout.split('\n')[2].split(' ')[2:]
for i, pkg in enumerate(d):
d[i] = re.sub('-[0-9].*$', '', d[i].split('/')[-1])
diff['before'] += "%s\n" % pkg
data.append('\n'.join(d))
if rc != 0:
module.fail_json(msg="failed to remove %s" % (package))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c, diff=diff)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pacman_path, state, packages, package_files):
install_c = 0
package_err = []
message = ""
data = []
diff = {
'before': '',
'after': '',
}
to_install_repos = []
to_install_files = []
for i, package in enumerate(packages):
# if the package is installed and state == present or state == latest and is up-to-date then skip
installed, updated, latestError = query_package(module, pacman_path, package)
if latestError and state == 'latest':
package_err.append(package)
if installed and (state == 'present' or (state == 'latest' and updated)):
continue
if package_files[i]:
to_install_files.append(package_files[i])
else:
to_install_repos.append(package)
if to_install_repos:
cmd = "%s -S %s --noconfirm --noprogressbar --needed" % (pacman_path, " ".join(to_install_repos))
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
data = stdout.split('\n')[3].split(' ')[2:]
data = [ i for i in data if i != '' ]
for i, pkg in enumerate(data):
data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
if module._diff:
diff['after'] += "%s\n" % pkg
if rc != 0:
module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_repos), stderr))
install_c += len(to_install_repos)
if to_install_files:
cmd = "%s -U %s --noconfirm --noprogressbar --needed" % (pacman_path, " ".join(to_install_files))
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
data = stdout.split('\n')[3].split(' ')[2:]
data = [ i for i in data if i != '' ]
for i, pkg in enumerate(data):
data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
if module._diff:
diff['after'] += "%s\n" % pkg
if rc != 0:
module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_files), stderr))
install_c += len(to_install_files)
if state == 'latest' and len(package_err) > 0:
message = "But could not ensure 'latest' state for %s package(s) as remote version could not be fetched." % (package_err)
if install_c > 0:
module.exit_json(changed=True, msg="installed %s package(s). %s" % (install_c, message), diff=diff)
module.exit_json(changed=False, msg="package(s) already installed. %s" % (message), diff=diff)
def check_packages(module, pacman_path, packages, state):
would_be_changed = []
diff = {
'before': '',
'after': '',
'before_header': '',
'after_header': ''
}
for package in packages:
installed, updated, unknown = query_package(module, pacman_path, package)
if ((state in ["present", "latest"] and not installed) or
(state == "absent" and installed) or
(state == "latest" and not updated)):
would_be_changed.append(package)
if would_be_changed:
if state == "absent":
state = "removed"
if module._diff and (state == 'removed'):
diff['before_header'] = 'removed'
diff['before'] = '\n'.join(would_be_changed) + '\n'
elif module._diff and ((state == 'present') or (state == 'latest')):
diff['after_header'] = 'installed'
diff['after'] = '\n'.join(would_be_changed) + '\n'
module.exit_json(changed=True, msg="%s package(s) would be %s" % (
len(would_be_changed), state), diff=diff)
else:
module.exit_json(changed=False, msg="package(s) already %s" % state, diff=diff)
def expand_package_groups(module, pacman_path, pkgs):
expanded = []
for pkg in pkgs:
cmd = "%s -Sgq %s" % (pacman_path, pkg)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
# A group was found matching the name, so expand it
for name in stdout.split('\n'):
name = name.strip()
if name:
expanded.append(name)
else:
expanded.append(pkg)
return expanded
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(aliases=['pkg', 'package'], type='list'),
state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
recurse = dict(default=False, type='bool'),
force = dict(default=False, type='bool'),
upgrade = dict(default=False, type='bool'),
update_cache = dict(default=False, aliases=['update-cache'], type='bool')
),
required_one_of = [['name', 'update_cache', 'upgrade']],
supports_check_mode = True)
pacman_path = module.get_bin_path('pacman', True)
p = module.params
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
elif p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p["update_cache"] and not module.check_mode:
update_package_db(module, pacman_path)
if not (p['name'] or p['upgrade']):
module.exit_json(changed=True, msg='Updated the package master lists')
if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']):
module.exit_json(changed=True, msg='Would have updated the package cache')
if p['upgrade']:
upgrade(module, pacman_path)
if p['name']:
pkgs = expand_package_groups(module, pacman_path, p['name'])
pkg_files = []
for i, pkg in enumerate(pkgs):
if re.match(".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z))?$", pkg):
# The package given is a filename, extract the raw pkg name from
# it and store the filename
pkg_files.append(pkg)
pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1])
else:
pkg_files.append(None)
if module.check_mode:
check_packages(module, pacman_path, pkgs, p['state'])
if p['state'] in ['present', 'latest']:
install_packages(module, pacman_path, p['state'], pkgs, pkg_files)
elif p['state'] == 'absent':
remove_packages(module, pacman_path, pkgs)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == "__main__":
main()
| gpl-3.0 | -2,744,254,601,337,594,400 | 32.321508 | 270 | 0.57486 | false |
rishibarve/incubator-airflow | tests/jobs.py | 1 | 61326 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import os
import shutil
import unittest
import six
import socket
from tempfile import mkdtemp
from airflow import AirflowException, settings, models
from airflow.bin import cli
from airflow.executors import SequentialExecutor
from airflow.jobs import BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.utils.db import provide_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.dag_processing import SimpleDagBag
from mock import patch
from sqlalchemy.orm.session import make_transient
from tests.executors.test_executor import TestExecutor
from tests.core import TEST_DAG_FOLDER
from airflow import configuration
configuration.load_test_config()
import sqlalchemy
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
DEV_NULL = '/dev/null'
DEFAULT_DATE = datetime.datetime(2016, 1, 1)
# Include the words "airflow" and "dag" in the file contents, tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
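# --- Illustrative sketch (not part of the original tests) ---
# Shows how the constants above are meant to be combined: write a file that
# merely *looks* like a DAG so the scheduler picks it up (or trips over it)
# during a test. The directory handling is an assumption for illustration only.
def _write_temp_dag_file(dags_folder, parseable=True):
    contents = PARSEABLE_DAG_FILE_CONTENTS if parseable else UNPARSEABLE_DAG_FILE_CONTENTS
    path = os.path.join(dags_folder, TEMP_DAG_FILENAME)
    with open(path, 'w') as dag_file:
        dag_file.write(contents)
    return path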
class BackfillJobTest(unittest.TestCase):
def setUp(self):
self.parser = cli.CLIFactory.get_parser()
self.dagbag = DagBag(include_examples=True)
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_trigger_controller_dag(self):
dag = self.dagbag.get_dag('example_trigger_controller_dag')
target_dag = self.dagbag.get_dag('example_trigger_target_dag')
dag.clear()
target_dag.clear()
scheduler = SchedulerJob()
queue = mock.Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertFalse(queue.append.called)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True
)
job.run()
scheduler = SchedulerJob()
queue = mock.Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertTrue(queue.append.called)
target_dag.clear()
dag.clear()
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('example_bash_operator')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
ignore_first_depends_on_past=True
)
job.run()
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id=='example_bash_operator'
).order_by(DagRun.execution_date).all()
self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
self.assertTrue(drs[0].state == State.SUCCESS)
self.assertTrue(drs[1].execution_date ==
DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(drs[1].state == State.SUCCESS)
dag.clear()
session.close()
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_examples(self):
"""
Test backfilling example dags
"""
# some DAGs really are just examples... but try to make them work!
skip_dags = [
'example_http_operator',
'example_twitter_dag',
'example_trigger_target_dag',
'example_trigger_controller_dag', # tested above
'test_utils', # sleeps forever
]
logger = logging.getLogger('BackfillJobTest.test_backfill_examples')
dags = [
dag for dag in self.dagbag.dags.values()
if 'example_dags' in dag.full_filepath and dag.dag_id not in skip_dags
]
for dag in dags:
dag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True)
job.run()
def test_backfill_ordered_concurrent_execute(self):
dag = DAG(
dag_id='test_backfill_ordered_concurrent_execute',
start_date=DEFAULT_DATE,
schedule_interval="@daily")
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
# test executor history keeps a list
history = executor.history
# check if the order is right. Every loop has a 'pause' (0) to change state
# from RUNNING to SUCCESS.
# 6,0,3,0,3,0,3,0 = 8 loops
self.assertEqual(8, len(history))
loop_count = 0
while len(history) > 0:
queued_tasks = history.pop(0)
if loop_count == 0:
# first loop should contain 6 tasks (3 days x 2 tasks)
self.assertEqual(6, len(queued_tasks))
if loop_count == 2 or loop_count == 4 or loop_count == 6:
# 3 days x 1 task
self.assertEqual(3, len(queued_tasks))
loop_count += 1
def test_backfill_pooled_tasks(self):
"""
Test that queued tasks are executed by BackfillJob
Test for https://github.com/airbnb/airflow/pull/1225
"""
session = settings.Session()
pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
session.add(pool)
session.commit()
dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# run with timeout because this creates an infinite loop if not
# caught
with timeout(seconds=30):
job.run()
ti = TI(
task=dag.get_task('test_backfill_pooled_task'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
"""
Test that backfill respects ignore_depends_on_past
"""
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
# backfill should deadlock
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
def test_cli_backfill_depends_on_past(self):
"""
Test that CLI respects -I argument
"""
dag_id = 'test_dagrun_states_deadlock'
run_date = DEFAULT_DATE + datetime.timedelta(days=1)
args = [
'backfill',
dag_id,
'-l',
'-s',
run_date.isoformat(),
]
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
cli.backfill,
self.parser.parse_args(args))
cli.backfill(self.parser.parse_args(args + ['-I']))
ti = TI(dag.get_task('test_depends_on_past'), run_date)
ti.refresh_from_db()
# task ran
self.assertEqual(ti.state, State.SUCCESS)
dag.clear()
def test_sub_set_subdag(self):
dag = DAG(
'test_sub_set_subdag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
sub_dag = dag.sub_dag(task_regex="leave*",
include_downstream=False,
include_upstream=False)
job = BackfillJob(dag=sub_dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
job.run()
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
dr.run_id)
for ti in dr.get_task_instances():
if ti.task_id == 'leave1' or ti.task_id == 'leave2':
self.assertEqual(State.SUCCESS, ti.state)
else:
self.assertEqual(State.NONE, ti.state)
def test_backfill_fill_blanks(self):
dag = DAG(
'test_backfill_fill_blanks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'},
)
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2')
op3 = DummyOperator(task_id='op3')
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
op6 = DummyOperator(task_id='op6')
dag.clear()
dr = dag.create_dagrun(run_id='test',
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
session = settings.Session()
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == op1.task_id:
ti.state = State.UP_FOR_RETRY
ti.end_date = DEFAULT_DATE
elif ti.task_id == op2.task_id:
ti.state = State.FAILED
elif ti.task_id == op3.task_id:
ti.state = State.SKIPPED
elif ti.task_id == op4.task_id:
ti.state = State.SCHEDULED
elif ti.task_id == op5.task_id:
ti.state = State.UPSTREAM_FAILED
# op6 = None
session.merge(ti)
session.commit()
session.close()
job = BackfillJob(dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
self.assertRaisesRegexp(
AirflowException,
'Some task instances failed',
job.run)
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(dr.state, State.FAILED)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == op2.task_id:
self.assertEqual(ti.state, State.FAILED)
elif ti.task_id == op3.task_id:
self.assertEqual(ti.state, State.SKIPPED)
elif ti.task_id == op5.task_id:
self.assertEqual(ti.state, State.UPSTREAM_FAILED)
def test_backfill_execute_subdag(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
start_date = datetime.datetime.now()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=start_date,
end_date=start_date,
executor=executor,
donot_pickle=True)
job.run()
history = executor.history
subdag_history = history[0]
# check that all 5 task instances of the subdag 'section-1' were executed
self.assertEqual(5, len(subdag_history))
for sdh in subdag_history:
ti = sdh[3]
self.assertIn('section-1-task-', ti.task_id)
subdag.clear()
dag.clear()
class LocalTaskJobTest(unittest.TestCase):
def setUp(self):
pass
@patch.object(LocalTaskJob, "_is_descendant_process")
def test_localtaskjob_heartbeat(self, is_descendant):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
is_descendant.return_value = True
ti.state = State.RUNNING
ti.hostname = socket.getfqdn()
ti.pid = 1
session.merge(ti)
session.commit()
ret = job1.heartbeat_callback()
self.assertEqual(ret, None)
is_descendant.return_value = False
self.assertRaises(AirflowException, job1.heartbeat_callback)
def test_localtaskjob_double_trigger(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = socket.getfqdn()
ti.pid = 1
session.commit()
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
job1 = LocalTaskJob(task_instance=ti_run, ignore_ti_state=True, executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.run)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
class SchedulerJobTest(unittest.TestCase):
# These defaults make the test faster to run
default_scheduler_args = {"file_process_interval": 0,
"processor_poll_interval": 0.5}
def setUp(self):
self.dagbag = DagBag()
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.ImportError).delete()
session.commit()
@staticmethod
def run_single_scheduler_loop_with_no_dags(dags_folder):
"""
Utility function that runs a single scheduler loop without actually
changing/scheduling any dags. This is useful to simulate the other side effects of
running a scheduler loop, e.g. to see what parse errors there are in the
dags_folder.
:param dags_folder: the directory to traverse
        :type dags_folder: str
"""
scheduler = SchedulerJob(
dag_id='this_dag_doesnt_exist', # We don't want to actually run anything
num_runs=1,
subdir=os.path.join(dags_folder))
scheduler.heartrate = 0
scheduler.run()
def test_concurrency(self):
dag_id = 'SchedulerJobTest.test_concurrency'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_nonexistent_queue'
        # It is important that len(tasks) is less than concurrency:
        # previously, scheduler._execute_task_instances only checked the
        # number of tasks once, so if concurrency was 3 we could execute
        # arbitrarily many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = SimpleDagBag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
# create first dag run with 1 running and 1 queued
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.RUNNING
ti2.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(State.RUNNING, dr1.state)
self.assertEqual(2, DAG.get_num_task_instances(dag_id, dag.task_ids,
states=[State.RUNNING], session=session))
# create second dag run
dr2 = scheduler.create_dag_run(dag)
ti3 = TI(task1, dr2.execution_date)
ti4 = TI(task2, dr2.execution_date)
ti3.refresh_from_db()
ti4.refresh_from_db()
# manually set to scheduled so we can pick them up
ti3.state = State.SCHEDULED
ti4.state = State.SCHEDULED
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(State.RUNNING, dr2.state)
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
# check that concurrency is respected
ti1.refresh_from_db()
ti2.refresh_from_db()
ti3.refresh_from_db()
ti4.refresh_from_db()
self.assertEqual(3, DAG.get_num_task_instances(dag_id, dag.task_ids,
states=[State.RUNNING, State.QUEUED], session=session))
self.assertEqual(State.RUNNING, ti1.state)
self.assertEqual(State.RUNNING, ti2.state)
six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
session.close()
def test_execute_helper_reset_orphaned_tasks(self):
session = settings.Session()
dag = DAG(
'test_execute_helper_reset_orphaned_tasks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(1),
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.SCHEDULED
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
ti2.state = State.SCHEDULED
session.commit()
processor = mock.MagicMock()
processor.get_last_finish_time.return_value = None
scheduler = SchedulerJob(num_runs=0, run_duration=0)
executor = TestExecutor()
scheduler.executor = executor
scheduler._execute_helper(processor_manager=processor)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, State.NONE)
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
@provide_session
def evaluate_dagrun(
self,
dag_id,
expected_task_states, # dict of task_id: state
dagrun_state,
run_kwargs=None,
advance_execution_date=False,
session=None):
"""
        Helper for testing DagRun states with simple two-task DAGs.
This is hackish: a dag run is created but its tasks are
run by a backfill.
"""
if run_kwargs is None:
run_kwargs = {}
scheduler = SchedulerJob(**self.default_scheduler_args)
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
if advance_execution_date:
# run a second time to schedule a dagrun after the start_date
dr = scheduler.create_dag_run(dag)
ex_date = dr.execution_date
try:
dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
except AirflowException:
pass
# test tasks
for task_id, expected_state in expected_task_states.items():
task = dag.get_task(task_id)
ti = TI(task, ex_date)
ti.refresh_from_db()
self.assertEqual(ti.state, expected_state)
# load dagrun
dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
dr = dr[0]
dr.dag = dag
self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
"""
DagRuns with one unfinished and one failed root task -> RUNNING
"""
# Run both the failed and successful tasks
scheduler = SchedulerJob(**self.default_scheduler_args)
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
try:
dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
except AirflowException: # Expect an exception since there is a failed task
pass
# Mark the successful task as never having run since we want to see if the
        # dagrun will be in a running state despite having an unfinished task.
session = settings.Session()
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr_state = dr.update_state()
self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
"""
        Test that the scheduler respects start_dates, even when DAGs have run
"""
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > DEFAULT_DATE)
scheduler = SchedulerJob(dag_id,
num_runs=2,
**self.default_scheduler_args)
scheduler.run()
# zero tasks ran
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
        # previously, running this backfill would kick off the Scheduler
        # because it would take the most recent run and start from there.
        # That behavior still exists, but now it will only do so if the run
        # is after the start date
backfill = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
backfill.run()
# one task ran
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
scheduler = SchedulerJob(dag_id,
num_runs=2,
**self.default_scheduler_args)
scheduler.run()
# still one task
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
file_process_interval=0,
processor_poll_interval=0.5,
num_runs=2)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
"""
Test if the scheduler does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=datetime.datetime(2015, 1, 1),
schedule_interval="@once")
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_process_task_instances(self):
"""
Test if _process_task_instances puts the right task instances into the
queue.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
)
def test_scheduler_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_too_early',
start_date=datetime.datetime(2200, 1, 1))
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_run_finished(self):
dag = DAG(
dag_id='test_scheduler_do_not_run_finished',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = State.SUCCESS
session.commit()
session.close()
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(
dag_id='test_scheduler_add_new_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 1)
dag_task2 = DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
"""
        Test that a dagrun will not be scheduled if max_active_runs has been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_fail_dagrun_timeout(self):
"""
        Test that a dagrun will be set to failed if it times out
"""
dag = DAG(
dag_id='test_scheduler_fail_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.dagrun_timeout = datetime.timedelta(seconds=60)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr.start_date = datetime.datetime.now() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
dr2 = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr2)
dr.refresh_from_db(session=session)
self.assertEquals(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
"""
        Test that a dagrun will not be scheduled if max_active_runs has been reached and dagrun_timeout is not reached
        Test that a dagrun will be scheduled if max_active_runs has been reached but dagrun_timeout has also been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag.dagrun_timeout = datetime.timedelta(seconds=60)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
        # Should not be scheduled as DagRun has not timed out and max_active_runs is reached
new_dr = scheduler.create_dag_run(dag)
self.assertIsNone(new_dr)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = datetime.datetime.now() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
new_dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
"""
Test if _process_task_instances only schedules ti's up to max_active_runs
(related to issue AIRFLOW-137)
"""
dag = DAG(
dag_id='test_scheduler_max_active_runs_respected_after_clear',
start_date=DEFAULT_DATE)
dag.max_active_runs = 3
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
# First create up to 3 dagruns in RUNNING state.
scheduler.create_dag_run(dag)
# Reduce max_active_runs to 1
dag.max_active_runs = 1
queue = mock.Mock()
# and schedule them in, so we can check how many
# tasks are put on the queue (should be one, not 3)
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
)
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
"""
Test task instances not queued when pool is full
"""
mock_pool_full.return_value = False
dag = DAG(
dag_id='test_scheduler_verify_pool_full',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full')
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
# Create 2 dagruns, which will create 2 task instances.
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, DEFAULT_DATE)
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
queue = []
scheduler._process_task_instances(dag, queue=queue)
self.assertEquals(len(queue), 2)
dagbag = SimpleDagBag([dag])
        # Recreate part of the scheduler here to kick tasks off to the executor
for ti_key in queue:
task = dag.get_task(ti_key[1])
ti = TI(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
scheduler._execute_task_instances(dagbag,
(State.SCHEDULED,
State.UP_FOR_RETRY))
self.assertEquals(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
"""
Test if the schedule_interval will be auto aligned with the start_date
such that if the start_date coincides with the schedule the first
execution_date will be start_date, otherwise it will be start_date +
interval.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=datetime.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *"
)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, datetime.datetime(2016, 1, 2, 5, 4))
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=datetime.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *"
)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, datetime.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
"""
Checks if tasks that are not taken up by the executor
get rescheduled
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
            # Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEquals(1, len(executor.queued_tasks))
executor.queued_tasks.clear()
do_schedule()
self.assertEquals(2, len(executor.queued_tasks))
def test_retry_still_in_executor(self):
"""
        Checks that the scheduler does not put a task in limbo when it is retried
        but is still present in the executor.
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_retry_still_in_executor',
start_date=DEFAULT_DATE,
schedule_interval="@once")
dag_task1 = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
            # Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEquals(1, len(executor.queued_tasks))
def run_with_error(task):
try:
task.run()
except AirflowException:
pass
ti_tuple = six.next(six.itervalues(executor.queued_tasks))
(command, priority, queue, ti) = ti_tuple
ti.task = dag_task1
# fail execution
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 1)
ti.refresh_from_db(lock_for_update=True, session=session)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
        # run the scheduler again; the task should not be re-queued since it is
        # still present in the executor
do_schedule()
self.assertTrue(executor.has_task(ti))
ti.refresh_from_db()
self.assertEqual(ti.state, State.SCHEDULED)
        # now the executor has cleared and it should be allowed to re-queue
executor.queued_tasks.clear()
do_schedule()
ti.refresh_from_db()
self.assertEqual(ti.state, State.QUEUED)
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
"""
Integration test of the scheduler not accidentally resetting
the try_numbers for a task
"""
dag = self.dagbag.get_dag('test_retry_handling_job')
dag_task1 = dag.get_task("test_retry_handling_op")
dag.clear()
scheduler = SchedulerJob(dag_id=dag.dag_id,
num_runs=1)
scheduler.heartrate = 0
scheduler.run()
session = settings.Session()
ti = session.query(TI).filter(TI.dag_id==dag.dag_id,
TI.task_id==dag_task1.task_id).first()
# make sure the counter has increased
self.assertEqual(ti.try_number, 2)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_scheduler_run_duration(self):
"""
Verifies that the scheduler run duration limit is followed.
"""
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > DEFAULT_DATE)
expected_run_duration = 5
start_time = datetime.datetime.now()
scheduler = SchedulerJob(dag_id,
run_duration=expected_run_duration,
**self.default_scheduler_args)
scheduler.run()
end_time = datetime.datetime.now()
run_duration = (end_time - start_time).total_seconds()
logging.info("Test ran in %.2fs, expected %.2fs",
run_duration,
expected_run_duration)
self.assertLess(run_duration - expected_run_duration, 5.0)
def test_dag_with_system_exit(self):
"""
Test to check that a DAG with a system.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_ids = [dag_id]
dag_directory = os.path.join(settings.DAGS_FOLDER,
"..",
"dags_with_system_exit")
dag_file = os.path.join(dag_directory,
'b_test_scheduler_dags.py')
dagbag = DagBag(dag_folder=dag_file)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
                                 subdir=dag_directory,
num_runs=1,
**self.default_scheduler_args)
scheduler.run()
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
"""
        Test to check that a DAG returns its active runs
"""
now = datetime.datetime.now()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
START_DATE = six_hours_ago_to_the_hour
DAG_NAME1 = 'get_active_runs_test'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': START_DATE
}
dag1 = DAG(DAG_NAME1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag1.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag1.clear()
dr = scheduler.create_dag_run(dag1)
# We had better get a dag run
self.assertIsNotNone(dr)
execution_date = dr.execution_date
running_dates = dag1.get_active_runs()
try:
running_date = running_dates[0]
except:
running_date = 'Except'
self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
"""
Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
"""
now = datetime.datetime.now()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
three_minutes_ago = now - datetime.timedelta(minutes=3)
two_hours_and_three_minutes_ago = three_minutes_ago - datetime.timedelta(hours=2)
START_DATE = six_hours_ago_to_the_hour
DAG_NAME1 = 'no_catchup_test1'
DAG_NAME2 = 'no_catchup_test2'
DAG_NAME3 = 'no_catchup_test3'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': START_DATE
}
dag1 = DAG(DAG_NAME1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
default_catchup = configuration.getboolean('scheduler', 'catchup_by_default')
# Test configs have catchup by default ON
self.assertEqual(default_catchup, True)
# Correct default?
self.assertEqual(dag1.catchup, True)
dag2 = DAG(DAG_NAME2,
schedule_interval='* * * * *',
max_active_runs=1,
catchup=False,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag2)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag2)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag2)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag2.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag2.clear()
dr = scheduler.create_dag_run(dag2)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last 3 minutes, not 6 hours ago
self.assertGreater(dr.execution_date, three_minutes_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, datetime.datetime.now())
dag3 = DAG(DAG_NAME3,
schedule_interval='@hourly',
max_active_runs=1,
catchup=False,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag3)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag3)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag3)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag3.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag3.clear()
dr = None
dr = scheduler.create_dag_run(dag3)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last two hours, not 6 hours ago
self.assertGreater(dr.execution_date, two_hours_and_three_minutes_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, datetime.datetime.now())
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
try:
dags_folder = mkdtemp()
parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(parseable_filename, 'w') as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Generate replacement import error (the error will be on the second line now)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(
PARSEABLE_DAG_FILE_CONTENTS +
os.linesep +
UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Remove the import error from the file
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(
PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
# Rerun the scheduler once the dag file has been removed
self.run_single_scheduler_loop_with_no_dags(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
| apache-2.0 | -6,800,183,360,643,084,000 | 33.530405 | 115 | 0.574177 | false |
leductan-nguyen/RaionPi | src/octoprint/plugin/core.py | 1 | 42607 | # coding=utf-8
"""
In this module reside the core data structures and logic of the plugin system. It is implemented in a RaionPi-agnostic
way and could be extracted into a separate Python module in the future.
.. autoclass:: PluginManager
:members:
.. autoclass:: PluginInfo
:members:
.. autoclass:: Plugin
:members:
"""
from __future__ import absolute_import
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The RaionPi Project - Released under terms of the AGPLv3 License"
import os
import imp
from collections import defaultdict, namedtuple
import logging
import pkg_resources
import pkginfo
EntryPointOrigin = namedtuple("EntryPointOrigin", "type, entry_point, module_name, package_name, package_version")
FolderOrigin = namedtuple("FolderOrigin", "type, folder")
class PluginInfo(object):
"""
The :class:`PluginInfo` class wraps all available information about a registered plugin.
This includes its meta data (like name, description, version, etc) as well as the actual plugin extensions like
implementations, hooks and helpers.
It works on Python module objects and extracts the relevant data from those via accessing the
:ref:`control properties <sec-plugin-concepts-controlproperties>`.
Arguments:
key (str): Identifier of the plugin
location (str): Installation folder of the plugin
instance (module): Plugin module instance
name (str): Human readable name of the plugin
version (str): Version of the plugin
description (str): Description of the plugin
author (str): Author of the plugin
url (str): URL of the website of the plugin
license (str): License of the plugin
"""
attr_name = '__plugin_name__'
""" Module attribute from which to retrieve the plugin's human readable name. """
attr_description = '__plugin_description__'
""" Module attribute from which to retrieve the plugin's description. """
attr_version = '__plugin_version__'
""" Module attribute from which to retrieve the plugin's version. """
attr_author = '__plugin_author__'
""" Module attribute from which to retrieve the plugin's author. """
attr_url = '__plugin_url__'
""" Module attribute from which to retrieve the plugin's website URL. """
attr_license = '__plugin_license__'
""" Module attribute from which to retrieve the plugin's license. """
attr_hooks = '__plugin_hooks__'
""" Module attribute from which to retrieve the plugin's provided hooks. """
attr_implementation = '__plugin_implementation__'
""" Module attribute from which to retrieve the plugin's provided mixin implementation. """
attr_implementations = '__plugin_implementations__'
"""
Module attribute from which to retrieve the plugin's provided implementations.
This deprecated attribute will only be used if a plugin does not yet offer :attr:`attr_implementation`. Only the
first entry will be evaluated.
.. deprecated:: 1.2.0-dev-694
Use :attr:`attr_implementation` instead.
"""
attr_helpers = '__plugin_helpers__'
""" Module attribute from which to retrieve the plugin's provided helpers. """
attr_check = '__plugin_check__'
""" Module attribute which to call to determine if the plugin can be loaded. """
attr_init = '__plugin_init__'
"""
Module attribute which to call when loading the plugin.
This deprecated attribute will only be used if a plugin does not yet offer :attr:`attr_load`.
.. deprecated:: 1.2.0-dev-720
Use :attr:`attr_load` instead.
"""
attr_load = '__plugin_load__'
""" Module attribute which to call when loading the plugin. """
attr_unload = '__plugin_unload__'
""" Module attribute which to call when unloading the plugin. """
attr_enable = '__plugin_enable__'
""" Module attribute which to call when enabling the plugin. """
attr_disable = '__plugin_disable__'
""" Module attribute which to call when disabling the plugin. """
def __init__(self, key, location, instance, name=None, version=None, description=None, author=None, url=None, license=None):
self.key = key
self.location = location
self.instance = instance
self.origin = None
self.enabled = True
self.bundled = False
self.loaded = False
self._name = name
self._version = version
self._description = description
self._author = author
self._url = url
self._license = license
def validate(self, phase, additional_validators=None):
if phase == "before_load":
# if the plugin still uses __plugin_init__, log a deprecation warning and move it to __plugin_load__
if hasattr(self.instance, self.__class__.attr_init):
if not hasattr(self.instance, self.__class__.attr_load):
# deprecation warning
import warnings
warnings.warn("{name} uses deprecated control property __plugin_init__, use __plugin_load__ instead".format(name=self.key), DeprecationWarning)
# move it
init = getattr(self.instance, self.__class__.attr_init)
setattr(self.instance, self.__class__.attr_load, init)
# delete __plugin_init__
delattr(self.instance, self.__class__.attr_init)
elif phase == "after_load":
# if the plugin still uses __plugin_implementations__, log a deprecation warning and put the first
# item into __plugin_implementation__
if hasattr(self.instance, self.__class__.attr_implementations):
if not hasattr(self.instance, self.__class__.attr_implementation):
# deprecation warning
import warnings
warnings.warn("{name} uses deprecated control property __plugin_implementations__, use __plugin_implementation__ instead - only the first implementation of {name} will be recognized".format(name=self.key), DeprecationWarning)
# put first item into __plugin_implementation__
implementations = getattr(self.instance, self.__class__.attr_implementations)
if len(implementations) > 0:
setattr(self.instance, self.__class__.attr_implementation, implementations[0])
# delete __plugin_implementations__
delattr(self.instance, self.__class__.attr_implementations)
if additional_validators is not None:
for validator in additional_validators:
validator(phase, self)
def __str__(self):
if self.version:
return "{name} ({version})".format(name=self.name, version=self.version)
else:
return self.name
def long_str(self, show_bundled=False, bundled_strs=(" [B]", ""),
show_location=False, location_str=" - {location}",
show_enabled=False, enabled_strs=("* ", " ")):
"""
Long string representation of the plugin's information. Will return a string of the format ``<enabled><str(self)><bundled><location>``.
``enabled``, ``bundled`` and ``location`` will only be displayed if the corresponding flags are set to ``True``.
		They will be filled from ``enabled_strs``, ``bundled_strs`` and ``location_str`` as follows:
		``enabled_strs``
a 2-tuple, the first entry being the string to insert when the plugin is enabled, the second
entry the string to insert when it is not.
		``bundled_strs``
a 2-tuple, the first entry being the string to insert when the plugin is bundled, the second
entry the string to insert when it is not.
``location_str``
a format string (to be parsed with ``str.format``), the ``{location}`` placeholder will be
replaced with the plugin's installation folder on disk.
Arguments:
show_enabled (boolean): whether to show the ``enabled`` part
enabled_strs (tuple): the 2-tuple containing the two possible strings to use for displaying the enabled state
show_bundled (boolean): whether to show the ``bundled`` part
		    bundled_strs (tuple): the 2-tuple containing the two possible strings to use for displaying the bundled state
show_location (boolean): whether to show the ``location`` part
location_str (str): the format string to use for displaying the plugin's installation location
Returns:
str: The long string representation of the plugin as described above
"""
if show_enabled:
ret = enabled_strs[0] if self.enabled else enabled_strs[1]
else:
ret = ""
ret += str(self)
if show_bundled:
ret += bundled_strs[0] if self.bundled else bundled_strs[1]
if show_location and self.location:
ret += location_str.format(location=self.location)
return ret
def get_hook(self, hook):
"""
Arguments:
hook (str): Hook to return.
Returns:
callable or None: Handler for the requested ``hook`` or None if no handler is registered.
"""
if not hook in self.hooks:
return None
return self.hooks[hook]
def get_implementation(self, *types):
"""
Arguments:
types (list): List of :class:`Plugin` sub classes all returned implementations need to implement.
Returns:
object: The plugin's implementation if it matches all of the requested ``types``, None otherwise.
"""
if not self.implementation:
return None
for t in types:
if not isinstance(self.implementation, t):
return None
return self.implementation
@property
def name(self):
"""
Human readable name of the plugin. Will be taken from name attribute of the plugin module if available,
otherwise from the ``name`` supplied during construction with a fallback to ``key``.
Returns:
str: Name of the plugin, fallback is the plugin's identifier.
"""
return self._get_instance_attribute(self.__class__.attr_name, defaults=(self._name, self.key))
@property
def description(self):
"""
Description of the plugin. Will be taken from the description attribute of the plugin module as defined in
:attr:`attr_description` if available, otherwise from the ``description`` supplied during construction.
May be None.
Returns:
str or None: Description of the plugin.
"""
return self._get_instance_attribute(self.__class__.attr_description, default=self._description)
@property
def version(self):
"""
Version of the plugin. Will be taken from the version attribute of the plugin module as defined in
:attr:`attr_version` if available, otherwise from the ``version`` supplied during construction. May be None.
Returns:
str or None: Version of the plugin.
"""
return self._version if self._version is not None else self._get_instance_attribute(self.__class__.attr_version, default=self._version)
@property
def author(self):
"""
Author of the plugin. Will be taken from the author attribute of the plugin module as defined in
:attr:`attr_author` if available, otherwise from the ``author`` supplied during construction. May be None.
Returns:
str or None: Author of the plugin.
"""
return self._get_instance_attribute(self.__class__.attr_author, default=self._author)
@property
def url(self):
"""
Website URL for the plugin. Will be taken from the url attribute of the plugin module as defined in
:attr:`attr_url` if available, otherwise from the ``url`` supplied during construction. May be None.
Returns:
str or None: Website URL for the plugin.
"""
return self._get_instance_attribute(self.__class__.attr_url, default=self._url)
@property
def license(self):
"""
License of the plugin. Will be taken from the license attribute of the plugin module as defined in
:attr:`attr_license` if available, otherwise from the ``license`` supplied during construction. May be None.
Returns:
str or None: License of the plugin.
"""
return self._get_instance_attribute(self.__class__.attr_license, default=self._license)
@property
def hooks(self):
"""
		Hooks provided by the plugin. Will be taken from the hooks attribute of the plugin module as defined in
:attr:`attr_hooks` if available, otherwise an empty dictionary is returned.
Returns:
dict: Hooks provided by the plugin.
"""
return self._get_instance_attribute(self.__class__.attr_hooks, default={})
@property
def implementation(self):
"""
Implementation provided by the plugin. Will be taken from the implementation attribute of the plugin module
as defined in :attr:`attr_implementation` if available, otherwise None is returned.
Returns:
object: Implementation provided by the plugin.
"""
return self._get_instance_attribute(self.__class__.attr_implementation, default=None)
@property
def helpers(self):
"""
Helpers provided by the plugin. Will be taken from the helpers attribute of the plugin module as defined in
:attr:`attr_helpers` if available, otherwise an empty list is returned.
Returns:
dict: Helpers provided by the plugin.
"""
return self._get_instance_attribute(self.__class__.attr_helpers, default={})
@property
def check(self):
"""
Method for pre-load check of plugin. Will be taken from the check attribute of the plugin module as defined in
:attr:`attr_check` if available, otherwise a lambda always returning True is returned.
Returns:
callable: Check method for the plugin module which should return True if the plugin can be loaded, False
otherwise.
"""
return self._get_instance_attribute(self.__class__.attr_check, default=lambda: True)
@property
def load(self):
"""
Method for loading the plugin module. Will be taken from the load attribute of the plugin module as defined
in :attr:`attr_load` if available, otherwise a no-operation lambda will be returned.
Returns:
callable: Load method for the plugin module.
"""
return self._get_instance_attribute(self.__class__.attr_load, default=lambda: True)
@property
def unload(self):
"""
Method for unloading the plugin module. Will be taken from the unload attribute of the plugin module as defined
in :attr:`attr_unload` if available, otherwise a no-operation lambda will be returned.
Returns:
callable: Unload method for the plugin module.
"""
return self._get_instance_attribute(self.__class__.attr_unload, default=lambda: True)
@property
def enable(self):
"""
Method for enabling the plugin module. Will be taken from the enable attribute of the plugin module as defined
in :attr:`attr_enable` if available, otherwise a no-operation lambda will be returned.
Returns:
callable: Enable method for the plugin module.
"""
return self._get_instance_attribute(self.__class__.attr_enable, default=lambda: True)
@property
def disable(self):
"""
Method for disabling the plugin module. Will be taken from the disable attribute of the plugin module as defined
in :attr:`attr_disable` if available, otherwise a no-operation lambda will be returned.
Returns:
callable: Disable method for the plugin module.
"""
return self._get_instance_attribute(self.__class__.attr_disable, default=lambda: True)
def _get_instance_attribute(self, attr, default=None, defaults=None):
if not hasattr(self.instance, attr):
if defaults is not None:
for value in defaults:
if value is not None:
return value
return default
return getattr(self.instance, attr)
class PluginManager(object):
"""
The :class:`PluginManager` is the central component for finding, loading and accessing plugins provided to the
system.
It is able to discover plugins both through possible file system locations as well as customizable entry points.
"""
def __init__(self, plugin_folders, plugin_types, plugin_entry_points, logging_prefix=None,
plugin_disabled_list=None, plugin_restart_needing_hooks=None, plugin_obsolete_hooks=None,
plugin_validators=None):
self.logger = logging.getLogger(__name__)
if logging_prefix is None:
logging_prefix = ""
if plugin_disabled_list is None:
plugin_disabled_list = []
self.plugin_folders = plugin_folders
self.plugin_types = plugin_types
self.plugin_entry_points = plugin_entry_points
self.plugin_disabled_list = plugin_disabled_list
self.plugin_restart_needing_hooks = plugin_restart_needing_hooks
self.plugin_obsolete_hooks = plugin_obsolete_hooks
self.plugin_validators = plugin_validators
self.logging_prefix = logging_prefix
self.enabled_plugins = dict()
self.disabled_plugins = dict()
self.plugin_hooks = defaultdict(list)
self.plugin_implementations = dict()
self.plugin_implementations_by_type = defaultdict(list)
self.implementation_injects = dict()
self.implementation_inject_factories = []
self.implementation_pre_inits = []
self.implementation_post_inits = []
self.on_plugin_loaded = lambda *args, **kwargs: None
self.on_plugin_unloaded = lambda *args, **kwargs: None
self.on_plugin_enabled = lambda *args, **kwargs: None
self.on_plugin_disabled = lambda *args, **kwargs: None
self.on_plugin_implementations_initialized = lambda *args, **kwargs: None
self.registered_clients = []
self.marked_plugins = defaultdict(list)
self.reload_plugins(startup=True, initialize_implementations=False)
@property
def plugins(self):
plugins = dict(self.enabled_plugins)
plugins.update(self.disabled_plugins)
return plugins
def find_plugins(self, existing=None, ignore_uninstalled=True):
if existing is None:
existing = dict(self.plugins)
result = dict()
if self.plugin_folders:
result.update(self._find_plugins_from_folders(self.plugin_folders, existing, ignored_uninstalled=ignore_uninstalled))
if self.plugin_entry_points:
existing.update(result)
result.update(self._find_plugins_from_entry_points(self.plugin_entry_points, existing, ignore_uninstalled=ignore_uninstalled))
return result
def _find_plugins_from_folders(self, folders, existing, ignored_uninstalled=True):
result = dict()
for folder in folders:
readonly = False
if isinstance(folder, (list, tuple)):
if len(folder) == 2:
folder, readonly = folder
else:
continue
if not os.path.exists(folder):
self.logger.warn("Plugin folder {folder} could not be found, skipping it".format(folder=folder))
continue
entries = os.listdir(folder)
for entry in entries:
path = os.path.join(folder, entry)
if os.path.isdir(path) and os.path.isfile(os.path.join(path, "__init__.py")):
key = entry
elif os.path.isfile(path) and entry.endswith(".py"):
key = entry[:-3] # strip off the .py extension
else:
continue
if key in existing or key in result or (ignored_uninstalled and key in self.marked_plugins["uninstalled"]):
# plugin is already defined, ignore it
continue
plugin = self._import_plugin_from_module(key, folder=folder)
if plugin:
plugin.origin = FolderOrigin("folder", folder)
if readonly:
plugin.bundled = True
plugin.enabled = False
result[key] = plugin
return result
def _find_plugins_from_entry_points(self, groups, existing, ignore_uninstalled=True):
result = dict()
# let's make sure we have a current working set
working_set = pkg_resources.WorkingSet()
if not isinstance(groups, (list, tuple)):
groups = [groups]
for group in groups:
for entry_point in working_set.iter_entry_points(group=group, name=None):
key = entry_point.name
module_name = entry_point.module_name
version = entry_point.dist.version
if key in existing or key in result or (ignore_uninstalled and key in self.marked_plugins["uninstalled"]):
# plugin is already defined or marked as uninstalled, ignore it
continue
kwargs = dict(module_name=module_name, version=version)
package_name = None
try:
module_pkginfo = InstalledEntryPoint(entry_point)
except:
self.logger.exception("Something went wrong while retrieving package info data for module %s" % module_name)
else:
kwargs.update(dict(
name=module_pkginfo.name,
summary=module_pkginfo.summary,
author=module_pkginfo.author,
url=module_pkginfo.home_page,
license=module_pkginfo.license
))
package_name = module_pkginfo.name
plugin = self._import_plugin_from_module(key, **kwargs)
if plugin:
plugin.origin = EntryPointOrigin("entry_point", group, module_name, package_name, version)
plugin.enabled = False
result[key] = plugin
return result
def _import_plugin_from_module(self, key, folder=None, module_name=None, name=None, version=None, summary=None, author=None, url=None, license=None):
# TODO error handling
try:
if folder:
module = imp.find_module(key, [folder])
elif module_name:
module = imp.find_module(module_name)
else:
return None
except:
self.logger.warn("Could not locate plugin {key}")
return None
plugin = self._import_plugin(key, *module, name=name, version=version, summary=summary, author=author, url=url, license=license)
if plugin is None:
return None
if plugin.check():
return plugin
else:
self.logger.warn("Plugin \"{plugin}\" did not pass check".format(plugin=str(plugin)))
return None
def _import_plugin(self, key, f, filename, description, name=None, version=None, summary=None, author=None, url=None, license=None):
try:
instance = imp.load_module(key, f, filename, description)
return PluginInfo(key, filename, instance, name=name, version=version, description=summary, author=author, url=url, license=license)
except:
self.logger.exception("Error loading plugin {key}".format(key=key))
return None
def _is_plugin_disabled(self, key):
return key in self.plugin_disabled_list or key.endswith('disabled')
def reload_plugins(self, startup=False, initialize_implementations=True, force_reload=None):
self.logger.info("Loading plugins from {folders} and installed plugin packages...".format(
folders=", ".join(map(lambda x: x[0] if isinstance(x, tuple) else str(x), self.plugin_folders))
))
if force_reload is None:
force_reload = []
plugins = self.find_plugins(existing=dict((k, v) for k, v in self.plugins.items() if not k in force_reload))
self.disabled_plugins.update(plugins)
for name, plugin in plugins.items():
try:
self.load_plugin(name, plugin, startup=startup, initialize_implementation=initialize_implementations)
if not self._is_plugin_disabled(name):
self.enable_plugin(name, plugin=plugin, initialize_implementation=initialize_implementations, startup=startup)
except PluginNeedsRestart:
pass
except PluginLifecycleException as e:
self.logger.info(str(e))
if len(self.enabled_plugins) <= 0:
self.logger.info("No plugins found")
else:
self.logger.info("Found {count} plugin(s) providing {implementations} mixin implementations, {hooks} hook handlers".format(
count=len(self.enabled_plugins) + len(self.disabled_plugins),
implementations=len(self.plugin_implementations),
hooks=sum(map(lambda x: len(x), self.plugin_hooks.values()))
))
def mark_plugin(self, name, uninstalled=None):
if not name in self.plugins:
self.logger.warn("Trying to mark an unknown plugin {name}".format(**locals()))
if uninstalled is not None:
if uninstalled and not name in self.marked_plugins["uninstalled"]:
self.marked_plugins["uninstalled"].append(name)
elif not uninstalled and name in self.marked_plugins["uninstalled"]:
self.marked_plugins["uninstalled"].remove(name)
def load_plugin(self, name, plugin=None, startup=False, initialize_implementation=True):
if not name in self.plugins:
self.logger.warn("Trying to load an unknown plugin {name}".format(**locals()))
return
if plugin is None:
plugin = self.plugins[name]
try:
plugin.validate("before_load", additional_validators=self.plugin_validators)
plugin.load()
plugin.validate("after_load", additional_validators=self.plugin_validators)
self.on_plugin_loaded(name, plugin)
plugin.loaded = True
self.logger.debug("Loaded plugin {name}: {plugin}".format(**locals()))
except PluginLifecycleException as e:
raise e
except:
self.logger.exception("There was an error loading plugin %s" % name)
def unload_plugin(self, name):
if not name in self.plugins:
self.logger.warn("Trying to unload unknown plugin {name}".format(**locals()))
return
plugin = self.plugins[name]
try:
if plugin.enabled:
self.disable_plugin(name, plugin=plugin)
plugin.unload()
self.on_plugin_unloaded(name, plugin)
if name in self.enabled_plugins:
del self.enabled_plugins[name]
if name in self.disabled_plugins:
del self.disabled_plugins[name]
plugin.loaded = False
self.logger.debug("Unloaded plugin {name}: {plugin}".format(**locals()))
except PluginLifecycleException as e:
raise e
except:
self.logger.exception("There was an error unloading plugin {name}".format(**locals()))
# make sure the plugin is NOT in the list of enabled plugins but in the list of disabled plugins
if name in self.enabled_plugins:
del self.enabled_plugins[name]
if not name in self.disabled_plugins:
self.disabled_plugins[name] = plugin
def enable_plugin(self, name, plugin=None, initialize_implementation=True, startup=False):
if not name in self.disabled_plugins:
self.logger.warn("Tried to enable plugin {name}, however it is not disabled".format(**locals()))
return
if plugin is None:
plugin = self.disabled_plugins[name]
if not startup and self.is_restart_needing_plugin(plugin):
raise PluginNeedsRestart(name)
if self.has_obsolete_hooks(plugin):
raise PluginCantEnable(name, "Dependency on obsolete hooks detected, full functionality cannot be guaranteed")
try:
plugin.enable()
self._activate_plugin(name, plugin)
except PluginLifecycleException as e:
raise e
except:
self.logger.exception("There was an error while enabling plugin {name}".format(**locals()))
return False
else:
if name in self.disabled_plugins:
del self.disabled_plugins[name]
self.enabled_plugins[name] = plugin
plugin.enabled = True
if plugin.implementation:
if initialize_implementation:
if not self.initialize_implementation_of_plugin(name, plugin):
return False
plugin.implementation.on_plugin_enabled()
self.on_plugin_enabled(name, plugin)
self.logger.debug("Enabled plugin {name}: {plugin}".format(**locals()))
return True
def disable_plugin(self, name, plugin=None):
if not name in self.enabled_plugins:
self.logger.warn("Tried to disable plugin {name}, however it is not enabled".format(**locals()))
return
if plugin is None:
plugin = self.enabled_plugins[name]
if self.is_restart_needing_plugin(plugin):
raise PluginNeedsRestart(name)
try:
plugin.disable()
self._deactivate_plugin(name, plugin)
except PluginLifecycleException as e:
raise e
except:
self.logger.exception("There was an error while disabling plugin {name}".format(**locals()))
return False
else:
if name in self.enabled_plugins:
del self.enabled_plugins[name]
self.disabled_plugins[name] = plugin
plugin.enabled = False
if plugin.implementation:
plugin.implementation.on_plugin_disabled()
self.on_plugin_disabled(name, plugin)
self.logger.debug("Disabled plugin {name}: {plugin}".format(**locals()))
return True
def _activate_plugin(self, name, plugin):
plugin.hotchangeable = self.is_restart_needing_plugin(plugin)
# evaluate registered hooks
for hook, callback in plugin.hooks.items():
self.plugin_hooks[hook].append((name, callback))
# evaluate registered implementation
if plugin.implementation:
for plugin_type in self.plugin_types:
if isinstance(plugin.implementation, plugin_type):
self.plugin_implementations_by_type[plugin_type].append((name, plugin.implementation))
self.plugin_implementations[name] = plugin.implementation
def _deactivate_plugin(self, name, plugin):
for hook, callback in plugin.hooks.items():
try:
self.plugin_hooks[hook].remove((name, callback))
except ValueError:
# that's ok, the plugin was just not registered for the hook
pass
if plugin.implementation is not None:
if name in self.plugin_implementations:
del self.plugin_implementations[name]
for plugin_type in self.plugin_types:
try:
self.plugin_implementations_by_type[plugin_type].remove((name, plugin.implementation))
except ValueError:
# that's ok, the plugin was just not registered for the type
pass
def is_restart_needing_plugin(self, plugin):
return self.has_restart_needing_implementation(plugin) or self.has_restart_needing_hooks(plugin)
def has_restart_needing_implementation(self, plugin):
if not plugin.implementation:
return False
return isinstance(plugin.implementation, RestartNeedingPlugin)
def has_restart_needing_hooks(self, plugin):
if not plugin.hooks:
return False
hooks = plugin.hooks.keys()
for hook in hooks:
if self.is_restart_needing_hook(hook):
return True
return False
def has_obsolete_hooks(self, plugin):
if not plugin.hooks:
return False
hooks = plugin.hooks.keys()
for hook in hooks:
if self.is_obsolete_hook(hook):
return True
return False
def is_restart_needing_hook(self, hook):
if self.plugin_restart_needing_hooks is None:
return False
for h in self.plugin_restart_needing_hooks:
if hook.startswith(h):
return True
return False
def is_obsolete_hook(self, hook):
if self.plugin_obsolete_hooks is None:
return False
return hook in self.plugin_obsolete_hooks
def initialize_implementations(self, additional_injects=None, additional_inject_factories=None, additional_pre_inits=None, additional_post_inits=None):
for name, plugin in self.enabled_plugins.items():
self.initialize_implementation_of_plugin(name, plugin,
additional_injects=additional_injects,
additional_inject_factories=additional_inject_factories,
additional_pre_inits=additional_pre_inits,
additional_post_inits=additional_post_inits)
self.logger.info("Initialized {count} plugin implementation(s)".format(count=len(self.plugin_implementations)))
def initialize_implementation_of_plugin(self, name, plugin, additional_injects=None, additional_inject_factories=None, additional_pre_inits=None, additional_post_inits=None):
if plugin.implementation is None:
return
return self.initialize_implementation(name, plugin, plugin.implementation,
additional_injects=additional_injects,
additional_inject_factories=additional_inject_factories,
additional_pre_inits=additional_pre_inits,
additional_post_inits=additional_post_inits)
def initialize_implementation(self, name, plugin, implementation, additional_injects=None, additional_inject_factories=None, additional_pre_inits=None, additional_post_inits=None):
if additional_injects is None:
additional_injects = dict()
if additional_inject_factories is None:
additional_inject_factories = []
if additional_pre_inits is None:
additional_pre_inits = []
if additional_post_inits is None:
additional_post_inits = []
injects = self.implementation_injects
injects.update(additional_injects)
inject_factories = self.implementation_inject_factories
inject_factories += additional_inject_factories
pre_inits = self.implementation_pre_inits
pre_inits += additional_pre_inits
post_inits = self.implementation_post_inits
post_inits += additional_post_inits
try:
kwargs = dict(injects)
kwargs.update(dict(
identifier=name,
plugin_name=plugin.name,
plugin_version=plugin.version,
basefolder=os.path.realpath(plugin.location),
logger=logging.getLogger(self.logging_prefix + name),
))
# inject the additional_injects
for arg, value in kwargs.items():
setattr(implementation, "_" + arg, value)
# inject any injects produced in the additional_inject_factories
for factory in inject_factories:
try:
return_value = factory(name, implementation)
except:
self.logger.exception("Exception while executing injection factory %r" % factory)
else:
if return_value is not None:
if isinstance(return_value, dict):
for arg, value in return_value.items():
setattr(implementation, "_" + arg, value)
# execute any additional pre init methods
for pre_init in pre_inits:
pre_init(name, implementation)
implementation.initialize()
# execute any additional post init methods
for post_init in post_inits:
post_init(name, implementation)
except Exception as e:
self._deactivate_plugin(name, plugin)
plugin.enabled = False
if isinstance(e, PluginLifecycleException):
raise e
else:
self.logger.exception("Exception while initializing plugin {name}, disabling it".format(**locals()))
return False
else:
self.on_plugin_implementations_initialized(name, plugin)
self.logger.debug("Initialized plugin mixin implementation for plugin {name}".format(**locals()))
return True
def log_all_plugins(self, show_bundled=True, bundled_str=(" (bundled)", ""), show_location=True, location_str=" = {location}", show_enabled=True, enabled_str=(" ", "!")):
all_plugins = self.enabled_plugins.values() + self.disabled_plugins.values()
if len(all_plugins) <= 0:
self.logger.info("No plugins available")
else:
self.logger.info("{count} plugin(s) registered with the system:\n{plugins}".format(count=len(all_plugins), plugins="\n".join(
map(lambda x: "| " + x.long_str(show_bundled=show_bundled,
bundled_strs=bundled_str,
show_location=show_location,
location_str=location_str,
show_enabled=show_enabled,
enabled_strs=enabled_str),
sorted(self.plugins.values(), key=lambda x: str(x).lower()))
)))
def get_plugin(self, identifier, require_enabled=True):
"""
		Retrieves the module of the plugin identified by ``identifier``. If the plugin is not registered, or it is
		disabled and ``require_enabled`` is True (the default), None will be returned.
Arguments:
identifier (str): The identifier of the plugin to retrieve.
		    require_enabled (boolean): Whether to only return the plugin if it is enabled (True, default) or also if it's
disabled.
Returns:
module: The requested plugin module or None
"""
plugin_info = self.get_plugin_info(identifier, require_enabled=require_enabled)
if plugin_info is not None:
return plugin_info.instance
return None
def get_plugin_info(self, identifier, require_enabled=True):
"""
		Retrieves the :class:`PluginInfo` instance identified by ``identifier``. If the plugin is not registered, or it
		is disabled and ``require_enabled`` is True (the default), None will be returned.
Arguments:
identifier (str): The identifier of the plugin to retrieve.
		    require_enabled (boolean): Whether to only return the plugin if it is enabled (True, default) or also if it's
disabled.
Returns:
~.PluginInfo: The requested :class:`PluginInfo` or None
"""
if identifier in self.enabled_plugins:
return self.enabled_plugins[identifier]
elif not require_enabled and identifier in self.disabled_plugins:
return self.disabled_plugins[identifier]
return None
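	# Illustrative sketch (not part of the original module): how a caller might combine
	# get_plugin_info() and get_plugin(). The identifier "some_plugin" and the manager
	# instance name "plugin_manager" are assumptions made for this example only.
	#
	#   info = plugin_manager.get_plugin_info("some_plugin", require_enabled=False)
	#   if info is not None and info.enabled:
	#       module = plugin_manager.get_plugin("some_plugin")
	#       # module is the plugin's imported module instance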
def get_hooks(self, hook):
"""
Retrieves all registered handlers for the specified hook.
Arguments:
hook (str): The hook for which to retrieve the handlers.
Returns:
dict: A dict containing all registered handlers mapped by their plugin's identifier.
"""
if not hook in self.plugin_hooks:
return dict()
return {hook[0]: hook[1] for hook in self.plugin_hooks[hook]}
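	# Illustrative sketch (the hook name and the handler signature are assumptions for
	# the example): iterating over the handlers registered for a given hook.
	#
	#   for identifier, handler in plugin_manager.get_hooks("some.hook.name").items():
	#       try:
	#           handler()
	#       except:
	#           plugin_manager.logger.exception("Error in hook handler of %s" % identifier)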
def get_implementations(self, *types):
"""
Get all mixin implementations that implement *all* of the provided ``types``.
Arguments:
types (one or more type): The types a mixin implementation needs to implement in order to be returned.
Returns:
list: A list of all found implementations
"""
result = None
for t in types:
implementations = self.plugin_implementations_by_type[t]
if result is None:
result = set(implementations)
else:
result = result.intersection(implementations)
if result is None:
return dict()
return [impl[1] for impl in result]
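	# Illustrative sketch (the mixin types SomeMixin/OtherMixin are assumptions): fetching
	# all implementations that implement both mixins at once.
	#
	#   for implementation in plugin_manager.get_implementations(SomeMixin, OtherMixin):
	#       implementation.some_mixin_method()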
def get_filtered_implementations(self, f, *types):
"""
		Get all mixin implementations that implement *all* of the provided ``types`` and match the provided filter ``f``.
Arguments:
f (callable): A filter function returning True for implementations to return and False for those to exclude.
types (one or more type): The types a mixin implementation needs to implement in order to be returned.
Returns:
list: A list of all found and matching implementations.
"""
assert callable(f)
implementations = self.get_implementations(*types)
return filter(f, implementations)
def get_helpers(self, name, *helpers):
"""
Retrieves the named ``helpers`` for the plugin with identifier ``name``.
If the plugin is not available, returns None. Otherwise returns a :class:`dict` with the requested plugin
helper names mapped to the method - if a helper could not be resolved, it will be missing from the dict.
Arguments:
name (str): Identifier of the plugin for which to look up the ``helpers``.
helpers (one or more str): Identifiers of the helpers of plugin ``name`` to return.
Returns:
dict: A dictionary of all resolved helpers, mapped by their identifiers, or None if the plugin was not
registered with the system.
"""
if not name in self.enabled_plugins:
return None
plugin = self.enabled_plugins[name]
all_helpers = plugin.helpers
if len(helpers):
return dict((k, v) for (k, v) in all_helpers.items() if k in helpers)
else:
return all_helpers
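	# Illustrative sketch (plugin identifier and helper name are assumptions): resolving a
	# helper exposed by another plugin and calling it only if it could be resolved.
	#
	#   helpers = plugin_manager.get_helpers("other_plugin", "do_something")
	#   if helpers and "do_something" in helpers:
	#       helpers["do_something"]()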
def register_message_receiver(self, client):
"""
Registers a ``client`` for receiving plugin messages. The ``client`` needs to be a callable accepting two
input arguments, ``plugin`` (the sending plugin's identifier) and ``data`` (the message itself).
"""
if client is None:
return
self.registered_clients.append(client)
def unregister_message_receiver(self, client):
"""
Unregisters a ``client`` for receiving plugin messages.
"""
self.registered_clients.remove(client)
def send_plugin_message(self, plugin, data):
"""
Sends ``data`` in the name of ``plugin`` to all currently registered message receivers by invoking them
with the two arguments.
Arguments:
plugin (str): The sending plugin's identifier.
data (object): The message.
"""
for client in self.registered_clients:
try: client(plugin, data)
except: self.logger.exception("Exception while sending plugin data to client")
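# Illustrative sketch (receiver signature taken from the docstrings above; the manager
# instance name and the message payload are assumptions): registering a message receiver
# and then sending a plugin message to it.
#
#   def receiver(plugin, data):
#       print("message from %s: %r" % (plugin, data))
#
#   plugin_manager.register_message_receiver(receiver)
#   plugin_manager.send_plugin_message("some_plugin", dict(type="status", busy=True))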
class InstalledEntryPoint(pkginfo.Installed):
def __init__(self, entry_point, metadata_version=None):
self.entry_point = entry_point
package = entry_point.module_name
pkginfo.Installed.__init__(self, package, metadata_version=metadata_version)
def read(self):
import sys
import glob
import warnings
opj = os.path.join
if self.package is not None:
package = self.package.__package__
if package is None:
package = self.package.__name__
project = pkg_resources.to_filename(pkg_resources.safe_name(self.entry_point.dist.project_name))
package_pattern = '%s*.egg-info' % package
project_pattern = '%s*.egg-info' % project
file = getattr(self.package, '__file__', None)
if file is not None:
candidates = []
def _add_candidate(where):
candidates.extend(glob.glob(where))
for entry in sys.path:
if file.startswith(entry):
_add_candidate(opj(entry, 'EGG-INFO')) # egg?
for pattern in (package_pattern, project_pattern): # dist-installed?
_add_candidate(opj(entry, pattern))
dir, name = os.path.split(self.package.__file__)
for pattern in (package_pattern, project_pattern):
_add_candidate(opj(dir, pattern))
_add_candidate(opj(dir, '..', pattern))
for candidate in candidates:
if os.path.isdir(candidate):
path = opj(candidate, 'PKG-INFO')
else:
path = candidate
if os.path.exists(path):
with open(path) as f:
return f.read()
warnings.warn('No PKG-INFO found for package: %s' % self.package_name)
class Plugin(object):
"""
The parent class of all plugin implementations.
.. attribute:: _identifier
The identifier of the plugin. Injected by the plugin core system upon initialization of the implementation.
.. attribute:: _plugin_name
The name of the plugin. Injected by the plugin core system upon initialization of the implementation.
.. attribute:: _plugin_version
The version of the plugin. Injected by the plugin core system upon initialization of the implementation.
.. attribute:: _basefolder
The base folder of the plugin. Injected by the plugin core system upon initialization of the implementation.
.. attribute:: _logger
The logger instance to use, with the logging name set to the :attr:`PluginManager.logging_prefix` of the
:class:`PluginManager` concatenated with :attr:`_identifier`. Injected by the plugin core system upon
initialization of the implementation.
"""
def initialize(self):
"""
Called by the plugin core after performing all injections. Override this to initialize your implementation.
"""
pass
def on_plugin_enabled(self):
pass
def on_plugin_disabled(self):
pass
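# Illustrative sketch (the class name "HelloPlugin" is an assumption; the underscore
# attributes are the ones injected by PluginManager.initialize_implementation above):
# a minimal Plugin subclass making use of the injected attributes.
#
#   class HelloPlugin(Plugin):
#       def initialize(self):
#           self._logger.info("Hello from %s %s located in %s",
#                             self._plugin_name, self._plugin_version, self._basefolder)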
class RestartNeedingPlugin(Plugin):
pass
class PluginNeedsRestart(Exception):
def __init__(self, name):
Exception.__init__(self)
self.name = name
self.message = "Plugin {name} cannot be enabled or disabled after system startup".format(**locals())
class PluginLifecycleException(Exception):
def __init__(self, name, reason, message):
Exception.__init__(self)
self.name = name
self.reason = reason
self.message = message.format(**locals())
def __str__(self):
return self.message
class PluginCantInitialize(PluginLifecycleException):
def __init__(self, name, reason):
PluginLifecycleException.__init__(self, name, reason, "Plugin {name} cannot be initialized: {reason}")
class PluginCantEnable(PluginLifecycleException):
def __init__(self, name, reason):
PluginLifecycleException.__init__(self, name, reason, "Plugin {name} cannot be enabled: {reason}")
class PluginCantDisable(PluginLifecycleException):
def __init__(self, name, reason):
PluginLifecycleException.__init__(self, name, reason, "Plugin {name} cannot be disabled: {reason}")
| agpl-3.0 | -19,205,386,692,209,010 | 33.331185 | 230 | 0.708133 | false |
mungerd/plastex | plasTeX/Base/LaTeX/Index.py | 5 | 13222 | #!/usr/bin/env python
"""
C.11.5 Index and Glossary (p211)
"""
import string, os
from plasTeX.Tokenizer import Token, EscapeSequence
from plasTeX import Command, Environment, IgnoreCommand, encoding
from plasTeX.Logging import getLogger
from Sectioning import SectionUtils
try:
from pyuca import Collator
collator = Collator(os.path.join(os.path.dirname(__file__), 'allkeys.txt')).sort_key
except ImportError:
collator = lambda x: x.lower()
class hyperpage(IgnoreCommand):
args = 'page:nox'
class hyperindexformat(IgnoreCommand):
args = 'fmt:nox page:nox'
class IndexUtils(object):
""" Helper functions for generating indexes """
linkType = 'index'
level = Command.CHAPTER_LEVEL
class Index(Command):
"""
Utility class used to surface the index entries to the renderer
"""
def __init__(self, *args, **kwargs):
Command.__init__(self, *args, **kwargs)
self.pages = []
self.key = []
self.sortkey = ''
@property
def totallen(self):
""" Return the total number of entries generated by this entry """
total = 1
for item in self:
total += item.totallen
return total
def __repr__(self):
return '%s%s --> %s' % (''.join([x.source for x in self.key]),
', '.join([str(x) for x in self.pages]),
Command.__repr__(self))
class IndexGroup(list):
title = None
def invoke(self, tex):
if isinstance(self, Environment):
Environment.invoke(self, tex)
else:
Command.invoke(self, tex)
self.attributes['title'] = self.ownerDocument.createElement('indexname').expand(tex)
@property
def groups(self):
"""
Group index entries into batches according to the first letter
"""
batches = []
current = ''
for item in self:
try:
label = title = item.sortkey[0].upper()
if title in encoding.stringletters():
pass
elif title == '_':
title = '_ (Underscore)'
else:
label = title = 'Symbols'
except IndexError:
label = title = 'Symbols'
if current != title:
newgroup = self.IndexGroup()
newgroup.title = title
newgroup.id = label
batches.append(newgroup)
current = title
batches[-1].append(item)
for item in batches:
item[:] = self.splitColumns(item,
self.ownerDocument.config['document']['index-columns'])
return batches
def splitColumns(self, items, cols):
"""
Divide the index entries into the specified number of columns
Required Arguments:
items -- list of column entries
cols -- number of columns to create
Returns:
list of length `cols' containing groups of column entries
"""
entries = [(0,0)]
# Find the total number of entries
grandtotal = 0
for item in items:
entries.append((item.totallen, item))
grandtotal += entries[-1][0]
entries.pop(0)
entries.reverse()
# Get total number of entries per column
coltotal = int(grandtotal / cols)
# Group entries into columns
current = 0
output = [[]]
for num, item in entries:
current += num
if len(output) >= cols:
output[-1].append(item)
elif current > coltotal:
output.append([item])
current = num
elif current == coltotal:
output[-1].append(item)
output.append([])
current = 0
else:
output[-1].append(item)
output.reverse()
for item in output:
item.reverse()
# Get rid of empty columns
output = [x for x in output if x]
# Pad to the correct number of columns
for i in range(cols-len(output)):
output.append([])
return output
def digest(self, tokens):
""" Sort and group index entries """
if isinstance(self, Environment):
Environment.digest(self, tokens)
if self.macroMode == self.MODE_END:
return
# Throw it all away, we don't need it. We'll be generating
# our own index entries below.
while self.childNodes:
self.pop()
else:
Command.digest(self, tokens)
doc = self.ownerDocument
current = self
entries = sorted(self.ownerDocument.userdata.get('index', []))
prev = IndexEntry([], None)
for item in entries:
# See how many levels we need to add/subtract between this one
# and the previous
common = 0
for prevkey, itemkey in zip(zip(prev.sortkey, prev.key),
zip(item.sortkey, item.key)):
if prevkey == itemkey:
common += 1
continue
break
# print
# print item
# print (prev.key, prev.sortkey), (item.key, item.sortkey), common
# Pop out to the common level
i = common
while i < len(prev.key):
# print 'POP'
current = current.parentNode
i += 1
# Add the appropriate number of levels
i = common
while i < len(item.key):
# print 'ADD', item.sortkey[i]
newidx = self.Index()
newidx.key = item.key[i]
newidx.sortkey = item.sortkey[i]
newidx.parentNode = current
current.append(newidx)
current = newidx
i += 1
# Add the current page and format it
current.pages.append(IndexDestination(item.type, item.node))
if item.format is not None:
text = doc.createTextNode(str(len(current.pages)))
ipn = item.format.getElementsByTagName('index-page-number')
if ipn:
ipn = ipn[0]
ipn.parentNode.replaceChild(text, ipn)
item.node.append(item.format)
else:
text = doc.createTextNode(str(len(current.pages)))
item.node.append(text)
prev = item
class IndexDestination(object):
def __init__(self, type, node):
self._cr_type = type
self._cr_node = node
@property
def see(self):
return self._cr_type == IndexEntry.TYPE_SEE
@property
def seealso(self):
return self._cr_type == IndexEntry.TYPE_SEEALSO
@property
def normal(self):
return not(self.see) and not(self.seealso)
def __getattribute__(self, name):
if name.startswith('_cr_') or name in ['see', 'seealso', 'normal']:
return object.__getattribute__(self, name)
if self._cr_type and name in ['url']:
return None
return getattr(self._cr_node, name)
def __unicode__(self):
return unicode(self._cr_node)
class theindex(IndexUtils, Environment, SectionUtils):
blockType = True
level = Environment.CHAPTER_LEVEL
counter = 'chapter'
class printindex(IndexUtils, Command, SectionUtils):
blockType = True
level = Command.CHAPTER_LEVEL
counter = 'chapter'
class makeindex(Command):
pass
class makeglossary(Command):
pass
class glossary(Command):
args = 'entry:nox'
class index(Command):
args = 'entry:nox'
@property
def textContent(self):
return ''
def invoke(self, tex):
result = Command.invoke(self, tex)
sortkey, key, format = [], [], []
entry = iter(self.attributes['entry'])
current = []
alphanumeric = [Token.CC_OTHER, Token.CC_LETTER, Token.CC_SPACE]
# Parse the index tokens
for tok in entry:
if tok.catcode in alphanumeric:
# Escape character
if tok == '"':
for tok in entry:
current.append(tok)
break
# Entry separator
elif tok == '!':
key.append(current)
if len(sortkey) < len(key):
sortkey.append(current)
current = []
# Sort key separator
elif tok == '@':
sortkey.append(current)
current = []
# Format separator
elif tok == '|':
key.append(current)
if len(sortkey) < len(key):
sortkey.append(current)
current = format
else:
current.append(tok)
continue
# Everything else
current.append(tok)
# Make sure to get the stuff at the end
if not format:
key.append(current)
if len(sortkey) < len(key):
sortkey.append(current)
# Convert the sort keys to strings
for i, item in enumerate(sortkey):
sortkey[i] = tex.expandTokens(item).textContent
# Expand the key tokens
for i, item in enumerate(key):
key[i] = tex.expandTokens(item)
# Get the format element
type = IndexEntry.TYPE_NORMAL
if not format:
format = None
else:
macro = []
while format and format[0].catcode == Token.CC_LETTER:
macro.append(format.pop(0))
if macro:
macro = ''.join(macro)
format.insert(0, EscapeSequence(macro))
if macro == 'see':
type = IndexEntry.TYPE_SEE
elif macro == 'seealso':
type = IndexEntry.TYPE_SEEALSO
format.append(EscapeSequence('index-page-number'))
format = tex.expandTokens(format)
# Store the index information in the document
userdata = self.ownerDocument.userdata
if 'index' not in userdata:
userdata['index'] = []
userdata['index'].append(IndexEntry(key, self, sortkey, format, type))
return result
class IndexEntry(object):
"""
Utility class used to assist in the sorting of index entries
"""
TYPE_NORMAL = 0
TYPE_SEE = 1
TYPE_SEEALSO = 2
def __init__(self, key, node, sortkey=None, format=None, type=0):
"""
Required Arguments:
key -- a list of keys for the index entry
node -- the node of the document that the index entry is
associated with
sortkey -- a list of sort keys, one per key, to be used for
sorting instead of the key values
format -- formatting that should be used to format the
destination of the index entry
type -- the type of entry that this is: TYPE_NORMAL, TYPE_SEE,
or TYPE_SEEALSO
"""
self.key = key
if not sortkey:
self.sortkey = key
else:
self.sortkey = []
for i, sk in enumerate(sortkey):
if sk is None:
self.sortkey.append(key[i].textContent)
else:
self.sortkey.append(sk)
self.format = format
self.node = node
self.type = type
@property
def see(self):
return self.type == type(self).TYPE_SEE
@property
def seealso(self):
return self.type == type(self).TYPE_SEEALSO
@property
def normal(self):
return not(self.see) and not(self.seealso)
def __cmp__(self, other):
result = cmp(zip([collator(x) for x in self.sortkey if isinstance(x, basestring)],
[collator(x.textContent) for x in self.key],
self.key),
zip([collator(x) for x in other.sortkey if isinstance(x, basestring)],
[collator(x.textContent) for x in other.key],
other.key))
if result == 0 and len(self.key) != len(other.key):
return cmp(len(self.key), len(other.key))
return result
def __repr__(self):
if self.format is None:
return ' '.join(['@'.join(self.sortkey),
'!'.join([x.source for x in self.key])])
else:
return ' '.join(['@'.join(self.sortkey),
'!'.join([x.source for x in self.key]),
' '.join([x.source for x in self.format])])
def __str__(self):
return repr(self)
class IndexPageNumber(Command):
macroName = 'index-page-number'
| mit | 1,139,842,964,231,324,900 | 30.183962 | 92 | 0.515731 | false |
nitzmahone/ansible | test/units/modules/network/eos/test_eos_banner.py | 55 | 3617 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.eos import eos_banner
from units.modules.utils import set_module_args
from .eos_module import TestEosModule, load_fixture
class TestEosBannerModule(TestEosModule):
module = eos_banner
def setUp(self):
super(TestEosBannerModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.eos.eos_banner.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible.modules.network.eos.eos_banner.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestEosBannerModule, self).tearDown()
self.mock_run_commands.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
if transport == 'cli':
self.run_commands.return_value = [load_fixture('eos_banner_show_banner.txt').strip()]
else:
self.run_commands.return_value = [{'loginBanner': load_fixture('eos_banner_show_banner.txt').strip()}]
self.load_config.return_value = dict(diff=None, session='session')
def test_eos_banner_create_with_cli_transport(self):
set_module_args(dict(banner='login', text='test\nbanner\nstring',
transport='cli'))
commands = ['banner login', 'test', 'banner', 'string', 'EOF']
self.execute_module(changed=True, commands=commands)
def test_eos_banner_remove_with_cli_transport(self):
set_module_args(dict(banner='login', state='absent', transport='cli'))
commands = ['no banner login']
self.execute_module(changed=True, commands=commands)
def test_eos_banner_create_with_eapi_transport(self):
set_module_args(dict(banner='login', text='test\nbanner\nstring',
transport='eapi'))
commands = ['banner login']
inputs = ['test\nbanner\nstring']
self.execute_module(changed=True, commands=commands, inputs=inputs, transport='eapi')
def test_eos_banner_remove_with_eapi_transport(self):
set_module_args(dict(banner='login', state='absent', transport='eapi'))
commands = ['no banner login']
self.execute_module(changed=True, commands=commands, transport='eapi')
def test_eos_banner_nochange_with_cli_transport(self):
banner_text = load_fixture('eos_banner_show_banner.txt').strip()
set_module_args(dict(banner='login', text=banner_text, transport='cli'))
self.execute_module()
def test_eos_banner_nochange_with_eapi_transport(self):
banner_text = load_fixture('eos_banner_show_banner.txt').strip()
set_module_args(dict(banner='login', text=banner_text, transport='eapi'))
self.execute_module(transport='eapi')
| gpl-3.0 | -7,322,685,729,992,643,000 | 42.059524 | 114 | 0.683163 | false |
jacobq/csci5221-viro-project | tests/unit/lib/mock_socket_test.py | 45 | 2309 | #!/usr/bin/env python
#
# Copyright 2011-2012 Andreas Wundsam
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os.path
from copy import copy
sys.path.append(os.path.dirname(__file__) + "/../../..")
from pox.lib.mock_socket import MockSocket
class MockSocketTest(unittest.TestCase):
def setUp(self):
pass
def test_simple_send(self):
(a, b) = MockSocket.pair()
a.send("Hallo")
self.assertEquals(b.recv(), "Hallo")
b.send("Servus")
self.assertEquals(a.recv(), "Servus")
def test_ready_to_recv(self):
(a, b) = MockSocket.pair()
a.send("Hallo")
self.assertFalse(a.ready_to_recv())
self.assertTrue(b.ready_to_recv())
self.assertEquals(b.recv(), "Hallo")
self.assertFalse(b.ready_to_recv())
self.assertFalse(a.ready_to_recv())
b.send("Servus")
self.assertTrue(a.ready_to_recv())
self.assertEquals(a.recv(), "Servus")
self.assertFalse(a.ready_to_recv())
def test_on_ready_to_recv(self):
self.seen_size = -1
self.called = 0
def ready(socket, size):
self.called += 1
self.seen_size = size
(a, b) = MockSocket.pair()
b.set_on_ready_to_recv(ready)
self.assertEquals(self.called, 0)
a.send("Hallo")
self.assertEquals(self.called, 1)
self.assertEquals(self.seen_size, 5)
# check that it doesn't get called on the other sockets data
b.send("Huhu")
self.assertEquals(self.called, 1)
def test_empty_recv(self):
""" test_empty_recv: Check that empty reads on socket return ""
Note that this is actually non-sockety behavior and should probably be changed. This
test documents it as intended for now, though
"""
(a, b) = MockSocket.pair()
self.assertEquals(a.recv(), "")
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 6,304,971,189,688,081,000 | 28.602564 | 91 | 0.673019 | false |
cmisenas/artos | PyARTOS/config.py | 2 | 7158 | """Reads and writes the configuration file of PyARTOS.
This module provides access to the configuration given in the file 'pyartos.ini',
which is searched for in the current working directory. To access the configuration
options, the 'config' object in this module's dictionary can be used, which is an
instance of the private _Config class.
"""
try:
# Python 3
from configparser import ConfigParser as SafeConfigParser
except:
# Python 2
from ConfigParser import SafeConfigParser
import os.path
class _Config(SafeConfigParser):
"""Class to access configuration options of PyARTOS.
Rather than instantiating this class, use the 'config' instance available in the dictionary
of the module.
"""
def __init__(self, iniFile):
"""Initializes a new configuration instance and sets appropriate default values."""
SafeConfigParser.__init__(self, allow_no_value = True)
self.iniFileName = iniFile
self.read(iniFile)
self.defaults = { }
self.applyDefaults({
'libartos' : {
'model_dir' : _Config.findModelDir(),
'library_path' : None,
'debug' : 0,
},
'ImageNet' : {
'repository_directory' : None
},
'GUI' : {
'max_video_width' : 640,
'max_video_height' : 480
}
});
def applyDefaults(self, defaultDict):
"""Applies default values from a dictionary.
defaultDict - Dictionary whose keys are section names and whose values are dictionaries
with the default configuration options for that section.
"""
for section in defaultDict:
if not section in self.defaults:
self.defaults[section] = { }
if not self.has_section(section):
self.add_section(section)
for option in defaultDict[section]:
self.defaults[section][option] = defaultDict[section][option]
if not self.has_option(section, option):
self.set(section, option, None)
def get(self, section, option, useDefaults = True):
"""Get an option value for the named section.
If useDefaults is set to true, this function falls back to default values
if the given option hasn't been specified in the configuration file or is empty.
"""
try:
value = SafeConfigParser.get(self, section, option)
except:
value = None
if useDefaults and ((value is None) or (value == '')) \
and (section in self.defaults) and (option in self.defaults[section]):
value = self.defaults[section][option]
return value
def getInt(self, section, option, useDefaults = True, min = None, max = None):
"""Get an option value for the named section as integer.
If useDefaults is set to true, this function falls back to default values if the
given option hasn't been specified in the configuration file or isn't an integral value.
The range of allowable values can be specified using the min and max parameters. The value
from the configuration file will be clipped to that range.
"""
try:
value = int(SafeConfigParser.get(self, section, option))
except:
value = None
if useDefaults and ((value is None) or (value == '')) \
and (section in self.defaults) and (option in self.defaults[section]):
value = self.defaults[section][option]
if not value is None:
if (not min is None) and (value < min):
value = min
elif (not max is None) and (value > max):
value = max
return value
def getBool(self, section, option, useDefaults = True):
"""Get an option value for the named section as boolean value.
If useDefaults is set to true, this function falls back to default values if the
given option hasn't been specified in the configuration file or can't be interpreted as a boolean value.
Empty strings, the strings 'no', 'off' and 'false' as well as the number 0 will be interpreted as False.
Every number different from 0 as well as the strings 'yes', 'on' and 'true' will be interpreted as True.
"""
def toBool(str):
try:
intval = int(str)
return (intval != 0)
except:
pass
try:
if str.lower() in ('', 'no', 'off', 'false'):
return False
elif str.lower() in ('yes', 'on', 'true'):
return True
except:
pass
return None
value = toBool(SafeConfigParser.get(self, section, option))
if useDefaults and ((value is None) or (value == '')) \
and (section in self.defaults) and (option in self.defaults[section]):
value = toBool(self.defaults[section][option])
return value
def is_set(self, section, option):
"""Determines if a given option has been set in the configuration file (regardless of default values)."""
try:
value = SafeConfigParser.get(self, section, option)
except:
value = None
return (not value is None) and (value != '')
def differentFromDefault(self, section, option):
"""Determines if a given configuration option differs from it's default value."""
if (not section in self.defaults) or (not option in self.defaults[section]):
return True
else:
return (self.get(section, option) != self.defaults[section][option])
def save(self):
"""Writes the configuration options to the file they were read from."""
with open(self.iniFileName, 'w') as file:
self.write(file)
@staticmethod
def findModelDir():
"""Searches for a potential model directory.
Searches for a directory named 'models' in the current working directory, one level above
the current working directory, the executed script's directory and the packages's directory.
The first match is returned or an empty string if no directory has been found.
"""
basedir = os.path.dirname(os.path.abspath(__file__))
tests = ['models', os.path.join('..','models'), os.path.join(basedir,'..','models'), os.path.join(basedir,'models')]
for t in tests:
if (os.path.isdir(t)):
return os.path.realpath(t)
else:
return ''
config = _Config('pyartos.ini') | gpl-3.0 | 4,831,734,620,645,749,000 | 36.702703 | 124 | 0.562448 | false |
JorgeCoock/django | django/contrib/gis/maps/google/__init__.py | 287 | 2771 | """
This module houses the GoogleMap object, used for generating
the needed javascript to embed Google Maps in a Web page.
Google(R) is a registered trademark of Google, Inc. of Mountain View, California.
Example:
* In the view:
return render_to_response('template.html', {'google' : GoogleMap(key="abcdefg")})
* In the template:
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
{{ google.xhtml }}
<head>
<title>Google Maps via GeoDjango</title>
{{ google.style }}
{{ google.scripts }}
</head>
{{ google.body }}
<div id="{{ google.dom_id }}" style="width:600px;height:400px;"></div>
</body>
</html>
Note: If you want to be more explicit in your templates, the following are
equivalent:
{{ google.body }} => "<body {{ google.onload }} {{ google.onunload }}>"
{{ google.xhtml }} => "<html xmlns="http://www.w3.org/1999/xhtml" {{ google.xmlns }}>"
{{ google.style }} => "<style>{{ google.vml_css }}</style>"
Explanation:
- The `xhtml` property provides the correct XML namespace needed for
Google Maps to operate in IE using XHTML. Google Maps on IE uses
VML to draw polylines. Returns, by default:
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
- The `style` property provides the correct style tag for the CSS
properties required by Google Maps on IE:
<style type="text/css">v\:* {behavior:url(#default#VML);}</style>
- The `scripts` property provides the necessary <script> tags for
including the Google Maps javascript, as well as including the
generated javascript.
- The `body` property provides the correct attributes for the
body tag to load the generated javascript. By default, returns:
<body onload="gmap_load()" onunload="GUnload()">
- The `dom_id` property returns the DOM id for the map. Defaults to "map".
The following attributes may be set or customized in your local settings:
* GOOGLE_MAPS_API_KEY: String of your Google Maps API key. These are tied
to a domain. May be obtained from http://www.google.com/apis/maps/
* GOOGLE_MAPS_API_VERSION (optional): Defaults to using "2.x"
* GOOGLE_MAPS_URL (optional): Must have a substitution ('%s') for the API
version.
"""
from django.contrib.gis.maps.google.gmap import GoogleMap, GoogleMapSet
from django.contrib.gis.maps.google.overlays import (
GEvent, GIcon, GMarker, GPolygon, GPolyline,
)
from django.contrib.gis.maps.google.zoom import GoogleZoom
__all__ = [
'GoogleMap', 'GoogleMapSet', 'GEvent', 'GIcon', 'GMarker', 'GPolygon',
'GPolyline', 'GoogleZoom',
]
| bsd-3-clause | -1,248,986,223,294,137,300 | 39.15942 | 92 | 0.666546 | false |
urandu/mfl_api | chul/filters.py | 1 | 2760 | import django_filters
from .models import (
CommunityHealthUnit,
CommunityHealthWorker,
CommunityHealthWorkerContact,
Status,
CommunityHealthUnitContact,
Approver,
CommunityHealthUnitApproval,
CommunityHealthWorkerApproval,
ApprovalStatus
)
from common.filters.filter_shared import CommonFieldsFilterset
class ApproverFilter(CommonFieldsFilterset):
class Meta(object):
model = Approver
class CommunityHealthUnitApprovalFilter(CommonFieldsFilterset):
class Meta(object):
model = CommunityHealthUnitApproval
class CommunityHealthWorkerApprovalFilter(CommonFieldsFilterset):
class Meta(object):
model = CommunityHealthWorkerApproval
class ApprovalStatusFilter(CommonFieldsFilterset):
class Meta(object):
model = ApprovalStatus
class StatusFilter(CommonFieldsFilterset):
name = django_filters.CharFilter(lookup_type='icontains')
description = django_filters.CharFilter(lookup_type='icontains')
class Meta(object):
model = Status
class CommunityHealthUnitContactFilter(CommonFieldsFilterset):
health_unit = django_filters.AllValuesFilter(lookup_type='exact')
contact = django_filters.AllValuesFilter(lookup_type='exact')
class Meta(object):
model = CommunityHealthUnitContact
class CommunityHealthUnitFilter(CommonFieldsFilterset):
name = django_filters.CharFilter(lookup_type='icontains')
facility = django_filters.AllValuesFilter(lookup_type='exact')
ward = django_filters.CharFilter(name='community__ward')
constituency = django_filters.CharFilter(
name='community_ward__constituency')
county = django_filters.CharFilter(
name='community__ward__constituency__county')
class Meta(object):
model = CommunityHealthUnit
class CommunityHealthWorkerFilter(CommonFieldsFilterset):
first_name = django_filters.CharFilter(lookup_type='icontains')
last_name = django_filters.CharFilter(lookup_type='icontains')
username = django_filters.CharFilter(lookup_type='icontains')
id_number = django_filters.CharFilter(lookup_type='exact')
ward = django_filters.CharFilter(name='health_unit__community__ward')
constituency = django_filters.CharFilter(
name='health_unit__community_ward__constituency')
county = django_filters.CharFilter(
name='health_unit__community__ward__constituency__county')
class Meta(object):
model = CommunityHealthWorker
class CommunityHealthWorkerContactFilter(CommonFieldsFilterset):
health_worker = django_filters.AllValuesFilter(lookup_type='icontains')
contact = django_filters.AllValuesFilter(lookup_type='icontains')
class Meta(object):
model = CommunityHealthWorkerContact
| mit | 3,575,506,609,307,269,600 | 30.724138 | 75 | 0.752174 | false |
Pretio/boto | boto/gs/lifecycle.py | 157 | 9086 | # Copyright 2013 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.exception import InvalidLifecycleConfigError
# Relevant tags for the lifecycle configuration XML document.
LIFECYCLE_CONFIG = 'LifecycleConfiguration'
RULE = 'Rule'
ACTION = 'Action'
DELETE = 'Delete'
CONDITION = 'Condition'
AGE = 'Age'
CREATED_BEFORE = 'CreatedBefore'
NUM_NEWER_VERSIONS = 'NumberOfNewerVersions'
IS_LIVE = 'IsLive'
# List of all action elements.
LEGAL_ACTIONS = [DELETE]
# List of all action parameter elements.
LEGAL_ACTION_PARAMS = []
# List of all condition elements.
LEGAL_CONDITIONS = [AGE, CREATED_BEFORE, NUM_NEWER_VERSIONS, IS_LIVE]
# Dictionary mapping actions to supported action parameters for each action.
LEGAL_ACTION_ACTION_PARAMS = {
DELETE: [],
}
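# Illustrative sketch (condition value is an assumption): building a lifecycle
# configuration from the classes below and handing it to a bucket via boto's
# configure_lifecycle method, as described in LifecycleConfig.add_rule.
#
#   config = LifecycleConfig()
#   config.add_rule(DELETE, {}, {AGE: '365'})
#   # bucket.configure_lifecycle(config)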
class Rule(object):
"""
A lifecycle rule for a bucket.
:ivar action: Action to be taken.
:ivar action_params: A dictionary of action specific parameters. Each item
in the dictionary represents the name and value of an action parameter.
:ivar conditions: A dictionary of conditions that specify when the action
should be taken. Each item in the dictionary represents the name and value
of a condition.
"""
def __init__(self, action=None, action_params=None, conditions=None):
self.action = action
self.action_params = action_params or {}
self.conditions = conditions or {}
# Name of the current enclosing tag (used to validate the schema).
self.current_tag = RULE
def validateStartTag(self, tag, parent):
"""Verify parent of the start tag."""
if self.current_tag != parent:
raise InvalidLifecycleConfigError(
'Invalid tag %s found inside %s tag' % (tag, self.current_tag))
def validateEndTag(self, tag):
"""Verify end tag against the start tag."""
if tag != self.current_tag:
raise InvalidLifecycleConfigError(
'Mismatched start and end tags (%s/%s)' %
(self.current_tag, tag))
def startElement(self, name, attrs, connection):
if name == ACTION:
self.validateStartTag(name, RULE)
elif name in LEGAL_ACTIONS:
self.validateStartTag(name, ACTION)
# Verify there is only one action tag in the rule.
if self.action is not None:
raise InvalidLifecycleConfigError(
'Only one action tag is allowed in each rule')
self.action = name
elif name in LEGAL_ACTION_PARAMS:
# Make sure this tag is found in an action tag.
if self.current_tag not in LEGAL_ACTIONS:
raise InvalidLifecycleConfigError(
'Tag %s found outside of action' % name)
# Make sure this tag is allowed for the current action tag.
if name not in LEGAL_ACTION_ACTION_PARAMS[self.action]:
raise InvalidLifecycleConfigError(
'Tag %s not allowed in action %s' % (name, self.action))
elif name == CONDITION:
self.validateStartTag(name, RULE)
elif name in LEGAL_CONDITIONS:
self.validateStartTag(name, CONDITION)
            # Verify there are no duplicate conditions.
if name in self.conditions:
raise InvalidLifecycleConfigError(
'Found duplicate conditions %s' % name)
else:
raise InvalidLifecycleConfigError('Unsupported tag ' + name)
self.current_tag = name
def endElement(self, name, value, connection):
self.validateEndTag(name)
if name == RULE:
# We have to validate the rule after it is fully populated because
# the action and condition elements could be in any order.
self.validate()
elif name == ACTION:
self.current_tag = RULE
elif name in LEGAL_ACTIONS:
self.current_tag = ACTION
elif name in LEGAL_ACTION_PARAMS:
self.current_tag = self.action
# Add the action parameter name and value to the dictionary.
self.action_params[name] = value.strip()
elif name == CONDITION:
self.current_tag = RULE
elif name in LEGAL_CONDITIONS:
self.current_tag = CONDITION
# Add the condition name and value to the dictionary.
self.conditions[name] = value.strip()
else:
raise InvalidLifecycleConfigError('Unsupported end tag ' + name)
def validate(self):
"""Validate the rule."""
if not self.action:
raise InvalidLifecycleConfigError(
'No action was specified in the rule')
if not self.conditions:
raise InvalidLifecycleConfigError(
'No condition was specified for action %s' % self.action)
def to_xml(self):
"""Convert the rule into XML string representation."""
s = '<' + RULE + '>'
s += '<' + ACTION + '>'
if self.action_params:
s += '<' + self.action + '>'
for param in LEGAL_ACTION_PARAMS:
if param in self.action_params:
s += ('<' + param + '>' + self.action_params[param] + '</'
+ param + '>')
s += '</' + self.action + '>'
else:
s += '<' + self.action + '/>'
s += '</' + ACTION + '>'
s += '<' + CONDITION + '>'
for condition in LEGAL_CONDITIONS:
if condition in self.conditions:
s += ('<' + condition + '>' + self.conditions[condition] + '</'
+ condition + '>')
s += '</' + CONDITION + '>'
s += '</' + RULE + '>'
return s
class LifecycleConfig(list):
"""
A container of rules associated with a lifecycle configuration.
"""
def __init__(self):
# Track if root tag has been seen.
self.has_root_tag = False
def startElement(self, name, attrs, connection):
if name == LIFECYCLE_CONFIG:
if self.has_root_tag:
raise InvalidLifecycleConfigError(
'Only one root tag is allowed in the XML')
self.has_root_tag = True
elif name == RULE:
if not self.has_root_tag:
raise InvalidLifecycleConfigError('Invalid root tag ' + name)
rule = Rule()
self.append(rule)
return rule
else:
raise InvalidLifecycleConfigError('Unsupported tag ' + name)
def endElement(self, name, value, connection):
if name == LIFECYCLE_CONFIG:
pass
else:
raise InvalidLifecycleConfigError('Unsupported end tag ' + name)
def to_xml(self):
"""Convert LifecycleConfig object into XML string representation."""
s = '<?xml version="1.0" encoding="UTF-8"?>'
s += '<' + LIFECYCLE_CONFIG + '>'
for rule in self:
s += rule.to_xml()
s += '</' + LIFECYCLE_CONFIG + '>'
return s
def add_rule(self, action, action_params, conditions):
"""
Add a rule to this Lifecycle configuration. This only adds the rule to
the local copy. To install the new rule(s) on the bucket, you need to
pass this Lifecycle config object to the configure_lifecycle method of
the Bucket object.
:type action: str
:param action: Action to be taken.
:type action_params: dict
:param action_params: A dictionary of action specific parameters. Each
item in the dictionary represents the name and value of an action
parameter.
:type conditions: dict
:param conditions: A dictionary of conditions that specify when the
action should be taken. Each item in the dictionary represents the name
and value of a condition.
"""
rule = Rule(action, action_params, conditions)
self.append(rule)
| mit | -5,630,623,549,038,599,000 | 39.026432 | 79 | 0.607418 | false |
HybridF5/jacket | jacket/db/sqlalchemy/migrate_repo/versions/036_compute_251_add_numa_topology_to_comput_nodes.py | 81 | 1166 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
compute_nodes = Table('compute_nodes', meta, autoload=True)
shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
numa_topology = Column('numa_topology', Text, nullable=True)
shadow_numa_topology = Column('numa_topology', Text, nullable=True)
compute_nodes.create_column(numa_topology)
shadow_compute_nodes.create_column(shadow_numa_topology)
| apache-2.0 | 7,172,868,707,938,977,000 | 37.866667 | 78 | 0.736707 | false |
ehirt/odoo | addons/resource/__init__.py | 448 | 1086 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import resource
import faces
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 8,884,400,627,985,877,000 | 39.222222 | 79 | 0.611418 | false |
chen0031/nupic | nupic/bindings/__init__.py | 33 | 1033 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
__import__("pkg_resources").declare_namespace(__name__)
| agpl-3.0 | 8,889,139,551,205,847,000 | 45.954545 | 72 | 0.652469 | false |
crowdhackathon-transport/optimizers | crowdstance-api/venv/lib/python2.7/site-packages/setuptools/command/bdist_egg.py | 286 | 18718 | """setuptools.command.bdist_egg
Build .egg distributions"""
# This module should be kept compatible with Python 2.3
import sys, os, marshal
from setuptools import Command
from distutils.dir_util import remove_tree, mkpath
try:
# Python 2.7 or >=3.2
from sysconfig import get_path, get_python_version
def _get_purelib():
return get_path("purelib")
except ImportError:
from distutils.sysconfig import get_python_lib, get_python_version
def _get_purelib():
return get_python_lib(False)
from distutils import log
from distutils.errors import DistutilsSetupError
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from types import CodeType
from setuptools.compat import basestring, next
from setuptools.extension import Library
def strip_module(filename):
if '.' in filename:
filename = os.path.splitext(filename)[0]
if filename.endswith('module'):
filename = filename[:-6]
return filename
def write_stub(resource, pyfile):
f = open(pyfile,'w')
f.write('\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __loader__, __file__",
" import sys, pkg_resources, imp",
" __file__ = pkg_resources.resource_filename(__name__,%r)"
% resource,
" __loader__ = None; del __bootstrap__, __loader__",
" imp.load_dynamic(__name__,__file__)",
"__bootstrap__()",
"" # terminal \n
]))
f.close()
# stub __init__.py for packages distributed without one
NS_PKG_STUB = '__import__("pkg_resources").declare_namespace(__name__)'
class bdist_egg(Command):
description = "create an \"egg\" distribution"
user_options = [
('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_build_platform()),
('exclude-source-files', None,
"remove all .py files from the generated egg"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = [
'keep-temp', 'skip-build', 'exclude-source-files'
]
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.egg_output = None
self.exclude_source_files = None
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'egg')
if self.plat_name is None:
self.plat_name = get_build_platform()
self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
if self.egg_output is None:
# Compute filename of the output egg
basename = Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
self.distribution.has_ext_modules() and self.plat_name
).egg_name()
self.egg_output = os.path.join(self.dist_dir, basename+'.egg')
def do_install_data(self):
# Hack for packages that install data to install's --install-lib
self.get_finalized_command('install').install_lib = self.bdist_dir
site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
old, self.distribution.data_files = self.distribution.data_files,[]
for item in old:
if isinstance(item,tuple) and len(item)==2:
if os.path.isabs(item[0]):
realpath = os.path.realpath(item[0])
normalized = os.path.normcase(realpath)
if normalized==site_packages or normalized.startswith(
site_packages+os.sep
):
item = realpath[len(site_packages)+1:], item[1]
# XXX else: raise ???
self.distribution.data_files.append(item)
try:
log.info("installing package data to %s" % self.bdist_dir)
self.call_command('install_data', force=0, root=None)
finally:
self.distribution.data_files = old
def get_outputs(self):
return [self.egg_output]
def call_command(self,cmdname,**kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname,self.bdist_dir)
kw.setdefault('skip_build',self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
def run(self):
# Generate metadata first
self.run_command("egg_info")
# We run install_lib before install_data, because some data hacks
# pull their data path from the install_lib command.
log.info("installing library code to %s" % self.bdist_dir)
instcmd = self.get_finalized_command('install')
old_root = instcmd.root; instcmd.root = None
if self.distribution.has_c_libraries() and not self.skip_build:
self.run_command('build_clib')
cmd = self.call_command('install_lib', warn_dir=0)
instcmd.root = old_root
all_outputs, ext_outputs = self.get_ext_outputs()
self.stubs = []
to_compile = []
for (p,ext_name) in enumerate(ext_outputs):
filename,ext = os.path.splitext(ext_name)
pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py')
self.stubs.append(pyfile)
log.info("creating stub loader for %s" % ext_name)
if not self.dry_run:
write_stub(os.path.basename(ext_name), pyfile)
to_compile.append(pyfile)
ext_outputs[p] = ext_name.replace(os.sep,'/')
to_compile.extend(self.make_init_files())
if to_compile:
cmd.byte_compile(to_compile)
if self.distribution.data_files:
self.do_install_data()
# Make the EGG-INFO directory
archive_root = self.bdist_dir
egg_info = os.path.join(archive_root,'EGG-INFO')
self.mkpath(egg_info)
if self.distribution.scripts:
script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s" % script_dir)
self.call_command('install_scripts',install_dir=script_dir,no_ep=1)
self.copy_metadata_to(egg_info)
native_libs = os.path.join(egg_info, "native_libs.txt")
if all_outputs:
log.info("writing %s" % native_libs)
if not self.dry_run:
ensure_directory(native_libs)
libs_file = open(native_libs, 'wt')
libs_file.write('\n'.join(all_outputs))
libs_file.write('\n')
libs_file.close()
elif os.path.isfile(native_libs):
log.info("removing %s" % native_libs)
if not self.dry_run:
os.unlink(native_libs)
write_safety_flag(
os.path.join(archive_root,'EGG-INFO'), self.zip_safe()
)
if os.path.exists(os.path.join(self.egg_info,'depends.txt')):
log.warn(
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
if self.exclude_source_files:
self.zap_pyfiles()
# Make the archive
make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
dry_run=self.dry_run, mode=self.gen_header())
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution,'dist_files',[]).append(
('bdist_egg',get_python_version(),self.egg_output))
def zap_pyfiles(self):
log.info("Removing .py files from temporary directory")
for base,dirs,files in walk_egg(self.bdist_dir):
for name in files:
if name.endswith('.py'):
path = os.path.join(base,name)
log.debug("Deleting %s", path)
os.unlink(path)
def zip_safe(self):
safe = getattr(self.distribution,'zip_safe',None)
if safe is not None:
return safe
log.warn("zip_safe flag not set; analyzing archive contents...")
return analyze_egg(self.bdist_dir, self.stubs)
def make_init_files(self):
"""Create missing package __init__ files"""
init_files = []
for base,dirs,files in walk_egg(self.bdist_dir):
if base==self.bdist_dir:
# don't put an __init__ in the root
continue
for name in files:
if name.endswith('.py'):
if '__init__.py' not in files:
pkg = base[len(self.bdist_dir)+1:].replace(os.sep,'.')
if self.distribution.has_contents_for(pkg):
log.warn("Creating missing __init__.py for %s",pkg)
filename = os.path.join(base,'__init__.py')
if not self.dry_run:
f = open(filename,'w'); f.write(NS_PKG_STUB)
f.close()
init_files.append(filename)
break
else:
# not a package, don't traverse to subdirectories
dirs[:] = []
return init_files
def gen_header(self):
epm = EntryPoint.parse_map(self.distribution.entry_points or '')
ep = epm.get('setuptools.installation',{}).get('eggsecutable')
if ep is None:
return 'w' # not an eggsecutable, do it the usual way.
if not ep.attrs or ep.extras:
raise DistutilsSetupError(
"eggsecutable entry point (%r) cannot have 'extras' "
"or refer to a module" % (ep,)
)
pyver = sys.version[:3]
pkg = ep.module_name
full = '.'.join(ep.attrs)
base = ep.attrs[0]
basename = os.path.basename(self.egg_output)
header = (
"#!/bin/sh\n"
'if [ `basename $0` = "%(basename)s" ]\n'
'then exec python%(pyver)s -c "'
"import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
"from %(pkg)s import %(base)s; sys.exit(%(full)s())"
'" "$@"\n'
'else\n'
' echo $0 is not the correct name for this egg file.\n'
' echo Please rename it back to %(basename)s and try again.\n'
' exec false\n'
'fi\n'
) % locals()
if not self.dry_run:
mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
f = open(self.egg_output, 'w')
f.write(header)
f.close()
return 'a'
def copy_metadata_to(self, target_dir):
"Copy metadata (egg info) to the target_dir"
# normalize the path (so that a forward-slash in egg_info will
# match using startswith below)
norm_egg_info = os.path.normpath(self.egg_info)
prefix = os.path.join(norm_egg_info,'')
for path in self.ei_cmd.filelist.files:
if path.startswith(prefix):
target = os.path.join(target_dir, path[len(prefix):])
ensure_directory(target)
self.copy_file(path, target)
def get_ext_outputs(self):
"""Get a list of relative paths to C extensions in the output distro"""
all_outputs = []
ext_outputs = []
paths = {self.bdist_dir:''}
for base, dirs, files in os.walk(self.bdist_dir):
for filename in files:
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
all_outputs.append(paths[base]+filename)
for filename in dirs:
paths[os.path.join(base,filename)] = paths[base]+filename+'/'
if self.distribution.has_ext_modules():
build_cmd = self.get_finalized_command('build_ext')
for ext in build_cmd.extensions:
if isinstance(ext,Library):
continue
fullname = build_cmd.get_ext_fullname(ext.name)
filename = build_cmd.get_ext_filename(fullname)
if not os.path.basename(filename).startswith('dl-'):
if os.path.exists(os.path.join(self.bdist_dir,filename)):
ext_outputs.append(filename)
return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
"""Walk an unpacked egg's contents, skipping the metadata directory"""
walker = os.walk(egg_dir)
base,dirs,files = next(walker)
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base,dirs,files
for bdf in walker:
yield bdf
def analyze_egg(egg_dir, stubs):
# check for existing flag in EGG-INFO
for flag,fn in safety_flags.items():
if os.path.exists(os.path.join(egg_dir,'EGG-INFO',fn)):
return flag
if not can_scan(): return False
safe = True
for base, dirs, files in walk_egg(egg_dir):
for name in files:
if name.endswith('.py') or name.endswith('.pyw'):
continue
elif name.endswith('.pyc') or name.endswith('.pyo'):
# always scan, even if we already know we're not safe
safe = scan_module(egg_dir, base, name, stubs) and safe
return safe
def write_safety_flag(egg_dir, safe):
# Write or remove zip safety flag file(s)
for flag,fn in safety_flags.items():
fn = os.path.join(egg_dir, fn)
if os.path.exists(fn):
if safe is None or bool(safe) != flag:
os.unlink(fn)
elif safe is not None and bool(safe)==flag:
f=open(fn,'wt'); f.write('\n'); f.close()
safety_flags = {
True: 'zip-safe',
False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
"""Check whether module possibly uses unsafe-for-zipfile stuff"""
filename = os.path.join(base,name)
if filename[:-1] in stubs:
return True # Extension module
pkg = base[len(egg_dir)+1:].replace(os.sep,'.')
module = pkg+(pkg and '.' or '')+os.path.splitext(name)[0]
if sys.version_info < (3, 3):
skip = 8 # skip magic & date
else:
skip = 12 # skip magic & date & file size
f = open(filename,'rb'); f.read(skip)
code = marshal.load(f); f.close()
safe = True
symbols = dict.fromkeys(iter_symbols(code))
for bad in ['__file__', '__path__']:
if bad in symbols:
log.warn("%s: module references %s", module, bad)
safe = False
if 'inspect' in symbols:
for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
'getinnerframes', 'getouterframes', 'stack', 'trace'
]:
if bad in symbols:
log.warn("%s: module MAY be using inspect.%s", module, bad)
safe = False
if '__name__' in symbols and '__main__' in symbols and '.' not in module:
if sys.version[:3]=="2.4": # -m works w/zipfiles in 2.5
log.warn("%s: top-level module may be 'python -m' script", module)
safe = False
return safe
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names: yield name
for const in code.co_consts:
if isinstance(const,basestring):
yield const
elif isinstance(const,CodeType):
for name in iter_symbols(const):
yield name
def can_scan():
if not sys.platform.startswith('java') and sys.platform != 'cli':
# CPython, PyPy, etc.
return True
log.warn("Unable to analyze compiled code on this platform.")
log.warn("Please ask the author to include a 'zip_safe'"
" setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
mode='w'
):
"""Create a zip file from all the files under 'base_dir'. The output
zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
Python module (if available) or the InfoZIP "zip" utility (if installed
and found on the default search path). If neither tool is available,
raises DistutilsExecError. Returns the name of the output zip file.
"""
import zipfile
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
def visit(z, dirname, names):
for name in names:
path = os.path.normpath(os.path.join(dirname, name))
if os.path.isfile(path):
p = path[len(base_dir)+1:]
if not dry_run:
z.write(path, p)
log.debug("adding '%s'" % p)
if compress is None:
compress = (sys.version>="2.4") # avoid 2.3 zipimport bug when 64 bits
compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)]
if not dry_run:
z = zipfile.ZipFile(zip_filename, mode, compression=compression)
for dirname, dirs, files in os.walk(base_dir):
visit(z, dirname, files)
z.close()
else:
for dirname, dirs, files in os.walk(base_dir):
visit(None, dirname, files)
return zip_filename
#
| mit | 1,960,047,681,496,302,300 | 32.848101 | 79 | 0.567048 | false |
phenoxim/cinder | cinder/api/v2/snapshots.py | 3 | 6696 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes snapshots api."""
from oslo_log import log as logging
from oslo_utils import strutils
from six.moves import http_client
import webob
from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api.schemas import snapshots as snapshot
from cinder.api import validation
from cinder.api.views import snapshots as snapshot_views
from cinder import utils
from cinder import volume
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
class SnapshotsController(wsgi.Controller):
"""The Snapshots API controller for the OpenStack API."""
_view_builder_class = snapshot_views.ViewBuilder
def __init__(self, ext_mgr=None):
self.volume_api = volume.API()
self.ext_mgr = ext_mgr
super(SnapshotsController, self).__init__()
def show(self, req, id):
"""Return data about the given snapshot."""
context = req.environ['cinder.context']
# Not found exception will be handled at the wsgi level
snapshot = self.volume_api.get_snapshot(context, id)
req.cache_db_snapshot(snapshot)
return self._view_builder.detail(req, snapshot)
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['cinder.context']
LOG.info("Delete snapshot with id: %s", id)
# Not found exception will be handled at the wsgi level
snapshot = self.volume_api.get_snapshot(context, id)
self.volume_api.delete_snapshot(context, snapshot)
return webob.Response(status_int=http_client.ACCEPTED)
def index(self, req):
"""Returns a summary list of snapshots."""
return self._items(req, is_detail=False)
def detail(self, req):
"""Returns a detailed list of snapshots."""
return self._items(req, is_detail=True)
def _items(self, req, is_detail=True):
"""Returns a list of snapshots, transformed through view builder."""
context = req.environ['cinder.context']
        # Pop out non-search options (sort and pagination params) into local variables
search_opts = req.GET.copy()
sort_keys, sort_dirs = common.get_sort_params(search_opts)
marker, limit, offset = common.get_pagination_params(search_opts)
# Filter out invalid options
allowed_search_options = ('status', 'volume_id', 'name')
utils.remove_invalid_filter_options(context, search_opts,
allowed_search_options)
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in search_opts:
search_opts['display_name'] = search_opts.pop('name')
snapshots = self.volume_api.get_all_snapshots(context,
search_opts=search_opts,
marker=marker,
limit=limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
offset=offset)
req.cache_db_snapshots(snapshots.objects)
if is_detail:
snapshots = self._view_builder.detail_list(req, snapshots.objects)
else:
snapshots = self._view_builder.summary_list(req, snapshots.objects)
return snapshots
@wsgi.response(http_client.ACCEPTED)
@validation.schema(snapshot.create)
def create(self, req, body):
"""Creates a new snapshot."""
kwargs = {}
context = req.environ['cinder.context']
snapshot = body['snapshot']
kwargs['metadata'] = snapshot.get('metadata', None)
volume_id = snapshot['volume_id']
volume = self.volume_api.get(context, volume_id)
force = snapshot.get('force', False)
force = strutils.bool_from_string(force, strict=True)
LOG.info("Create snapshot from volume %s", volume_id)
self.validate_name_and_description(snapshot, check_length=False)
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in snapshot:
snapshot['display_name'] = snapshot.pop('name')
if force:
new_snapshot = self.volume_api.create_snapshot_force(
context,
volume,
snapshot.get('display_name'),
snapshot.get('description'),
**kwargs)
else:
new_snapshot = self.volume_api.create_snapshot(
context,
volume,
snapshot.get('display_name'),
snapshot.get('description'),
**kwargs)
req.cache_db_snapshot(new_snapshot)
return self._view_builder.detail(req, new_snapshot)
@validation.schema(snapshot.update)
def update(self, req, id, body):
"""Update a snapshot."""
context = req.environ['cinder.context']
snapshot_body = body['snapshot']
self.validate_name_and_description(snapshot_body, check_length=False)
if 'name' in snapshot_body:
snapshot_body['display_name'] = snapshot_body.pop('name')
if 'description' in snapshot_body:
snapshot_body['display_description'] = snapshot_body.pop(
'description')
# Not found exception will be handled at the wsgi level
snapshot = self.volume_api.get_snapshot(context, id)
volume_utils.notify_about_snapshot_usage(context, snapshot,
'update.start')
self.volume_api.update_snapshot(context, snapshot, snapshot_body)
snapshot.update(snapshot_body)
req.cache_db_snapshot(snapshot)
volume_utils.notify_about_snapshot_usage(context, snapshot,
'update.end')
return self._view_builder.detail(req, snapshot)
def create_resource(ext_mgr):
return wsgi.Resource(SnapshotsController(ext_mgr))
| apache-2.0 | 5,824,212,714,935,135,000 | 37.045455 | 79 | 0.606481 | false |
danwchan/trail_of_cthulhu | mythos_website_upgrade/birthcharacter/admin.py | 1 | 1941 | from django.contrib import admin
from .models import AbilityList, AbilityExamples, OccupationList, DriveList, DriveExamples, AssociatedOccuDrive, AssociatedOccuAbil, SpecialList
# Primary keys you care about
#primary_keys = [
# 'occupation',
# 'drive',
# 'ability'
# ]
# Inline classes to build the editing forms
class AbilityInLine(admin.StackedInline):
model = AssociatedOccuAbil
fk_name = 'associated_occupations'
extra = 0
class OccuInLine(admin.StackedInline):
model = AssociatedOccuDrive
fk_name = 'drive'
extra = 0
class AbilityExInLine(admin.StackedInline):
model = AbilityExamples
fk_name = 'ability'
extra = 0
class DriveExInLine(admin.StackedInline):
model = DriveExamples
fk_name = 'drive'
extra = 0
# ModelAdmin classes to bind it all together representing editing forms
class AbilityAdmin(admin.ModelAdmin):
list_display = ['ability', 'purist', 'pulp', 'rating']
search_fields = ['abilitylist__ability']
inlines = [
AbilityExInLine
]
class OccupationAdmin(admin.ModelAdmin):
list_display = ['occupation', 'purist', 'pulp', 'rating']
search_fields = ['occupationlist__occupation']
inlines = [
AbilityInLine
]
def _abilitylist(self, obj):
return obj.abilitylist.all().count() #just copied this over... I don't know what it does :P
class DriveAdmin(admin.ModelAdmin):
list_display = ['drive', 'purist', 'pulp', 'rating']
search_fields = ['abilitylist__ability']
inlines = [
DriveExInLine,
OccuInLine
]
# Register your models here.
admin.site.register(AbilityList, AbilityAdmin)
admin.site.register(OccupationList, OccupationAdmin)
admin.site.register(DriveList, DriveAdmin)
#TO BUILD
#overview page to see which records are old/poorly perofrming
#formatting to make it prettier
#expand drive examples to all entries and formalize the media source idea
| gpl-3.0 | 8,635,215,398,481,176,000 | 27.544118 | 144 | 0.702731 | false |
mrunge/horizon | openstack_dashboard/dashboards/admin/metering/urls.py | 2 | 1031 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.metering import views
urlpatterns = patterns(
'openstack_dashboard.dashboards.admin.metering.views',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^samples$', views.SamplesView.as_view(), name='samples'),
url(r'^report$', views.ReportView.as_view(), name='report'),
url(r'^report/csv$', views.CsvReportView.as_view(), name='csvreport'))
| apache-2.0 | -5,958,527,509,451,031,000 | 43.826087 | 75 | 0.741028 | false |
befair/sbcatalog | api/flaskapp.py | 2 | 2401 | # This file is part of sbcatalog
#
# sbcatalog is Copyright © 2015 beFair.it
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License version 3, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranties of MERCHANTABILITY,
# SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from eve import Eve
from endpoints import xml_collections_endpoint, geo_collections_endpoint
class XMLEve(Eve):
"""
    This class lets Eve import XML documents.
    It overloads the `collections_endpoint` view function,
    interprets the text/xml Content-Type, and calls the `post` function
    with the forged JSON payload.
"""
def __init__(self, *args, **kw):
"""
        Init Eve and overload endpoints view_functions.
"""
super(XMLEve, self).__init__(*args, **kw)
# TODO: iterate over all resources
resource = 'supplier'
endpoint = resource + "|resource"
geo_resource = 'geosupplier'
geo_endpoint = geo_resource + "|resource"
self.view_functions[endpoint] = xml_collections_endpoint
self.view_functions[geo_endpoint] = geo_collections_endpoint
settings = self.config['DOMAIN'][resource]
geo_settings = self.config['DOMAIN'][geo_resource]
self.add_url_rule(self.api_prefix + '/gdxp/supplier',
endpoint,
view_func=xml_collections_endpoint,
methods=settings['resource_methods'] + ['OPTIONS'])
self.add_url_rule(self.api_prefix + '/geo/supplier',
geo_endpoint,
view_func=geo_collections_endpoint,
methods=geo_settings['resource_methods'] + ['OPTIONS'])
# MIGHT BE USEFUL
# url = '%s/%s' % (self.api_prefix, settings['url'])
# self.add_url_rule(url, endpoint, view_func=gdxp_collections_endpoint,
# methods=settings['resource_methods'] + ['OPTIONS'])
| agpl-3.0 | -4,922,071,561,362,104,000 | 40.37931 | 81 | 0.642917 | false |
ruibarreira/linuxtrail | usr/lib/python2.7/xml/etree/ElementInclude.py | 74 | 5076 | #
# ElementTree
# $Id: ElementInclude.py 3375 2008-02-13 08:05:08Z fredrik $
#
# limited xinclude support for element trees
#
# history:
# 2003-08-15 fl created
# 2003-11-14 fl fixed default loader
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
##
# Limited XInclude support for the ElementTree package.
##
import copy
from . import ElementTree
XINCLUDE = "{http://www.w3.org/2001/XInclude}"
XINCLUDE_INCLUDE = XINCLUDE + "include"
XINCLUDE_FALLBACK = XINCLUDE + "fallback"
##
# Fatal include error.
class FatalIncludeError(SyntaxError):
pass
##
# Default loader. This loader reads an included resource from disk.
#
# @param href Resource reference.
# @param parse Parse mode. Either "xml" or "text".
# @param encoding Optional text encoding.
# @return The expanded resource. If the parse mode is "xml", this
# is an ElementTree instance. If the parse mode is "text", this
# is a Unicode string. If the loader fails, it can return None
# or raise an IOError exception.
# @throws IOError If the loader fails to load the resource.
def default_loader(href, parse, encoding=None):
with open(href) as file:
if parse == "xml":
data = ElementTree.parse(file).getroot()
else:
data = file.read()
if encoding:
data = data.decode(encoding)
return data
##
# Expand XInclude directives.
#
# @param elem Root element.
# @param loader Optional resource loader. If omitted, it defaults
# to {@link default_loader}. If given, it should be a callable
# that implements the same interface as <b>default_loader</b>.
# @throws FatalIncludeError If the function fails to include a given
# resource, or if the tree contains malformed XInclude elements.
# @throws IOError If the function fails to load a given resource.
def include(elem, loader=None):
if loader is None:
loader = default_loader
# look for xinclude elements
i = 0
while i < len(elem):
e = elem[i]
if e.tag == XINCLUDE_INCLUDE:
# process xinclude directive
href = e.get("href")
parse = e.get("parse", "xml")
if parse == "xml":
node = loader(href, parse)
if node is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
node = copy.copy(node)
if e.tail:
node.tail = (node.tail or "") + e.tail
elem[i] = node
elif parse == "text":
text = loader(href, parse, e.get("encoding"))
if text is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
if i:
node = elem[i-1]
node.tail = (node.tail or "") + text + (e.tail or "")
else:
elem.text = (elem.text or "") + text + (e.tail or "")
del elem[i]
continue
else:
raise FatalIncludeError(
"unknown parse type in xi:include tag (%r)" % parse
)
elif e.tag == XINCLUDE_FALLBACK:
raise FatalIncludeError(
"xi:fallback tag must be child of xi:include (%r)" % e.tag
)
else:
include(e, loader)
i = i + 1
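# Illustrative usage (not part of the original module; assumes a document
# "doc.xml" whose xi:include hrefs resolve from the current directory):
#
#   from xml.etree import ElementTree, ElementInclude
#   tree = ElementTree.parse("doc.xml")
#   ElementInclude.include(tree.getroot())   # expands xi:include elements in place
#   ElementTree.dump(tree.getroot())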
| gpl-3.0 | 9,197,676,049,436,645,000 | 34.746479 | 74 | 0.607565 | false |
gisweb/plomino.printdocuments | bootstrap-buildout.py | 172 | 6501 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
action="store_true", default=False,
help=("Let bootstrap.py use existing site packages"))
parser.add_option("--setuptools-version",
help="use a specific setuptools version")
options, args = parser.parse_args()
######################################################################
# load/install setuptools
try:
if options.allow_site_packages:
import setuptools
import pkg_resources
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
ez = {}
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
# ez_setup imports site, which adds site packages
# this will remove them from the path to ensure that incompatible versions
# of setuptools are not in the path
import site
# inside a virtualenv, there is no 'getsitepackages'.
# We can't remove these reliably
if hasattr(site, 'getsitepackages'):
for sitepackage_path in site.getsitepackages():
sys.path[:] = [x for x in sys.path if sitepackage_path not in x]
setup_args = dict(to_dir=tmpeggs, download_delay=0)
if options.setuptools_version is not None:
setup_args['version'] = options.setuptools_version
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
cmd = [sys.executable, '-c',
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
try:
return not parsed_version.is_prerelease
except AttributeError:
# Older setuptools
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
raise Exception(
"Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
| gpl-3.0 | 1,815,683,320,820,004,600 | 33.396825 | 79 | 0.620366 | false |
caot/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geometry/test_data.py | 364 | 2994 | """
This module has the mock object definitions used to hold reference geometry
for the GEOS and GDAL tests.
"""
import gzip
import os
from django.contrib import gis
from django.utils import simplejson
# This global used to store reference geometry data.
GEOMETRIES = None
# Path where reference test data is located.
TEST_DATA = os.path.join(os.path.dirname(gis.__file__), 'tests', 'data')
def tuplize(seq):
"Turn all nested sequences to tuples in given sequence."
if isinstance(seq, (list, tuple)):
return tuple([tuplize(i) for i in seq])
return seq
def strconvert(d):
"Converts all keys in dictionary to str type."
return dict([(str(k), v) for k, v in d.iteritems()])
def get_ds_file(name, ext):
return os.path.join(TEST_DATA,
name,
name + '.%s' % ext
)
class TestObj(object):
"""
Base testing object, turns keyword args into attributes.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class TestDS(TestObj):
"""
Object for testing GDAL data sources.
"""
def __init__(self, name, **kwargs):
# Shapefile is default extension, unless specified otherwise.
ext = kwargs.pop('ext', 'shp')
self.ds = get_ds_file(name, ext)
super(TestDS, self).__init__(**kwargs)
class TestGeom(TestObj):
"""
Testing object used for wrapping reference geometry data
in GEOS/GDAL tests.
"""
def __init__(self, **kwargs):
# Converting lists to tuples of certain keyword args
# so coordinate test cases will match (JSON has no
# concept of tuple).
coords = kwargs.pop('coords', None)
if coords:
self.coords = tuplize(coords)
centroid = kwargs.pop('centroid', None)
if centroid:
self.centroid = tuple(centroid)
ext_ring_cs = kwargs.pop('ext_ring_cs', None)
if ext_ring_cs:
ext_ring_cs = tuplize(ext_ring_cs)
self.ext_ring_cs = ext_ring_cs
super(TestGeom, self).__init__(**kwargs)
class TestGeomSet(object):
"""
Each attribute of this object is a list of `TestGeom` instances.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, [TestGeom(**strconvert(kw)) for kw in value])
class TestDataMixin(object):
"""
Mixin used for GEOS/GDAL test cases that defines a `geometries`
property, which returns and/or loads the reference geometry data.
"""
@property
def geometries(self):
global GEOMETRIES
if GEOMETRIES is None:
# Load up the test geometry data from fixture into global.
gzf = gzip.GzipFile(os.path.join(TEST_DATA, 'geometries.json.gz'))
geometries = simplejson.loads(gzf.read())
GEOMETRIES = TestGeomSet(**strconvert(geometries))
return GEOMETRIES
| apache-2.0 | -2,980,834,686,938,754,000 | 27.514286 | 78 | 0.611556 | false |
ianzhengnan/learnpy | task_master.py | 1 | 1106 | import random, time, queue
from multiprocessing.managers import BaseManager
from multiprocessing import freeze_support
task_queue = queue.Queue()
result_queue = queue.Queue()
class QueueManager(BaseManager):
pass
def return_task_queue():
global task_queue
return task_queue
def return_result_queue():
global result_queue
return result_queue
def test():
QueueManager.register('get_task_queue', callable = return_task_queue)
QueueManager.register('get_result_queue', callable = return_result_queue)
manager = QueueManager(address=('127.0.0.1', 5000), authkey=b'abc')
manager.start()
task = manager.get_task_queue()
result = manager.get_result_queue()
for i in range(10):
n = random.randint(0,10000)
print('Put task %d...' % n)
task.put(n)
print('Try get result...')
for i in range(10):
r = result.get(timeout=10)
print('Result: %s' % r)
manager.shutdown()
print('Master exit.')
if __name__ == '__main__':
freeze_support()
test() | apache-2.0 | -4,354,926,830,302,793,700 | 19.72549 | 77 | 0.615732 | false |
9kopb/django-easy-maps | easy_maps/migrations/0005_auto__add_unique_address_address.py | 3 | 1806 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing index on 'Address', fields ['address']
if db.backend_name != 'sqlite3':
# South forgets indexes when altering tables in sqlite,
# see http://south.aeracode.org/ticket/757 .
# This means delete_index will raise an exception with sqlite
# because the index is 'forgotten' in previous migrations.
db.delete_index('easy_maps_address', ['address'])
# Adding unique constraint on 'Address', fields ['address']
db.create_unique('easy_maps_address', ['address'])
def backwards(self, orm):
# Removing unique constraint on 'Address', fields ['address']
db.delete_unique('easy_maps_address', ['address'])
# Adding index on 'Address', fields ['address']
db.create_index('easy_maps_address', ['address'])
models = {
'easy_maps.address': {
'Meta': {'object_name': 'Address'},
'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'computed_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'geocode_error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['easy_maps']
| mit | 1,825,776,804,113,828,400 | 40.045455 | 130 | 0.589701 | false |
eddo888/Tools | parser.py | 1 | 15876 | #!/usr/bin/env python2
# $Date$
# $Revision$
# $Author$
# $HeadURL$
# $Id$
import sys, re, os
import xml.parsers.expat
from xml.dom import minidom
from Tools.pretty import *
from Tools.colours import Colours
tokens = [
	['&amp;' , '####amp####'],
	['&' , '&amp;'],
	['<' , '&lt;'],
	['>' , '&gt;'],
	['\"' , '&quot;'],
	#['\'' , '&apos;'],
	['####amp####' , '&amp;'],
]
def escapeData(data):
for d in tokens:
data = data.replace(d[0],d[1])
return data
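# Worked example (illustrative, not part of the original module): escapeData
# protects pre-existing '&amp;' entities by round-tripping them through the
# '####amp####' placeholder, so they are not double-escaped:
#   escapeData('<a & b>')  ->  '&lt;a &amp; b&gt;'
#   escapeData('&amp;')    ->  '&amp;'   (unchanged, not '&amp;amp;')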
def doParse(
input,
output,
colour=False,
areturn=False,
rformat=False,
html=False,
preserve=False,
comments=False,
fname=None
):
myParser = MyParser(colour=colour, areturn=areturn, rformat=rformat, html=html, output=output, preserve=preserve, comments=comments)
try:
myParser.parser.ParseFile(input)
except:
printer = PrettyPrinter(colour=True, output=sys.stderr)
sys.stderr.write('%s \n'%(fname or 'rendering as text '))
printer.prettify(sys.exc_info())
del printer
if input != sys.stdin:
input.seek(0)
if output != sys.stdout:
output.seek(0)
output.write(input.read())
del myParser
return
def printXML(xml, colour=False, areturn=False, rformat=False,output=sys.stdout):
myParser = MyParser(
colour=colour,
rformat=rformat,
areturn=areturn,
output=output
)
myParser.parser.Parse(xml)
del myParser
return
class MyParser:
indent = 0
stateStartLast = 1
stateEndLast = 2
stateTextLast = 3
stateCDataLast = 4
stateCDataStart = 5
stateCDataEnd = 6
state = stateEndLast
def __init__(self, colour=False, areturn=False, rformat=False, html=False, output=sys.stdout, preserve=False, comments=True):
self.output = output
self.lt='<'
self.gt='>'
self.amp='&'
self.quot='\"'
self.apos='\''
self.lf='\n'
self.indentChar = ' '
self.preserve = preserve
self.colours = Colours(colour=colour, html=html)
if html:
			self.lt ='&lt;'
			self.gt ='&gt;'
			self.amp ='&amp;'
			self.quot='&quot;'
			self.apos='&apos;'
self.lf ='<br/>'
self.indentChar = ' '
self.areturn = areturn
self.rformat = rformat
self.parser = xml.parsers.expat.ParserCreate()
self.parser.StartElementHandler = self.startElementHandler
self.parser.EndElementHandler = self.endElementHandler
self.parser.CharacterDataHandler = self.characterDataHandler
self.parser.StartCdataSectionHandler = self.startCdataSectionHandler
self.parser.EndCdataSectionHandler = self.endCdataSectionHandler
self.parser.XmlDeclHandler = self.xmlDeclHandler
self.parser.StartDoctypeDeclHandler = self.startDoctypeDeclHandler
self.parser.EndDoctypeDeclHandler = self.endDoctypeDeclHandler
self.parser.ProcessingInstructionHandler = self.processingInstructionHandler
if comments:
self.parser.CommentHandler = self.commentHandler
# Doctype => \&handle_doctype,
# Proc => => \&handle_proc,
self.leader = re.compile('(^\s+)')
self.pattern = re.compile('(^\s+|\s+$)')
self.lfCount = 0
return
##parser.ElementDeclHandler(name, model)
##parser.AttlistDeclHandler(elname, attname, type, default, required)
##parser.UnparsedEntityDeclHandler(entityName, base, systemId, publicId, notationName)
##parser.EntityDeclHandler(entityName, is_parameter_entity, value, base, systemId, publicId, notationName)
##parser.NotationDeclHandler(notationName, base, systemId, publicId)
##parser.StartNamespaceDeclHandler(prefix, uri)
##parser.EndNamespaceDeclHandler(prefix)
##parser.DefaultHandler(data)
##parser.DefaultHandlerExpand(data)
##parser.NotStandaloneHandler()
##parser.ExternalEntityRefHandler(context, base, systemId, publicId)
def close(self):
if self.parser:
self.parser.Parse('',1)
del self.parser
return
def startElementHandler(self, name, attrs):
if self.rformat:
self.areturn = True
if self.state == self.stateStartLast:
self.output.write(''.join([
self.colours.White,
self.gt,
self.colours.Off,
self.lf
]))
self.output.flush()
if self.preserve and self.lfCount > 2 and self.state == self.stateEndLast:
self.output.write(self.lf)
self.lfCount =0
if ':' in name:
(pre,ele) = tuple(name.split(':'))
pre='%s:'%pre
else:
(pre,ele) = ('',name)
self.output.write(''.join([
(self.indent) * self.indentChar,
self.colours.White,
self.lt,
self.colours.Off,
pre,
self.colours.Teal,
ele,
self.colours.Off
]))
self.output.flush()
for attr in sorted(attrs.keys()):
if self.areturn:
self.output.write(''.join([
self.lf,
(self.indent+1) * self.indentChar,
]))
else:
self.output.write(' ')
self.output.write(''.join([
self.colours.Green ,
attr ,
self.colours.Off ,
self.colours.White ,
'=' ,
self.colours.Off ,
self.quot ,
self.colours.Purple ,
escapeData(attrs[attr]) ,
self.colours.Off ,
self.quot ,
]))
self.output.flush()
if len(attrs) > 0 and self.areturn:
self.output.write(''.join([
self.lf,
(self.indent) * self.indentChar,
]))
self.indent += 1
self.state = self.stateStartLast
if self.rformat:
self.rformat = False
self.areturn = False
return
def endElementHandler(self, name):
if ':' in name:
(pre,ele) = tuple(name.split(':'))
pre='%s:'%pre
else:
(pre,ele) = ('',name)
self.indent -= 1
if self.state == self.stateCDataEnd:
if self.lfCount > 1:
self.output.write(self.lf)
self.lfCount = 0
if self.state == self.stateStartLast:
self.output.write(''.join([
self.colours.White ,
'/' ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
elif self.state != self.stateTextLast and self.state != self.stateCDataEnd:
self.output.write(''.join([
(self.indent) * self.indentChar,
self.colours.White ,
self.lt ,
self.colours.Off ,
self.colours.White ,
'/' ,
pre,
self.colours.Teal,
ele ,
self.colours.Off ,
self.colours.White ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
else:
self.output.write(''.join([
self.colours.White ,
self.lt ,
self.colours.Off ,
self.colours.White ,
'/' ,
pre,
self.colours.Teal,
ele ,
self.colours.Off ,
self.colours.White ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
self.state = self.stateEndLast
return
def characterDataHandler(self, data):
if not self.state == self.stateCDataStart and not self.state == self.stateCDataLast:
data = escapeData(data)
leader = ''
match = self.leader.match(data)
if match:
leader = match.group(1)
self.lfCount = self.lfCount + data.count('\n')
if not self.state == self.stateTextLast and not self.state == self.stateCDataLast:
data = self.leader.sub('', data)
if len(data) == 0:
return
if self.state == self.stateStartLast:
self.output.write(''.join([
self.colours.White,
self.gt,
self.colours.Off,
]))
if self.lfCount > 1:
self.output.write(leader)
self.output.write(self.lf)
self.output.write(data)
self.state = self.stateTextLast
elif self.state == self.stateCDataStart:
if self.lfCount > 0:
self.output.write(leader)
self.output.write(self.lf)
self.output.write(data)
self.state = self.stateCDataLast
elif self.state == self.stateCDataLast:
self.output.write(data)
elif self.state == self.stateTextLast:
self.output.write(data)
elif self.state != self.stateEndLast:
self.output.write(data)
return
def commentHandler(self, data):
if self.state == self.stateStartLast:
self.output.write(''.join([
self.colours.White ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
self.output.write(''.join([
(self.indent) * self.indentChar,
self.colours.Orange ,
self.lt ,
'!--' ,
data ,
'--' ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
self.state = self.stateEndLast
return
def startCdataSectionHandler(self):
if not self.state == self.stateStartLast:
self.output.write((self.indent) * self.indentChar)
if self.state == self.stateStartLast:
self.output.write(''.join([
self.colours.White ,
self.gt ,
self.colours.Off ,
]))
self.output.flush()
self.output.write(''.join([
self.colours.White ,
self.lt ,
'![',
self.colours.Green,
'CDATA',
self.colours.White,
'[' ,
self.colours.Off ,
]))
self.output.flush()
self.state = self.stateCDataStart
return
def endCdataSectionHandler(self):
self.output.write(''.join([
self.colours.White ,
']]' ,
self.gt ,
self.colours.Off ,
]))
self.output.flush()
self.state = self.stateCDataEnd
return
def xmlDeclHandler(self, version, encoding, standalone):
self.output.write(''.join([
(self.indent) * self.indentChar,
self.colours.White,
self.lt ,
'?',
self.colours.Orange,
'xml ' ,
self.colours.Off ,
self.colours.Green ,
'version' ,
self.colours.Off ,
self.colours.White ,
'=' ,
self.quot ,
self.colours.Off ,
self.colours.Purple ,
version ,
self.colours.Off ,
self.colours.White ,
self.quot ,
self.colours.Off ,
]))
self.output.flush()
if encoding:
self.output.write(''.join([
self.colours.Green ,
' encoding' ,
self.colours.Off ,
self.colours.White ,
'=' ,
self.colours.Off ,
self.quot ,
self.colours.Purple ,
encoding ,
self.colours.Off ,
self.quot ,
]))
self.output.flush()
self.output.write(''.join([
self.colours.White ,
'?' ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
return
def startDoctypeDeclHandler(self, doctypeName, systemId, publicId, has_internal_subset):
self.output.write((self.indent) * self.indentChar)
if not publicId:
self.output.write(''.join([
self.colours.White ,
self.lt ,
'!DOCTYPE ' ,
self.colours.Off ,
self.colours.White ,
doctypeName ,
self.colours.Off ,
self.colours.White ,
' SYSTEM ' ,
self.quot ,
self.colours.Off ,
self.colours.Green ,
systemId ,
self.colours.Off ,
self.colours.White ,
self.quot ,
self.quot ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
else:
self.output.write(''.join([
self.colours.White ,
self.lt ,
'!DOCTYPE ' ,
self.colours.Off ,
self.colours.White ,
doctypeName ,
self.colours.Off ,
self.colours.White ,
' PUBLIC ' ,
self.quot ,
self.colours.Off ,
self.colours.Green ,
publicId ,
self.colours.Off ,
self.quot ,
' ' ,
self.quot ,
self.colours.Green ,
systemId ,
self.colours.Off ,
self.colours.White ,
self.quot,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
return
def endDoctypeDeclHandler(self):
return
def processingInstructionHandler(self, target, data):
self.output.write(''.join([
(self.indent) * self.indentChar,
self.colours.White ,
self.lt ,
'?' ,
target ,
self.colours.Off ,
]))
self.output.flush()
pn = re.compile('\s*(\S+)=[\'"]([^\'"]*)[\'"]\s*')
b = pn.split(data)
while '' in b: b.remove('')
for i in range(len(b)/2):
self.output.write(''.join([
self.colours.Red ,
b[2*i] ,
self.colours.Off ,
self.colours.White ,
'=' ,
self.colours.Off ,
self.quot ,
self.colours.Green ,
				b[2*i+1],
self.colours.Off ,
self.quot ,
]))
self.output.flush()
self.output.write(''.join([
self.colours.White ,
'?' ,
self.gt ,
self.colours.Off ,
self.lf ,
]))
self.output.flush()
return
def main():
with open('../scripts/_test/Sample.xml') as input:
doParse(input, sys.stdout, colour=True, rformat=True)
if __name__ == '__main__': main()
| mit | -5,552,825,259,485,982,000 | 28.730337 | 136 | 0.473923 | false |
chenjun0210/tensorflow | tensorflow/contrib/cudnn_rnn/__init__.py | 54 | 1524 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for fused Cudnn RNN models.
@@CudnnGRU
@@CudnnLSTM
@@CudnnRNNRelu
@@CudnnRNNTanh
@@RNNParamsSaveable
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnGRU
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnLSTM
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnRNNRelu
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnRNNTanh
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import RNNParamsSaveable
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"CudnnGRU",
"CudnnLSTM",
"CudnnRNNRelu",
"CudnnRNNTanh",
"RNNParamsSaveable",
]
remove_undocumented(__name__)
| apache-2.0 | -1,820,304,823,444,089,000 | 33.636364 | 83 | 0.730971 | false |
mozilla-b2g/fxos-certsuite | mcts/web-platform-tests/tests/tools/wptserve/tests/functional/base.py | 293 | 1831 | import base64
import logging
import os
import unittest
import urllib
import urllib2
import urlparse
import wptserve
logging.basicConfig()
here = os.path.split(__file__)[0]
doc_root = os.path.join(here, "docroot")
class Request(urllib2.Request):
def __init__(self, *args, **kwargs):
urllib2.Request.__init__(self, *args, **kwargs)
self.method = "GET"
def get_method(self):
return self.method
def add_data(self, data):
if hasattr(data, "iteritems"):
data = urllib.urlencode(data)
print data
self.add_header("Content-Length", str(len(data)))
urllib2.Request.add_data(self, data)
class TestUsingServer(unittest.TestCase):
def setUp(self):
self.server = wptserve.server.WebTestHttpd(host="localhost",
port=0,
use_ssl=False,
certificate=None,
doc_root=doc_root)
self.server.start(False)
def tearDown(self):
self.server.stop()
def abs_url(self, path, query=None):
return urlparse.urlunsplit(("http", "%s:%i" % (self.server.host, self.server.port), path, query, None))
def request(self, path, query=None, method="GET", headers=None, body=None, auth=None):
req = Request(self.abs_url(path, query))
req.method = method
if headers is None:
headers = {}
for name, value in headers.iteritems():
req.add_header(name, value)
if body is not None:
req.add_data(body)
if auth is not None:
req.add_header("Authorization", "Basic %s" % base64.encodestring('%s:%s' % auth))
return urllib2.urlopen(req)
| mpl-2.0 | -643,062,989,662,435,100 | 29.016393 | 111 | 0.555434 | false |
hanwenyan/ud858 | Lesson_4/00_Conference_Central/conference.py | 35 | 3749 | #!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = '[email protected] (Wesley Chun)'
from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.ext import ndb
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import TeeShirtSize
from utils import getUserId
from settings import WEB_CLIENT_ID
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api( name='conference',
version='v1',
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
# if saveProfile(), process user-modifyable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return self._doProfile(request)
# - - - Conference objects - - - - - - - - - - - - - - - - - - -
# TODO
# registers API
api = endpoints.api_server([ConferenceApi])
| gpl-3.0 | 7,480,339,121,754,663,000 | 29.233871 | 93 | 0.596692 | false |
bminchew/PySAR | pysar/insar/sarcorrelation.py | 1 | 4976 | #!/usr/bin/env python
"""
sarcorrelation.py : Calculates interferometric correlation
usage::
$ sarcorrelation.py int_file amp_input [options]
Parameters
----------
int_file : complex interferogram file
amp_input : amplitude file(s); one of:
-a bip_amp (bit-interleaved amplitude file)
-s amp1_file amp2_file
-p power1_file power2_file
Options
-------
-o output_file : name of ouput file [sarcor.out]
-c str_option : output real amplitude (str_option = 'a'), real phase (str_option = 'p'),
in radians or complex (str_option = 'c') correlation ['a']
-n value : data null value (float only) [0]
Notes
-----
* input data is assumed to be single precision
"""
from __future__ import print_function, division
import sys,os
import numpy as np
from pysar.etc.excepts import InputError
np.seterr(divide='ignore')
###==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==###
def main(args):
cor = Correlation(args)
cor.read_data()
cor.calculate()
cor.write_data()
###==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==###
class Correlation():
def __init__(self,args):
self.intfile = args[0]
self.null = 0.
self.ampow = 'a'
self.ampfile = None
self.amp1file, self.amp2file = None, None
self.outfile = 'sarcor.out'
self.outap = 'a'
for i,a in enumerate(args[1:]):
if a == '-a':
self.ampfile = args[2+i] # 2 because I skip the first argument in args
elif a == '-s':
self.amp1file = args[2+i]
self.amp2file = args[3+i]
elif a == '-p':
self.amp1file = args[2+i]
self.amp2file = args[3+i]
self.ampow = 'p'
elif a == '-o':
self.outfile = args[2+i]
elif a == '-c':
self.outap = args[2+i]
elif a == '-n':
try:
self.null = np.float32(args[2+i])
except:
raise InputError('null value must be float; %s given' % args[2+i])
self._check_args()
###--------------------------------------###
def _check_args(self):
if self.ampfile is None:
if self.amp1file is None or self.amp2file is None:
errstr = 'a single bil amplitude file or two real-valued amplitude or power files '
errstr += 'must be provided'
raise InputError(errstr)
if self.outap != 'a' and self.outap != 'p' and self.outap != 'c':
errstr = "unrecognized option %s for output type; " % self.outap
errstr += "must be 'a' for amplitude, 'p' for phase, or 'c' for complex"
raise InputError(errstr)
###--------------------------------------###
def read_data(self):
print('reading')
fid = open(self.intfile,'r')
self.igram = np.fromfile(fid,dtype=np.complex64)
fid.close()
if self.ampfile is None:
fid = open(self.amp1file,'r')
self.amp1 = np.fromfile(fid,dtype=np.float32)
fid.close()
fid = open(self.amp2file,'r')
self.amp2 = np.fromfile(fid,dtype=np.float32)
fid.close()
else:
fid = open(self.ampfile,'r')
amp = np.fromfile(fid,dtype=np.float32)
fid.close()
self.amp1, self.amp2 = amp[::2], amp[1::2]
###--------------------------------------###
def calculate(self):
print('calculating correlation')
redonull, redozero = False, False
teps = 2.*np.finfo(np.float32).eps
nullmask = np.abs(self.igram - self.null) < teps
nullmask += np.abs(self.amp1 - self.null) < teps
nullmask += np.abs(self.amp2 - self.null) < teps
zeromask = self.amp1 < teps
zeromask += self.amp2 < teps
if len(nullmask[nullmask]) > 1:
redonull = True
self.amp1[nullmask], self.amp2[nullmask] = 1., 1.
if len(zeromask[zeromask]) > 1:
redozero = True
self.amp1[zeromask], self.amp2[zeromask] = 1., 1.
if self.ampow == 'a':
self.cor = self.igram/(self.amp1*self.amp2)
else:
self.cor = self.igram/(np.sqrt(self.amp1*self.amp2))
if self.outap == 'a':
self.cor = np.abs(self.cor)
elif self.outap == 'p':
self.cor = np.arctan2(self.cor.imag,self.cor.real)
if redonull:
self.cor[nullmask] = self.null
if redozero:
self.cor[zeromask] = self.null
###--------------------------------------###
def write_data(self):
print('writing')
fid = open(self.outfile,'w')
self.cor.tofile(fid)
fid.close()
###==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==###
if __name__ == '__main__':
args = sys.argv[1:]
if len(args) < 3:
print(__doc__)
sys.exit()
main(args)
| gpl-3.0 | 696,523,402,408,382,300 | 30.1 | 97 | 0.499598 | false |
liuqr/edx-xiaodun | lms/djangoapps/psychometrics/models.py | 38 | 2026 | #
# db model for psychometrics data
#
# this data is collected in real time
#
from django.db import models
from courseware.models import StudentModule
class PsychometricData(models.Model):
"""
This data is a table linking student, module, and module performance,
including number of attempts, grade, max grade, and time of checks.
Links to instances of StudentModule, but only those for capa problems.
Note that StudentModule.module_state_key is nominally a Location instance (url string).
That means it is of the form {tag}://{org}/{course}/{category}/{name}[@{revision}]
and for capa problems, category = "problem".
checktimes is extracted from tracking logs, or added by capa module via psychometrics callback.
"""
studentmodule = models.ForeignKey(StudentModule, db_index=True, unique=True) # contains student, module_state_key, course_id
done = models.BooleanField(default=False)
attempts = models.IntegerField(default=0) # extracted from studentmodule.state
checktimes = models.TextField(null=True, blank=True) # internally stored as list of datetime objects
# keep in mind
# grade = studentmodule.grade
# max_grade = studentmodule.max_grade
# student = studentmodule.student
# course_id = studentmodule.course_id
# location = studentmodule.module_state_key
def __unicode__(self):
sm = self.studentmodule
return "[PsychometricData] %s url=%s, grade=%s, max=%s, attempts=%s, ct=%s" % (sm.student,
sm.module_state_key,
sm.grade,
sm.max_grade,
self.attempts,
self.checktimes)
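# Illustrative sketch (not part of the original module): how a capa grading
# handler might record a check. Assumes `sm` is an existing StudentModule row
# for a capa problem and `when` is a datetime; the JSON encoding of checktimes
# is an assumption for this example only.
#
#   import json
#
#   def record_check(sm, when):
#       pmd, _ = PsychometricData.objects.get_or_create(studentmodule=sm)
#       pmd.attempts += 1
#       pmd.done = (sm.grade == sm.max_grade)
#       times = json.loads(pmd.checktimes or "[]")
#       times.append(when.isoformat())
#       pmd.checktimes = json.dumps(times)
#       pmd.save()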
| agpl-3.0 | -1,907,014,464,397,483,800 | 44.022222 | 130 | 0.562685 | false |
moksha11/xen-hv | dist/install/usr/lib64/python2.6/site-packages/xen/util/utils.py | 43 | 1937 | import traceback
import sys
import os
def exception_string(e):
    # Formats the exception currently being handled via sys.exc_info();
    # the 'e' argument itself is not used.
    (ty,v,tb) = sys.exc_info()
    return traceback.format_exception_only(ty,v)
def daemonize(prog, args, stdin_tmpfile=None):
"""Runs a program as a daemon with the list of arguments. Returns the PID
of the daemonized program, or returns 0 on error.
"""
r, w = os.pipe()
pid = os.fork()
if pid == 0:
os.close(r)
w = os.fdopen(w, 'w')
os.setsid()
try:
pid2 = os.fork()
except:
pid2 = None
if pid2 == 0:
os.chdir("/")
null_fd = os.open("/dev/null", os.O_RDWR)
if stdin_tmpfile is not None:
os.dup2(stdin_tmpfile.fileno(), 0)
else:
os.dup2(null_fd, 0)
os.dup2(null_fd, 1)
os.dup2(null_fd, 2)
for fd in range(3, 256):
try:
os.close(fd)
except:
pass
os.execvp(prog, args)
os._exit(1)
else:
w.write(str(pid2 or 0))
w.close()
os._exit(0)
os.close(w)
r = os.fdopen(r)
daemon_pid = int(r.read())
r.close()
os.waitpid(pid, 0)
return daemon_pid
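# Illustrative usage sketch (not from the original file): start a helper
# process detached from the current session and keep its PID. The program and
# arguments are hypothetical; by convention args[0] repeats the program name.
#
#   pid = daemonize("/usr/sbin/my-helper", ["my-helper", "--verbose"])
#   if pid == 0:
#       raise RuntimeError("failed to daemonize my-helper")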
# Global variable to store the sysfs mount point
sysfs_mount_point = None
PROC_MOUNTS_PATH = '/proc/mounts'
def find_sysfs_mount():
global sysfs_mount_point
if not sysfs_mount_point is None:
return sysfs_mount_point
try:
mounts_file = open(PROC_MOUNTS_PATH, 'r')
for line in mounts_file:
sline = line.split()
if len(sline) < 3:
continue
if sline[2] == 'sysfs':
sysfs_mount_point= sline[1]
break
mounts_file.close()
return sysfs_mount_point
except IOError, (errno, strerr):
raise
return None
| gpl-2.0 | -3,659,018,382,319,848,000 | 23.833333 | 78 | 0.500774 | false |
shdxiang/yunba-smartoffice | tests/get_status.py | 2 | 1501 | #!/usr/bin/env python
import time
import sys
import logging
import argparse
from socketIO_client import SocketIO
from messenger import Messenger
logger = logging.getLogger('get_status')
logging.basicConfig(level=logging.DEBUG)
APPKEY = '5697113d4407a3cd028abead'
#TOPIC = 'yunba_smart_plug'
#ALIAS = 'plc_0'
class Status(Messenger):
def __init__(self, topic, alias, cmd):
self.__logger = logging.getLogger('get_status.Status')
self.__logger.info('init')
Messenger.__init__(self, APPKEY, 'status', 'status')
self.topic = topic
self.alias = alias
self.cmd = cmd
def __del__(self):
self.__logger.info('del')
def on_connack(self, args):
self.__logger.debug('on_connack: %s', args)
self.socketIO.emit('subscribe', {'topic': self.topic})
self.socketIO.emit('set_alias', {'alias': 'status'})
def on_set_alias(self, args):
self.__logger.debug('on_set_alias: %s', args)
self.publish_to_alias(self.alias, '{"cmd": "'+ self.cmd + '", "devid": "' + self.alias + '"}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Status')
parser.add_argument('topic', type=str, help='topic to subscribe')
parser.add_argument('alias', type=str, help='publish to this alias')
parser.add_argument('cmd', type=str, help='cmd')
args = parser.parse_args()
s = Status(args.topic, args.alias, args.cmd)
while True:
s.loop()
time.sleep(0.1)
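# Example invocation (illustrative; the topic and alias mirror the commented
# constants above, the command string is hypothetical):
#
#   $ ./get_status.py yunba_smart_plug plc_0 plug_get
#
# This subscribes to topic 'yunba_smart_plug', sets the local alias 'status',
# then publishes {"cmd": "plug_get", "devid": "plc_0"} to alias 'plc_0' and
# loops waiting for a reply.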
| mit | -6,718,916,338,333,225,000 | 27.865385 | 102 | 0.624917 | false |
calfonso/ansible | lib/ansible/modules/web_infrastructure/django_manage.py | 22 | 11134 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Scott Anderson <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: django_manage
short_description: Manages a Django application.
description:
- Manages a Django application using the I(manage.py) application frontend to I(django-admin). With the I(virtualenv) parameter, all
management commands will be executed by the given I(virtualenv) installation.
version_added: "1.1"
options:
command:
choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ]
description:
- The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb,
test, and validate.
- Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run
with the I(--noinput) flag.
required: true
app_path:
description:
- The path to the root of the Django application where B(manage.py) lives.
required: true
settings:
description:
- The Python path to the application's settings module, such as 'myapp.settings'.
required: false
pythonpath:
description:
- A directory to add to the Python path. Typically used to include the settings module if it is located external to the application directory.
required: false
virtualenv:
description:
- An optional path to a I(virtualenv) installation to use while running the manage application.
    aliases: [virtual_env]
apps:
description:
- A list of space-delimited apps to target. Used by the 'test' command.
required: false
cache_table:
description:
- The name of the table used for database-backed caching. Used by the 'createcachetable' command.
required: false
database:
description:
- The database to target. Used by the 'createcachetable', 'flush', 'loaddata', and 'syncdb' commands.
required: false
failfast:
description:
- Fail the command immediately if a test fails. Used by the 'test' command.
required: false
default: "no"
type: bool
fixtures:
description:
- A space-delimited list of fixture file names to load in the database. B(Required) by the 'loaddata' command.
required: false
skip:
description:
- Will skip over out-of-order missing migrations, you can only use this parameter with I(migrate)
required: false
version_added: "1.3"
merge:
description:
- Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with 'migrate' command
required: false
version_added: "1.3"
link:
description:
- Will create links to the files instead of copying them, you can only use this parameter with 'collectstatic' command
required: false
version_added: "1.3"
notes:
- I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified.
- This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location.
- This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately.
- To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings.
- To be able to use the collectstatic command, you must have enabled staticfiles in your settings.
- As of ansible 2.x, your I(manage.py) application must be executable (rwxr-xr-x), and must have a valid I(shebang), i.e. "#!/usr/bin/env python",
for invoking the appropriate Python interpreter.
requirements: [ "virtualenv", "django" ]
author: "Scott Anderson (@tastychutney)"
'''
EXAMPLES = """
# Run cleanup on the application installed in 'django_dir'.
- django_manage:
command: cleanup
app_path: "{{ django_dir }}"
# Load the initial_data fixture into the application
- django_manage:
command: loaddata
app_path: "{{ django_dir }}"
fixtures: "{{ initial_data }}"
# Run syncdb on the application
- django_manage:
command: syncdb
app_path: "{{ django_dir }}"
settings: "{{ settings_app_name }}"
pythonpath: "{{ settings_dir }}"
virtualenv: "{{ virtualenv_dir }}"
# Run the SmokeTest test case from the main app. Useful for testing deploys.
- django_manage:
command: test
app_path: "{{ django_dir }}"
apps: main.SmokeTest
# Create an initial superuser.
- django_manage:
command: "createsuperuser --noinput --username=admin [email protected]"
app_path: "{{ django_dir }}"
"""
import os
import sys
from ansible.module_utils.basic import AnsibleModule
def _fail(module, cmd, out, err, **kwargs):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
msg += "\n:stderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg, **kwargs)
def _ensure_virtualenv(module):
venv_param = module.params['virtualenv']
if venv_param is None:
return
vbin = os.path.join(venv_param, 'bin')
activate = os.path.join(vbin, 'activate')
if not os.path.exists(activate):
virtualenv = module.get_bin_path('virtualenv', True)
        vcmd = [virtualenv, venv_param]
rc, out_venv, err_venv = module.run_command(vcmd)
if rc != 0:
_fail(module, vcmd, out_venv, err_venv)
os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
os.environ["VIRTUAL_ENV"] = venv_param
def createcachetable_filter_output(line):
return "Already exists" not in line
def flush_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def loaddata_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def syncdb_filter_output(line):
return ("Creating table " in line) or ("Installed" in line and "Installed 0 object" not in line)
def migrate_filter_output(line):
return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) or ("Applying" in line)
def collectstatic_filter_output(line):
return line and "0 static files" not in line
def main():
command_allowed_param_map = dict(
cleanup=(),
createcachetable=('cache_table', 'database', ),
flush=('database', ),
loaddata=('database', 'fixtures', ),
syncdb=('database', ),
test=('failfast', 'testrunner', 'liveserver', 'apps', ),
validate=(),
migrate=('apps', 'skip', 'merge', 'database',),
collectstatic=('clear', 'link', ),
)
command_required_param_map = dict(
loaddata=('fixtures', ),
)
# forces --noinput on every command that needs it
noinput_commands = (
'flush',
'syncdb',
'migrate',
'test',
'collectstatic',
)
# These params are allowed for certain commands only
specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner')
# These params are automatically added to the command if present
general_params = ('settings', 'pythonpath', 'database',)
specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
end_of_command_params = ('apps', 'cache_table', 'fixtures')
module = AnsibleModule(
argument_spec=dict(
command=dict(default=None, required=True),
app_path=dict(default=None, required=True, type='path'),
settings=dict(default=None, required=False),
pythonpath=dict(default=None, required=False, aliases=['python_path']),
virtualenv=dict(default=None, required=False, type='path', aliases=['virtual_env']),
apps=dict(default=None, required=False),
cache_table=dict(default=None, required=False),
clear=dict(default=None, required=False, type='bool'),
database=dict(default=None, required=False),
failfast=dict(default='no', required=False, type='bool', aliases=['fail_fast']),
fixtures=dict(default=None, required=False),
liveserver=dict(default=None, required=False, aliases=['live_server']),
testrunner=dict(default=None, required=False, aliases=['test_runner']),
skip=dict(default=None, required=False, type='bool'),
merge=dict(default=None, required=False, type='bool'),
link=dict(default=None, required=False, type='bool'),
),
)
command = module.params['command']
app_path = module.params['app_path']
virtualenv = module.params['virtualenv']
for param in specific_params:
value = module.params[param]
if param in specific_boolean_params:
value = module.boolean(value)
if value and param not in command_allowed_param_map[command]:
module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))
for param in command_required_param_map.get(command, ()):
if not module.params[param]:
module.fail_json(msg='%s param is required for command=%s' % (param, command))
_ensure_virtualenv(module)
cmd = "./manage.py %s" % (command, )
if command in noinput_commands:
cmd = '%s --noinput' % cmd
for param in general_params:
if module.params[param]:
cmd = '%s --%s=%s' % (cmd, param, module.params[param])
for param in specific_boolean_params:
if module.boolean(module.params[param]):
cmd = '%s --%s' % (cmd, param)
# these params always get tacked on the end of the command
for param in end_of_command_params:
if module.params[param]:
cmd = '%s %s' % (cmd, module.params[param])
rc, out, err = module.run_command(cmd, cwd=app_path)
if rc != 0:
if command == 'createcachetable' and 'table' in err and 'already exists' in err:
out = 'Already exists.'
else:
if "Unknown command:" in err:
_fail(module, cmd, err, "Unknown django command: %s" % command)
_fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)
changed = False
lines = out.split('\n')
filt = globals().get(command + "_filter_output", None)
if filt:
filtered_output = list(filter(filt, lines))
if len(filtered_output):
changed = filtered_output
module.exit_json(changed=changed, out=out, cmd=cmd, app_path=app_path, virtualenv=virtualenv,
settings=module.params['settings'], pythonpath=module.params['pythonpath'])
if __name__ == '__main__':
main()
| gpl-3.0 | -1,560,587,839,478,336,300 | 35.625 | 148 | 0.649093 | false |
ganeshmurthy/qpid-dispatch | python/qpid_dispatch_internal/tools/__init__.py | 7 | 1221 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from .display import Display, Header, Sorter, YN, Commas, TimeLong, TimeShort, Sortable, BodyFormat, PlainNum
from .display import NumKMG
__all__ = ["Display", "Header", "Sorter", "YN", "Commas", "TimeLong", "TimeShort", "Sortable", "BodyFormat", "PlainNum",
"NumKMG"]
| apache-2.0 | -4,032,058,930,197,351,400 | 41.103448 | 120 | 0.745291 | false |
dlab-berkeley/collaboratool-archive | bsd2/vagrant-ansible/ansible/lib/ansible/runner/action_plugins/debug.py | 2 | 1760 | # Copyright 2012, Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible
from ansible import utils
from ansible.runner.return_data import ReturnData
class ActionModule(object):
''' Print statements during execution '''
NEEDS_TMPPATH = False
def __init__(self, runner):
self.runner = runner
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
args = {}
if complex_args:
args.update(complex_args)
# attempt to prevent confusing messages when the variable didn't interpolate
module_args = module_args.replace("{{ ","{{").replace(" }}","}}")
kv = utils.parse_kv(module_args)
args.update(kv)
if not 'msg' in args:
args['msg'] = 'Hello world!'
if 'fail' in args and utils.boolean(args['fail']):
result = dict(failed=True, msg=args['msg'])
else:
result = dict(msg=args['msg'])
# force flag to make debug output module always verbose
result['verbose_always'] = True
return ReturnData(conn=conn, result=result)
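# Illustrative playbook usage for this action plugin (values are hypothetical;
# the 'msg' and 'fail' keys correspond to the args handled above):
#
#   - debug: msg="deploying to {{ inventory_hostname }}"
#   - debug: fail=yes msg="aborting this play on purpose"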
| apache-2.0 | 5,034,036,433,593,402,000 | 32.846154 | 92 | 0.665909 | false |
embeddedarm/android_external_chromium_org | third_party/android_platform/development/scripts/stack_core.py | 50 | 9531 | #!/usr/bin/env python
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""stack symbolizes native crash dumps."""
import re
import symbol
def PrintTraceLines(trace_lines):
"""Print back trace."""
maxlen = max(map(lambda tl: len(tl[1]), trace_lines))
print
print "Stack Trace:"
print " RELADDR " + "FUNCTION".ljust(maxlen) + " FILE:LINE"
for tl in trace_lines:
(addr, symbol_with_offset, location) = tl
print " %8s %s %s" % (addr, symbol_with_offset.ljust(maxlen), location)
return
def PrintValueLines(value_lines):
"""Print stack data values."""
maxlen = max(map(lambda tl: len(tl[2]), value_lines))
print
print "Stack Data:"
print " ADDR VALUE " + "FUNCTION".ljust(maxlen) + " FILE:LINE"
for vl in value_lines:
(addr, value, symbol_with_offset, location) = vl
print " %8s %8s %s %s" % (addr, value, symbol_with_offset.ljust(maxlen), location)
return
UNKNOWN = "<unknown>"
HEAP = "[heap]"
STACK = "[stack]"
def PrintOutput(trace_lines, value_lines, more_info):
if trace_lines:
PrintTraceLines(trace_lines)
if value_lines:
# TODO(cjhopman): it seems that symbol.SymbolInformation always fails to
# find information for addresses in value_lines in chrome libraries, and so
# value_lines have little value to us and merely clutter the output.
# Since information is sometimes contained in these lines (from system
# libraries), don't completely disable them.
if more_info:
PrintValueLines(value_lines)
def PrintDivider():
print
print "-----------------------------------------------------\n"
def ConvertTrace(lines, more_info):
"""Convert strings containing native crash to a stack."""
process_info_line = re.compile("(pid: [0-9]+, tid: [0-9]+.*)")
signal_line = re.compile("(signal [0-9]+ \(.*\).*)")
register_line = re.compile("(([ ]*[0-9a-z]{2} [0-9a-f]{8}){4})")
thread_line = re.compile("(.*)(\-\-\- ){15}\-\-\-")
dalvik_jni_thread_line = re.compile("(\".*\" prio=[0-9]+ tid=[0-9]+ NATIVE.*)")
dalvik_native_thread_line = re.compile("(\".*\" sysTid=[0-9]+ nice=[0-9]+.*)")
# Note that both trace and value line matching allow for variable amounts of
# whitespace (e.g. \t). This is because the we want to allow for the stack
# tool to operate on AndroidFeedback provided system logs. AndroidFeedback
# strips out double spaces that are found in tombsone files and logcat output.
#
# Examples of matched trace lines include lines from tombstone files like:
# #00 pc 001cf42e /data/data/com.my.project/lib/libmyproject.so
# #00 pc 001cf42e /data/data/com.my.project/lib/libmyproject.so (symbol)
# Or lines from AndroidFeedback crash report system logs like:
# 03-25 00:51:05.520 I/DEBUG ( 65): #00 pc 001cf42e /data/data/com.my.project/lib/libmyproject.so
# Please note the spacing differences.
trace_line = re.compile("(.*)\#(?P<frame>[0-9]+)[ \t]+(..)[ \t]+(0x)?(?P<address>[0-9a-f]{0,8})[ \t]+(?P<lib>[^\r\n \t]*)(?P<symbol_present> \((?P<symbol_name>.*)\))?") # pylint: disable-msg=C6310
# Examples of matched value lines include:
# bea4170c 8018e4e9 /data/data/com.my.project/lib/libmyproject.so
# bea4170c 8018e4e9 /data/data/com.my.project/lib/libmyproject.so (symbol)
# 03-25 00:51:05.530 I/DEBUG ( 65): bea4170c 8018e4e9 /data/data/com.my.project/lib/libmyproject.so
# Again, note the spacing differences.
value_line = re.compile("(.*)([0-9a-f]{8})[ \t]+([0-9a-f]{8})[ \t]+([^\r\n \t]*)( \((.*)\))?")
# Lines from 'code around' sections of the output will be matched before
  # value lines because otherwise the 'code around' sections will be confused as
# value lines.
#
# Examples include:
# 801cf40c ffffc4cc 00b2f2c5 00b2f1c7 00c1e1a8
# 03-25 00:51:05.530 I/DEBUG ( 65): 801cf40c ffffc4cc 00b2f2c5 00b2f1c7 00c1e1a8
code_line = re.compile("(.*)[ \t]*[a-f0-9]{8}[ \t]*[a-f0-9]{8}[ \t]*[a-f0-9]{8}[ \t]*[a-f0-9]{8}[ \t]*[a-f0-9]{8}[ \t]*[ \r\n]") # pylint: disable-msg=C6310
trace_lines = []
value_lines = []
last_frame = -1
# It is faster to get symbol information with a single call rather than with
# separate calls for each line. Since symbol.SymbolInformation caches results,
# we can extract all the addresses that we will want symbol information for
# from the log and call symbol.SymbolInformation so that the results are
# cached in the following lookups.
code_addresses = {}
for ln in lines:
line = unicode(ln, errors='ignore')
lib, address = None, None
match = trace_line.match(line)
if match:
address, lib = match.group('address', 'lib')
match = value_line.match(line)
if match and not code_line.match(line):
(_0, _1, address, lib, _2, _3) = match.groups()
if lib:
code_addresses.setdefault(lib, set()).add(address)
for lib in code_addresses:
symbol.SymbolInformationForSet(
symbol.TranslateLibPath(lib), code_addresses[lib], more_info)
for ln in lines:
# AndroidFeedback adds zero width spaces into its crash reports. These
    # should be removed or the regular expressions will fail to match.
line = unicode(ln, errors='ignore')
process_header = process_info_line.search(line)
signal_header = signal_line.search(line)
register_header = register_line.search(line)
thread_header = thread_line.search(line)
dalvik_jni_thread_header = dalvik_jni_thread_line.search(line)
dalvik_native_thread_header = dalvik_native_thread_line.search(line)
if process_header or signal_header or register_header or thread_header \
or dalvik_jni_thread_header or dalvik_native_thread_header:
if trace_lines or value_lines:
PrintOutput(trace_lines, value_lines, more_info)
PrintDivider()
trace_lines = []
value_lines = []
last_frame = -1
if process_header:
print process_header.group(1)
if signal_header:
print signal_header.group(1)
if register_header:
print register_header.group(1)
if thread_header:
print thread_header.group(1)
if dalvik_jni_thread_header:
print dalvik_jni_thread_header.group(1)
if dalvik_native_thread_header:
print dalvik_native_thread_header.group(1)
continue
if trace_line.match(line):
match = trace_line.match(line)
frame, code_addr, area, symbol_present, symbol_name = match.group(
'frame', 'address', 'lib', 'symbol_present', 'symbol_name')
if frame <= last_frame and (trace_lines or value_lines):
PrintOutput(trace_lines, value_lines, more_info)
PrintDivider()
trace_lines = []
value_lines = []
last_frame = frame
if area == UNKNOWN or area == HEAP or area == STACK:
trace_lines.append((code_addr, "", area))
else:
# If a calls b which further calls c and c is inlined to b, we want to
# display "a -> b -> c" in the stack trace instead of just "a -> c"
info = symbol.SymbolInformation(area, code_addr, more_info)
nest_count = len(info) - 1
for (source_symbol, source_location, object_symbol_with_offset) in info:
if not source_symbol:
if symbol_present:
source_symbol = symbol.CallCppFilt(symbol_name)
else:
source_symbol = UNKNOWN
if not source_location:
source_location = area
if nest_count > 0:
nest_count = nest_count - 1
trace_lines.append(("v------>", source_symbol, source_location))
else:
if not object_symbol_with_offset:
object_symbol_with_offset = source_symbol
trace_lines.append((code_addr,
object_symbol_with_offset,
source_location))
if code_line.match(line):
      # Code lines should be ignored. If this check were removed, the
      # 'code around' sections would trigger value_line matches.
      continue
if value_line.match(line):
match = value_line.match(line)
(unused_, addr, value, area, symbol_present, symbol_name) = match.groups()
if area == UNKNOWN or area == HEAP or area == STACK or not area:
value_lines.append((addr, value, "", area))
else:
info = symbol.SymbolInformation(area, value, more_info)
(source_symbol, source_location, object_symbol_with_offset) = info.pop()
if not source_symbol:
if symbol_present:
source_symbol = symbol.CallCppFilt(symbol_name)
else:
source_symbol = UNKNOWN
if not source_location:
source_location = area
if not object_symbol_with_offset:
object_symbol_with_offset = source_symbol
value_lines.append((addr,
value,
object_symbol_with_offset,
source_location))
PrintOutput(trace_lines, value_lines, more_info)
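# Minimal driver sketch (illustrative; the tombstone path is hypothetical and
# the companion 'stack' wrapper normally configures the 'symbol' module first):
#
#   with open('tombstone_00.txt') as f:
#     ConvertTrace(f.readlines(), False)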
| bsd-3-clause | 7,700,434,137,084,070,000 | 41.549107 | 199 | 0.637918 | false |
hfp/tensorflow-xsmm | tensorflow/python/ops/functional_ops.py | 3 | 44679 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Functional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
# pylint: disable=unused-import
from tensorflow.python.ops.gen_functional_ops import remote_call
# pylint: enable=unused-import
from tensorflow.python.ops.gen_functional_ops import symbolic_gradient
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# TODO(yuanbyu, mrry): Handle stride to support sliding windows.
@tf_export("foldl")
def foldl(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
swap_memory=False, name=None):
"""foldl on the list of tensors unpacked from `elems` on dimension 0.
This foldl operator repeatedly applies the callable `fn` to a sequence
of elements from first to last. The elements are made of the tensors
unpacked from `elems` on dimension 0. The callable fn takes two tensors as
arguments. The first argument is the accumulated value computed from the
preceding invocation of fn. If `initializer` is None, `elems` must contain
at least one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
  of the result tensor is `fn(initializer, values[0]).shape`.
This method also allows multi-arity `elems` and output of `fn`. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The signature of `fn` may
match the structure of `elems`. That is, if `elems` is
`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
`fn = lambda (t1, [t2, t3, [t4, t5]]):`.
Args:
fn: The callable to be performed.
elems: A tensor or (possibly nested) sequence of tensors, each of which
will be unpacked along their first dimension. The nested sequence
of the resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
as the initial value for the accumulator.
parallel_iterations: (optional) The number of iterations allowed to run
in parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors, resulting from applying
`fn` consecutively to the list of tensors unpacked from `elems`, from first
to last.
Raises:
TypeError: if `fn` is not callable.
Example:
```python
elems = tf.constant([1, 2, 3, 4, 5, 6])
sum = foldl(lambda a, x: a + x, elems)
# sum == 21
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
def create_ta(elem):
return tensor_array_ops.TensorArray(
dtype=elem.dtype, size=n, dynamic_size=False,
infer_shape=True).unstack(elem)
in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "foldl", [elems]):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensor array. n may be known statically.
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in nest.flatten(elems)
]
n = (tensor_shape.dimension_value(elems_flat[0].shape[0])
or array_ops.shape(elems_flat[0])[0])
elems_ta = nest.map_structure(create_ta, elems)
if initializer is None:
a = nest.map_structure(lambda elem: elem.read(0), elems_ta)
i = constant_op.constant(1)
else:
a = initializer
i = constant_op.constant(0)
def compute(i, a):
elem_i = nest.map_structure(lambda elem: elem.read(i), elems_ta)
a = fn(a, elem_i)
return [i + 1, a]
_, r_a = control_flow_ops.while_loop(
lambda i, a: i < n, compute, [i, a],
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
return r_a
@tf_export("foldr")
def foldr(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
swap_memory=False, name=None):
"""foldr on the list of tensors unpacked from `elems` on dimension 0.
This foldr operator repeatedly applies the callable `fn` to a sequence
of elements from last to first. The elements are made of the tensors
unpacked from `elems`. The callable fn takes two tensors as arguments.
The first argument is the accumulated value computed from the preceding
invocation of fn. If `initializer` is None, `elems` must contain at least
one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `fn(initializer, values[0]).shape`.
This method also allows multi-arity `elems` and output of `fn`. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The signature of `fn` may
match the structure of `elems`. That is, if `elems` is
`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
`fn = lambda (t1, [t2, t3, [t4, t5]]):`.
Args:
fn: The callable to be performed.
elems: A tensor or (possibly nested) sequence of tensors, each of which
will be unpacked along their first dimension. The nested sequence
of the resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
as the initial value for the accumulator.
parallel_iterations: (optional) The number of iterations allowed to run
in parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors, resulting from applying
`fn` consecutively to the list of tensors unpacked from `elems`, from last
to first.
Raises:
TypeError: if `fn` is not callable.
Example:
```python
elems = [1, 2, 3, 4, 5, 6]
sum = foldr(lambda a, x: a + x, elems)
# sum == 21
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
def create_ta(elem):
return tensor_array_ops.TensorArray(
dtype=elem.dtype, size=n, dynamic_size=False,
infer_shape=True).unstack(elem)
in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "foldr", [elems]):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally and not
# issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensor array. n may be known statically.
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in nest.flatten(elems)
]
n = (tensor_shape.dimension_value(elems_flat[0].shape[0])
or array_ops.shape(elems_flat[0])[0])
elems_ta = nest.map_structure(create_ta, elems)
if initializer is None:
i = n - 1
a = nest.map_structure(lambda elem: elem.read(i), elems_ta)
else:
i = n
a = initializer
def compute(i, a):
i -= 1
elem = nest.map_structure(lambda elem: elem.read(i), elems_ta)
a_out = fn(a, elem)
return [i, a_out]
_, r_a = control_flow_ops.while_loop(
lambda i, a: i > 0,
compute, [i, a],
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
return r_a
@tf_export("map_fn")
def map_fn(fn, elems, dtype=None, parallel_iterations=None, back_prop=True,
swap_memory=False, infer_shape=True, name=None):
"""map on the list of tensors unpacked from `elems` on dimension 0.
The simplest version of `map_fn` repeatedly applies the callable `fn` to a
sequence of elements from first to last. The elements are made of the
tensors unpacked from `elems`. `dtype` is the data type of the return
value of `fn`. Users must provide `dtype` if it is different from
the data type of `elems`.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `[values.shape[0]] + fn(values[0]).shape`.
This method also allows multi-arity `elems` and output of `fn`. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The signature of `fn` may
match the structure of `elems`. That is, if `elems` is
`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
`fn = lambda (t1, [t2, t3, [t4, t5]]):`.
Furthermore, `fn` may emit a different structure than its input. For example,
`fn` may look like: `fn = lambda t1: return (t1 + 1, t1 - 1)`. In this case,
the `dtype` parameter is not optional: `dtype` must be a type or (possibly
nested) tuple of types matching the output of `fn`.
To apply a functional operation to the nonzero elements of a SparseTensor
one of the following methods is recommended. First, if the function is
expressible as TensorFlow ops, use
```python
result = SparseTensor(input.indices, fn(input.values), input.dense_shape)
```
If, however, the function is not expressible as a TensorFlow op, then use
```python
result = SparseTensor(
input.indices, map_fn(fn, input.values), input.dense_shape)
```
instead.
When executing eagerly, map_fn does not execute in parallel even if
`parallel_iterations` is set to a value > 1. You can still get the
performance benefits of running a function in parallel by using the
`tf.contrib.eager.defun` decorator,
```python
# Assume the function being used in map_fn is fn.
# To ensure map_fn calls fn in parallel, use the defun decorator.
@tf.contrib.eager.defun
def func(tensor):
return tf.map_fn(fn, tensor)
```
Note that if you use the defun decorator, any non-TensorFlow Python code
that you may have written in your function won't get executed. See
`tf.contrib.eager.defun` for more details. The recommendation would be to
debug without defun but switch to defun to get performance benefits of
running map_fn in parallel.
Args:
fn: The callable to be performed. It accepts one argument, which will
have the same (possibly nested) structure as `elems`. Its output
must have the same structure as `dtype` if one is provided, otherwise
it must have the same structure as `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which
will be unpacked along their first dimension. The nested sequence
of the resulting slices will be applied to `fn`.
dtype: (optional) The output type(s) of `fn`. If `fn` returns a structure
of Tensors differing from the structure of `elems`, then `dtype` is not
optional and must have the same structure as the output of `fn`.
parallel_iterations: (optional) The number of iterations allowed to run
in parallel. When graph building, the default value is 10. While executing
eagerly, the default value is set to 1.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
infer_shape: (optional) False disables tests for consistent output shapes.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor packs the
results of applying `fn` to tensors unpacked from `elems` along the first
dimension, from first to last.
Raises:
TypeError: if `fn` is not callable or the structure of the output of
`fn` and `dtype` do not match, or if elems is a SparseTensor.
ValueError: if the lengths of the output of `fn` and `dtype` do not match.
Examples:
```python
elems = np.array([1, 2, 3, 4, 5, 6])
squares = map_fn(lambda x: x * x, elems)
# squares == [1, 4, 9, 16, 25, 36]
```
```python
elems = (np.array([1, 2, 3]), np.array([-1, 1, -1]))
alternate = map_fn(lambda x: x[0] * x[1], elems, dtype=tf.int64)
# alternate == [-1, 2, -3]
```
```python
elems = np.array([1, 2, 3])
alternates = map_fn(lambda x: (x, -x), elems, dtype=(tf.int64, tf.int64))
# alternates[0] == [1, 2, 3]
# alternates[1] == [-1, -2, -3]
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
if isinstance(elems, sparse_tensor.SparseTensor):
raise TypeError(
"To perform a map on the values of a sparse tensor use either "
" SparseTensor(input.indices, fn(input.values), input.dense_shape) or "
" SparseTensor(input.indices, map_fn(fn, input.values), "
"input.dense_shape)")
in_graph_mode = not context.executing_eagerly()
# Set the default number of parallel_iterations depending on graph/eager mode.
if in_graph_mode and not parallel_iterations:
parallel_iterations = 10
elif not in_graph_mode and not parallel_iterations:
parallel_iterations = 1
if not in_graph_mode and parallel_iterations > 1:
logging.log_first_n(logging.WARN, "Setting parallel_iterations > 1 has no "
"effect when executing eagerly. Consider calling map_fn"
" with tf.contrib.eager.defun to execute fn in "
"parallel.", 1)
parallel_iterations = 1
input_is_sequence = nest.is_sequence(elems)
input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x]
def input_pack(x):
return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0]
if dtype is None:
output_is_sequence = input_is_sequence
output_flatten = input_flatten
output_pack = input_pack
else:
output_is_sequence = nest.is_sequence(dtype)
output_flatten = lambda x: nest.flatten(x) if output_is_sequence else [x]
def output_pack(x):
return (nest.pack_sequence_as(dtype, x)
if output_is_sequence else x[0])
elems_flat = input_flatten(elems)
with ops.name_scope(name, "map", elems_flat):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in elems_flat]
dtype = dtype or input_pack([elem.dtype for elem in elems_flat])
dtype_flat = output_flatten(dtype)
# Convert elems to tensor array. n may be known statically.
static_shape = elems_flat[0].shape
if static_shape.ndims is not None and static_shape.ndims < 1:
if len(elems_flat) == 1:
raise ValueError("elems must be a 1+ dimensional Tensor, not a scalar")
else:
raise ValueError(
"elements in elems must be 1+ dimensional Tensors, not scalars"
)
n = (tensor_shape.dimension_value(static_shape[0])
or array_ops.shape(elems_flat[0])[0])
# TensorArrays are always flat
elems_ta = [
tensor_array_ops.TensorArray(dtype=elem.dtype, size=n,
dynamic_size=False,
infer_shape=True)
for elem in elems_flat]
# Unpack elements
elems_ta = [
elem_ta.unstack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)]
i = constant_op.constant(0)
accs_ta = [
tensor_array_ops.TensorArray(dtype=dt, size=n,
dynamic_size=False,
infer_shape=infer_shape)
for dt in dtype_flat]
def compute(i, tas):
"""The loop body of map_fn.
Args:
i: the loop counter
tas: the flat TensorArray accumulator list
Returns:
(i + 1, tas): the updated counter + updated TensorArrays
Raises:
TypeError: if dtype and packed_fn_values structure do not match
ValueType: if dtype and packed_fn_values lengths do not match
"""
packed_values = input_pack([elem_ta.read(i) for elem_ta in elems_ta])
packed_fn_values = fn(packed_values)
nest.assert_same_structure(dtype or elems, packed_fn_values)
flat_fn_values = output_flatten(packed_fn_values)
tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_fn_values)]
return (i + 1, tas)
_, r_a = control_flow_ops.while_loop(
lambda i, _: i < n, compute, (i, accs_ta),
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
results_flat = [r.stack() for r in r_a]
n_static = tensor_shape.Dimension(tensor_shape.dimension_value(
elems_flat[0].get_shape().with_rank_at_least(1)[0]))
for elem in elems_flat[1:]:
n_static.merge_with(tensor_shape.Dimension(tensor_shape.dimension_value(
elem.get_shape().with_rank_at_least(1)[0])))
for r in results_flat:
r.set_shape(tensor_shape.TensorShape(n_static).concatenate(
r.get_shape()[1:]))
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
return output_pack(results_flat)
@tf_export("scan")
def scan(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
swap_memory=False, infer_shape=True, reverse=False, name=None):
"""scan on the list of tensors unpacked from `elems` on dimension 0.
The simplest version of `scan` repeatedly applies the callable `fn` to a
sequence of elements from first to last. The elements are made of the tensors
unpacked from `elems` on dimension 0. The callable fn takes two tensors as
arguments. The first argument is the accumulated value computed from the
preceding invocation of fn. If `initializer` is None, `elems` must contain
at least one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `[len(values)] + fn(initializer, values[0]).shape`.
  If reverse=True, it's `fn(initializer, values[-1]).shape`.
This method also allows multi-arity `elems` and accumulator. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The second argument of
`fn` must match the structure of `elems`.
If no `initializer` is provided, the output structure and dtypes of `fn`
are assumed to be the same as its input; and in this case, the first
argument of `fn` must match the structure of `elems`.
If an `initializer` is provided, then the output of `fn` must have the same
structure as `initializer`; and the first argument of `fn` must match
this structure.
For example, if `elems` is `(t1, [t2, t3])` and `initializer` is
`[i1, i2]` then an appropriate signature for `fn` in `python2` is:
`fn = lambda (acc_p1, acc_p2), (t1, [t2, t3]):` and `fn` must return a list,
`[acc_n1, acc_n2]`. An alternative correct signature for `fn`, and the
one that works in `python3`, is:
`fn = lambda a, t:`, where `a` and `t` correspond to the input tuples.
Args:
fn: The callable to be performed. It accepts two arguments. The first
will have the same structure as `initializer` if one is provided,
otherwise it will have the same structure as `elems`. The second
will have the same (possibly nested) structure as `elems`. Its output
must have the same structure as `initializer` if one is provided,
otherwise it must have the same structure as `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which
will be unpacked along their first dimension. The nested sequence
of the resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
initial value for the accumulator, and the expected output type of `fn`.
parallel_iterations: (optional) The number of iterations allowed to run
in parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
infer_shape: (optional) False disables tests for consistent output shapes.
reverse: (optional) True scans the tensor last to first (instead of first
to last).
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor packs the
results of applying `fn` to tensors unpacked from `elems` along the first
dimension, and the previous accumulator value(s), from first to last (or
last to first, if `reverse=True`).
Raises:
TypeError: if `fn` is not callable or the structure of the output of
`fn` and `initializer` do not match.
ValueError: if the lengths of the output of `fn` and `initializer`
do not match.
Examples:
```python
elems = np.array([1, 2, 3, 4, 5, 6])
sum = scan(lambda a, x: a + x, elems)
# sum == [1, 3, 6, 10, 15, 21]
sum = scan(lambda a, x: a + x, elems, reverse=True)
# sum == [22, 21, 18, 15, 11, 6]
```
```python
elems = np.array([1, 2, 3, 4, 5, 6])
initializer = np.array(0)
sum_one = scan(
lambda a, x: x[0] - x[1] + a, (elems + 1, elems), initializer)
# sum_one == [1, 2, 3, 4, 5, 6]
```
```python
elems = np.array([1, 0, 0, 0, 0, 0])
initializer = (np.array(0), np.array(1))
fibonaccis = scan(lambda a, _: (a[1], a[0] + a[1]), elems, initializer)
# fibonaccis == ([1, 1, 2, 3, 5, 8], [1, 2, 3, 5, 8, 13])
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
input_is_sequence = nest.is_sequence(elems)
input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x]
def input_pack(x):
return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0]
if initializer is None:
output_is_sequence = input_is_sequence
output_flatten = input_flatten
output_pack = input_pack
else:
output_is_sequence = nest.is_sequence(initializer)
output_flatten = lambda x: nest.flatten(x) if output_is_sequence else [x]
def output_pack(x):
return (nest.pack_sequence_as(initializer, x)
if output_is_sequence else x[0])
elems_flat = input_flatten(elems)
in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "scan", elems_flat):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensor array.
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in elems_flat]
# Convert elems to tensor array. n may be known statically.
n = (tensor_shape.dimension_value(elems_flat[0].shape[0])
or array_ops.shape(elems_flat[0])[0])
# TensorArrays are always flat
elems_ta = [
tensor_array_ops.TensorArray(dtype=elem.dtype, size=n,
dynamic_size=False,
infer_shape=True)
for elem in elems_flat]
# Unpack elements
elems_ta = [
elem_ta.unstack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)]
if initializer is None:
a_flat = [elem.read(n - 1 if reverse else 0) for elem in elems_ta]
i = constant_op.constant(1)
else:
initializer_flat = output_flatten(initializer)
a_flat = [ops.convert_to_tensor(init) for init in initializer_flat]
i = constant_op.constant(0)
# Create a tensor array to store the intermediate values.
accs_ta = [
tensor_array_ops.TensorArray(
dtype=init.dtype, size=n,
element_shape=init.shape if infer_shape else None,
dynamic_size=False,
infer_shape=infer_shape)
for init in a_flat]
if initializer is None:
accs_ta = [acc_ta.write(n - 1 if reverse else 0, a)
for (acc_ta, a) in zip(accs_ta, a_flat)]
def compute(i, a_flat, tas):
"""The loop body of scan.
Args:
i: the loop counter.
a_flat: the accumulator value(s), flattened.
tas: the output accumulator TensorArray(s), flattened.
Returns:
[i + 1, a_flat, tas]: the updated counter + new accumulator values +
updated TensorArrays
Raises:
TypeError: if initializer and fn() output structure do not match
ValueType: if initializer and fn() output lengths do not match
"""
packed_elems = input_pack([elem_ta.read(i) for elem_ta in elems_ta])
packed_a = output_pack(a_flat)
a_out = fn(packed_a, packed_elems)
nest.assert_same_structure(
elems if initializer is None else initializer, a_out)
flat_a_out = output_flatten(a_out)
tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_a_out)]
if reverse:
next_i = i - 1
else:
next_i = i + 1
return (next_i, flat_a_out, tas)
if reverse:
initial_i = n - 1 - i
condition = lambda i, _1, _2: i >= 0
else:
initial_i = i
condition = lambda i, _1, _2: i < n
_, _, r_a = control_flow_ops.while_loop(
condition, compute, (initial_i, a_flat, accs_ta),
parallel_iterations=parallel_iterations,
back_prop=back_prop, swap_memory=swap_memory,
maximum_iterations=n)
results_flat = [r.stack() for r in r_a]
n_static = tensor_shape.Dimension(tensor_shape.dimension_value(
elems_flat[0].get_shape().with_rank_at_least(1)[0]))
for elem in elems_flat[1:]:
n_static.merge_with(tensor_shape.Dimension(tensor_shape.dimension_value(
elem.get_shape().with_rank_at_least(1)[0])))
for r in results_flat:
r.set_shape(tensor_shape.TensorShape(n_static).concatenate(
r.get_shape()[1:]))
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
return output_pack(results_flat)
# pylint: disable=invalid-name
def If(cond, inputs, then_branch, else_branch, name=None):
r"""output = Cond(inputs) ? then_branch(inputs) : else_branch(inputs).
Args:
cond: A `Tensor`. A scalar. If the scalar is not a boolean, the scalar is
converted to a boolean according to the following rule: if the
scalar is a numerical value, non-zero means True and zero means
False; if the scalar is a string, non-empty means True and empty
means False.
inputs: A list of input tensors.
    then_branch: A function that takes 'inputs' and returns a list of tensors,
      whose types are the same as what else_branch returns.
    else_branch: A function that takes 'inputs' and returns a list of tensors,
      whose types are the same as what then_branch returns.
name: A name for the operation (optional).
Returns:
A list of tensors returned by either then_branch(inputs)
or else_branch(inputs).
"""
# pylint: disable=protected-access
return gen_functional_ops._if(
cond,
inputs, [_.type for _ in then_branch.definition.signature.output_arg],
then_branch,
else_branch,
name=name)
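# A minimal, illustrative sketch of calling If(); it assumes the branches are
# built with function.Defun (as elsewhere in this module), and the names and
# values are illustration-only assumptions.
#
#   @function.Defun(dtypes.float32)
#   def TwoTimes(x):
#     return x * 2.
#
#   @function.Defun(dtypes.float32)
#   def ThreeTimes(x):
#     return x * 3.
#
#   x = constant_op.constant(4.)
#   y = If(math_ops.greater(x, 0.), [x], TwoTimes, ThreeTimes)  # y[0] -> 8.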
def Gradient(inputs, f, name=None):
r"""Computes the gradient function for function f via backpropagation.
Args:
inputs: A list of tensors of size N + M.
f: The function we want to compute the gradient for.
      The function 'f' must be a numerical function which takes N inputs and
      produces M outputs. Its gradient function 'g' takes N + M inputs and
      produces N outputs.
I.e. if we have
(y1, y2, ..., yM) = f(x1, x2, ..., xN),
then, g is
(dL/dx1, dL/dx2, ..., dL/dxN) = g(x1, x2, ..., xN,
dL/dy1, dL/dy2, ..., dL/dyM),
where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the
loss function). dL/dxi is the partial derivative of L with respect
to xi.
name: A name for the operation (optional).
Returns:
A list of tensors of size N.
"""
# TODO(zhifengc): Pretty-print the above spec in latex.
# TODO(zhfiengc): Needs some math expert to say the comment above better.
tlist = [_.type for _ in f.definition.signature.input_arg]
return symbolic_gradient(input=inputs, Tout=tlist, f=f, name=name)
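# A minimal, illustrative sketch of Gradient() with a Defun-built f; here
# N = 1 input, M = 1 output, and dL/dy is seeded with 1.0. Names and values
# are illustration-only assumptions.
#
#   @function.Defun(dtypes.float32)
#   def Square(x):
#     return x * x
#
#   x = constant_op.constant(3.)
#   grads = Gradient([x, constant_op.constant(1.0)], Square)
#   # grads[0] -> 6. (d(x*x)/dx at x = 3, scaled by dL/dy = 1.0)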
def _LoopBodyCaptureWrapper(func):
"""Returns a wrapper for `func` that handles loop-carried captured inputs."""
@function.Defun(
*func.declared_input_types, func_name="%s_Wrapper" % func.name)
def Wrapper(*args):
"""A wrapper that handles loop-carried captured inputs."""
result = func(*args)
extra_args = tuple(function.get_extra_args())
# Nullary functions return an Operation. Normal functions can't do this
# because their return values are converted to Tensors.
if isinstance(result, ops.Operation):
return extra_args
# Unary functions return a single Tensor value.
elif not isinstance(result, tuple):
return (result,) + extra_args
# N-ary functions return a tuple of Tensors.
else:
return result + extra_args
return Wrapper
# pylint: disable=invalid-name,protected-access
def While(input_, cond, body, name=None, hostmem=None):
r"""output = input; While (Cond(output)) { output = Body(output) }.
Args:
input_: A list of `Tensor` objects.
A list of input tensors whose types are T.
    cond: A function that takes 'input' and returns a tensor. If the tensor
      is a non-boolean scalar, the scalar is converted to a boolean
according to the following rule: if the scalar is a numerical
value, non-zero means True and zero means False; if the scalar is
a string, non-empty means True and empty means False. If the
tensor is not a scalar, non-emptiness means True and False
otherwise.
    body: A function that takes a list of tensors and returns another
      list of tensors. Both lists have the same types as specified
by T.
name: A name for the operation (optional).
    hostmem: A list of integers. If i is in the list, input[i] is a
host memory tensor.
Raises:
ValueError: if `cond` has implicitly captured inputs or if `cond` and `body`
have different signatures.
Returns:
A list of `Tensor` objects. Has the same type as `input`.
A list of output tensors whose types are T.
"""
if cond.captured_inputs:
raise ValueError("While op 'cond' argument must be a function "
"without implicitly captured inputs.")
if cond.declared_input_types != body.declared_input_types:
raise ValueError(
"While op 'cond' and 'body' signatures do not match. %r vs %r" %
(cond.declared_input_types, body.declared_input_types))
if body.captured_inputs:
cond_dtypes = list(
body.declared_input_types) + [t.dtype for t in body.captured_inputs]
@function.Defun(*cond_dtypes, func_name="%s_Wrapper" % cond.name)
def CondWrapper(*args):
"""A wrapper that handles loop-carried captured inputs."""
return cond(*args[:len(body.declared_input_types)])
ret = gen_functional_ops._while(
input_ + body.captured_inputs,
CondWrapper,
_LoopBodyCaptureWrapper(body),
name=name)
# Slice off the loop-carried captured inputs.
ret = ret[:-len(body.captured_inputs)]
else:
ret = gen_functional_ops._while(input_, cond, body, name=name)
if hostmem:
input_attr = attr_value_pb2.AttrValue()
input_attr.list.i.extend(hostmem)
ret[0].op._set_attr("_input_hostmem", input_attr) # pylint: disable=protected-access
output_attr = attr_value_pb2.AttrValue()
output_attr.list.i.extend(hostmem)
ret[0].op._set_attr("_output_hostmem", output_attr) # pylint: disable=protected-access
return ret
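# A minimal, illustrative sketch of While() with Defun-built cond/body whose
# signatures match; names and values are illustration-only assumptions.
#
#   @function.Defun(dtypes.int32)
#   def LessThanTen(i):
#     return i < 10
#
#   @function.Defun(dtypes.int32)
#   def PlusOne(i):
#     return i + 1
#
#   result = While([constant_op.constant(0)], LessThanTen, PlusOne)
#   # result[0] -> 10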
# b/36459430
#
# Ideally, we would not need to rewrite a For loop into a While loop.
# However, today, if a While runs on GPU and the condition returns a
# boolean, the While kernel crashes. Even if we fix the crash, the
# bool needs to be copied between GPU and CPU. So, a for loop is much
# preferred when running on GPU.
#
# On the other hand, the For op has no direct XLA kernel. So, when we run
# a for loop, we need to rewrite it using a While op.
#
# It should be possible and probably better to write an XLA C++ kernel
# implementing the logic in _ForUsingWhile.
def _ForUsingWhile(start,
limit,
delta,
inputs,
forbody,
name=None,
hostmem=None):
"""Helper to implement a For loop using a While."""
# To support negative delta (e.g., range(100, 0, -3)), we iterate
# over the range(n) and use iter * delta + start as the real
# iteration index. (e.g., for i in range(34): iter = i * (-3) +
# 100).
d = math_ops.abs(delta)
# XLA on TPUs doesn't support integer division
n = math_ops.cast(
math_ops.cast((math_ops.abs(limit - start) + d - 1), dtypes.float32) /
math_ops.cast(d, dtypes.float32), dtypes.int32)
# Carried loop variables ("extra_args") are implicitly added to the input list
# of the WhileBody function. WhileCond does not call forbody, and so does not
# depend on any of forbody's extra_args. Since WhileCond and WhileBody
# must have identical inputs, we have to augment the cond signature to take
# the same types as the carried loop variables.
body_sig = [dtypes.int32] * 4 + list(forbody.declared_input_types)[1:]
cond_name = "%s_Cond" % forbody.name
@function.Defun(*body_sig, func_name=cond_name)
def WhileCond(i, n, *args):
del args
return i < n
body_name = "%s_Body" % forbody.name
@function.Defun(*body_sig, func_name=body_name)
def WhileBody(i, n, start, delta, *args):
"""A While wrapper for forbody that handles loop-carried captured inputs."""
for_result = forbody(start + i * delta, *args)
# Nullary functions return an Operation. Normal functions can't do this
# because their return values are converted to Tensors.
if isinstance(for_result, ops.Operation):
for_result = ()
# Unary functions return a single Tensor value.
elif isinstance(for_result, ops.Tensor):
for_result = (for_result,)
return (i + 1, n, start, delta) + tuple(for_result)
if hostmem is not None:
hostmem = [0, 1, 2, 3] + [(4 + _) for _ in hostmem]
else:
hostmem = [0, 1, 2, 3]
results = While(
input_=[0, n, start, delta] + inputs,
cond=WhileCond,
body=WhileBody,
name=name,
hostmem=hostmem)
# Slice off the loop-carried captured inputs.
return list(results[4:len(results)])
def For(start,
limit,
delta,
inputs,
body,
name=None,
hostmem=None,
rewrite_with_while=None):
r"""out = input; for i in range(start, limit, delta) out = body(i, out).
Args:
start: A `Tensor` of type `int32`.
limit: A `Tensor` of type `int32`.
delta: A `Tensor` of type `int32`.
inputs: A list of `Tensor` objects.
A list of input tensors whose types are T.
    body: A function that takes a list of tensors and returns another
list of tensors. Both lists have the same types as (int32, T...).
name: A name for the operation (optional).
    hostmem: A list of integers. If i is in the list, inputs[i] is a
      host memory tensor. In other words, the (i+1)-th argument of the body
      function expects a host memory tensor.
    rewrite_with_while: If True, use the While op to implement the For.
Returns:
A list of `Tensor` objects. Has the same type as `input`.
A list of output tensors whose types are T.
"""
if rewrite_with_while:
return _ForUsingWhile(start, limit, delta, inputs, body, name, hostmem)
if body.captured_inputs:
ret = gen_functional_ops._for(
start,
limit,
delta,
inputs + body.captured_inputs,
_LoopBodyCaptureWrapper(body),
name=name)
# Slice off the loop-carried captured inputs.
ret = ret[:-len(body.captured_inputs)]
else:
ret = gen_functional_ops._for(start, limit, delta, inputs, body, name=name)
if hostmem:
num_for_params = 3 # start/limit/delta
input_attr = attr_value_pb2.AttrValue()
input_attr.list.i.extend([num_for_params + i for i in hostmem])
ret[0].op._set_attr("_input_hostmem", input_attr) # pylint: disable=protected-access
output_attr = attr_value_pb2.AttrValue()
output_attr.list.i.extend(hostmem)
ret[0].op._set_attr("_output_hostmem", output_attr) # pylint: disable=protected-access
return ret
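# A minimal, illustrative sketch of For() with a Defun-built body whose first
# argument is the int32 loop index; names and values are illustration-only
# assumptions.
#
#   @function.Defun(dtypes.int32, dtypes.float32)
#   def AddIndex(i, acc):
#     return acc + math_ops.cast(i, dtypes.float32)
#
#   (total,) = For(0, 10, 1, [constant_op.constant(0.)], AddIndex)
#   # total -> 45. (the sum 0 + 1 + ... + 9)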
# pylint: enable=invalid-name,protected-access
_rewriter_config_optimizer_disabled = None
def _get_disabled_rewriter_config():
global _rewriter_config_optimizer_disabled
if _rewriter_config_optimizer_disabled is None:
config = config_pb2.ConfigProto()
rewriter_config = config.graph_options.rewrite_options
rewriter_config.disable_meta_optimizer = True
_rewriter_config_optimizer_disabled = config.SerializeToString()
return _rewriter_config_optimizer_disabled
def partitioned_call(args, f, tout=None, executing_eagerly=None, config=None,
executor_type=None):
"""Executes a function while respecting device annotations.
Currently, only those functions that execute within the same address space
can be executed.
Args:
args: The arguments of the function, including captured inputs.
f: The function to execute; an instance of `_DefinedFunction` or
`_EagerDefinedFunction`.
    tout: a list containing the output dtype enums; if `None`, inferred from
the signature of `f`.
executing_eagerly: (Optional) A boolean indicating whether the context is
executing eagerly. If `None`, fetched from the global context.
config: (Optional) A `tensorflow::ConfigProto` proto, serialized. If
`None`, all optimizations are disabled. Currently only handled for eager
defined functions.
executor_type: (Optional) A string for the name of the executor to be used
in the function call. If not set, or set to an empty string, the default
tensorflow executor will be used.
Returns:
The list of `Tensor`s returned by invoking `f(args)`. If the function does
not return anything, then returns `None` if eager execution is enabled, or
the `Operation` if not.
"""
if tout is None:
tout = tuple(x.type for x in f.definition.signature.output_arg)
if executing_eagerly is None:
executing_eagerly = context.executing_eagerly()
if config is None:
config = _get_disabled_rewriter_config()
if executor_type is None:
executor_type = ""
if executing_eagerly or len(tout):
if f.stateful_ops:
outputs = gen_functional_ops.stateful_partitioned_call(
args=args, Tout=tout, f=f, config_proto=config,
executor_type=executor_type)
else:
outputs = gen_functional_ops.partitioned_call(
args=args, Tout=tout, f=f, config_proto=config,
executor_type=executor_type)
return outputs if outputs else None
# The generated binding returns an empty list for functions that don't
# return any Tensors, hence the need to use `create_op` directly.
args = [ops.internal_convert_to_tensor(x) for x in args]
tin_attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
type=[x.dtype.as_datatype_enum for x in args]))
tout_attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(type=tout))
func_attr = attr_value_pb2.AttrValue(
func=attr_value_pb2.NameAttrList(name=f.name))
executor_type_attr = attr_value_pb2.AttrValue(
s=compat.as_bytes(executor_type))
# When running in graph mode, the graph and function graphs are optimized
# (i.e. run through grappler) per the session options, so we can disable any
# eager-specific rewriting.
config_proto = attr_value_pb2.AttrValue(s=_get_disabled_rewriter_config())
graph = ops.get_default_graph()
f.add_to_graph(graph)
op_name = "StatefulPartitionedCall" if f.stateful_ops else "PartitionedCall"
op = graph.create_op(
op_name,
args,
tout,
compute_shapes=False,
name="PartitionedFunctionCall",
attrs={
"Tin": tin_attr,
"Tout": tout_attr,
"f": func_attr,
"config_proto": config_proto,
"executor_type": executor_type_attr,
})
outputs = op.outputs
return outputs if outputs else op
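# A minimal, illustrative sketch of partitioned_call() with a Defun-built f,
# from which the output dtypes are inferred; names and values are
# illustration-only assumptions.
#
#   @function.Defun(dtypes.float32)
#   def PlusOne(x):
#     return x + 1.
#
#   outputs = partitioned_call([constant_op.constant(1.)], PlusOne)
#   # outputs[0] -> 2.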
| apache-2.0 | 7,424,192,598,383,770,000 | 38.714667 | 91 | 0.668233 | false |
bluecoiner/bluecoin-new | qa/rpc-tests/disablewallet.py | 102 | 1820 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Exercise API with -disablewallet.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class DisableWalletTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-disablewallet']])
self.is_network_split = False
self.sync_all()
def run_test (self):
# Check regression: https://github.com/bitcoin/bitcoin/issues/6963#issuecomment-154548880
x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
assert(x['isvalid'] == False)
x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
assert(x['isvalid'] == True)
# Checking mining to an address without a wallet
try:
self.nodes[0].generatetoaddress(1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
except JSONRPCException as e:
assert("Invalid address" not in e.error['message'])
assert("ProcessNewBlock, block not accepted" not in e.error['message'])
assert("Couldn't create new block" not in e.error['message'])
try:
self.nodes[0].generatetoaddress(1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
raise AssertionError("Must not mine to invalid address!")
except JSONRPCException as e:
assert("Invalid address" in e.error['message'])
if __name__ == '__main__':
DisableWalletTest ().main ()
| mit | -239,444,178,507,247,170 | 36.916667 | 97 | 0.667033 | false |
rockyzhang/zhangyanhit-python-for-android-mips | python3-alpha/python3-src/Lib/email/test/test_email_torture.py | 85 | 3657 | # Copyright (C) 2002-2004 Python Software Foundation
#
# A torture test of the email package. This should not be run as part of the
# standard Python test suite since it requires several meg of email messages
# collected in the wild. These source messages are not checked into the
# Python distro, but are available as part of the standalone email package at
# http://sf.net/projects/mimelib
import sys
import os
import unittest
from io import StringIO
from email.test.test_email import TestEmailBase
from test.support import TestSkipped, run_unittest
import email
from email import __file__ as testfile
from email.iterators import _structure
def openfile(filename):
from os.path import join, dirname, abspath
path = abspath(join(dirname(testfile), os.pardir, 'moredata', filename))
return open(path, 'r')
# Prevent this test from running in the Python distro
try:
openfile('crispin-torture.txt')
except IOError:
raise TestSkipped
class TortureBase(TestEmailBase):
def _msgobj(self, filename):
fp = openfile(filename)
try:
msg = email.message_from_file(fp)
finally:
fp.close()
return msg
class TestCrispinTorture(TortureBase):
# Mark Crispin's torture test from the SquirrelMail project
def test_mondo_message(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
msg = self._msgobj('crispin-torture.txt')
payload = msg.get_payload()
        eq(type(payload), list)
eq(len(payload), 12)
eq(msg.preamble, None)
eq(msg.epilogue, '\n')
# Probably the best way to verify the message is parsed correctly is to
# dump its structure and compare it against the known structure.
fp = StringIO()
_structure(msg, fp=fp)
neq(fp.getvalue(), """\
multipart/mixed
text/plain
message/rfc822
multipart/alternative
text/plain
multipart/mixed
text/richtext
application/andrew-inset
message/rfc822
audio/basic
audio/basic
image/pbm
message/rfc822
multipart/mixed
multipart/mixed
text/plain
audio/x-sun
multipart/mixed
image/gif
image/gif
application/x-be2
application/atomicmail
audio/x-sun
message/rfc822
multipart/mixed
text/plain
image/pgm
text/plain
message/rfc822
multipart/mixed
text/plain
image/pbm
message/rfc822
application/postscript
image/gif
message/rfc822
multipart/mixed
audio/basic
audio/basic
message/rfc822
multipart/mixed
application/postscript
text/plain
message/rfc822
multipart/mixed
text/plain
multipart/parallel
image/gif
audio/basic
application/atomicmail
message/rfc822
audio/x-sun
""")
def _testclasses():
mod = sys.modules[__name__]
return [getattr(mod, name) for name in dir(mod) if name.startswith('Test')]
def suite():
suite = unittest.TestSuite()
for testclass in _testclasses():
suite.addTest(unittest.makeSuite(testclass))
return suite
def test_main():
for testclass in _testclasses():
run_unittest(testclass)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| apache-2.0 | 2,013,427,111,629,872,600 | 25.121429 | 79 | 0.606235 | false |
ep1cman/workload-automation | wlauto/tests/test_instrumentation.py | 5 | 7244 | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=W0231,W0613,E0611,W0603,R0201
from unittest import TestCase
from nose.tools import assert_equal, raises, assert_true, assert_false
from wlauto import Instrument
from wlauto.core import signal, instrumentation
from wlauto.instrumentation import instrument_is_installed, instrument_is_enabled, clear_instrumentation
class MockInstrument(Instrument):
name = 'mock'
def __init__(self):
Instrument.__init__(self, None)
self.before = 0
self.after = 0
def before_workload_execution(self, context):
self.before += 1
def after_workload_execution(self, context):
self.after += 1
class MockInstrument2(Instrument):
name = 'mock_2'
def __init__(self):
Instrument.__init__(self, None)
self.before = 0
self.after = 0
self.result = 0
def before_workload_execution(self, context):
self.before += 1
def after_workload_execution(self, context):
self.after += 1
def after_workload_result_update(self, context):
self.result += 1
class MockInstrument3(Instrument):
name = 'mock_3'
def __init__(self):
Instrument.__init__(self, None)
def slow_before_workload_execution(self, context):
global counter
counter += 1
class MockInstrument4(Instrument):
name = 'mock_4'
def __init__(self):
Instrument.__init__(self, None)
def slow_before_first_iteration_boot(self, context):
global counter
counter = 4
class MockInstrument5(Instrument):
name = 'mock_5'
def __init__(self):
Instrument.__init__(self, None)
def fast_before_first_iteration_boot(self, context):
global counter
counter += 2
class MockInstrument6(Instrument):
name = 'mock_6'
def __init__(self):
Instrument.__init__(self, None)
def before_first_iteration_boot(self, context):
global counter
counter *= 10
class BadInstrument(Instrument):
name = 'bad'
def __init__(self):
pass
# Not specifying the context argument.
def teardown(self):
pass
counter = 0
class InstrumentationTest(TestCase):
def tearDown(self):
clear_instrumentation()
def test_install(self):
instrument = _instantiate(MockInstrument)
instrument2 = _instantiate(MockInstrument2)
instrumentation.install(instrument)
instrumentation.install(instrument2)
signal.send(signal.BEFORE_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_RESULT_UPDATE, self, context=None)
assert_equal(instrument.before, 1)
assert_equal(instrument.after, 1)
assert_equal(instrument2.before, 1)
assert_equal(instrument2.after, 1)
assert_equal(instrument2.result, 1)
def test_enable_disable(self):
instrument = _instantiate(MockInstrument)
instrument2 = _instantiate(MockInstrument2)
instrumentation.install(instrument)
instrumentation.install(instrument2)
instrumentation.disable_all()
signal.send(signal.BEFORE_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_RESULT_UPDATE, self, context=None)
assert_equal(instrument.before, 0)
assert_equal(instrument.after, 0)
assert_equal(instrument2.before, 0)
assert_equal(instrument2.after, 0)
assert_equal(instrument2.result, 0)
instrumentation.enable(instrument)
signal.send(signal.BEFORE_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_RESULT_UPDATE, self, context=None)
assert_equal(instrument.before, 1)
assert_equal(instrument.after, 1)
assert_equal(instrument2.before, 0)
assert_equal(instrument2.after, 0)
assert_equal(instrument2.result, 0)
instrumentation.enable_all()
signal.send(signal.BEFORE_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_EXECUTION, self, context=None)
signal.send(signal.AFTER_WORKLOAD_RESULT_UPDATE, self, context=None)
assert_equal(instrument.before, 2)
assert_equal(instrument.after, 2)
assert_equal(instrument2.before, 1)
assert_equal(instrument2.after, 1)
assert_equal(instrument2.result, 1)
def test_check_enabled(self):
instrument = _instantiate(MockInstrument)
instrumentation.install(instrument)
instrumentation.enable(instrument)
assert_true(instrument_is_enabled(instrument))
assert_true(instrument_is_enabled(instrument.name))
instrumentation.disable(instrument)
assert_false(instrument_is_enabled(instrument))
assert_false(instrument_is_enabled(instrument.name))
def test_local_instrument(self):
global counter
counter = 0
self.install_local_instrument()
signal.send(signal.BEFORE_WORKLOAD_EXECUTION, self, context=None)
assert_equal(counter, 1)
def test_priority_prefix_instrument(self):
global counter
counter = 0
instrument1 = _instantiate(MockInstrument4)
instrument2 = _instantiate(MockInstrument5)
instrument3 = _instantiate(MockInstrument6)
instrumentation.install(instrument1)
instrumentation.install(instrument2)
instrumentation.install(instrument3)
signal.send(signal.BEFORE_FIRST_ITERATION_BOOT, self, context=None)
assert_equal(counter, 42)
@raises(ValueError)
def test_bad_argspec(self):
instrument = _instantiate(BadInstrument)
instrumentation.install(instrument)
def test_check_installed(self):
instrumentation.install(_instantiate(MockInstrument))
assert_true(instrument_is_installed('mock'))
assert_true(instrument_is_installed(MockInstrument))
assert_false(instrument_is_installed(MockInstrument2))
def install_local_instrument(self):
instrument = _instantiate(MockInstrument3)
instrumentation.install(instrument)
@raises(ValueError)
def test_duplicate_install(self):
instrument = _instantiate(MockInstrument)
instrument2 = _instantiate(MockInstrument)
instrumentation.install(instrument)
instrumentation.install(instrument2)
def _instantiate(cls):
# Needed to get around Extension's __init__ checks
return cls()
| apache-2.0 | 8,217,612,873,723,125,000 | 29.825532 | 104 | 0.679873 | false |
salamer/django | django/contrib/gis/geoip/prototypes.py | 535 | 3943 | from ctypes import POINTER, Structure, c_char_p, c_float, c_int, string_at
from django.contrib.gis.geoip.libgeoip import free, lgeoip
# #### GeoIP C Structure definitions ####
class GeoIPRecord(Structure):
_fields_ = [('country_code', c_char_p),
('country_code3', c_char_p),
('country_name', c_char_p),
('region', c_char_p),
('city', c_char_p),
('postal_code', c_char_p),
('latitude', c_float),
('longitude', c_float),
# TODO: In 1.4.6 this changed from `int dma_code;` to
# `union {int metro_code; int dma_code;};`. Change
                    # to a `ctypes.Union` to accommodate this in the future when
# pre-1.4.6 versions are no longer distributed.
('dma_code', c_int),
('area_code', c_int),
('charset', c_int),
('continent_code', c_char_p),
]
geoip_char_fields = [name for name, ctype in GeoIPRecord._fields_ if ctype is c_char_p]
GEOIP_DEFAULT_ENCODING = 'iso-8859-1'
geoip_encodings = {
0: 'iso-8859-1',
1: 'utf8',
}
class GeoIPTag(Structure):
pass
RECTYPE = POINTER(GeoIPRecord)
DBTYPE = POINTER(GeoIPTag)
# #### ctypes function prototypes ####
# GeoIP_lib_version appeared in version 1.4.7.
if hasattr(lgeoip, 'GeoIP_lib_version'):
GeoIP_lib_version = lgeoip.GeoIP_lib_version
GeoIP_lib_version.argtypes = None
GeoIP_lib_version.restype = c_char_p
else:
GeoIP_lib_version = None
# For freeing memory allocated within a record
GeoIPRecord_delete = lgeoip.GeoIPRecord_delete
GeoIPRecord_delete.argtypes = [RECTYPE]
GeoIPRecord_delete.restype = None
# For retrieving records by name or address.
def check_record(result, func, cargs):
if result:
# Checking the pointer to the C structure, if valid pull out elements
# into a dictionary.
rec = result.contents
record = {fld: getattr(rec, fld) for fld, ctype in rec._fields_}
# Now converting the strings to unicode using the proper encoding.
encoding = geoip_encodings[record['charset']]
for char_field in geoip_char_fields:
if record[char_field]:
record[char_field] = record[char_field].decode(encoding)
# Free the memory allocated for the struct & return.
GeoIPRecord_delete(result)
return record
else:
return None
def record_output(func):
func.argtypes = [DBTYPE, c_char_p]
func.restype = RECTYPE
func.errcheck = check_record
return func
GeoIP_record_by_addr = record_output(lgeoip.GeoIP_record_by_addr)
GeoIP_record_by_name = record_output(lgeoip.GeoIP_record_by_name)
# For opening & closing GeoIP database files.
GeoIP_open = lgeoip.GeoIP_open
GeoIP_open.restype = DBTYPE
GeoIP_delete = lgeoip.GeoIP_delete
GeoIP_delete.argtypes = [DBTYPE]
GeoIP_delete.restype = None
# This is so the string pointer can be freed within Python.
class geoip_char_p(c_char_p):
pass
def check_string(result, func, cargs):
if result:
s = string_at(result)
free(result)
else:
s = ''
return s.decode(GEOIP_DEFAULT_ENCODING)
GeoIP_database_info = lgeoip.GeoIP_database_info
GeoIP_database_info.restype = geoip_char_p
GeoIP_database_info.errcheck = check_string
# String output routines.
def string_output(func):
def _err_check(result, func, cargs):
if result:
return result.decode(GEOIP_DEFAULT_ENCODING)
return result
func.restype = c_char_p
func.errcheck = _err_check
return func
GeoIP_country_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr)
GeoIP_country_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name)
GeoIP_country_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr)
GeoIP_country_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name)
| bsd-3-clause | -1,152,448,507,726,844,300 | 30.544 | 87 | 0.643672 | false |
damienmg/bazel | third_party/def_parser/def_parser_test.py | 17 | 4087 | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from src.test.py.bazel import test_base
class DEFParserTest(test_base.TestBase):
def createAndBuildProjectFiles(self):
self.ScratchFile('WORKSPACE')
self.ScratchFile('BUILD', ['cc_library(name="hello", srcs=["x.cc"])'])
self.ScratchFile('x.cc', [
'#include <stdio.h>',
'int hello_data;',
'void hello_world() {',
' printf("hello world\\n");',
'}',
])
exit_code, _, stderr = self.RunBazel(['build', '//:hello'])
self.AssertExitCode(exit_code, 0, stderr)
def testParseDefFileFromObjectFile(self):
# Skip this test on non-Windows platforms
if not self.IsWindows():
return
self.createAndBuildProjectFiles()
exit_code, stdout, stderr = self.RunBazel(['info', 'bazel-bin'])
self.AssertExitCode(exit_code, 0, stderr)
bazel_bin = stdout[0]
objfile = os.path.join(bazel_bin, '_objs', 'hello', 'x.o')
self.assertTrue(os.path.isfile(objfile))
    output_def = self.Path('x.def')
self.RunProgram([self.Rlocation('io_bazel/third_party/def_parser/def_parser.exe'), output_def, 'my_x.dll', objfile])
self.assertTrue(os.path.isfile(output_def))
with open(output_def, 'r') as def_file:
def_content = def_file.read()
self.assertIn('LIBRARY my_x.dll', def_content)
self.assertIn('hello_data', def_content)
self.assertIn('hello_world', def_content)
def testParseDefFileFromObjectFileWithParamFile(self):
# Skip this test on non-Windows platforms
if not self.IsWindows():
return
self.createAndBuildProjectFiles()
exit_code, stdout, stderr = self.RunBazel(['info', 'bazel-bin'])
self.AssertExitCode(exit_code, 0, stderr)
bazel_bin = stdout[0]
objfile = os.path.join(bazel_bin, '_objs', 'hello', 'x.o')
self.assertTrue(os.path.isfile(objfile))
objfilelist = self.ScratchFile('objfilelist', [objfile])
    output_def = self.Path('x.def')
self.RunProgram([self.Rlocation('io_bazel/third_party/def_parser/def_parser.exe'), output_def, 'my_x.dll', '@' + objfilelist])
self.assertTrue(os.path.isfile(output_def))
with open(output_def, 'r') as def_file:
def_content = def_file.read()
self.assertIn('LIBRARY my_x.dll', def_content)
self.assertIn('hello_data', def_content)
self.assertIn('hello_world', def_content)
def testParseDefFileFromAnotherDefFile(self):
# Skip this test on non-Windows platforms
if not self.IsWindows():
return
self.createAndBuildProjectFiles()
exit_code, stdout, stderr = self.RunBazel(['info', 'bazel-bin'])
self.AssertExitCode(exit_code, 0, stderr)
bazel_bin = stdout[0]
objfile = os.path.join(bazel_bin, '_objs', 'hello', 'x.o')
self.assertTrue(os.path.isfile(objfile))
    output_def = self.Path('x.def')
self.RunProgram([self.Rlocation('io_bazel/third_party/def_parser/def_parser.exe'), output_def, 'my_x.dll', objfile])
self.assertTrue(os.path.isfile(output_def))
    new_output_def = self.Path('new_x.def')
self.RunProgram([self.Rlocation('io_bazel/third_party/def_parser/def_parser.exe'), new_output_def, 'my_x.dll', output_def])
self.assertTrue(os.path.isfile(new_output_def))
with open(new_output_def, 'r') as def_file:
def_content = def_file.read()
self.assertIn('LIBRARY my_x.dll', def_content)
self.assertIn('hello_data', def_content)
self.assertIn('hello_world', def_content)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -2,259,040,881,502,733,000 | 36.842593 | 130 | 0.676046 | false |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/boto/services/result.py | 153 | 5596 | #!/usr/bin/env python
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
from datetime import datetime, timedelta
from boto.utils import parse_ts
import boto
class ResultProcessor(object):
LogFileName = 'log.csv'
def __init__(self, batch_name, sd, mimetype_files=None):
self.sd = sd
self.batch = batch_name
self.log_fp = None
self.num_files = 0
self.total_time = 0
self.min_time = timedelta.max
self.max_time = timedelta.min
self.earliest_time = datetime.max
self.latest_time = datetime.min
self.queue = self.sd.get_obj('output_queue')
self.domain = self.sd.get_obj('output_domain')
def calculate_stats(self, msg):
start_time = parse_ts(msg['Service-Read'])
end_time = parse_ts(msg['Service-Write'])
elapsed_time = end_time - start_time
if elapsed_time > self.max_time:
self.max_time = elapsed_time
if elapsed_time < self.min_time:
self.min_time = elapsed_time
self.total_time += elapsed_time.seconds
if start_time < self.earliest_time:
self.earliest_time = start_time
if end_time > self.latest_time:
self.latest_time = end_time
def log_message(self, msg, path):
keys = sorted(msg.keys())
if not self.log_fp:
self.log_fp = open(os.path.join(path, self.LogFileName), 'a')
line = ','.join(keys)
self.log_fp.write(line+'\n')
values = []
for key in keys:
value = msg[key]
if value.find(',') > 0:
value = '"%s"' % value
values.append(value)
line = ','.join(values)
self.log_fp.write(line+'\n')
def process_record(self, record, path, get_file=True):
self.log_message(record, path)
self.calculate_stats(record)
outputs = record['OutputKey'].split(',')
if 'OutputBucket' in record:
bucket = boto.lookup('s3', record['OutputBucket'])
else:
bucket = boto.lookup('s3', record['Bucket'])
for output in outputs:
if get_file:
key_name = output.split(';')[0]
key = bucket.lookup(key_name)
file_name = os.path.join(path, key_name)
print('retrieving file: %s to %s' % (key_name, file_name))
key.get_contents_to_filename(file_name)
self.num_files += 1
def get_results_from_queue(self, path, get_file=True, delete_msg=True):
m = self.queue.read()
while m:
if 'Batch' in m and m['Batch'] == self.batch:
self.process_record(m, path, get_file)
if delete_msg:
self.queue.delete_message(m)
m = self.queue.read()
def get_results_from_domain(self, path, get_file=True):
rs = self.domain.query("['Batch'='%s']" % self.batch)
for item in rs:
self.process_record(item, path, get_file)
def get_results_from_bucket(self, path):
bucket = self.sd.get_obj('output_bucket')
if bucket:
print('No output queue or domain, just retrieving files from output_bucket')
for key in bucket:
file_name = os.path.join(path, key)
print('retrieving file: %s to %s' % (key, file_name))
key.get_contents_to_filename(file_name)
                self.num_files += 1
def get_results(self, path, get_file=True, delete_msg=True):
if not os.path.isdir(path):
os.mkdir(path)
if self.queue:
self.get_results_from_queue(path, get_file)
elif self.domain:
self.get_results_from_domain(path, get_file)
else:
self.get_results_from_bucket(path)
if self.log_fp:
self.log_fp.close()
print('%d results successfully retrieved.' % self.num_files)
if self.num_files > 0:
self.avg_time = float(self.total_time)/self.num_files
print('Minimum Processing Time: %d' % self.min_time.seconds)
print('Maximum Processing Time: %d' % self.max_time.seconds)
print('Average Processing Time: %f' % self.avg_time)
self.elapsed_time = self.latest_time-self.earliest_time
print('Elapsed Time: %d' % self.elapsed_time.seconds)
tput = 1.0 / ((self.elapsed_time.seconds/60.0) / self.num_files)
print('Throughput: %f transactions / minute' % tput)
| mit | 3,630,449,697,623,213,000 | 40.451852 | 88 | 0.602395 | false |
psychotechnik/mycv | mycv/apps/projects/migrations/0013_auto__add_field_skill_applicant__add_field_client_applicant__add_field.py | 1 | 12221 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Skill.applicant'
db.add_column(u'projects_skill', 'applicant',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='skills', null=True, to=orm['accounts.MyCVUser']),
keep_default=False)
# Adding field 'Client.applicant'
db.add_column(u'projects_client', 'applicant',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='clients', null=True, to=orm['accounts.MyCVUser']),
keep_default=False)
# Adding field 'StackItem.applicant'
db.add_column(u'projects_stackitem', 'applicant',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='stack_items', null=True, to=orm['accounts.MyCVUser']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Skill.applicant'
db.delete_column(u'projects_skill', 'applicant_id')
# Deleting field 'Client.applicant'
db.delete_column(u'projects_client', 'applicant_id')
# Deleting field 'StackItem.applicant'
db.delete_column(u'projects_stackitem', 'applicant_id')
models = {
u'accounts.mycvuser': {
'Meta': {'object_name': 'MyCVUser'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Address']", 'null': 'True', 'blank': 'True'}),
'avatar': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'users'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'users'", 'blank': 'True', 'to': u"orm['auth.Permission']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'categories.category': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'alternate_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'alternate_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'meta_extra': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['categories.Category']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'thumbnail': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.address': {
'Meta': {'object_name': 'Address'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'street_address': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'street_address2': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'})
},
u'projects.client': {
'Meta': {'ordering': "('order_index', '-end_date')", 'object_name': 'Client'},
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'clients'", 'null': 'True', 'to': u"orm['accounts.MyCVUser']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'order_index': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.IntegerField', [], {'max_length': '2'}),
'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
u'projects.clientobjective': {
'Meta': {'object_name': 'ClientObjective'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'objectives'", 'to': u"orm['projects.Client']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_index': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'projects.project': {
'Meta': {'ordering': "('order_index',)", 'object_name': 'Project'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'projects'", 'null': 'True', 'to': u"orm['projects.Client']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'order_index': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'source_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'stack_items': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['projects.StackItem']", 'symmetrical': 'False'})
},
u'projects.projectfeature': {
'Meta': {'object_name': 'ProjectFeature'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'features'", 'to': u"orm['projects.Project']"})
},
u'projects.skill': {
'Meta': {'object_name': 'Skill'},
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'skills'", 'null': 'True', 'to': u"orm['accounts.MyCVUser']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['categories.Category']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '500', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'projects.stackitem': {
'Meta': {'object_name': 'StackItem'},
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stack_items'", 'null': 'True', 'to': u"orm['accounts.MyCVUser']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['projects'] | gpl-2.0 | -8,670,182,636,055,603,000 | 74.444444 | 190 | 0.553228 | false |
2013Commons/hue | desktop/core/ext-py/Django-1.4.5/docs/_ext/applyxrefs.py | 143 | 2148 | """Adds xref targets to the top of files."""
import sys
import os
testing = False
DONT_TOUCH = (
'./index.txt',
)
def target_name(fn):
if fn.endswith('.txt'):
fn = fn[:-4]
return '_' + fn.lstrip('./').replace('/', '-')
def process_file(fn, lines):
lines.insert(0, '\n')
lines.insert(0, '.. %s:\n' % target_name(fn))
try:
f = open(fn, 'w')
except IOError:
print("Can't open %s for writing. Not touching it." % fn)
return
try:
f.writelines(lines)
except IOError:
print("Can't write to %s. Not touching it." % fn)
finally:
f.close()
def has_target(fn):
try:
f = open(fn, 'r')
except IOError:
print("Can't open %s. Not touching it." % fn)
return (True, None)
readok = True
try:
lines = f.readlines()
except IOError:
print("Can't read %s. Not touching it." % fn)
readok = False
finally:
f.close()
if not readok:
return (True, None)
#print fn, len(lines)
if len(lines) < 1:
print("Not touching empty file %s." % fn)
return (True, None)
if lines[0].startswith('.. _'):
return (True, None)
return (False, lines)
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
argv.extend('.')
files = []
for root in argv[1:]:
for (dirpath, dirnames, filenames) in os.walk(root):
files.extend([(dirpath, f) for f in filenames])
files.sort()
files = [os.path.join(p, fn) for p, fn in files if fn.endswith('.txt')]
#print files
for fn in files:
if fn in DONT_TOUCH:
print("Skipping blacklisted file %s." % fn)
continue
target_found, lines = has_target(fn)
if not target_found:
if testing:
print '%s: %s' % (fn, lines[0]),
else:
print "Adding xref to %s" % fn
process_file(fn, lines)
else:
print "Skipping %s: already has a xref" % fn
if __name__ == '__main__':
sys.exit(main()) | apache-2.0 | 1,275,210,266,804,636,200 | 23.420455 | 75 | 0.512104 | false |
petersanchez/django-allauth | allauth/socialaccount/views.py | 8 | 3507 | from django.contrib import messages
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib.auth.decorators import login_required
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from ..account.views import (CloseableSignupMixin,
RedirectAuthenticatedUserMixin)
from ..account.adapter import get_adapter as get_account_adapter
from ..utils import get_form_class, get_current_site
from .adapter import get_adapter
from .models import SocialLogin
from .forms import DisconnectForm, SignupForm
from . import helpers
from . import app_settings
class SignupView(RedirectAuthenticatedUserMixin, CloseableSignupMixin,
FormView):
form_class = SignupForm
template_name = 'socialaccount/signup.html'
def get_form_class(self):
return get_form_class(app_settings.FORMS,
'signup',
self.form_class)
def dispatch(self, request, *args, **kwargs):
self.sociallogin = None
data = request.session.get('socialaccount_sociallogin')
if data:
self.sociallogin = SocialLogin.deserialize(data)
if not self.sociallogin:
return HttpResponseRedirect(reverse('account_login'))
return super(SignupView, self).dispatch(request, *args, **kwargs)
def is_open(self):
return get_adapter().is_open_for_signup(self.request,
self.sociallogin)
def get_form_kwargs(self):
ret = super(SignupView, self).get_form_kwargs()
ret['sociallogin'] = self.sociallogin
return ret
def form_valid(self, form):
form.save(self.request)
return helpers.complete_social_signup(self.request,
self.sociallogin)
def get_context_data(self, **kwargs):
ret = super(SignupView, self).get_context_data(**kwargs)
ret.update(dict(site=get_current_site(self.request),
account=self.sociallogin.account))
return ret
def get_authenticated_redirect_url(self):
return reverse(connections)
signup = SignupView.as_view()
class LoginCancelledView(TemplateView):
template_name = "socialaccount/login_cancelled.html"
login_cancelled = LoginCancelledView.as_view()
class LoginErrorView(TemplateView):
template_name = "socialaccount/authentication_error.html"
login_error = LoginErrorView.as_view()
class ConnectionsView(FormView):
template_name = "socialaccount/connections.html"
form_class = DisconnectForm
success_url = reverse_lazy("socialaccount_connections")
def get_form_class(self):
return get_form_class(app_settings.FORMS,
'disconnect',
self.form_class)
def get_form_kwargs(self):
kwargs = super(ConnectionsView, self).get_form_kwargs()
kwargs["request"] = self.request
return kwargs
def form_valid(self, form):
get_account_adapter().add_message(self.request,
messages.INFO,
'socialaccount/messages/'
'account_disconnected.txt')
form.save()
return super(ConnectionsView, self).form_valid(form)
connections = login_required(ConnectionsView.as_view())
| mit | -2,675,504,095,613,910,500 | 33.722772 | 73 | 0.639863 | false |
davidzchen/tensorflow | tensorflow/python/eager/def_function_test.py | 1 | 28978 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import pickle
import re
import sys
import weakref
from absl.testing import parameterized
from six.moves import range
from tensorflow.python.autograph.core import converter
from tensorflow.python.eager import def_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import save_context
from tensorflow.python.saved_model import save_options
def undecorated_function(x):
return x * 3.
class _HasDecoratedMethod(object):
@def_function.function
def f(self, x):
return x * 3.
class DefFunctionTest(test.TestCase, parameterized.TestCase):
def testNoVariables(self):
@def_function.function
def fn(x):
return 2 * x
self.assertAllEqual(fn(constant_op.constant(4.0)), 8.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testFailIfVariablesAreCreatedMoreThanOnce(self):
@def_function.function
def fn(x):
return variables.Variable(1.0) + x
with self.assertRaises(ValueError):
fn(1.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testFailIfVariablesAreCreatedMoreThanOnceNoWeakRef(self):
state = []
@def_function.function
def fn(x):
state.append(variables.Variable(1.0))
return state[-1] + x
with self.assertRaises(ValueError):
fn(1.0)
def testRange(self):
@def_function.function
def f(unused_x):
return 1.0
self.assertAllEqual(f(range(5)), 1.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testCorrectVariableCreation(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testFunctionInitializer(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(lambda: 2.0))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testFunctionMultipleVariableInitializer(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(lambda: 2.0))
state.append(variables.Variable(lambda: 5.0))
return state[0] * x, state[1] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), [2.0, 5.0])
@test_util.disable_tfrt('Variable argument is not supported')
def testFunctionInitializationFunction(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
init_fn = fn.get_initialization_function(constant_op.constant(1.0))
self.assertLen(state, 1)
self.assertFalse(
resource_variable_ops.var_is_initialized_op(state[0].handle))
init_fn()
self.assertEqual(state[0].numpy(), 2.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testVariableInitializerNotConstant(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0 * x))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
def testLegacyGraphModeVariables(self):
with ops.Graph().as_default(), self.test_session() as sess:
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
result = fn(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(sess.run(state[0]), 2.0)
self.assertAllEqual(self.evaluate(result), 6.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testLegacyGraphModeVariablesNonTrivialInitializer(self):
with ops.Graph().as_default(), self.test_session() as sess:
state = []
@def_function.function
def fn(x):
if not state:
two = constant_op.constant(2.0)
four = two * two
two_again = math_ops.sqrt(four)
state.append(variables.Variable(two_again + four))
return state[0] * x
result = fn(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(sess.run(state[0]), 6.0)
self.assertAllEqual(self.evaluate(result), 18.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testLegacyGraphModeInputDependentInitializerFails(self):
with ops.Graph().as_default():
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0 * x))
return state[0] * x
with self.assertRaisesRegex(lift_to_graph.UnliftableError,
r'transitively.* mul .* x'):
fn(constant_op.constant(3.0))
@test_util.disable_tfrt('Variable argument is not supported')
def testMethod(self):
class MyModel(object):
def __init__(self):
self.var = None
@def_function.function
def apply(self, x):
if self.var is None:
self.var = variables.Variable(2.0)
return self.var * x
m0 = MyModel()
self.assertAllEqual(m0.apply(3.0), 6.0)
# Calling twice to exercise that we do not recreate variables.
m0.var.assign(3.0)
self.assertAllEqual(m0.apply(3.0), 9.0)
m1 = MyModel()
self.assertAllEqual(m1.apply(3.0), 6.0)
def test_functools_partial(self):
self.assertAllClose(
3.,
def_function.function(functools.partial(lambda x, y: x + y, 1.))(
constant_op.constant(2.)))
@test_util.disable_tfrt('Partial is not supported')
def test_functools_partial_new_default(self):
def f(x=3, y=7):
return x + y
func = def_function.function(functools.partial(f, y=6))
self.assertEqual(func().numpy(), 9)
self.assertEqual(func(y=8).numpy(), 11)
@test_util.disable_tfrt('Partial is not supported')
def test_functools_partial_keywords(self):
def f(x, y):
return x + y
func = def_function.function(
functools.partial(f, x=array_ops.zeros([1]), y=array_ops.zeros([1])))
self.assertAllEqual(func(), [0.0])
@test_util.disable_tfrt('Partial is not supported')
def test_functools_partial_single_positional(self):
def f(x, y):
return x + y
func = def_function.function(
functools.partial(f, constant_op.constant(1)))
self.assertAllEqual(func(5), 6)
@test_util.disable_tfrt('Partial is not supported')
def test_complicated_partial_with_defaults(self):
def identity(*args):
return args
def dynamic_unroll(core_fn,
input_sequence,
initial_state,
sequence_length=None,
parallel_iterations=1,
swap_memory=False):
del core_fn
self.assertIs(None, sequence_length)
self.assertEqual(1, parallel_iterations)
self.assertTrue(swap_memory)
return input_sequence, initial_state
input_sequence = random_ops.random_uniform([1, 1, 1])
initial_state = random_ops.random_uniform([1, 1])
func = def_function.function(
functools.partial(dynamic_unroll, identity, swap_memory=True))
func(input_sequence, initial_state)
def test_unspecified_default_argument(self):
wrapped = def_function.function(
lambda x, y=2: x + y,
input_signature=[tensor_spec.TensorSpec((), dtypes.int32)])
self.assertEqual(3, wrapped(constant_op.constant(1)).numpy())
def test_concrete_function_from_signature(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def compute(x):
return 2. * x
concrete = compute.get_concrete_function()
self.assertAllClose(1., concrete(constant_op.constant(0.5)))
concrete = compute.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))
self.assertAllClose(4., concrete(constant_op.constant(2.)))
signature_args, _ = concrete.structured_input_signature
self.assertEqual(signature_args,
(tensor_spec.TensorSpec(
None, dtypes.float32, name='x'),))
@test_util.disable_tfrt('Variable argument is not supported')
@test_util.run_in_graph_and_eager_modes
def test_variable_naming(self):
class HasVars(module.Module):
def __init__(self):
self.x = None
self.y = None
self.z = None
@def_function.function
def make_x(self):
if self.x is None:
self.x = variables.Variable(1., name='v')
def make_y(self):
if self.y is None:
self.y = variables.Variable(1., name='v')
def make_z(self):
if self.z is None:
with ops.name_scope('z_scope', skip_on_eager=False):
self.z = variables.Variable(1., name='z')
root = HasVars()
root.make_x()
root.make_y()
root.make_z()
self.assertEqual('v:0', root.x.name)
self.assertEqual('z_scope/z:0', root.z.name)
def test_concrete_function_keyword_arguments(self):
@def_function.function
def f(x):
return x
conc = f.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32, 'y'))
conc(y=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('y', signature_args[0].name)
conc = f.get_concrete_function(tensor_spec.TensorSpec(None, dtypes.float32))
conc(x=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('x', signature_args[0].name)
@def_function.function
def g(x):
return x[0]
conc = g.get_concrete_function(
[tensor_spec.TensorSpec(None, dtypes.float32, 'z'), 2])
conc(z=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('z', signature_args[0][0].name)
def test_error_inner_capture(self):
@def_function.function
def f(inputs):
num_steps, _ = inputs.shape[:2]
outputs = []
for t in math_ops.range(num_steps):
outputs.append(inputs[t])
return outputs
with self.assertRaisesRegex(errors.InaccessibleTensorError,
'defined in another function or code block'):
f(array_ops.zeros(shape=(8, 42, 3)))
@test_util.disable_tfrt('Control flow is not supported')
def testRuntimeErrorNotSticky(self):
@def_function.function
def fail(i):
control_flow_ops.Assert(math_ops.equal(i, 0), ['ick'])
fail(constant_op.constant(0)) # OK
with self.assertRaises(errors.InvalidArgumentError):
fail(constant_op.constant(1)) # InvalidArgument: "ick"
fail(constant_op.constant(0)) # OK
def testUnderscoreName(self):
@def_function.function
def f(_):
return _ + _
self.assertAllEqual(2.0, f(constant_op.constant(1.0)))
def test_serialization_signature_cache(self):
@def_function.function
def f(x, y):
return x, y
f(constant_op.constant([[3., 4.]]), constant_op.constant([2.]))
f(constant_op.constant([[3, 4, 5]]), constant_op.constant([2]))
signatures_args = set()
concrete_functions = f._list_all_concrete_functions_for_serialization()
for concrete_function in concrete_functions:
args, kwargs = concrete_function.structured_input_signature
signatures_args.add(args)
self.assertEqual(dict(), kwargs)
self.assertEqual(
signatures_args,
set(((tensor_spec.TensorSpec([1, 2], dtypes.float32, name='x'),
tensor_spec.TensorSpec([1], dtypes.float32, name='y')),
(tensor_spec.TensorSpec([1, 3], dtypes.int32, name='x'),
tensor_spec.TensorSpec([1], dtypes.int32, name='y')))))
@test_util.assert_no_garbage_created
def testFunctionReferenceCycles(self):
fn = def_function.function(lambda x: 2. * x)
fn(constant_op.constant(4.0))
weak_fn = weakref.ref(fn)
del fn
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
@test_util.assert_no_garbage_created
def testMethodReferenceCycles(self):
has_decorated_method = _HasDecoratedMethod()
has_decorated_method.f(constant_op.constant(5.))
weak_fn = weakref.ref(has_decorated_method.f)
del has_decorated_method
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
@test_util.assert_no_new_pyobjects_executing_eagerly
def testErrorMessageWhenGraphTensorIsPassedToEager(self):
@def_function.function
def failing_function():
a = constant_op.constant(1.)
with ops.init_scope():
_ = a + a
with self.assertRaisesRegex(
TypeError,
re.compile('An op outside of the function.*passed.*Const', re.DOTALL)):
failing_function()
def testNonUniqueNamesGetConcreteFunction(self):
@def_function.function
def non_unique_arg_names(x, **kwargs):
a, b, c = x
d = kwargs['d']
return a + b + c + d
concrete = non_unique_arg_names.get_concrete_function(
(tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32)),
d=tensor_spec.TensorSpec(None, dtypes.float32))
self.assertAllClose(
10.,
concrete(x=constant_op.constant(1.),
x_1=constant_op.constant(2.),
x_2=constant_op.constant(3.),
d=constant_op.constant(4.)))
self.assertAllClose(
10.,
concrete(constant_op.constant(1.),
constant_op.constant(2.),
constant_op.constant(3.),
constant_op.constant(4.)))
@test_util.disable_tfrt('Variable argument is not supported')
def testVariableCreatorScope(self):
created_variables = []
captured_variables = []
@def_function.function
def f():
if not created_variables:
created_variables.append(variables.Variable(1.))
return created_variables[0] + 1.
def capture_creator(next_creator, **kwargs):
created = next_creator(**kwargs)
captured_variables.append(created)
return created
with variable_scope.variable_creator_scope(capture_creator):
f()
self.assertEqual(created_variables, captured_variables)
@test_util.disable_tfrt('Variable argument is not supported')
def testVarAlreadyInitializedNoClobbering(self):
v_holder = []
@def_function.function
def add_var(x):
if not v_holder:
v = variables.Variable([1., 2.])
v_holder.append(v)
already_initialized = variables.Variable(3.)
with ops.init_scope():
already_initialized.assign(10.)
v_holder.append(already_initialized)
return v_holder[0] + v_holder[1] + x
add_var.get_concrete_function(constant_op.constant(2.))
self.assertAllClose([13., 14.], add_var(constant_op.constant(2.)))
@test_util.disable_tfrt('Variable argument is not supported')
def testSameVariableTwice(self):
v = variables.Variable(1.0)
@def_function.function
def add(a, b):
return a + b
self.assertAllEqual(add(v, v), 2.0)
@test_util.disable_tfrt('Variable argument is not supported')
def testVariableUpdate(self):
v1 = variables.Variable(1.0)
v2 = variables.Variable(2.0)
v3 = variables.Variable(4, dtype=dtypes.int32)
trace_count = [0]
@def_function.function
def double_variable(x):
trace_count[0] += 1
x.assign_add(x.read_value())
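    # Each call below passes a different variable object; per the assertions that follow,
    # every distinct variable (v1, v2, v3) is expected to trigger a fresh trace, which is
    # what the trace_count counter tracks.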
self.assertEqual(trace_count[0], 0)
double_variable(v1)
self.assertEqual(trace_count[0], 1)
self.assertEqual(self.evaluate(v1), 2.0)
double_variable(v2)
self.assertEqual(trace_count[0], 2)
self.assertEqual(self.evaluate(v2), 4.0)
double_variable(v3)
self.assertEqual(trace_count[0], 3)
self.assertEqual(self.evaluate(v3), 8)
def testShapeCache(self):
@def_function.function
def func(x):
return 2 * x
func_a = func.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32))
func_b = func.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32))
self.assertIs(func_a, func_b)
def testCacheWithinSaveContext(self):
@def_function.function
def func(x):
return 2 * x
func_a = func.get_concrete_function(constant_op.constant(2.))
func_b = func.get_concrete_function(constant_op.constant(2.))
self.assertIs(func_a, func_b)
with save_context.save_context(save_options.SaveOptions()):
func_c = func.get_concrete_function(constant_op.constant(2.))
self.assertIs(func_a, func_c)
@test_util.disable_tfrt('Nested function is not supported')
def testInitializationInNestedCall(self):
v_holder = []
@def_function.function
def add_var(x):
if not v_holder:
v = variables.Variable([1., 2.])
v_holder.append(v)
already_initialized = variables.Variable(3.)
with ops.init_scope():
already_initialized.assign(10.)
v_holder.append(already_initialized)
return v_holder[0] + v_holder[1] + x
@def_function.function
def wrapper(x):
return add_var(x)
self.assertAllClose([13., 14.], wrapper(constant_op.constant(2.)))
v_holder[1].assign(11.)
self.assertAllClose([14., 15.], wrapper(constant_op.constant(2.)))
@test_util.disable_tfrt('Variable argument is not supported')
@test_util.run_gpu_only
def testDeviceAnnotationRespected(self):
a = []
@def_function.function()
def create_variable():
with ops.init_scope():
initial_value = random_ops.random_uniform(
(2, 2), maxval=1000000, dtype=dtypes.int64)
if not a:
with ops.device('CPU:0'):
a.append(resource_variable_ops.ResourceVariable(initial_value))
return a[0].read_value()
create_variable()
self.assertRegex(a[0].device, 'CPU')
@test_util.disable_tfrt('Variable argument is not supported')
@test_util.run_gpu_only
def testDeviceAnnotationForInitializerRespected(self):
a = []
initial_value = []
def initial_value_fn():
initial_value.append(random_ops.random_uniform((2, 3)))
return initial_value[0]
@def_function.function()
def create_variable():
with ops.init_scope():
if not a:
a.append(variables.Variable(initial_value_fn))
with ops.device('CPU:0'):
create_variable()
self.assertRegex(a[0].device, 'CPU')
self.assertRegex(initial_value[0].device, 'CPU')
def testDecorate(self):
func = def_function.function(lambda: 1)
def decorator(f):
return lambda: 1 + f()
func._decorate(decorator)
self.assertEqual(func().numpy(), 2)
@parameterized.parameters(*itertools.product(
(None, (tensor_spec.TensorSpec([]),)), # input_signature
(True, False), # autograph
(None, converter.Feature.ALL), # autograph_options
(None, 'foo.bar'), # implements
(None, True, False), # relax_shapes
(True, False), # compile
(True, False), # override_function
))
@test_util.disable_tfrt('b/168618526: design proper method to copy tensors'
                          ' for function.')
def testClone(self, input_signature, autograph, autograph_options, implements,
relax_shapes, compile_, override_function):
original_py_function = lambda x: x
compile_ = False
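    # compile_ is deliberately forced to False: XLA JIT support is not linked into this
    # test (see the comment before the output check below), so a compiled clone could not
    # actually be executed here.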
func = def_function.function(
func=original_py_function,
input_signature=input_signature,
autograph=autograph,
experimental_implements=implements,
experimental_autograph_options=autograph_options,
experimental_relax_shapes=relax_shapes,
experimental_compile=compile_)
if override_function:
cloned_py_function = lambda x: x + 1
else:
cloned_py_function = original_py_function
cloned = func._clone(python_function=cloned_py_function)
self.assertEqual(cloned_py_function, cloned._python_function)
self.assertEqual(func._name, cloned._name)
self.assertEqual(input_signature, cloned._input_signature)
self.assertEqual(autograph, cloned._autograph)
self.assertEqual(implements, cloned._implements)
self.assertEqual(autograph_options, cloned._experimental_autograph_options)
self.assertEqual(relax_shapes, cloned._experimental_relax_shapes)
self.assertEqual(compile_, cloned._experimental_compile)
# This test does not run with XLA JIT support linked in so we can only check
# the output of the function if compile is disabled.
if not compile_:
x = array_ops.zeros([])
self.assertEqual(self.evaluate(cloned(x)),
self.evaluate(cloned_py_function(x)))
@test_util.disable_tfrt('Variable argument is not supported')
def testLiftPlaceholderInitializedVariable(self):
with ops.Graph().as_default():
var_list = []
@def_function.function
def use_variable():
if not var_list:
initial_value = array_ops.placeholder(shape=[], dtype=dtypes.float32)
v = variables.Variable(initial_value)
var_list.append(v)
return var_list[0] + 1.
var_plus_one = use_variable()
with self.session() as session:
init_op = var_list[0].initializer
session.run(init_op, feed_dict={init_op.inputs[1]: 2.})
self.assertEqual(3., session.run(var_plus_one))
def testDecorate_rejectedAfterTrace(self):
func = def_function.function(lambda: 1)
self.assertEqual(func().numpy(), 1)
msg = 'Functions cannot be decorated after they have been traced.'
with self.assertRaisesRegex(ValueError, msg):
func._decorate(lambda f: f)
def testGetConcreteFunctionGraphLifetime(self):
@def_function.function
def func():
pass
graph = func.get_concrete_function().graph
del func
# If the graph is deleted, then an exception is raised on reading `captures`
self.assertEmpty(graph.captures)
@parameterized.parameters(*itertools.product(
(None, (tensor_spec.TensorSpec([]),)), # input_signature
(True, False), # autograph
(None, converter.Feature.ALL), # autograph_options
(None, 'foo.bar'), # implements
(None, True, False), # relax_shapes
))
@test_util.disable_tfrt('b/168618526: design proper method to copy tensors'
                          ' for function.')
def test_pickle(self, input_signature, autograph, autograph_options,
implements, relax_shapes):
"""@function objects can be pickled and unpickled."""
original_py_function = undecorated_function
func = def_function.function(
func=original_py_function,
input_signature=input_signature,
autograph=autograph,
experimental_implements=implements,
experimental_autograph_options=autograph_options,
experimental_relax_shapes=relax_shapes,
)
cloned = pickle.loads(pickle.dumps(func))
self.assertEqual(func._name, cloned._name)
self.assertEqual(input_signature, cloned._input_signature)
self.assertEqual(autograph, cloned._autograph)
self.assertEqual(implements, cloned._implements)
self.assertEqual(autograph_options, cloned._experimental_autograph_options)
self.assertEqual(relax_shapes, cloned._experimental_relax_shapes)
x = array_ops.ones([])
self.assertEqual(self.evaluate(cloned(x)), self.evaluate(func(x)))
def test_frequent_retracing_warning(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
@def_function.function
def f(x):
return x
with self.assertLogs(level='WARN') as logs:
f(1)
f(2)
f(3)
f(4)
self.assertEmpty(logs.output)
f(5)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
def test_frequent_retracing_warning_lambda(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
f = def_function.function(lambda x: x)
with self.assertLogs(level='WARN') as logs:
f(1)
f(2)
f(3)
f(4)
f(5)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
def test_frequent_retracing_warning_method(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
class Foo(object):
@def_function.function
def f(self, x):
return x
f = Foo().f
with self.assertLogs(level='WARN') as logs:
f(1)
f(2)
f(3)
f(4)
f(5)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
def test_frequent_retracing_warning_two_independent_tf_functions(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
@def_function.function
def f(x):
return x
@def_function.function
def g(x):
return x
with self.assertLogs(level='WARN') as logs:
f(1)
f(2)
f(3)
f(4)
g(1)
g(2)
g(3)
g(4)
g(5)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
@test_util.disable_tfrt('Nested function is not supported')
def test_frequent_retracing_warning_nested(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
@def_function.function
def inner(x):
return x + 1
@def_function.function
def outer1(x):
return inner(x) * 2
@def_function.function
def outer2(x):
return inner(x) * 3
with self.assertLogs(level='WARN') as logs:
inner(1)
inner(2)
inner(3)
inner(4)
outer1(5)
outer1(6)
outer1(7)
outer1(8)
outer2(9)
outer2(10)
outer2(11)
outer2(12)
self.assertEmpty(logs.output)
outer2(13)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
def test_frequent_retracing_warning_on_reinstantiation(self):
if sys.version_info[0] < 3:
self.skipTest('self.assertLogs() call is not available in Python 2.')
with self.assertLogs(level='WARN') as logs:
for i in range(5):
@def_function.function
def f(x):
return x
f(i)
if i < 4:
self.assertEmpty(logs.output)
self.assertLen(logs.output, 1)
self.assertIn('Tracing is expensive', logs.output[0])
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| apache-2.0 | 1,999,680,924,349,402,000 | 29.66455 | 80 | 0.649665 | false |
blooparksystems/odoo | addons/website_slides/models/slides.py | 2 | 25969 | # -*- coding: utf-8 -*-
import datetime
import io
import json
from PIL import Image
import re
from urllib import urlencode
import urllib2
from urlparse import urlparse
from openerp import api, fields, models, SUPERUSER_ID, _
from openerp.tools import image
from openerp.exceptions import Warning
from openerp.addons.website.models.website import slug
class Channel(models.Model):
""" A channel is a container of slides. It has group-based access configuration
allowing to configure slide upload and access. Slides can be promoted in
channels. """
_name = 'slide.channel'
_description = 'Channel for Slides'
_inherit = ['mail.thread', 'website.seo.metadata', 'website.published.mixin']
_order = 'sequence, id'
_order_by_strategy = {
'most_viewed': 'total_views desc',
'most_voted': 'likes desc',
'latest': 'date_published desc',
}
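    # Maps a channel's promote_strategy to the ORDER BY clause used below in
    # _compute_promoted_slide_id when searching for the slide to feature.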
name = fields.Char('Name', translate=True, required=True)
description = fields.Html('Description', translate=True)
sequence = fields.Integer(default=10, help='Display order')
category_ids = fields.One2many('slide.category', 'channel_id', string="Categories")
slide_ids = fields.One2many('slide.slide', 'channel_id', string="Slides")
promote_strategy = fields.Selection([
('none', 'No Featured Presentation'),
('latest', 'Latest Published'),
('most_voted', 'Most Voted'),
('most_viewed', 'Most Viewed'),
('custom', 'Featured Presentation')],
string="Featuring Policy", default='most_voted', required=True)
custom_slide_id = fields.Many2one('slide.slide', string='Slide to Promote')
promoted_slide_id = fields.Many2one('slide.slide', string='Featured Slide', compute='_compute_promoted_slide_id', store=True)
@api.depends('custom_slide_id', 'promote_strategy', 'slide_ids.likes',
'slide_ids.total_views', "slide_ids.date_published")
def _compute_promoted_slide_id(self):
for record in self:
if record.promote_strategy == 'none':
record.promoted_slide_id = False
elif record.promote_strategy == 'custom':
record.promoted_slide_id = record.custom_slide_id
elif record.promote_strategy:
slides = self.env['slide.slide'].search(
[('website_published', '=', True), ('channel_id', '=', record.id)],
limit=1, order=self._order_by_strategy[record.promote_strategy])
record.promoted_slide_id = slides and slides[0] or False
nbr_presentations = fields.Integer('Number of Presentations', compute='_count_presentations', store=True)
nbr_documents = fields.Integer('Number of Documents', compute='_count_presentations', store=True)
nbr_videos = fields.Integer('Number of Videos', compute='_count_presentations', store=True)
nbr_infographics = fields.Integer('Number of Infographics', compute='_count_presentations', store=True)
total = fields.Integer(compute='_count_presentations', store=True)
@api.depends('slide_ids.slide_type', 'slide_ids.website_published')
def _count_presentations(self):
        # Build a distinct dict per record: dict.fromkeys(self.ids, dict()) would share a
        # single dict instance across all ids and mix the counts between channels.
        result = {channel_id: dict() for channel_id in self.ids}
res = self.env['slide.slide'].read_group(
[('website_published', '=', True), ('channel_id', 'in', self.ids)],
['channel_id', 'slide_type'], ['channel_id', 'slide_type'],
lazy=False)
for res_group in res:
result[res_group['channel_id'][0]][res_group['slide_type']] = result[res_group['channel_id'][0]].get(res_group['slide_type'], 0) + res_group['__count']
for record in self:
record.nbr_presentations = result[record.id].get('presentation', 0)
record.nbr_documents = result[record.id].get('document', 0)
record.nbr_videos = result[record.id].get('video', 0)
record.nbr_infographics = result[record.id].get('infographic', 0)
record.total = record.nbr_presentations + record.nbr_documents + record.nbr_videos + record.nbr_infographics
publish_template_id = fields.Many2one(
'mail.template', string='Published Template',
help="Email template to send slide publication through email",
default=lambda self: self.env['ir.model.data'].xmlid_to_res_id('website_slides.slide_template_published'))
share_template_id = fields.Many2one(
'mail.template', string='Shared Template',
help="Email template used when sharing a slide",
default=lambda self: self.env['ir.model.data'].xmlid_to_res_id('website_slides.slide_template_shared'))
visibility = fields.Selection([
('public', 'Public'),
('private', 'Private'),
('partial', 'Show channel but restrict presentations')],
default='public', required=True)
group_ids = fields.Many2many(
'res.groups', 'rel_channel_groups', 'channel_id', 'group_id',
string='Channel Groups', help="Groups allowed to see presentations in this channel")
access_error_msg = fields.Html(
'Error Message', help="Message to display when not accessible due to access rights",
default="<p>This channel is private and its content is restricted to some users.</p>", translate=True)
upload_group_ids = fields.Many2many(
'res.groups', 'rel_upload_groups', 'channel_id', 'group_id',
        string='Upload Groups', help="Groups allowed to upload presentations in this channel. If left empty, every user can upload.")
# not stored access fields, depending on each user
can_see = fields.Boolean('Can See', compute='_compute_access')
can_see_full = fields.Boolean('Full Access', compute='_compute_access')
can_upload = fields.Boolean('Can Upload', compute='_compute_access')
@api.one
@api.depends('visibility', 'group_ids', 'upload_group_ids')
def _compute_access(self):
self.can_see = self.visibility in ['public', 'private'] or bool(self.group_ids & self.env.user.groups_id)
self.can_see_full = self.visibility == 'public' or bool(self.group_ids & self.env.user.groups_id)
self.can_upload = self.can_see and (not self.upload_group_ids or bool(self.upload_group_ids & self.env.user.groups_id))
@api.multi
@api.depends('name')
def _website_url(self, name, arg):
res = super(Channel, self)._website_url(name, arg)
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
res.update({(channel.id, '%s/slides/%s' % (base_url, slug(channel))) for channel in self})
return res
@api.onchange('visibility')
def change_visibility(self):
if self.visibility == 'public':
self.group_ids = False
class Category(models.Model):
""" Channel contain various categories to manage its slides """
_name = 'slide.category'
_description = "Slides Category"
_order = "sequence, id"
name = fields.Char('Name', translate=True, required=True)
channel_id = fields.Many2one('slide.channel', string="Channel", required=True, ondelete='cascade')
sequence = fields.Integer(default=10, help='Display order')
slide_ids = fields.One2many('slide.slide', 'category_id', string="Slides")
nbr_presentations = fields.Integer("Number of Presentations", compute='_count_presentations', store=True)
nbr_documents = fields.Integer("Number of Documents", compute='_count_presentations', store=True)
nbr_videos = fields.Integer("Number of Videos", compute='_count_presentations', store=True)
nbr_infographics = fields.Integer("Number of Infographics", compute='_count_presentations', store=True)
total = fields.Integer(compute='_count_presentations', store=True)
@api.depends('slide_ids.slide_type', 'slide_ids.website_published')
def _count_presentations(self):
        # Build a distinct dict per record (same reasoning as in slide.channel): a shared
        # dict from dict.fromkeys would mix the counts between categories.
        result = {category_id: dict() for category_id in self.ids}
res = self.env['slide.slide'].read_group(
[('website_published', '=', True), ('category_id', 'in', self.ids)],
['category_id', 'slide_type'], ['category_id', 'slide_type'],
lazy=False)
for res_group in res:
result[res_group['category_id'][0]][res_group['slide_type']] = result[res_group['category_id'][0]].get(res_group['slide_type'], 0) + res_group['__count']
for record in self:
record.nbr_presentations = result[record.id].get('presentation', 0)
record.nbr_documents = result[record.id].get('document', 0)
record.nbr_videos = result[record.id].get('video', 0)
record.nbr_infographics = result[record.id].get('infographic', 0)
record.total = record.nbr_presentations + record.nbr_documents + record.nbr_videos + record.nbr_infographics
class EmbeddedSlide(models.Model):
""" Embedding in third party websites. Track view count, generate statistics. """
_name = 'slide.embed'
_description = 'Embedded Slides View Counter'
_rec_name = 'slide_id'
slide_id = fields.Many2one('slide.slide', string="Presentation", required=True, select=1)
url = fields.Char('Third Party Website URL', required=True)
count_views = fields.Integer('# Views', default=1)
def add_embed_url(self, slide_id, url):
schema = urlparse(url)
baseurl = schema.netloc
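        # urlparse keeps only the domain part, e.g. 'https://example.com/some/page' gives
        # baseurl='example.com', so all embeds from the same site share a single counter.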
embeds = self.search([('url', '=', baseurl), ('slide_id', '=', int(slide_id))], limit=1)
if embeds:
embeds.count_views += 1
else:
embeds = self.create({
'slide_id': slide_id,
'url': baseurl,
})
return embeds.count_views
class SlideTag(models.Model):
""" Tag to search slides accross channels. """
_name = 'slide.tag'
_description = 'Slide Tag'
name = fields.Char('Name', required=True)
_sql_constraints = [
('slide_tag_unique', 'UNIQUE(name)', 'A tag must be unique!'),
]
class Slide(models.Model):
""" This model represents actual presentations. Those must be one of four
types:
- Presentation
- Document
- Infographic
- Video
Slide has various statistics like view count, embed count, like, dislikes """
_name = 'slide.slide'
_inherit = ['mail.thread', 'website.seo.metadata', 'website.published.mixin']
_description = 'Slides'
_PROMOTIONAL_FIELDS = [
'__last_update', 'name', 'image_thumb', 'image_medium', 'slide_type', 'total_views', 'category_id',
'channel_id', 'description', 'tag_ids', 'write_date', 'create_date',
'website_published', 'website_url', 'website_meta_title', 'website_meta_description', 'website_meta_keywords']
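    # Only the fields listed above remain readable for users outside a 'partial' channel's
    # groups; everything else is filtered out in check_field_access_rights() below.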
_sql_constraints = [
('name_uniq', 'UNIQUE(channel_id, name)', 'The slide name must be unique within a channel')
]
# description
name = fields.Char('Title', required=True, translate=True)
description = fields.Text('Description', translate=True)
channel_id = fields.Many2one('slide.channel', string="Channel", required=True)
category_id = fields.Many2one('slide.category', string="Category", domain="[('channel_id', '=', channel_id)]")
tag_ids = fields.Many2many('slide.tag', 'rel_slide_tag', 'slide_id', 'tag_id', string='Tags')
download_security = fields.Selection(
        [('none', 'No One'), ('user', 'Authenticated Users Only'), ('public', 'Everyone')],
string='Download Security',
required=True, default='user')
image = fields.Binary('Image')
image_medium = fields.Binary('Medium', compute="_get_image", store=True)
image_thumb = fields.Binary('Thumbnail', compute="_get_image", store=True)
@api.depends('image')
def _get_image(self):
for record in self:
if record.image:
record.image_medium = image.crop_image(record.image, type='top', ratio=(4, 3), thumbnail_ratio=4)
record.image_thumb = image.crop_image(record.image, type='top', ratio=(4, 3), thumbnail_ratio=6)
else:
record.image_medium = False
                record.image_thumb = False
# content
slide_type = fields.Selection([
('infographic', 'Infographic'),
('presentation', 'Presentation'),
('document', 'Document'),
('video', 'Video')],
string='Type', required=True,
default='document',
help="Document type will be set automatically depending on file type, height and width.")
index_content = fields.Text('Transcript')
datas = fields.Binary('Content')
url = fields.Char('Document URL', help="Youtube or Google Document URL")
document_id = fields.Char('Document ID', help="Youtube or Google Document ID")
mime_type = fields.Char('Mime-type')
@api.onchange('url')
def on_change_url(self):
self.ensure_one()
if self.url:
res = self._parse_document_url(self.url)
if res.get('error'):
raise Warning(_('Could not fetch data from url. Document or access right not available:\n%s') % res['error'])
values = res['values']
if not values.get('document_id'):
raise Warning(_('Please enter valid Youtube or Google Doc URL'))
for key, value in values.iteritems():
setattr(self, key, value)
# website
date_published = fields.Datetime('Publish Date')
website_message_ids = fields.One2many(
'mail.message', 'res_id',
domain=lambda self: [('model', '=', self._name), ('message_type', '=', 'comment')],
string='Website Messages', help="Website communication history")
likes = fields.Integer('Likes')
dislikes = fields.Integer('Dislikes')
# views
embedcount_ids = fields.One2many('slide.embed', 'slide_id', string="Embed Count")
slide_views = fields.Integer('# of Website Views')
embed_views = fields.Integer('# of Embedded Views')
total_views = fields.Integer("Total # Views", default="0", compute='_compute_total', store=True)
@api.depends('slide_views', 'embed_views')
def _compute_total(self):
for record in self:
record.total_views = record.slide_views + record.embed_views
embed_code = fields.Text('Embed Code', readonly=True, compute='_get_embed_code')
def _get_embed_code(self):
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
for record in self:
if record.datas and not record.document_id:
record.embed_code = '<iframe src="%s/slides/embed/%s?page=1" allowFullScreen="true" height="%s" width="%s" frameborder="0"></iframe>' % (base_url, record.id, 315, 420)
elif record.slide_type == 'video' and record.document_id:
if not record.mime_type:
# embed youtube video
record.embed_code = '<iframe src="//www.youtube.com/embed/%s?theme=light" allowFullScreen="true" frameborder="0"></iframe>' % (record.document_id)
else:
# embed google doc video
record.embed_code = '<embed src="https://video.google.com/get_player?ps=docs&partnerid=30&docid=%s" type="application/x-shockwave-flash"></embed>' % (record.document_id)
else:
record.embed_code = False
@api.multi
@api.depends('name')
def _website_url(self, name, arg):
res = super(Slide, self)._website_url(name, arg)
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
        # link_tracker is not in the module's dependencies, so use it to shorten the url only if it is installed.
if self.env.registry.get('link.tracker'):
LinkTracker = self.env['link.tracker']
res.update({(slide.id, LinkTracker.sudo().create({'url': '%s/slides/slide/%s' % (base_url, slug(slide))}).short_url) for slide in self})
else:
res.update({(slide.id, '%s/slides/slide/%s' % (base_url, slug(slide))) for slide in self})
return res
@api.model
def create(self, values):
if not values.get('index_content'):
values['index_content'] = values.get('description')
if values.get('slide_type') == 'infographic' and not values.get('image'):
values['image'] = values['datas']
if values.get('website_published') and not values.get('date_published'):
values['date_published'] = datetime.datetime.now()
if values.get('url'):
doc_data = self._parse_document_url(values['url']).get('values', dict())
for key, value in doc_data.iteritems():
values.setdefault(key, value)
# Do not publish slide if user has not publisher rights
if not self.user_has_groups('base.group_website_publisher'):
values['website_published'] = False
slide = super(Slide, self).create(values)
slide.channel_id.message_subscribe_users()
slide._post_publication()
return slide
@api.multi
def write(self, values):
if values.get('url'):
doc_data = self._parse_document_url(values['url']).get('values', dict())
for key, value in doc_data.iteritems():
values.setdefault(key, value)
res = super(Slide, self).write(values)
if values.get('website_published'):
self.date_published = datetime.datetime.now()
self._post_publication()
return res
@api.model
def check_field_access_rights(self, operation, fields):
""" As per channel access configuration (visibility)
- public ==> no restriction on slides access
- private ==> restrict all slides of channel based on access group defined on channel group_ids field
- partial ==> show channel, but presentations based on groups means any user can see channel but not slide's content.
For private: implement using record rule
For partial: user can see channel, but channel gridview have slide detail so we have to implement
partial field access mechanism for public user so he can have access of promotional field (name, view_count) of slides,
but not all fields like data (actual pdf content)
all fields should be accessible only for user group defined on channel group_ids
"""
if self.env.uid == SUPERUSER_ID:
return fields or list(self._fields)
fields = super(Slide, self).check_field_access_rights(operation, fields)
        # the read has not been performed yet, so we cannot access self.channel_id directly
if self.ids:
self.env.cr.execute('SELECT DISTINCT channel_id FROM ' + self._table + ' WHERE id IN %s', (tuple(self.ids),))
channel_ids = [x[0] for x in self.env.cr.fetchall()]
channels = self.env['slide.channel'].sudo().browse(channel_ids)
limited_access = all(channel.visibility == 'partial' and
not len(channel.group_ids & self.env.user.groups_id)
for channel in channels)
if limited_access:
fields = [field for field in fields if field in self._PROMOTIONAL_FIELDS]
return fields
def get_related_slides(self, limit=20):
domain = [('website_published', '=', True), ('channel_id.visibility', '!=', 'private'), ('id', '!=', self.id)]
if self.category_id:
domain += [('category_id', '=', self.category_id.id)]
for record in self.search(domain, limit=limit):
yield record
def get_most_viewed_slides(self, limit=20):
for record in self.search([('website_published', '=', True), ('channel_id.visibility', '!=', 'private'), ('id', '!=', self.id)], limit=limit, order='total_views desc'):
yield record
def _post_publication(self):
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
for slide in self.filtered(lambda slide: slide.website_published):
publish_template = slide.channel_id.publish_template_id
html_body = publish_template.with_context({'base_url': base_url}).render_template(publish_template.body_html, 'slide.slide', slide.id)
slide.channel_id.message_post(body=html_body, subtype='website_slides.mt_channel_slide_published')
return True
@api.one
def send_share_email(self, email):
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
return self.channel_id.share_template_id.with_context({'email': email, 'base_url': base_url}).send_mail(self.id)
# --------------------------------------------------
# Parsing methods
# --------------------------------------------------
@api.model
def _fetch_data(self, base_url, data, content_type=False):
result = {'values': dict()}
try:
if data:
base_url = base_url + '?%s' % urlencode(data)
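                # e.g. data={'id': 'abc', 'key': 'xyz'} turns the URL into
                # '<base_url>?id=abc&key=xyz' (parameter order may vary)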
req = urllib2.Request(base_url)
content = urllib2.urlopen(req).read()
if content_type == 'json':
result['values'] = json.loads(content)
elif content_type in ('image', 'pdf'):
result['values'] = content.encode('base64')
else:
result['values'] = content
except urllib2.HTTPError as e:
result['error'] = e.read()
e.close()
except urllib2.URLError as e:
result['error'] = e.reason
return result
def _find_document_data_from_url(self, url):
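        # Illustrative expected matches: 'https://www.youtube.com/watch?v=Abc123xYz_0' or
        # 'https://youtu.be/Abc123xYz_0' should yield ('youtube', 'Abc123xYz_0'), while
        # 'https://docs.google.com/presentation/d/<doc_id>/edit' should yield
        # ('google', '<doc_id>'); anything else falls through to (None, False).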
expr = re.compile(r'^.*((youtu.be/)|(v/)|(\/u\/\w\/)|(embed\/)|(watch\?))\??v?=?([^#\&\?]*).*')
arg = expr.match(url)
document_id = arg and arg.group(7) or False
if document_id:
return ('youtube', document_id)
expr = re.compile(r'(^https:\/\/docs.google.com|^https:\/\/drive.google.com).*\/d\/([^\/]*)')
arg = expr.match(url)
document_id = arg and arg.group(2) or False
if document_id:
return ('google', document_id)
return (None, False)
def _parse_document_url(self, url, only_preview_fields=False):
document_source, document_id = self._find_document_data_from_url(url)
if document_source and hasattr(self, '_parse_%s_document' % document_source):
return getattr(self, '_parse_%s_document' % document_source)(document_id, only_preview_fields)
return {'error': _('Unknown document')}
def _parse_youtube_document(self, document_id, only_preview_fields):
key = self.env['ir.config_parameter'].sudo().get_param('website_slides.google_app_key')
fetch_res = self._fetch_data('https://www.googleapis.com/youtube/v3/videos', {'id': document_id, 'key': key, 'part': 'snippet', 'fields': 'items(id,snippet)'}, 'json')
if fetch_res.get('error'):
return fetch_res
values = {'slide_type': 'video', 'document_id': document_id}
youtube_values = fetch_res['values'].get('items', list(dict()))[0]
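        # NOTE: this assumes the API returned at least one entry in 'items' for the
        # requested video id; an empty or missing 'items' list would raise an IndexError.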
if youtube_values.get('snippet'):
snippet = youtube_values['snippet']
if only_preview_fields:
values.update({
'url_src': snippet['thumbnails']['high']['url'],
'title': snippet['title'],
'description': snippet['description']
})
return values
values.update({
'name': snippet['title'],
'image': self._fetch_data(snippet['thumbnails']['high']['url'], {}, 'image')['values'],
'description': snippet['description'],
})
return {'values': values}
@api.model
def _parse_google_document(self, document_id, only_preview_fields):
def get_slide_type(vals):
# TDE FIXME: WTF ??
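            # Orientation heuristic: a portrait thumbnail (height > width) is treated as a
            # 'document', a landscape one as a 'presentation'.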
image = Image.open(io.BytesIO(vals['image'].decode('base64')))
width, height = image.size
if height > width:
return 'document'
else:
return 'presentation'
key = self.env['ir.config_parameter'].sudo().get_param('website_slides.google_app_key')
fetch_res = self._fetch_data('https://www.googleapis.com/drive/v2/files/%s' % document_id, {'projection': 'BASIC', 'key': key}, "json")
if fetch_res.get('error'):
return fetch_res
google_values = fetch_res['values']
if only_preview_fields:
return {
'url_src': google_values['thumbnailLink'],
'title': google_values['title'],
}
values = {
'name': google_values['title'],
'image': self._fetch_data(google_values['thumbnailLink'].replace('=s220', ''), {}, 'image')['values'],
'mime_type': google_values['mimeType'],
'document_id': document_id,
}
if google_values['mimeType'].startswith('video/'):
values['slide_type'] = 'video'
elif google_values['mimeType'].startswith('image/'):
values['datas'] = values['image']
values['slide_type'] = 'infographic'
elif google_values['mimeType'].startswith('application/vnd.google-apps'):
values['datas'] = self._fetch_data(google_values['exportLinks']['application/pdf'], {}, 'pdf')['values']
values['slide_type'] = get_slide_type(values)
if google_values['exportLinks'].get('text/plain'):
values['index_content'] = self._fetch_data(google_values['exportLinks']['text/plain'], {})['values']
if google_values['exportLinks'].get('text/csv'):
values['index_content'] = self._fetch_data(google_values['exportLinks']['text/csv'], {})['values']
elif google_values['mimeType'] == 'application/pdf':
# TODO: Google Drive PDF document doesn't provide plain text transcript
values['datas'] = self._fetch_data(google_values['webContentLink'], {}, 'pdf')['values']
values['slide_type'] = get_slide_type(values)
return {'values': values}
| gpl-3.0 | -3,853,517,502,678,667,000 | 48.55916 | 189 | 0.610998 | false |
Ayub-Khan/edx-platform | lms/djangoapps/verify_student/tests/test_models.py | 27 | 36071 | # -*- coding: utf-8 -*-
from datetime import timedelta, datetime
import ddt
import json
import mock
import requests.exceptions
import pytz
from django.conf import settings
from django.db import IntegrityError
from django.test import TestCase
from mock import patch
from nose.tools import assert_is_none, assert_equals, assert_raises, assert_true, assert_false # pylint: disable=no-name-in-module
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from opaque_keys.edx.keys import CourseKey
from lms.djangoapps.verify_student.models import (
SoftwareSecurePhotoVerification,
VerificationException, VerificationCheckpoint,
VerificationStatus, SkippedReverification,
VerificationDeadline
)
FAKE_SETTINGS = {
"SOFTWARE_SECURE": {
"FACE_IMAGE_AES_KEY": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
"API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB",
"API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
"RSA_PUBLIC_KEY": """-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu2fUn20ZQtDpa1TKeCA/
rDA2cEeFARjEr41AP6jqP/k3O7TeqFX6DgCBkxcjojRCs5IfE8TimBHtv/bcSx9o
7PANTq/62ZLM9xAMpfCcU6aAd4+CVqQkXSYjj5TUqamzDFBkp67US8IPmw7I2Gaa
tX8ErZ9D7ieOJ8/0hEiphHpCZh4TTgGuHgjon6vMV8THtq3AQMaAQ/y5R3V7Lezw
dyZCM9pBcvcH+60ma+nNg8GVGBAW/oLxILBtg+T3PuXSUvcu/r6lUFMHk55pU94d
9A/T8ySJm379qU24ligMEetPk1o9CUasdaI96xfXVDyFhrzrntAmdD+HYCSPOQHz
iwIDAQAB
-----END PUBLIC KEY-----""",
"API_URL": "http://localhost/verify_student/fake_endpoint",
"AWS_ACCESS_KEY": "FAKEACCESSKEY",
"AWS_SECRET_KEY": "FAKESECRETKEY",
"S3_BUCKET": "fake-bucket"
}
}
class MockKey(object):
"""
Mocking a boto S3 Key object. It's a really dumb mock because once we
write data to S3, we never read it again. We simply generate a link to it
and pass that to Software Secure. Because of that, we don't even implement
the ability to pull back previously written content in this mock.
Testing that the encryption/decryption roundtrip on the data works is in
test_ssencrypt.py
"""
def __init__(self, bucket):
self.bucket = bucket
def set_contents_from_string(self, contents):
self.contents = contents
def generate_url(self, duration):
return "http://fake-edx-s3.edx.org/"
class MockBucket(object):
"""Mocking a boto S3 Bucket object."""
def __init__(self, name):
self.name = name
class MockS3Connection(object):
"""Mocking a boto S3 Connection"""
def __init__(self, access_key, secret_key):
pass
def get_bucket(self, bucket_name):
return MockBucket(bucket_name)
def mock_software_secure_post(url, headers=None, data=None, **kwargs):
"""
Mocks our interface when we post to Software Secure. Does basic assertions
on the fields we send over to make sure we're not missing headers or giving
total garbage.
"""
data_dict = json.loads(data)
# Basic sanity checking on the keys
EXPECTED_KEYS = [
"EdX-ID", "ExpectedName", "PhotoID", "PhotoIDKey", "SendResponseTo",
"UserPhoto", "UserPhotoKey",
]
for key in EXPECTED_KEYS:
assert_true(
data_dict.get(key),
"'{}' must be present and not blank in JSON submitted to Software Secure".format(key)
)
# The keys should be stored as Base64 strings, i.e. this should not explode
photo_id_key = data_dict["PhotoIDKey"].decode("base64")
user_photo_key = data_dict["UserPhotoKey"].decode("base64")
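    # The decode calls above double as assertions: values that are not valid Base64 would
    # raise here; the decoded keys themselves are not used any further by this mock.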
response = requests.Response()
response.status_code = 200
return response
def mock_software_secure_post_error(url, headers=None, data=None, **kwargs):
"""
Simulates what happens if our post to Software Secure is rejected, for
whatever reason.
"""
response = requests.Response()
response.status_code = 400
return response
def mock_software_secure_post_unavailable(url, headers=None, data=None, **kwargs):
"""Simulates a connection failure when we try to submit to Software Secure."""
raise requests.exceptions.ConnectionError
# Lots of patching to stub in our own settings, S3 substitutes, and HTTP posting
@patch.dict(settings.VERIFY_STUDENT, FAKE_SETTINGS)
@patch('lms.djangoapps.verify_student.models.S3Connection', new=MockS3Connection)
@patch('lms.djangoapps.verify_student.models.Key', new=MockKey)
@patch('lms.djangoapps.verify_student.models.requests.post', new=mock_software_secure_post)
@ddt.ddt
class TestPhotoVerification(ModuleStoreTestCase):
def test_state_transitions(self):
"""
Make sure we can't make unexpected status transitions.
The status transitions we expect are::
→ → → must_retry
↑ ↑ ↓
created → ready → submitted → approved
↓ ↑ ↓
↓ → → denied
"""
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
assert_equals(attempt.status, "created")
# These should all fail because we're in the wrong starting state.
assert_raises(VerificationException, attempt.submit)
assert_raises(VerificationException, attempt.approve)
assert_raises(VerificationException, attempt.deny)
# Now let's fill in some values so that we can pass the mark_ready() call
attempt.mark_ready()
assert_equals(attempt.status, "ready")
# ready (can't approve or deny unless it's "submitted")
assert_raises(VerificationException, attempt.approve)
assert_raises(VerificationException, attempt.deny)
DENY_ERROR_MSG = '[{"photoIdReasons": ["Not provided"]}]'
# must_retry
attempt.status = "must_retry"
attempt.system_error("System error")
attempt.approve()
attempt.status = "must_retry"
attempt.deny(DENY_ERROR_MSG)
# submitted
attempt.status = "submitted"
attempt.deny(DENY_ERROR_MSG)
attempt.status = "submitted"
attempt.approve()
# approved
assert_raises(VerificationException, attempt.submit)
attempt.approve() # no-op
attempt.system_error("System error") # no-op, something processed it without error
attempt.deny(DENY_ERROR_MSG)
# denied
assert_raises(VerificationException, attempt.submit)
attempt.deny(DENY_ERROR_MSG) # no-op
attempt.system_error("System error") # no-op, something processed it without error
attempt.approve()
def test_name_freezing(self):
"""
You can change your name prior to marking a verification attempt ready,
but changing your name afterwards should not affect the value in the
in the attempt record. Basically, we want to always know what your name
was when you submitted it.
"""
user = UserFactory.create()
        user.profile.name = u"Jack \u01B4" # gratuitous non-ASCII char to test encodings
attempt = SoftwareSecurePhotoVerification(user=user)
user.profile.name = u"Clyde \u01B4"
attempt.mark_ready()
user.profile.name = u"Rusty \u01B4"
assert_equals(u"Clyde \u01B4", attempt.name)
def create_and_submit(self):
"""Helper method to create a generic submission and send it."""
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
user.profile.name = u"Rust\u01B4"
attempt.upload_face_image("Just pretend this is image data")
attempt.upload_photo_id_image("Hey, we're a photo ID")
attempt.mark_ready()
attempt.submit()
return attempt
def test_submissions(self):
"""Test that we set our status correctly after a submission."""
# Basic case, things go well.
attempt = self.create_and_submit()
assert_equals(attempt.status, "submitted")
# We post, but Software Secure doesn't like what we send for some reason
with patch('lms.djangoapps.verify_student.models.requests.post', new=mock_software_secure_post_error):
attempt = self.create_and_submit()
assert_equals(attempt.status, "must_retry")
        # We try to post, but run into an error (in this case a network connection error)
with patch('lms.djangoapps.verify_student.models.requests.post', new=mock_software_secure_post_unavailable):
attempt = self.create_and_submit()
assert_equals(attempt.status, "must_retry")
@mock.patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
def test_submission_while_testing_flag_is_true(self):
""" Test that a fake value is set for field 'photo_id_key' of user's
initial verification when the feature flag 'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'
is enabled.
"""
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
user.profile.name = "test-user"
attempt.upload_photo_id_image("Image data")
attempt.mark_ready()
attempt.submit()
self.assertEqual(attempt.photo_id_key, "fake-photo-id-key")
def test_active_for_user(self):
"""
        Make sure we can retrieve a user's active (in progress) verification
attempt.
"""
user = UserFactory.create()
# This user has no active at the moment...
assert_is_none(SoftwareSecurePhotoVerification.active_for_user(user))
# Create an attempt and mark it ready...
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.mark_ready()
assert_equals(attempt, SoftwareSecurePhotoVerification.active_for_user(user))
# A new user won't see this...
user2 = UserFactory.create()
user2.save()
assert_is_none(SoftwareSecurePhotoVerification.active_for_user(user2))
# If it's got a different status, it doesn't count
for status in ["submitted", "must_retry", "approved", "denied"]:
attempt.status = status
attempt.save()
assert_is_none(SoftwareSecurePhotoVerification.active_for_user(user))
# But if we create yet another one and mark it ready, it passes again.
attempt_2 = SoftwareSecurePhotoVerification(user=user)
attempt_2.mark_ready()
assert_equals(attempt_2, SoftwareSecurePhotoVerification.active_for_user(user))
# And if we add yet another one with a later created time, we get that
# one instead. We always want the most recent attempt marked ready()
attempt_3 = SoftwareSecurePhotoVerification(
user=user,
created_at=attempt_2.created_at + timedelta(days=1)
)
attempt_3.save()
# We haven't marked attempt_3 ready yet, so attempt_2 still wins
assert_equals(attempt_2, SoftwareSecurePhotoVerification.active_for_user(user))
# Now we mark attempt_3 ready and expect it to come back
attempt_3.mark_ready()
assert_equals(attempt_3, SoftwareSecurePhotoVerification.active_for_user(user))
def test_user_is_verified(self):
"""
Test to make sure we correctly answer whether a user has been verified.
"""
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.save()
# If it's any of these, they're not verified...
for status in ["created", "ready", "denied", "submitted", "must_retry"]:
attempt.status = status
attempt.save()
assert_false(SoftwareSecurePhotoVerification.user_is_verified(user), status)
attempt.status = "approved"
attempt.save()
assert_true(SoftwareSecurePhotoVerification.user_is_verified(user), attempt.status)
def test_user_has_valid_or_pending(self):
"""
Determine whether we have to prompt this user to verify, or if they've
already at least initiated a verification submission.
"""
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
# If it's any of these statuses, they don't have anything outstanding
for status in ["created", "ready", "denied"]:
attempt.status = status
attempt.save()
assert_false(SoftwareSecurePhotoVerification.user_has_valid_or_pending(user), status)
# Any of these, and we are. Note the benefit of the doubt we're giving
# -- must_retry, and submitted both count until we hear otherwise
for status in ["submitted", "must_retry", "approved"]:
attempt.status = status
attempt.save()
assert_true(SoftwareSecurePhotoVerification.user_has_valid_or_pending(user), status)
def test_user_status(self):
# test for correct status when no error returned
user = UserFactory.create()
status = SoftwareSecurePhotoVerification.user_status(user)
self.assertEquals(status, ('none', ''))
# test for when one has been created
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.status = 'approved'
attempt.save()
status = SoftwareSecurePhotoVerification.user_status(user)
self.assertEquals(status, ('approved', ''))
# create another one for the same user, make sure the right one is
# returned
attempt2 = SoftwareSecurePhotoVerification(user=user)
attempt2.status = 'denied'
attempt2.error_msg = '[{"photoIdReasons": ["Not provided"]}]'
attempt2.save()
status = SoftwareSecurePhotoVerification.user_status(user)
self.assertEquals(status, ('approved', ''))
# now delete the first one and verify that the denial is being handled
# properly
attempt.delete()
status = SoftwareSecurePhotoVerification.user_status(user)
self.assertEquals(status, ('must_reverify', "No photo ID was provided."))
def test_parse_error_msg_success(self):
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.status = 'denied'
attempt.error_msg = '[{"photoIdReasons": ["Not provided"]}]'
parsed_error_msg = attempt.parsed_error_msg()
self.assertEquals("No photo ID was provided.", parsed_error_msg)
def test_parse_error_msg_failure(self):
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.status = 'denied'
# when we can't parse into json
bad_messages = {
'Not Provided',
'[{"IdReasons": ["Not provided"]}]',
'{"IdReasons": ["Not provided"]}',
u'[{"ïḋṚëäṡöṅṡ": ["Ⓝⓞⓣ ⓟⓡⓞⓥⓘⓓⓔⓓ "]}]',
}
for msg in bad_messages:
attempt.error_msg = msg
parsed_error_msg = attempt.parsed_error_msg()
self.assertEquals(parsed_error_msg, "There was an error verifying your ID photos.")
def test_active_at_datetime(self):
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification.objects.create(user=user)
# Not active before the created date
before = attempt.created_at - timedelta(seconds=1)
self.assertFalse(attempt.active_at_datetime(before))
# Active immediately after created date
after_created = attempt.created_at + timedelta(seconds=1)
self.assertTrue(attempt.active_at_datetime(after_created))
# Active immediately before expiration date
expiration = attempt.created_at + timedelta(days=settings.VERIFY_STUDENT["DAYS_GOOD_FOR"])
before_expiration = expiration - timedelta(seconds=1)
self.assertTrue(attempt.active_at_datetime(before_expiration))
# Not active after the expiration date
after = expiration + timedelta(seconds=1)
self.assertFalse(attempt.active_at_datetime(after))
def test_verification_for_datetime(self):
user = UserFactory.create()
now = datetime.now(pytz.UTC)
# No attempts in the query set, so should return None
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(now, query)
self.assertIs(result, None)
# Should also return None if no deadline specified
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(None, query)
self.assertIs(result, None)
# Make an attempt
attempt = SoftwareSecurePhotoVerification.objects.create(user=user)
# Before the created date, should get no results
before = attempt.created_at - timedelta(seconds=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(before, query)
self.assertIs(result, None)
# Immediately after the created date, should get the attempt
after_created = attempt.created_at + timedelta(seconds=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(after_created, query)
self.assertEqual(result, attempt)
# If no deadline specified, should return first available
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(None, query)
self.assertEqual(result, attempt)
# Immediately before the expiration date, should get the attempt
expiration = attempt.created_at + timedelta(days=settings.VERIFY_STUDENT["DAYS_GOOD_FOR"])
before_expiration = expiration - timedelta(seconds=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(before_expiration, query)
self.assertEqual(result, attempt)
# Immediately after the expiration date, should not get the attempt
after = expiration + timedelta(seconds=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(after, query)
self.assertIs(result, None)
# Create a second attempt in the same window
second_attempt = SoftwareSecurePhotoVerification.objects.create(user=user)
# Now we should get the newer attempt
deadline = second_attempt.created_at + timedelta(days=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(deadline, query)
self.assertEqual(result, second_attempt)
@ddt.unpack
@ddt.data(
{'enrollment_mode': 'honor', 'status': None, 'output': 'N/A'},
{'enrollment_mode': 'audit', 'status': None, 'output': 'N/A'},
{'enrollment_mode': 'verified', 'status': False, 'output': 'Not ID Verified'},
{'enrollment_mode': 'verified', 'status': True, 'output': 'ID Verified'},
)
def test_verification_status_for_user(self, enrollment_mode, status, output):
"""
        Verify that verification_status_for_user returns the correct status.
"""
user = UserFactory.create()
course = CourseFactory.create()
with patch(
'lms.djangoapps.verify_student.models.SoftwareSecurePhotoVerification.user_is_verified'
) as mock_verification:
mock_verification.return_value = status
status = SoftwareSecurePhotoVerification.verification_status_for_user(user, course.id, enrollment_mode)
self.assertEqual(status, output)
def test_initial_verification_for_user(self):
"""Test that method 'get_initial_verification' of model
'SoftwareSecurePhotoVerification' always returns the initial
verification with field 'photo_id_key' set against a user.
"""
user = UserFactory.create()
# No initial verification for the user
result = SoftwareSecurePhotoVerification.get_initial_verification(user=user)
self.assertIs(result, None)
# Make an initial verification with 'photo_id_key'
attempt = SoftwareSecurePhotoVerification(user=user, photo_id_key="dummy_photo_id_key")
attempt.status = 'approved'
attempt.save()
# Check that method 'get_initial_verification' returns the correct
# initial verification attempt
first_result = SoftwareSecurePhotoVerification.get_initial_verification(user=user)
self.assertIsNotNone(first_result)
# Now create a second verification without 'photo_id_key'
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.status = 'submitted'
attempt.save()
# Test method 'get_initial_verification' still returns the correct
        # initial verification attempt which has 'photo_id_key' set
second_result = SoftwareSecurePhotoVerification.get_initial_verification(user=user)
self.assertIsNotNone(second_result)
self.assertEqual(second_result, first_result)
@ddt.ddt
class VerificationCheckpointTest(ModuleStoreTestCase):
"""Tests for the VerificationCheckpoint model. """
def setUp(self):
super(VerificationCheckpointTest, self).setUp()
self.user = UserFactory.create()
self.course = CourseFactory.create()
self.checkpoint_midterm = u'i4x://{org}/{course}/edx-reverification-block/midterm_uuid'.format(
org=self.course.id.org, course=self.course.id.course
)
self.checkpoint_final = u'i4x://{org}/{course}/edx-reverification-block/final_uuid'.format(
org=self.course.id.org, course=self.course.id.course
)
@ddt.data('midterm', 'final')
def test_get_or_create_verification_checkpoint(self, checkpoint):
"""
Test that a reverification checkpoint is created properly.
"""
checkpoint_location = u'i4x://{org}/{course}/edx-reverification-block/{checkpoint}'.format(
org=self.course.id.org, course=self.course.id.course, checkpoint=checkpoint
)
# create the 'VerificationCheckpoint' checkpoint
verification_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=checkpoint_location
)
self.assertEqual(
VerificationCheckpoint.get_or_create_verification_checkpoint(self.course.id, checkpoint_location),
verification_checkpoint
)
def test_get_or_create_verification_checkpoint_for_not_existing_values(self):
# Retrieving a checkpoint that doesn't yet exist will create it
location = u'i4x://edX/DemoX/edx-reverification-block/invalid_location'
checkpoint = VerificationCheckpoint.get_or_create_verification_checkpoint(self.course.id, location)
self.assertIsNot(checkpoint, None)
self.assertEqual(checkpoint.course_id, self.course.id)
self.assertEqual(checkpoint.checkpoint_location, location)
def test_get_or_create_integrity_error(self):
# Create the checkpoint
VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=self.checkpoint_midterm,
)
# Simulate that the get-or-create operation raises an IntegrityError.
# This can happen when two processes both try to get-or-create at the same time
# when the database is set to REPEATABLE READ.
# To avoid IntegrityError situations when calling this method, set the view to
# use a READ COMMITTED transaction instead.
with patch.object(VerificationCheckpoint.objects, "get_or_create") as mock_get_or_create:
mock_get_or_create.side_effect = IntegrityError
with self.assertRaises(IntegrityError):
_ = VerificationCheckpoint.get_or_create_verification_checkpoint(
self.course.id,
self.checkpoint_midterm
)
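    # Illustrative sketch (not part of the original tests): one way a caller
    # could handle the race simulated above. The fallback '.objects.get' lookup
    # is an assumption for this example and only helps if the surrounding
    # transaction can actually see the row the other process committed (hence
    # the READ COMMITTED advice in the comment above).
    #
    #   try:
    #       checkpoint = VerificationCheckpoint.get_or_create_verification_checkpoint(
    #           self.course.id, self.checkpoint_midterm
    #       )
    #   except IntegrityError:
    #       checkpoint = VerificationCheckpoint.objects.get(
    #           course_id=self.course.id, checkpoint_location=self.checkpoint_midterm
    #       )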
def test_unique_together_constraint(self):
"""
Test the unique together constraint.
"""
# create the VerificationCheckpoint checkpoint
VerificationCheckpoint.objects.create(course_id=self.course.id, checkpoint_location=self.checkpoint_midterm)
# test creating the VerificationCheckpoint checkpoint with same course
# id and checkpoint name
with self.assertRaises(IntegrityError):
VerificationCheckpoint.objects.create(course_id=self.course.id, checkpoint_location=self.checkpoint_midterm)
def test_add_verification_attempt_software_secure(self):
"""
Test adding Software Secure photo verification attempts for the
reverification checkpoints.
"""
# adding two check points.
first_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id, checkpoint_location=self.checkpoint_midterm
)
second_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id, checkpoint_location=self.checkpoint_final
)
# make an attempt for the 'first_checkpoint'
first_checkpoint.add_verification_attempt(SoftwareSecurePhotoVerification.objects.create(user=self.user))
self.assertEqual(first_checkpoint.photo_verification.count(), 1)
# make another attempt for the 'first_checkpoint'
first_checkpoint.add_verification_attempt(SoftwareSecurePhotoVerification.objects.create(user=self.user))
self.assertEqual(first_checkpoint.photo_verification.count(), 2)
# make new attempt for the 'second_checkpoint'
attempt = SoftwareSecurePhotoVerification.objects.create(user=self.user)
second_checkpoint.add_verification_attempt(attempt)
self.assertEqual(second_checkpoint.photo_verification.count(), 1)
# remove the attempt from 'second_checkpoint'
second_checkpoint.photo_verification.remove(attempt)
self.assertEqual(second_checkpoint.photo_verification.count(), 0)
@ddt.ddt
class VerificationStatusTest(ModuleStoreTestCase):
""" Tests for the VerificationStatus model. """
def setUp(self):
super(VerificationStatusTest, self).setUp()
self.user = UserFactory.create()
self.course = CourseFactory.create()
self.first_checkpoint_location = u'i4x://{org}/{course}/edx-reverification-block/first_checkpoint_uuid'.format(
org=self.course.id.org, course=self.course.id.course
)
self.first_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=self.first_checkpoint_location
)
        self.second_checkpoint_location = u'i4x://{org}/{course}/edx-reverification-block/second_checkpoint_uuid'.format(
            org=self.course.id.org, course=self.course.id.course
        )
self.second_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=self.second_checkpoint_location
)
@ddt.data('submitted', "approved", "denied", "error")
def test_add_verification_status(self, status):
""" Adding verification status using the class method. """
# adding verification status
VerificationStatus.add_verification_status(
checkpoint=self.first_checkpoint,
user=self.user,
status=status
)
# test the status from database
result = VerificationStatus.objects.filter(checkpoint=self.first_checkpoint)[0]
self.assertEqual(result.status, status)
self.assertEqual(result.user, self.user)
@ddt.data("approved", "denied", "error")
def test_add_status_from_checkpoints(self, status):
"""Test verification status for reverification checkpoints after
submitting software secure photo verification.
"""
# add initial verification status for checkpoints
initial_status = "submitted"
VerificationStatus.add_verification_status(
checkpoint=self.first_checkpoint,
user=self.user,
status=initial_status
)
VerificationStatus.add_verification_status(
checkpoint=self.second_checkpoint,
user=self.user,
status=initial_status
)
        # now add verification status for multiple checkpoints
VerificationStatus.add_status_from_checkpoints(
checkpoints=[self.first_checkpoint, self.second_checkpoint], user=self.user, status=status
)
# test that verification status entries with new status have been added
# for both checkpoints
result = VerificationStatus.objects.filter(user=self.user, checkpoint=self.first_checkpoint)
self.assertEqual(len(result), len(self.first_checkpoint.checkpoint_status.all()))
self.assertEqual(
list(result.values_list('checkpoint__checkpoint_location', flat=True)),
list(self.first_checkpoint.checkpoint_status.values_list('checkpoint__checkpoint_location', flat=True))
)
result = VerificationStatus.objects.filter(user=self.user, checkpoint=self.second_checkpoint)
self.assertEqual(len(result), len(self.second_checkpoint.checkpoint_status.all()))
self.assertEqual(
list(result.values_list('checkpoint__checkpoint_location', flat=True)),
list(self.second_checkpoint.checkpoint_status.values_list('checkpoint__checkpoint_location', flat=True))
)
def test_get_location_id(self):
"""
Getting location id for a specific checkpoint.
"""
# creating software secure attempt against checkpoint
self.first_checkpoint.add_verification_attempt(SoftwareSecurePhotoVerification.objects.create(user=self.user))
# add initial verification status for checkpoint
VerificationStatus.add_verification_status(
checkpoint=self.first_checkpoint,
user=self.user,
status='submitted',
)
attempt = SoftwareSecurePhotoVerification.objects.filter(user=self.user)
self.assertIsNotNone(VerificationStatus.get_location_id(attempt))
self.assertEqual(VerificationStatus.get_location_id(None), '')
def test_get_user_attempts(self):
"""
        Test that 'get_user_attempts' returns the number of attempts for a checkpoint.
"""
VerificationStatus.add_verification_status(
checkpoint=self.first_checkpoint,
user=self.user,
status='submitted'
)
actual_attempts = VerificationStatus.get_user_attempts(
self.user.id,
self.course.id,
self.first_checkpoint_location
)
self.assertEqual(actual_attempts, 1)
class SkippedReverificationTest(ModuleStoreTestCase):
"""
Tests for the SkippedReverification model.
"""
def setUp(self):
super(SkippedReverificationTest, self).setUp()
self.user = UserFactory.create()
self.course = CourseFactory.create()
dummy_checkpoint_location = u'i4x://edX/DemoX/edx-reverification-block/midterm_uuid'
self.checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=dummy_checkpoint_location
)
def test_add_skipped_attempts(self):
"""
Test 'add_skipped_reverification_attempt' method.
"""
# add verification status
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.checkpoint, user_id=self.user.id, course_id=unicode(self.course.id)
)
# test the status of skipped reverification from database
result = SkippedReverification.objects.filter(course_id=self.course.id)[0]
self.assertEqual(result.checkpoint, self.checkpoint)
self.assertEqual(result.user, self.user)
self.assertEqual(result.course_id, self.course.id)
def test_unique_constraint(self):
"""Test that adding skipped re-verification with same user and course
id will raise 'IntegrityError' exception.
"""
# add verification object
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.checkpoint, user_id=self.user.id, course_id=unicode(self.course.id)
)
with self.assertRaises(IntegrityError):
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.checkpoint, user_id=self.user.id, course_id=unicode(self.course.id)
)
# create skipped attempt for different user
user2 = UserFactory.create()
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.checkpoint, user_id=user2.id, course_id=unicode(self.course.id)
)
# test the status of skipped reverification from database
result = SkippedReverification.objects.filter(user=user2)[0]
self.assertEqual(result.checkpoint, self.checkpoint)
self.assertEqual(result.user, user2)
self.assertEqual(result.course_id, self.course.id)
def test_check_user_skipped_reverification_exists(self):
"""
Test the 'check_user_skipped_reverification_exists' method's response.
"""
# add verification status
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.checkpoint, user_id=self.user.id, course_id=unicode(self.course.id)
)
self.assertTrue(
SkippedReverification.check_user_skipped_reverification_exists(
user_id=self.user.id,
course_id=self.course.id
)
)
user2 = UserFactory.create()
self.assertFalse(
SkippedReverification.check_user_skipped_reverification_exists(
user_id=user2.id,
course_id=self.course.id
)
)
class VerificationDeadlineTest(TestCase):
"""
Tests for the VerificationDeadline model.
"""
def test_caching(self):
deadlines = {
CourseKey.from_string("edX/DemoX/Fall"): datetime.now(pytz.UTC),
CourseKey.from_string("edX/DemoX/Spring"): datetime.now(pytz.UTC) + timedelta(days=1)
}
course_keys = deadlines.keys()
# Initially, no deadlines are set
with self.assertNumQueries(1):
all_deadlines = VerificationDeadline.deadlines_for_courses(course_keys)
self.assertEqual(all_deadlines, {})
# Create the deadlines
for course_key, deadline in deadlines.iteritems():
VerificationDeadline.objects.create(
course_key=course_key,
deadline=deadline,
)
# Warm the cache
with self.assertNumQueries(1):
VerificationDeadline.deadlines_for_courses(course_keys)
# Load the deadlines from the cache
with self.assertNumQueries(0):
all_deadlines = VerificationDeadline.deadlines_for_courses(course_keys)
self.assertEqual(all_deadlines, deadlines)
# Delete the deadlines
VerificationDeadline.objects.all().delete()
# Verify that the deadlines are updated correctly
with self.assertNumQueries(1):
all_deadlines = VerificationDeadline.deadlines_for_courses(course_keys)
self.assertEqual(all_deadlines, {})
| agpl-3.0 | -5,600,385,692,779,133,000 | 40.432681 | 131 | 0.66899 | false |
40223226/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/logging/__init__.py | 733 | 66279 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <[email protected]>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
if hasattr(sys, '_getframe'):
currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
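# Illustrative sketch (not part of the original source): registering a custom
# level name and reading it back. The number 25 and the name 'NOTICE' are
# arbitrary choices for the example.
#
#   addLevelName(25, 'NOTICE')
#   getLevelName(25)            # -> 'NOTICE'
#   getLevelName('NOTICE')      # -> 25 (reverse mapping installed above)
#   getLevelName(26)            # -> 'Level 26' (fallback for unknown levels)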
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
_lock = threading.RLock()
else: #pragma: no cover
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None, sinfo=None, **kwargs):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warning('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.stack_info = sinfo
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - int(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and threading:
self.thread = threading.get_ident()
self.threadName = threading.current_thread().name
else: # pragma: no cover
self.thread = None
self.threadName = None
if not logMultiprocessing: # pragma: no cover
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except Exception: #pragma: no cover
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
msg = str(self.msg)
if self.args:
msg = msg % self.args
return msg
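    # Illustrative sketch (not part of the original source): how msg and args
    # are merged by getMessage(), including the dict-as-sole-argument case
    # described in __init__ above.
    #
    #   r = LogRecord('demo', INFO, __file__, 1, 'Value is %d', (42,), None)
    #   r.getMessage()          # -> 'Value is 42'
    #   r = LogRecord('demo', INFO, __file__, 1, 'a %(a)d b %(b)s',
    #                 ({'a': 1, 'b': 2},), None)
    #   r.getMessage()          # -> 'a 1 b 2'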
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
"""
Set the factory to be used when instantiating a log record.
:param factory: A callable which will be called to instantiate
a log record.
"""
global _logRecordFactory
_logRecordFactory = factory
def getLogRecordFactory():
"""
Return the factory to be used when instantiating a log record.
"""
return _logRecordFactory
def makeLogRecord(dict):
"""
    Make a LogRecord whose attributes are defined by the specified dictionary.
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
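# Illustrative sketch (not part of the original source): rebuilding a LogRecord
# from a plain dictionary, e.g. one received over a socket connection.
#
#   d = {'name': 'demo', 'levelno': INFO, 'levelname': 'INFO',
#        'msg': 'payload received', 'args': None}
#   record = makeLogRecord(d)
#   record.getMessage()         # -> 'payload received'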
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
default_format = '%(message)s'
asctime_format = '%(asctime)s'
asctime_search = '%(asctime)'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
def usesTime(self):
return self._fmt.find(self.asctime_search) >= 0
def format(self, record):
return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
default_format = '{message}'
asctime_format = '{asctime}'
asctime_search = '{asctime'
def format(self, record):
return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
default_format = '${message}'
asctime_format = '${asctime}'
asctime_search = '${asctime}'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
self._tpl = Template(self._fmt)
def usesTime(self):
fmt = self._fmt
return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0
def format(self, record):
return self._tpl.substitute(**record.__dict__)
_STYLES = {
'%': PercentStyle,
'{': StrFormatStyle,
'$': StringTemplateStyle
}
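# Illustrative sketch (not part of the original source): the same layout
# expressed in each of the three supported format styles.
#
#   f_percent  = Formatter('%(levelname)s:%(name)s:%(message)s', style='%')
#   f_brace    = Formatter('{levelname}:{name}:{message}', style='{')
#   f_template = Formatter('${levelname}:${name}:${message}', style='$')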
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
default value of "%s(message)" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None, style='%'):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
.. versionchanged: 3.2
Added the ``style`` parameter.
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
self._style = _STYLES[style](fmt)
self._fmt = self._style._fmt
self.datefmt = datefmt
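    # Illustrative sketch (not part of the original source): a formatter using
    # several of the record attributes documented in the class docstring;
    # 'handler' stands in for any Handler instance.
    #
    #   fmt = Formatter('%(asctime)s %(levelname)-8s %(name)s: %(message)s',
    #                   datefmt='%Y-%m-%d %H:%M:%S')
    #   handler.setFormatter(fmt)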
default_time_format = '%Y-%m-%d %H:%M:%S'
default_msec_format = '%s,%03d'
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime(self.default_time_format, ct)
s = self.default_msec_format % (t, record.msecs)
return s
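    # Illustrative sketch (not part of the original source): emitting timestamps
    # in UTC instead of local time, per the note on 'converter' above.
    #
    #   Formatter.converter = time.gmtime   # affects every formatter
    #   fmt = Formatter('%(asctime)s %(message)s')
    #   fmt.converter = time.gmtime         # affects just this instance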
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = io.StringIO()
tb = ei[2]
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
traceback.print_exception(ei[0], ei[1], tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._style.usesTime()
def formatMessage(self, record):
return self._style.format(record)
def formatStack(self, stack_info):
"""
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
"""
return stack_info
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime()), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self.formatMessage(record)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
s = s + record.exc_text
if record.stack_info:
if s[-1:] != "\n":
s = s + "\n"
s = s + self.formatStack(record.stack_info)
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return True
elif self.name == record.name:
return True
elif record.name.find(self.name, 0, self.nlen) != 0:
return False
return (record.name[self.nlen] == ".")
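# Illustrative sketch (not part of the original source): the hierarchy rule
# from the Filter docstring. A filter named 'A.B' passes records from 'A.B'
# and its descendants only.
#
#   flt = Filter('A.B')
#   flt.filter(makeLogRecord({'name': 'A.B.C'}))    # -> True
#   flt.filter(makeLogRecord({'name': 'A.BB'}))     # -> False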
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
.. versionchanged: 3.2
Allow filters to be just callables.
"""
rv = True
for f in self.filters:
if hasattr(f, 'filter'):
result = f.filter(record)
else:
result = f(record) # assume callable - will raise if not
if not result:
rv = False
break
return rv
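# Illustrative sketch (not part of the original source): since Python 3.2 a
# plain callable can serve as a filter; 'some_handler' stands in for any
# Handler (or Logger) instance.
#
#   def drop_debug(record):
#       return record.levelno > DEBUG
#
#   some_handler.addFilter(drop_debug)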
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. If _acquireLock is None, assume this is the case and do
# nothing.
if (_acquireLock is not None and _handlerList is not None and
_releaseLock is not None):
_acquireLock()
try:
if wr in _handlerList:
_handlerList.remove(wr)
finally:
_releaseLock()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if threading:
self.lock = threading.RLock()
else: #pragma: no cover
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler. level must be an int or a str.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError: #pragma: no cover
pass # see issue 5971
finally:
del ei
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
terminator = '\n'
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
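# Illustrative sketch (not part of the original source): a StreamHandler that
# writes formatted records to stdout; 'demo' is an arbitrary logger name.
#
#   h = StreamHandler(sys.stdout)
#   h.setLevel(INFO)
#   h.setFormatter(Formatter(BASIC_FORMAT))
#   getLogger('demo').addHandler(h)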
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
StreamHandler.close(self)
self.stream = None
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
return open(self.baseFilename, self.mode, encoding=self.encoding)
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
class _StderrHandler(StreamHandler):
"""
This class is like a StreamHandler using sys.stderr, but always uses
whatever sys.stderr is currently set to rather than the value of
sys.stderr at handler construction time.
"""
def __init__(self, level=NOTSET):
"""
Initialize the handler.
"""
Handler.__init__(self, level)
@property
def stream(self):
return sys.stderr
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
if alogger not in self.loggerMap:
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
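# Illustrative sketch (not part of the original source): installing a Logger
# subclass so that loggers created afterwards carry extra behaviour; the
# 'audit' method is an invented example.
#
#   class AuditLogger(Logger):
#       def audit(self, msg, *args, **kwargs):
#           self.log(INFO, 'AUDIT: ' + msg, *args, **kwargs)
#
#   setLoggerClass(AuditLogger)
#   # loggers obtained via getLogger() after this point are AuditLogger instances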
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = False
self.loggerDict = {}
self.loggerClass = None
self.logRecordFactory = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def setLogRecordFactory(self, factory):
"""
Set the factory to be used when instantiating a log record with this
Manager.
"""
self.logRecordFactory = factory
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(name)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = True
self.handlers = []
self.disabled = False
def setLevel(self, level):
"""
Set the logging level of this logger. level must be an int or a str.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = True
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
sinfo = None
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, sinfo = self.findCaller(stack_info)
except ValueError: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, sinfo)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def hasHandlers(self):
"""
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the
logger hierarchy. Return True if a handler was found, else False.
Stop searching up the hierarchy whenever a logger with the "propagate"
attribute set to zero is found - that will be the last logger which
is checked for the existence of handlers.
"""
c = self
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0):
if lastResort:
if record.levelno >= lastResort.level:
lastResort.handle(record)
elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
class RootLogger(Logger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
#
# Boilerplate convenience methods
#
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger.
"""
self.log(DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger.
"""
self.log(INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger.
"""
self.log(WARNING, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger.
"""
self.log(ERROR, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger.
"""
kwargs["exc_info"] = True
self.log(ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger.
"""
self.log(CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, msg, args, **kwargs)
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.logger.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def setLevel(self, level):
"""
Set the specified level on the underlying logger.
"""
self.logger.setLevel(level)
def getEffectiveLevel(self):
"""
Get the effective level for the underlying logger.
"""
return self.logger.getEffectiveLevel()
def hasHandlers(self):
"""
See if the underlying logger has any handlers.
"""
return self.logger.hasHandlers()
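# Illustrative LoggerAdapter usage (commented-out sketch; the logger name, keys
# and values below are invented for the example and are not part of this module):
#
#     import logging
#     logging.basicConfig(format='%(asctime)s %(ip)s %(user)s %(message)s')
#     logger = logging.getLogger('example.adapter')
#     adapter = logging.LoggerAdapter(logger, {'ip': '192.0.2.1', 'user': 'jdoe'})
#     adapter.info('login accepted')  # record carries 'ip' and 'user' attributes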
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
style If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers If specified, this should be an iterable of already created
handlers, which will be added to the root handler. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
.. versionchanged:: 3.2
Added the ``style`` parameter.
.. versionchanged:: 3.3
Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
incompatible arguments (e.g. ``handlers`` specified together with
``filename``/``filemode``, or ``filename``/``filemode`` specified
together with ``stream``, or ``handlers`` specified together with
       ``stream``).
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
handlers = kwargs.get("handlers")
if handlers is None:
if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be "
"specified together")
else:
if "stream" in kwargs or "filename" in kwargs:
raise ValueError("'stream' or 'filename' should not be "
"specified together with 'handlers'")
if handlers is None:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
h = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
h = StreamHandler(stream)
handlers = [h]
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
style = kwargs.get("style", '%')
fmt = Formatter(fs, dfs, style)
for h in handlers:
if h.formatter is None:
h.setFormatter(fmt)
root.addHandler(h)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
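# Illustrative one-shot configuration via basicConfig (commented-out sketch; the
# file name, format and level are arbitrary examples, not defaults):
#
#     import logging
#     logging.basicConfig(filename='app.log', filemode='w', level=logging.DEBUG,
#                         format='%(asctime)s %(levelname)s %(name)s: %(message)s')
#     logging.debug('this message goes to app.log')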
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger. If the logger
has no handlers, call basicConfig() to add a console handler with a
pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger, with exception
information. If the logger has no handlers, basicConfig() is called to add
a console handler with a pre-defined format.
"""
kwargs['exc_info'] = True
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
warnings.warn("The 'warn' function is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger. If
the logger has no handlers, call basicConfig() to add a console handler
with a pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
"""Stub."""
def emit(self, record):
"""Stub."""
def createLock(self):
self.lock = None
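# Illustrative library-side use of NullHandler (commented-out sketch; the
# package name 'mylibrary' is a placeholder):
#
#     import logging
#     logging.getLogger('mylibrary').addHandler(logging.NullHandler())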
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
    Implementation of showwarning which redirects to logging. It will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
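# Illustrative use of captureWarnings (commented-out sketch): after the call,
# warnings.warn(...) messages are routed to the 'py.warnings' logger instead of
# being written directly to sys.stderr.
#
#     import logging, warnings
#     logging.captureWarnings(True)
#     warnings.warn('example warning routed through logging')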
| gpl-3.0 | 2,682,146,306,567,428,600 | 33.737421 | 89 | 0.585404 | false |
magenta/magenta | magenta/pipelines/dag_pipeline_test.py | 1 | 29773 | # Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dag_pipeline."""
import collections
from absl.testing import absltest
from magenta.pipelines import dag_pipeline
from magenta.pipelines import pipeline
from magenta.pipelines import statistics
Type0 = collections.namedtuple('Type0', ['x', 'y', 'z'])
Type1 = collections.namedtuple('Type1', ['x', 'y'])
Type2 = collections.namedtuple('Type2', ['z'])
Type3 = collections.namedtuple('Type3', ['s', 't'])
Type4 = collections.namedtuple('Type4', ['s', 't', 'z'])
Type5 = collections.namedtuple('Type5', ['a', 'b', 'c', 'd', 'z'])
# pylint:disable=missing-class-docstring
class UnitA(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'t1': Type1, 't2': Type2})
def transform(self, input_object):
t1 = Type1(x=input_object.x, y=input_object.y)
t2 = Type2(z=input_object.z)
return {'t1': [t1], 't2': [t2]}
class UnitB(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type1, Type3)
def transform(self, input_object):
t3 = Type3(s=input_object.x * 1000, t=input_object.y - 100)
return [t3]
class UnitC(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(
self,
{'A_data': Type2, 'B_data': Type3},
{'regular_data': Type4, 'special_data': Type4})
def transform(self, input_object):
s = input_object['B_data'].s
t = input_object['B_data'].t
z = input_object['A_data'].z
regular = Type4(s=s, t=t, z=0)
special = Type4(s=s + z * 100, t=t - z * 100, z=z)
return {'regular_data': [regular], 'special_data': [special]}
class UnitD(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(
self, {'0': Type4, '1': Type3, '2': Type4}, Type5)
def transform(self, input_object):
assert input_object['1'].s == input_object['0'].s
assert input_object['1'].t == input_object['0'].t
t5 = Type5(
a=input_object['0'].s, b=input_object['0'].t,
c=input_object['2'].s, d=input_object['2'].t, z=input_object['2'].z)
return [t5]
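# The units above are wired together in most tests below using the dict-based
# DAG syntax; a sketch of the wiring exercised in testSingleOutputs:
#
#   dag = {a: dag_pipeline.DagInput(Type0),
#          b: a['t1'],
#          c: {'A_data': a['t2'], 'B_data': b},
#          d: {'0': c['regular_data'], '1': b, '2': c['special_data']},
#          dag_pipeline.DagOutput('abcdz'): d}
#
# UnitA splits a Type0 into Type1/Type2 streams, UnitB and UnitC transform them,
# and UnitD joins everything into the Type5 objects emitted at the output.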
class DAGPipelineTest(absltest.TestCase):
def testDAGPipelineInputAndOutputType(self):
# Tests that the DAGPipeline has the correct `input_type` and
# `output_type` values based on the DAG given to it.
a, b, c, d = UnitA(), UnitB(), UnitC(), UnitD()
dag = {a: dag_pipeline.DagInput(Type0),
b: a['t1'],
c: {'A_data': a['t2'], 'B_data': b},
d: {'0': c['regular_data'], '1': b, '2': c['special_data']},
dag_pipeline.DagOutput('abcdz'): d}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(dag_pipe_obj.input_type, Type0)
self.assertEqual(dag_pipe_obj.output_type, {'abcdz': Type5})
dag = {a: dag_pipeline.DagInput(Type0),
dag_pipeline.DagOutput('t1'): a['t1'],
dag_pipeline.DagOutput('t2'): a['t2']}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(dag_pipe_obj.input_type, Type0)
self.assertEqual(dag_pipe_obj.output_type, {'t1': Type1, 't2': Type2})
def testSingleOutputs(self):
# Tests single object and dictionaries in the DAG.
a, b, c, d = UnitA(), UnitB(), UnitC(), UnitD()
dag = {a: dag_pipeline.DagInput(Type0),
b: a['t1'],
c: {'A_data': a['t2'], 'B_data': b},
d: {'0': c['regular_data'], '1': b, '2': c['special_data']},
dag_pipeline.DagOutput('abcdz'): d}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
inputs = [Type0(1, 2, 3), Type0(-1, -2, -3), Type0(3, -3, 2)]
for input_object in inputs:
x, y, z = input_object.x, input_object.y, input_object.z
output_dict = dag_pipe_obj.transform(input_object)
self.assertEqual(list(output_dict.keys()), ['abcdz'])
results = output_dict['abcdz']
self.assertLen(results, 1)
result = results[0]
# The following outputs are the result of passing the values in
# `input_object` through the transform functions of UnitA, UnitB, UnitC,
# and UnitD (all defined at the top of this file), connected in the way
# defined by `dag`.
self.assertEqual(result.a, x * 1000)
self.assertEqual(result.b, y - 100)
self.assertEqual(result.c, x * 1000 + z * 100)
self.assertEqual(result.d, y - 100 - z * 100)
self.assertEqual(result.z, z)
def testMultiOutput(self):
# Tests a pipeline.Pipeline that maps a single input to multiple outputs.
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'t1': Type1, 't2': Type2})
def transform(self, input_object):
t1 = [Type1(x=input_object.x + i, y=input_object.y + i)
for i in range(input_object.z)]
t2 = [Type2(z=input_object.z)]
return {'t1': t1, 't2': t2}
q, b, c = UnitQ(), UnitB(), UnitC()
dag = {q: dag_pipeline.DagInput(Type0),
b: q['t1'],
c: {'A_data': q['t2'], 'B_data': b},
dag_pipeline.DagOutput('outputs'): c['regular_data']}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
x, y, z = 1, 2, 3
output_dict = dag_pipe_obj.transform(Type0(x, y, z))
self.assertEqual(list(output_dict.keys()), ['outputs'])
results = output_dict['outputs']
self.assertLen(results, 3)
expected_results = [Type4((x + i) * 1000, (y + i) - 100, 0)
for i in range(z)]
self.assertEqual(set(results), set(expected_results))
def testUnequalOutputCounts(self):
# Tests dictionary output type where each output list has a different size.
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, input_object):
return [Type1(x=input_object.x + i, y=input_object.y + i)
for i in range(input_object.z)]
class Partitioner(pipeline.Pipeline):
def __init__(self, input_type, training_set_name, test_set_name):
self.training_set_name = training_set_name
self.test_set_name = test_set_name
pipeline.Pipeline.__init__(
self,
input_type,
{training_set_name: input_type, test_set_name: input_type})
def transform(self, input_object):
if input_object.x < 0:
return {self.training_set_name: [],
self.test_set_name: [input_object]}
return {self.training_set_name: [input_object], self.test_set_name: []}
q = UnitQ()
partition = Partitioner(q.output_type, 'training_set', 'test_set')
dag = {q: dag_pipeline.DagInput(q.input_type),
partition: q,
dag_pipeline.DagOutput('training_set'): partition['training_set'],
dag_pipeline.DagOutput('test_set'): partition['test_set']}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
x, y, z = -3, 0, 8
output_dict = dag_pipe_obj.transform(Type0(x, y, z))
self.assertEqual(set(output_dict.keys()), set(['training_set', 'test_set']))
training_results = output_dict['training_set']
test_results = output_dict['test_set']
expected_training_results = [Type1(x + i, y + i) for i in range(-x, z)]
expected_test_results = [Type1(x + i, y + i) for i in range(0, -x)]
self.assertEqual(set(training_results), set(expected_training_results))
self.assertEqual(set(test_results), set(expected_test_results))
def testIntermediateUnequalOutputCounts(self):
# Tests that intermediate output lists which are not the same length are
# handled correctly.
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return {'xy': [Type1(x=input_object.x + i, y=input_object.y + i)
for i in range(input_object.z)],
'z': [Type2(z=i) for i in [-input_object.z, input_object.z]]}
class Partitioner(pipeline.Pipeline):
def __init__(self, input_type, training_set_name, test_set_name):
self.training_set_name = training_set_name
self.test_set_name = test_set_name
pipeline.Pipeline.__init__(
self,
input_type,
{training_set_name: Type0, test_set_name: Type0})
def transform(self, input_object):
input_dict = input_object
input_object = Type0(input_dict['xy'].x,
input_dict['xy'].y,
input_dict['z'].z)
if input_object.x < 0:
return {self.training_set_name: [],
self.test_set_name: [input_object]}
return {self.training_set_name: [input_object], self.test_set_name: []}
q = UnitQ()
partition = Partitioner(q.output_type, 'training_set', 'test_set')
dag = {q: dag_pipeline.DagInput(q.input_type),
partition: {'xy': q['xy'], 'z': q['z']},
dag_pipeline.DagOutput('training_set'): partition['training_set'],
dag_pipeline.DagOutput('test_set'): partition['test_set']}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
x, y, z = -3, 0, 8
output_dict = dag_pipe_obj.transform(Type0(x, y, z))
self.assertEqual(set(output_dict.keys()), set(['training_set', 'test_set']))
training_results = output_dict['training_set']
test_results = output_dict['test_set']
all_expected_results = [Type0(x + i, y + i, zed)
for i in range(0, z) for zed in [-z, z]]
expected_training_results = [sample for sample in all_expected_results
if sample.x >= 0]
expected_test_results = [sample for sample in all_expected_results
if sample.x < 0]
self.assertEqual(set(training_results), set(expected_training_results))
self.assertEqual(set(test_results), set(expected_test_results))
def testDirectConnection(self):
# Tests a direct dict to dict connection in the DAG.
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return {'xy': [Type1(x=input_object.x, y=input_object.y)],
'z': [Type2(z=input_object.z)]}
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, {'xy': Type1, 'z': Type2}, Type4)
def transform(self, input_object):
input_dict = input_object
return [Type4(input_dict['xy'].x,
input_dict['xy'].y,
input_dict['z'].z)]
q, r = UnitQ(), UnitR()
dag = {q: dag_pipeline.DagInput(q.input_type),
r: q,
dag_pipeline.DagOutput('output'): r}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
x, y, z = -3, 0, 8
output_dict = dag_pipe_obj.transform(Type0(x, y, z))
self.assertEqual(output_dict, {'output': [Type4(x, y, z)]})
def testOutputConnectedToDict(self):
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return {'xy': [Type1(x=input_object.x, y=input_object.y)],
'z': [Type2(z=input_object.z)]}
q = UnitQ()
dag = {q: dag_pipeline.DagInput(q.input_type),
dag_pipeline.DagOutput(): q}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(dag_pipe_obj.output_type, {'xy': Type1, 'z': Type2})
x, y, z = -3, 0, 8
output_dict = dag_pipe_obj.transform(Type0(x, y, z))
self.assertEqual(output_dict, {'xy': [Type1(x, y)], 'z': [Type2(z)]})
dag = {q: dag_pipeline.DagInput(q.input_type),
dag_pipeline.DagOutput(): {'xy': q['xy'], 'z': q['z']}}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(dag_pipe_obj.output_type, {'xy': Type1, 'z': Type2})
x, y, z = -3, 0, 8
output_dict = dag_pipe_obj.transform(Type0(x, y, z))
self.assertEqual(output_dict, {'xy': [Type1(x, y)], 'z': [Type2(z)]})
def testNoOutputs(self):
# Test that empty lists or dicts as intermediate or final outputs don't
# break anything.
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return {'xy': [], 'z': []}
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, {'xy': Type1, 'z': Type2}, Type4)
def transform(self, input_object):
input_dict = input_object
return [Type4(input_dict['xy'].x,
input_dict['xy'].y,
input_dict['z'].z)]
class UnitS(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, unused_input_dict):
return []
q, r, s = UnitQ(), UnitR(), UnitS()
dag = {q: dag_pipeline.DagInput(Type0),
r: q,
dag_pipeline.DagOutput('output'): r}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(dag_pipe_obj.transform(Type0(1, 2, 3)), {'output': []})
dag = {q: dag_pipeline.DagInput(Type0),
s: dag_pipeline.DagInput(Type0),
r: {'xy': s, 'z': q['z']},
dag_pipeline.DagOutput('output'): r}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(dag_pipe_obj.transform(Type0(1, 2, 3)), {'output': []})
dag = {s: dag_pipeline.DagInput(Type0),
dag_pipeline.DagOutput('output'): s}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(dag_pipe_obj.transform(Type0(1, 2, 3)), {'output': []})
dag = {q: dag_pipeline.DagInput(Type0),
dag_pipeline.DagOutput(): q}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(
dag_pipe_obj.transform(Type0(1, 2, 3)),
{'xy': [], 'z': []})
def testNoPipelines(self):
dag = {dag_pipeline.DagOutput('output'): dag_pipeline.DagInput(Type0)}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
self.assertEqual(
dag_pipe_obj.transform(Type0(1, 2, 3)),
{'output': [Type0(1, 2, 3)]})
def testStatistics(self):
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
self.stats = []
def transform(self, input_object):
self._set_stats([statistics.Counter('output_count', input_object.z)])
return [Type1(x=input_object.x + i, y=input_object.y + i)
for i in range(input_object.z)]
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type1, Type1)
def transform(self, input_object):
self._set_stats([statistics.Counter('input_count', 1)])
return [input_object]
q, r = UnitQ(), UnitR()
dag = {q: dag_pipeline.DagInput(q.input_type),
r: q,
dag_pipeline.DagOutput('output'): r}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag, 'DAGPipelineName')
for x, y, z in [(-3, 0, 8), (1, 2, 3), (5, -5, 5)]:
dag_pipe_obj.transform(Type0(x, y, z))
stats_1 = dag_pipe_obj.get_stats()
stats_2 = dag_pipe_obj.get_stats()
self.assertEqual(stats_1, stats_2)
for stat in stats_1:
self.assertIsInstance(stat, statistics.Counter)
names = sorted([stat.name for stat in stats_1])
self.assertEqual(
names,
(['DAGPipelineName_UnitQ_output_count'] +
['DAGPipelineName_UnitR_input_count'] * z))
for stat in stats_1:
if stat.name == 'DAGPipelineName_UnitQ_output_count':
self.assertEqual(stat.count, z)
else:
self.assertEqual(stat.count, 1)
def testInvalidDAGError(self):
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'a': Type1, 'b': Type2})
def transform(self, input_object):
pass
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type1, Type2)
def transform(self, input_object):
pass
q, r = UnitQ(), UnitR()
dag = {q: dag_pipeline.DagInput(Type0),
UnitR: q,
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
'r': q,
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
r: UnitQ,
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
r: 123,
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
dag = {dag_pipeline.DagInput(Type0): q,
dag_pipeline.DagOutput(): q}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
q: dag_pipeline.DagOutput('output')}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
r: {'abc': q['a'], 'def': 123},
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
r: {123: q['a']},
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.InvalidDAGError):
dag_pipeline.DAGPipeline(dag)
def testTypeMismatchError(self):
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, input_object):
pass
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type1, {'a': Type2, 'b': Type3})
def transform(self, input_object):
pass
class UnitS(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, {'x': Type2, 'y': Type3}, Type4)
def transform(self, input_object):
pass
class UnitT(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, {'x': Type2, 'y': Type5}, Type4)
def transform(self, input_object):
pass
q, r, s, t = UnitQ(), UnitR(), UnitS(), UnitT()
dag = {q: dag_pipeline.DagInput(Type1),
r: q,
s: r,
dag_pipeline.DagOutput('output'): s}
with self.assertRaises(dag_pipeline.TypeMismatchError):
dag_pipeline.DAGPipeline(dag)
q2 = UnitQ()
dag = {q: dag_pipeline.DagInput(Type0),
q2: q,
dag_pipeline.DagOutput('output'): q2}
with self.assertRaises(dag_pipeline.TypeMismatchError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
r: q,
s: {'x': r['b'], 'y': r['a']},
dag_pipeline.DagOutput('output'): s}
with self.assertRaises(dag_pipeline.TypeMismatchError):
dag_pipeline.DAGPipeline(dag)
dag = {q: dag_pipeline.DagInput(Type0),
r: q,
t: r,
dag_pipeline.DagOutput('output'): t}
with self.assertRaises(dag_pipeline.TypeMismatchError):
dag_pipeline.DAGPipeline(dag)
def testDependencyLoops(self):
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, input_object):
pass
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type1, Type0)
def transform(self, input_object):
pass
class UnitS(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, {'a': Type1, 'b': Type0}, Type1)
def transform(self, input_object):
pass
class UnitT(pipeline.Pipeline):
def __init__(self, name='UnitT'):
pipeline.Pipeline.__init__(self, Type0, Type0, name)
def transform(self, input_object):
pass
q, r, s, t = UnitQ(), UnitR(), UnitS(), UnitT()
dag = {q: dag_pipeline.DagInput(q.input_type),
s: {'a': q, 'b': r},
r: s,
dag_pipeline.DagOutput('output'): r,
dag_pipeline.DagOutput('output_2'): s}
with self.assertRaises(dag_pipeline.BadTopologyError):
dag_pipeline.DAGPipeline(dag)
dag = {s: {'a': dag_pipeline.DagInput(Type1), 'b': r},
r: s,
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.BadTopologyError):
dag_pipeline.DAGPipeline(dag)
dag = {dag_pipeline.DagOutput('output'): dag_pipeline.DagInput(Type0),
t: t}
with self.assertRaises(dag_pipeline.BadTopologyError):
dag_pipeline.DAGPipeline(dag)
t2 = UnitT('UnitT2')
dag = {dag_pipeline.DagOutput('output'): dag_pipeline.DagInput(Type0),
t2: t,
t: t2}
with self.assertRaises(dag_pipeline.BadTopologyError):
dag_pipeline.DAGPipeline(dag)
def testDisjointGraph(self):
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, input_object):
pass
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type1, {'a': Type2, 'b': Type3})
def transform(self, input_object):
pass
q, r = UnitQ(), UnitR()
dag = {q: dag_pipeline.DagInput(q.input_type),
dag_pipeline.DagOutput(): r}
with self.assertRaises(dag_pipeline.NotConnectedError):
dag_pipeline.DAGPipeline(dag)
q, r = UnitQ(), UnitR()
dag = {q: dag_pipeline.DagInput(q.input_type),
dag_pipeline.DagOutput(): {'a': q, 'b': r['b']}}
with self.assertRaises(dag_pipeline.NotConnectedError):
dag_pipeline.DAGPipeline(dag)
# Pipelines that do not output to anywhere are not allowed.
dag = {dag_pipeline.DagOutput('output'):
dag_pipeline.DagInput(q.input_type),
q: dag_pipeline.DagInput(q.input_type),
r: q}
with self.assertRaises(dag_pipeline.NotConnectedError):
dag_pipeline.DAGPipeline(dag)
# Pipelines which need to be executed but don't have inputs are not allowed.
dag = {dag_pipeline.DagOutput('output'):
dag_pipeline.DagInput(q.input_type),
r: q,
dag_pipeline.DagOutput(): r}
with self.assertRaises(dag_pipeline.NotConnectedError):
dag_pipeline.DAGPipeline(dag)
def testBadInputOrOutputError(self):
class UnitQ(pipeline.Pipeline):
def __init__(self, name='UnitQ'):
pipeline.Pipeline.__init__(self, Type0, Type1, name)
def transform(self, input_object):
pass
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type1, Type0)
def transform(self, input_object):
pass
# Missing Input.
q, r = UnitQ(), UnitR()
dag = {r: q,
dag_pipeline.DagOutput('output'): r}
with self.assertRaises(dag_pipeline.BadInputOrOutputError):
dag_pipeline.DAGPipeline(dag)
# Missing Output.
dag = {q: dag_pipeline.DagInput(Type0),
r: q}
with self.assertRaises(dag_pipeline.BadInputOrOutputError):
dag_pipeline.DAGPipeline(dag)
# Multiple instances of Input with the same type IS allowed.
q2 = UnitQ('UnitQ2')
dag = {q: dag_pipeline.DagInput(Type0),
q2: dag_pipeline.DagInput(Type0),
dag_pipeline.DagOutput(): {'q': q, 'q2': q2}}
_ = dag_pipeline.DAGPipeline(dag)
# Multiple instances with different types is not allowed.
dag = {q: dag_pipeline.DagInput(Type0),
r: dag_pipeline.DagInput(Type1),
dag_pipeline.DagOutput(): {'q': q, 'r': r}}
with self.assertRaises(dag_pipeline.BadInputOrOutputError):
dag_pipeline.DAGPipeline(dag)
def testDuplicateNameError(self):
class UnitQ(pipeline.Pipeline):
def __init__(self, name='UnitQ'):
pipeline.Pipeline.__init__(self, Type0, Type1, name)
def transform(self, input_object):
pass
q, q2 = UnitQ(), UnitQ()
dag = {q: dag_pipeline.DagInput(Type0),
q2: dag_pipeline.DagInput(Type0),
dag_pipeline.DagOutput(): {'q': q, 'q2': q2}}
with self.assertRaises(dag_pipeline.DuplicateNameError):
dag_pipeline.DAGPipeline(dag)
def testInvalidDictionaryOutputError(self):
b = UnitB()
dag = {b: dag_pipeline.DagInput(b.input_type),
dag_pipeline.DagOutput(): b}
with self.assertRaises(dag_pipeline.InvalidDictionaryOutputError):
dag_pipeline.DAGPipeline(dag)
a = UnitA()
dag = {a: dag_pipeline.DagInput(b.input_type),
dag_pipeline.DagOutput('output'): a}
with self.assertRaises(dag_pipeline.InvalidDictionaryOutputError):
dag_pipeline.DAGPipeline(dag)
a2 = UnitA()
dag = {a: dag_pipeline.DagInput(a.input_type),
a2: dag_pipeline.DagInput(a2.input_type),
dag_pipeline.DagOutput('output'): {'t1': a['t1'], 't2': a2['t2']}}
with self.assertRaises(dag_pipeline.InvalidDictionaryOutputError):
dag_pipeline.DAGPipeline(dag)
def testInvalidTransformOutputError(self):
# This happens when the output of a pipeline's `transform` method does not
# match the type signature given by the pipeline's `output_type`.
class UnitQ1(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, input_object):
return [Type2(1)]
class UnitQ2(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, input_object):
return [Type1(1, 2), Type2(1)]
class UnitQ3(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, Type1)
def transform(self, input_object):
return Type1(1, 2)
class UnitR1(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return {'xy': [Type1(1, 2)], 'z': [Type1(1, 2)]}
class UnitR2(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return {'xy': [Type1(1, 2)]}
class UnitR3(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return [{'xy': [Type1(1, 2)], 'z': Type2(1)}]
class UnitR4(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return [{'xy': [Type1(1, 2), Type2(1)], 'z': [Type2(1)]}]
class UnitR5(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, Type0, {'xy': Type1, 'z': Type2})
def transform(self, input_object):
return [{'xy': [Type1(1, 2), Type1(1, 3)], 'z': [Type2(1)], 'q': []}]
for pipeline_class in [UnitQ1, UnitQ2, UnitQ3,
UnitR1, UnitR2, UnitR3, UnitR4, UnitR5]:
pipe = pipeline_class()
if pipeline_class.__name__.startswith('UnitR'):
output = dag_pipeline.DagOutput()
else:
output = dag_pipeline.DagOutput('output')
dag = {pipe: dag_pipeline.DagInput(pipe.input_type),
output: pipe}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
with self.assertRaises(dag_pipeline.InvalidTransformOutputError):
dag_pipe_obj.transform(Type0(1, 2, 3))
def testInvalidStatisticsError(self):
class UnitQ(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, str, str)
def transform(self, input_object):
self._set_stats([statistics.Counter('stat_1', 5), 1234])
return [input_object]
class UnitR(pipeline.Pipeline):
def __init__(self):
pipeline.Pipeline.__init__(self, str, str)
def transform(self, input_object):
self._set_stats(statistics.Counter('stat_1', 5))
return [input_object]
q = UnitQ()
dag = {q: dag_pipeline.DagInput(q.input_type),
dag_pipeline.DagOutput('output'): q}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
with self.assertRaises(pipeline.InvalidStatisticsError):
dag_pipe_obj.transform('hello world')
r = UnitR()
dag = {r: dag_pipeline.DagInput(q.input_type),
dag_pipeline.DagOutput('output'): r}
dag_pipe_obj = dag_pipeline.DAGPipeline(dag)
with self.assertRaises(pipeline.InvalidStatisticsError):
dag_pipe_obj.transform('hello world')
if __name__ == '__main__':
absltest.main()
| apache-2.0 | 525,818,771,944,524,740 | 32.641808 | 80 | 0.603231 | false |
jgoclawski/django | tests/messages_tests/base.py | 319 | 14243 | from django import http
from django.contrib.messages import constants, get_level, set_level, utils
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.constants import DEFAULT_LEVELS
from django.contrib.messages.storage import base, default_storage
from django.contrib.messages.storage.base import Message
from django.core.urlresolvers import reverse
from django.test import modify_settings, override_settings
from django.utils.translation import ugettext_lazy
def add_level_messages(storage):
"""
Adds 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
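# For reference, the numeric levels used above (django.contrib.messages.constants):
# DEBUG=10, INFO=20, SUCCESS=25, WARNING=30, ERROR=40, plus the custom level 29.
# With the default threshold of INFO, only the DEBUG message is filtered out,
# which is why several tests below expect 5 of these 6 messages to be recorded.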
class override_settings_tags(override_settings):
def enable(self):
super(override_settings_tags, self).enable()
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, we need to update that constant too.
self.old_level_tags = base.LEVEL_TAGS
base.LEVEL_TAGS = utils.get_level_tags()
def disable(self):
super(override_settings_tags, self).disable()
base.LEVEL_TAGS = self.old_level_tags
class BaseTests(object):
storage_class = default_storage
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self.settings_override = override_settings_tags(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
),
},
}],
ROOT_URLCONF='messages_tests.urls',
MESSAGE_TAGS='',
MESSAGE_STORAGE='%s.%s' % (self.storage_class.__module__,
self.storage_class.__name__),
SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Returns the storage backend, setting its loaded data to the ``data``
argument.
        This method prevents the storage ``_get`` method from being called so
        that other parts of the storage backend can be tested independently of
        the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, ugettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, tests that messages are properly
stored and then retrieved across the full request/redirect/response
cycle.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
messages = [Message(self.levels[level], msg) for msg in data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_with_template_response(self):
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_template_response')
for level in self.levels.keys():
add_url = reverse('add_template_response', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
def test_context_processor_message_levels(self):
show_url = reverse('show_template_response')
response = self.client.get(show_url)
self.assertIn('DEFAULT_MESSAGE_LEVELS', response.context)
self.assertEqual(response.context['DEFAULT_MESSAGE_LEVELS'], DEFAULT_LEVELS)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_multiple_posts(self):
"""
Tests that messages persist properly when multiple POSTs are made
before a GET.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend(Message(self.levels[level], msg) for msg in data['messages'])
add_url = reverse('add_message', args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertIn('messages', response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE_CLASSES={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
MESSAGE_LEVEL=constants.DEBUG,
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled(self):
"""
Tests that, when the middleware is disabled, an exception is raised
when one attempts to store a message.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
self.assertRaises(MessageFailure, self.client.post, add_url,
data, follow=True)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE_CLASSES={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled_fail_silently(self):
"""
Tests that, when the middleware is disabled, an exception is not
raised if 'fail_silently' = True
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
'fail_silently': True,
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertNotIn('messages', response.context)
def stored_messages_count(self, storage, response):
"""
Returns the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2',
extra_tags='tag')])
def test_existing_read(self):
"""
Tests that reading the existing storage doesn't cause the data to be
lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assertTrue(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assertTrue(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
@override_settings(MESSAGE_LEVEL=29)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', '', 'extra-tag debug', 'warning', 'error',
'success'])
def test_level_tag(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.level_tag for msg in storage]
self.assertEqual(tags,
['info', '', 'debug', 'warning', 'error',
'success'])
@override_settings_tags(MESSAGE_TAGS={
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
}
)
def test_custom_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', 'custom', 'extra-tag', '', 'bad', 'success'])
| bsd-3-clause | 7,031,746,378,745,251,000 | 36.18799 | 94 | 0.604718 | false |
ArcticWarriors/scouting-app-2016 | ScoutingWebsite/Scouting2017/model/get_team_metrics.py | 2 | 1861 | '''
Created on Mar 5, 2017
@author: PJ
'''
from django.db.models.aggregates import Avg, Sum
from django.db.models.expressions import Case, When
def get_team_metrics(team, regional_code):
metrics = team.scoreresult_set.filter(competition__code=regional_code).aggregate(
Avg("auto_fuel_high_score"),
Avg("auto_gears"),
Avg("tele_fuel_high_score"),
Avg("tele_gears"),
Sum("foul"),
Sum("tech_foul"),
Sum("yellow_card"),
Sum("red_card"),
rope__avg=Avg(Case(When(rope=True, then=1), When(rope=False, then=0))),
baseline__avg=Avg(Case(When(auto_baseline=True, then=1), When(auto_baseline=False, then=0))),
)
# Format all of the numbers. If we haven't scouted the team, None will be returned. Turn that into NA
for key in metrics:
if metrics[key] == None:
metrics[key] = "NA"
elif "__avg" in key:
metrics[key] = "{:10.2f}".format(metrics[key])
if metrics['tele_fuel_high_score__avg'] != "NA":
metrics['auto_fuel_high_misses__avg'] = float(metrics['auto_fuel_high_shots__avg']) - float(metrics['auto_fuel_high_score__avg'])
metrics['tele_fuel_high_misses__avg'] = float(metrics['tele_fuel_high_shots__avg']) - float(metrics['tele_fuel_high_score__avg'])
else:
metrics['auto_fuel_high_misses__avg'] = "NA"
metrics['tele_fuel_high_misses__avg'] = "NA"
return metrics
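# Illustrative call (hypothetical team object and event code, for reference only):
#
#     metrics = get_team_metrics(team, "nyro")
#     metrics["tele_gears__avg"]  # formatted average, or "NA" if never scouted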
| mit | -8,228,313,592,155,410,000 | 41.295455 | 138 | 0.475551 | false |
bringingheavendown/numpy | numpy/distutils/fcompiler/g95.py | 229 | 1379 | # http://g95.sourceforge.net/
from __future__ import division, absolute_import, print_function
from numpy.distutils.fcompiler import FCompiler
compilers = ['G95FCompiler']
class G95FCompiler(FCompiler):
compiler_type = 'g95'
description = 'G95 Fortran Compiler'
# version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95!\) (?P<version>.*)\).*'
# $ g95 --version
# G95 (GCC 4.0.3 (g95!) May 22 2006)
version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95 (?P<version>.*)!\) (?P<date>.*)\).*'
# $ g95 --version
# G95 (GCC 4.0.3 (g95 0.90!) Aug 22 2006)
executables = {
'version_cmd' : ["<F90>", "--version"],
'compiler_f77' : ["g95", "-ffixed-form"],
'compiler_fix' : ["g95", "-ffixed-form"],
'compiler_f90' : ["g95"],
'linker_so' : ["<F90>", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
pic_flags = ['-fpic']
module_dir_switch = '-fmod='
module_include_switch = '-I'
def get_flags(self):
return ['-fno-second-underscore']
def get_flags_opt(self):
return ['-O']
def get_flags_debug(self):
return ['-g']
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
compiler = G95FCompiler()
compiler.customize()
print(compiler.get_version())
| bsd-3-clause | 1,675,645,612,454,257,400 | 29.644444 | 105 | 0.542422 | false |
liyitest/rr | openstack_dashboard/api/ceilometer.py | 13 | 49091 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading
from ceilometerclient import client as ceilometer_client
from django.conf import settings
from django.utils import datastructures
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import keystone
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)
def get_flavor_names(request):
# TODO(lsmola) The flavors can be set per project,
# so it should show only valid ones.
try:
flavors = nova.flavor_list(request, None)
return [f.name for f in flavors]
except Exception:
return ['m1.tiny', 'm1.small', 'm1.medium',
'm1.large', 'm1.xlarge']
def is_iterable(var):
"""Return True if the given is list or tuple."""
return (isinstance(var, (list, tuple)) or
issubclass(var.__class__, (list, tuple)))
def make_query(user_id=None, tenant_id=None, resource_id=None,
user_ids=None, tenant_ids=None, resource_ids=None):
"""Returns query built from given parameters.
This query can be then used for querying resources, meters and
statistics.
:Parameters:
- `user_id`: user_id, has a priority over list of ids
- `tenant_id`: tenant_id, has a priority over list of ids
- `resource_id`: resource_id, has a priority over list of ids
- `user_ids`: list of user_ids
- `tenant_ids`: list of tenant_ids
- `resource_ids`: list of resource_ids
"""
user_ids = user_ids or []
tenant_ids = tenant_ids or []
resource_ids = resource_ids or []
query = []
if user_id:
user_ids = [user_id]
for u_id in user_ids:
query.append({"field": "user_id", "op": "eq", "value": u_id})
if tenant_id:
tenant_ids = [tenant_id]
for t_id in tenant_ids:
query.append({"field": "project_id", "op": "eq", "value": t_id})
if resource_id:
resource_ids = [resource_id]
for r_id in resource_ids:
query.append({"field": "resource_id", "op": "eq", "value": r_id})
return query
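# For reference, make_query(tenant_id='p1', resource_ids=['r1', 'r2']) returns:
#   [{"field": "project_id", "op": "eq", "value": "p1"},
#    {"field": "resource_id", "op": "eq", "value": "r1"},
#    {"field": "resource_id", "op": "eq", "value": "r2"}]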
class Meter(base.APIResourceWrapper):
"""Represents one Ceilometer meter."""
_attrs = ['name', 'type', 'unit', 'resource_id', 'user_id', 'project_id']
def __init__(self, apiresource):
super(Meter, self).__init__(apiresource)
self._label = self.name
self._description = ""
def augment(self, label=None, description=None):
if label:
self._label = label
if description:
self._description = description
@property
def description(self):
return self._description
@property
def label(self):
return self._label
class Resource(base.APIResourceWrapper):
"""Represents one Ceilometer resource."""
_attrs = ['resource_id', 'source', 'user_id', 'project_id', 'metadata',
'links']
def __init__(self, apiresource, ceilometer_usage=None):
super(Resource, self).__init__(apiresource)
# Save empty strings to IDs rather than None, so it gets
# serialized correctly. We don't want 'None' strings.
self.project_id = self.project_id or ""
self.user_id = self.user_id or ""
self.resource_id = self.resource_id or ""
self._id = "%s__%s__%s" % (self.project_id,
self.user_id,
self.resource_id)
# Meters with statistics data
self._meters = {}
# TODO(lsmola) make parallel obtaining of tenant and user
# make the threading here, thread join into resource_list
if ceilometer_usage and self.project_id:
self._tenant = ceilometer_usage.get_tenant(self.project_id)
else:
self._tenant = None
if ceilometer_usage and self.user_id:
self._user = ceilometer_usage.get_user(self.user_id)
else:
self._user = None
self._query = make_query(tenant_id=self.project_id,
user_id=self.user_id,
resource_id=self.resource_id)
@property
def name(self):
name = self.metadata.get("name", None)
display_name = self.metadata.get("display_name", None)
return name or display_name or ""
@property
def id(self):
return self._id
@property
def tenant(self):
return self._tenant
@property
def user(self):
return self._user
@property
def resource(self):
return self.resource_id
@property
def query(self):
return self._query
@property
def meters(self):
return self._meters
def get_meter(self, meter_name):
return self._meters.get(meter_name, None)
def set_meter(self, meter_name, value):
self._meters[meter_name] = value
class ResourceAggregate(Resource):
"""Represents aggregate of more resources together.
    An aggregate of resources can be obtained by specifying
    multiple ids in one parameter, or by omitting a parameter
    entirely.
    It can also be specified directly by a query.
Example:
We can obtain an aggregate of resources by specifying
multiple resource_ids in resource_id parameter in init.
Or we can specify only tenant_id, which will return
all resources of that tenant.
"""
def __init__(self, tenant_id=None, user_id=None, resource_id=None,
tenant_ids=None, user_ids=None, resource_ids=None,
ceilometer_usage=None, query=None, identifier=None):
self._id = identifier
self.tenant_id = None
self.user_id = None
self.resource_id = None
# Meters with statistics data
self._meters = {}
if query:
self._query = query
else:
# TODO(lsmola) make parallel obtaining of tenant and user
# make the threading here, thread join into resource_list
if ceilometer_usage and tenant_id:
self.tenant_id = tenant_id
self._tenant = ceilometer_usage.get_tenant(tenant_id)
else:
self._tenant = None
if ceilometer_usage and user_id:
self.user_id = user_id
self._user = ceilometer_usage.get_user(user_id)
else:
self._user = None
if resource_id:
self.resource_id = resource_id
self._query = make_query(tenant_id=tenant_id, user_id=user_id,
resource_id=resource_id,
tenant_ids=tenant_ids,
user_ids=user_ids,
resource_ids=resource_ids)
@property
def id(self):
return self._id
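# A minimal usage sketch (ids are made up and `usage` stands for a
# CeilometerUsage instance): an aggregate of everything one tenant owns, and
# one defined purely by an explicit query:
#   ResourceAggregate(tenant_id="t1", ceilometer_usage=usage, identifier="t1")
#   ResourceAggregate(query=make_query(tenant_ids=["t1", "t2"]), identifier="x")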
class Sample(base.APIResourceWrapper):
"""Represents one Ceilometer sample."""
_attrs = ['counter_name', 'user_id', 'resource_id', 'timestamp',
'resource_metadata', 'source', 'counter_unit', 'counter_volume',
              'project_id', 'counter_type']
@property
def instance(self):
display_name = self.resource_metadata.get('display_name', None)
instance_id = self.resource_metadata.get('instance_id', None)
return display_name or instance_id
@property
def name(self):
name = self.resource_metadata.get("name", None)
display_name = self.resource_metadata.get("display_name", None)
return name or display_name or ""
class Statistic(base.APIResourceWrapper):
"""Represents one Ceilometer statistic."""
_attrs = ['period', 'period_start', 'period_end',
'count', 'min', 'max', 'sum', 'avg',
'duration', 'duration_start', 'duration_end']
@memoized
def ceilometerclient(request):
"""Initialization of Ceilometer client."""
endpoint = base.url_for(request, 'metering')
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
return ceilometer_client.Client('2', endpoint,
token=(lambda: request.user.token.id),
insecure=insecure,
cacert=cacert)
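# Note: the token is handed to the client as a callable rather than a fixed
# string, presumably so the current token id is read when requests are made.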
def resource_list(request, query=None, ceilometer_usage_object=None):
"""List the resources."""
resources = ceilometerclient(request).resources.list(q=query)
return [Resource(r, ceilometer_usage_object) for r in resources]
def sample_list(request, meter_name, query=None):
"""List the samples for this meters."""
samples = ceilometerclient(request).samples.list(meter_name=meter_name,
q=query)
return [Sample(s) for s in samples]
def meter_list(request, query=None):
"""List the user's meters."""
meters = ceilometerclient(request).meters.list(query)
return [Meter(m) for m in meters]
def statistic_list(request, meter_name, query=None, period=None):
"""List of statistics."""
statistics = ceilometerclient(request).\
statistics.list(meter_name=meter_name, q=query, period=period)
return [Statistic(s) for s in statistics]
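# A minimal usage sketch, assuming `request` is the current Django request and
# using a made-up meter and tenant id:
#   stats = statistic_list(request, "cpu_util",
#                          query=make_query(tenant_id="t1"), period=3600)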
class ThreadedUpdateResourceWithStatistics(threading.Thread):
"""Multithread wrapper for update_with_statistics method of
resource_usage.
    The join logic is placed in the process_list class method. All resources
    will have their statistics attributes filled in separate threads.
The resource_usage object is shared between threads. Each thread is
updating one Resource.
:Parameters:
- `resource`: Resource or ResourceAggregate object, that will
be filled by statistic data.
- `resources`: List of Resource or ResourceAggregate object,
that will be filled by statistic data.
- `resource_usage`: Wrapping resource usage object, that holds
all statistics data.
- `meter_names`: List of meter names of the statistics we want.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will be
returned, divided into given periods. Periods with no
data are ignored.
- `stats_attr`: String representing the attribute name of the stats.
                    E.g. (avg, max, min...) If None is given, the whole
                    statistic object is returned.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
# TODO(lsmola) Can be removed once Ceilometer supports sample-api
# and group-by, so all of this optimization will not be necessary.
# It is planned somewhere to I.
def __init__(self, resource_usage, resource, meter_names=None,
period=None, filter_func=None, stats_attr=None,
additional_query=None):
super(ThreadedUpdateResourceWithStatistics, self).__init__()
self.resource_usage = resource_usage
self.resource = resource
self.meter_names = meter_names
self.period = period
self.stats_attr = stats_attr
self.additional_query = additional_query
def run(self):
# Run the job
self.resource_usage.update_with_statistics(
self.resource,
meter_names=self.meter_names, period=self.period,
stats_attr=self.stats_attr, additional_query=self.additional_query)
@classmethod
def process_list(cls, resource_usage, resources, meter_names=None,
period=None, filter_func=None, stats_attr=None,
additional_query=None):
threads = []
for resource in resources:
# add statistics data into resource
thread = cls(resource_usage, resource, meter_names=meter_names,
period=period, stats_attr=stats_attr,
additional_query=additional_query)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
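# A minimal usage sketch (meter names are made up): the join above means this
# call blocks until every resource in `resources` has its meters filled in:
#   ThreadedUpdateResourceWithStatistics.process_list(
#       ceilometer_usage, resources,
#       meter_names=["cpu_util"], period=3600, stats_attr="avg")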
class CeilometerUsage(object):
"""Represents wrapper of any Ceilometer queries.
One instance of this class should be shared between resources
as this class provides a place where users and tenants are
    cached. So there are no duplicate queries to the API.
This class also wraps Ceilometer API calls and provides parallel
HTTP calls to API.
    This class should also serve as a reasonable abstraction that can
    absorb a large amount of future Ceilometer service optimization
    without changing the interface.
"""
def __init__(self, request):
self._request = request
# Cached users and tenants.
self._users = {}
self._tenants = {}
def get_user(self, user_id):
"""Returns user fetched from API.
        Caching the result, so it doesn't contact the API twice with the
same query.
"""
user = self._users.get(user_id, None)
if not user:
user = keystone.user_get(self._request, user_id)
# caching the user, for later use
self._users[user_id] = user
return user
def preload_all_users(self):
"""Preloads all users into dictionary.
It's more effective to preload all users, rather than fetching many
users by separate API get calls.
"""
users = keystone.user_list(self._request)
        # Cache all users on the right indexes; this is more efficient than
        # obtaining a large number of users one by one via keystone.user_get
for u in users:
self._users[u.id] = u
def get_tenant(self, tenant_id):
"""Returns tenant fetched from API.
        Caching the result, so it doesn't contact the API twice with the
same query.
"""
tenant = self._tenants.get(tenant_id, None)
if not tenant:
tenant = keystone.tenant_get(self._request, tenant_id)
# caching the tenant for later use
self._tenants[tenant_id] = tenant
return tenant
def preload_all_tenants(self):
"""Preloads all tenants into dictionary.
It's more effective to preload all tenants, rather than fetching each
tenant by separate API get calls.
"""
tenants, more = keystone.tenant_list(self._request)
        # Cache all tenants on the right indexes; this is more efficient than
        # obtaining a large number of tenants one by one via keystone.tenant_get
for t in tenants:
self._tenants[t.id] = t
def global_data_get(self, used_cls=None, query=None,
with_statistics=False, additional_query=None,
with_users_and_tenants=True):
"""Obtaining a resources for table view.
It obtains resources with statistics data according to declaration
in used_cls class.
:Parameters:
        - `used_cls`: Class wrapper for usage data. It acts as a wrapper for
settings needed. See the call of this method for
details.
- `query`: Explicit query definition for fetching the resources. If
no query is provided, it takes a default_query from
used_cls. If no default query is provided, it fetches
all the resources and filters them by meters defined
in used_cls.
        - `with_statistics`: Define whether statistics data from the meters
defined in used_cls should be fetched.
Can be used to first obtain only the pure
resources, then with the statistics data by
AJAX.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
default_query = used_cls.default_query
query = query or default_query
filter_func = None
def filter_resources(resource):
"""Method for filtering resources by their links.rel attr.
The links.rel attributes contain all meters the resource has.
"""
for link in resource.links:
if link['rel'] in used_cls.meters:
return True
return False
if not query:
            # Not all resource types can be obtained by query; if there is no
            # query, we filter all resources with this function.
filter_func = filter_resources
if with_statistics:
# Will add statistic data into resources.
resources = self.resources_with_statistics(
query,
used_cls.meters,
filter_func=filter_func,
stats_attr=used_cls.stats_attr,
additional_query=additional_query,
with_users_and_tenants=with_users_and_tenants)
else:
# Will load only resources without statistical data.
resources = self.resources(
query, filter_func=filter_func,
with_users_and_tenants=with_users_and_tenants)
return [used_cls(resource) for resource in resources]
def query_from_object_id(self, object_id):
"""Obtaining a query from resource id.
        The query can then be used to identify a resource in resources or meters
        API calls. The ID is built in the Resource initializer, or returned
by Datatable into UpdateRow functionality.
"""
try:
tenant_id, user_id, resource_id = object_id.split("__")
except ValueError:
return []
return make_query(tenant_id=tenant_id, user_id=user_id,
resource_id=resource_id)
def update_with_statistics(self, resource, meter_names=None, period=None,
stats_attr=None, additional_query=None):
"""Adding statistical data into one Resource or ResourceAggregate.
        It adds a statistic for each meter in meter_names to the resource
        attributes. The attribute name is the meter name with '.' replaced by '_'.
:Parameters:
- `resource`: Resource or ResourceAggregate object, that will
be filled by statistic data.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
                    statistic is returned. If given, a faceted result will be
                    returned, divided into given periods. Periods with no
data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
if not meter_names:
raise ValueError("meter_names and resources must be defined to be "
"able to obtain the statistics.")
# query for identifying one resource in meters
query = resource.query
if additional_query:
if not is_iterable(additional_query):
raise ValueError("Additional query must be list of"
" conditions. See the docs for format.")
query = query + additional_query
        # TODO(lsmola) a thread for each meter will probably be overkill,
        # but I should test, let's say, a thread pool with 100 threads
# and apply it only to this code.
# Though I do expect Ceilometer will support bulk requests,
# so all of this optimization will not be necessary.
for meter in meter_names:
statistics = statistic_list(self._request, meter,
query=query, period=period)
meter = meter.replace(".", "_")
if statistics:
if stats_attr:
# I want to load only a specific attribute
resource.set_meter(
meter,
getattr(statistics[0], stats_attr, None))
else:
                    # I want the whole list of statistics
resource.set_meter(meter, statistics)
else:
resource.set_meter(meter, None)
return resource
def resources(self, query=None, filter_func=None,
with_users_and_tenants=False):
"""Obtaining resources with the query or filter_func.
        Obtains resources and also fetches tenants and users associated
with those resources if with_users_and_tenants flag is true.
:Parameters:
- `query`: Query for fetching the Ceilometer Resources.
- `filter_func`: Callable for filtering of the obtained
resources.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
if with_users_and_tenants:
ceilometer_usage_object = self
else:
ceilometer_usage_object = None
resources = resource_list(
self._request,
query=query, ceilometer_usage_object=ceilometer_usage_object)
if filter_func:
resources = [resource for resource in resources if
filter_func(resource)]
return resources
def resources_with_statistics(self, query=None, meter_names=None,
period=None, filter_func=None,
stats_attr=None, additional_query=None,
with_users_and_tenants=False):
"""Obtaining resources with statistics data inside.
:Parameters:
- `query`: Query for fetching the Ceilometer Resources.
- `filter_func`: Callable for filtering of the obtained
resources.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will
be returned, divided into given periods. Periods with
no data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
resources = self.resources(
query, filter_func=filter_func,
with_users_and_tenants=with_users_and_tenants)
ThreadedUpdateResourceWithStatistics.process_list(
self, resources,
meter_names=meter_names, period=period, stats_attr=stats_attr,
additional_query=additional_query)
return resources
def resource_aggregates(self, queries=None):
"""Obtaining resource aggregates with queries.
        Representing a resource aggregate by a query is the most general way
        to obtain resource aggregates.
:Parameters:
- `queries`: Dictionary of named queries that defines a bulk of
resource aggregates.
"""
resource_aggregates = []
for identifier, query in queries.items():
resource_aggregates.append(ResourceAggregate(query=query,
ceilometer_usage=None,
identifier=identifier))
return resource_aggregates
def resource_aggregates_with_statistics(self, queries=None,
meter_names=None, period=None,
filter_func=None, stats_attr=None,
additional_query=None):
"""Obtaining resource aggregates with statistics data inside.
:Parameters:
- `queries`: Dictionary of named queries that defines a bulk of
resource aggregates.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will
be returned, divided into given periods. Periods with
no data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
resource_aggregates = self.resource_aggregates(queries)
ThreadedUpdateResourceWithStatistics.process_list(
self,
resource_aggregates, meter_names=meter_names, period=period,
stats_attr=stats_attr, additional_query=additional_query)
return resource_aggregates
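# A minimal usage sketch (UsageTableWrapper is a made-up stand-in for the
# "used_cls" declarations defined by the dashboards):
#   usage = CeilometerUsage(request)
#   usage.preload_all_users()
#   usage.preload_all_tenants()
#   rows = usage.global_data_get(used_cls=UsageTableWrapper,
#                                with_statistics=True)
#   query = usage.query_from_object_id("tenant__user__resource")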
def diff_lists(a, b):
if not a:
return []
elif not b:
return a
else:
return list(set(a) - set(b))
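# Illustrative note: diff_lists() goes through set(), so the result order is
# not guaranteed, e.g.
#   diff_lists(["cpu", "memory", "vcpus"], ["memory"])
#   # -> ["cpu", "vcpus"] (in arbitrary order)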
class Meters(object):
"""Class for listing of available meters.
    It lists the meters defined in this class that are available
    in the Ceilometer meter_list.
    It also stores information that is not available in Ceilometer, i.e.
    label and description.
"""
def __init__(self, request=None, ceilometer_meter_list=None):
# Storing the request.
self._request = request
# Storing the Ceilometer meter list
if ceilometer_meter_list:
self._ceilometer_meter_list = ceilometer_meter_list
else:
try:
self._ceilometer_meter_list = meter_list(request)
except Exception:
self._ceilometer_meter_list = []
exceptions.handle(self._request,
_('Unable to retrieve Ceilometer meter '
'list.'))
# Storing the meters info categorized by their services.
self._nova_meters_info = self._get_nova_meters_info()
self._neutron_meters_info = self._get_neutron_meters_info()
self._glance_meters_info = self._get_glance_meters_info()
self._cinder_meters_info = self._get_cinder_meters_info()
self._swift_meters_info = self._get_swift_meters_info()
self._kwapi_meters_info = self._get_kwapi_meters_info()
self._ipmi_meters_info = self._get_ipmi_meters_info()
# Storing the meters info of all services together.
all_services_meters = (self._nova_meters_info,
self._neutron_meters_info,
self._glance_meters_info,
self._cinder_meters_info,
self._swift_meters_info,
self._kwapi_meters_info,
self._ipmi_meters_info)
self._all_meters_info = {}
for service_meters in all_services_meters:
self._all_meters_info.update(dict([(meter_name, meter_info)
for meter_name, meter_info
in service_meters.items()]))
# Here will be the cached Meter objects, that will be reused for
# repeated listing.
self._cached_meters = {}
def list_all(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names.
:Parameters:
- `only_meters`: The list of meter names we want to show.
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=only_meters,
except_meters=except_meters)
def list_nova(self, except_meters=None):
"""Returns a list of meters tied to nova.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._nova_meters_info.keys(),
except_meters=except_meters)
def list_neutron(self, except_meters=None):
"""Returns a list of meters tied to neutron.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._neutron_meters_info.keys(),
except_meters=except_meters)
def list_glance(self, except_meters=None):
"""Returns a list of meters tied to glance.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._glance_meters_info.keys(),
except_meters=except_meters)
def list_cinder(self, except_meters=None):
"""Returns a list of meters tied to cinder.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._cinder_meters_info.keys(),
except_meters=except_meters)
def list_swift(self, except_meters=None):
"""Returns a list of meters tied to swift.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._swift_meters_info.keys(),
except_meters=except_meters)
def list_kwapi(self, except_meters=None):
"""Returns a list of meters tied to kwapi.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._kwapi_meters_info.keys(),
except_meters=except_meters)
def list_ipmi(self, except_meters=None):
"""Returns a list of meters tied to ipmi
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._ipmi_meters_info.keys(),
except_meters=except_meters)
def _list(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names.
:Parameters:
- `only_meters`: The list of meter names we want to show.
- `except_meters`: The list of meter names we don't want to show.
"""
# Get all wanted meter names.
if only_meters:
meter_names = only_meters
else:
meter_names = [meter_name for meter_name
in self._all_meters_info.keys()]
meter_names = diff_lists(meter_names, except_meters)
# Collect meters for wanted meter names.
return self._get_meters(meter_names)
def _get_meters(self, meter_names):
"""Obtain meters based on meter_names.
The meters that do not exist in Ceilometer meter list are left out.
:Parameters:
- `meter_names`: A list of meter names we want to fetch.
"""
meters = []
for meter_name in meter_names:
meter = self._get_meter(meter_name)
if meter:
meters.append(meter)
return meters
def _get_meter(self, meter_name):
"""Obtains a meter.
Obtains meter either from cache or from Ceilometer meter list
joined with statically defined meter info like label and description.
:Parameters:
- `meter_name`: A meter name we want to fetch.
"""
meter = self._cached_meters.get(meter_name, None)
if not meter:
meter_candidates = [m for m in self._ceilometer_meter_list
if m.name == meter_name]
if meter_candidates:
meter_info = self._all_meters_info.get(meter_name, None)
if meter_info:
label = meter_info["label"]
description = meter_info["description"]
else:
label = ""
description = ""
meter = meter_candidates[0]
meter.augment(label=label, description=description)
self._cached_meters[meter_name] = meter
return meter
def _get_nova_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
        # TODO(lsmola) Unless Ceilometer provides the information
        # below, I need to define it statically here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters; hopefully
        # some day it will all be supported.
meters_info = datastructures.SortedDict([
("instance", {
'label': '',
'description': _("Existence of instance"),
}),
("instance:<type>", {
'label': '',
'description': _("Existence of instance <type> "
"(openstack types)"),
}),
("memory", {
'label': '',
'description': _("Volume of RAM"),
}),
("memory.usage", {
'label': '',
'description': _("Volume of RAM used"),
}),
("cpu", {
'label': '',
'description': _("CPU time used"),
}),
("cpu_util", {
'label': '',
'description': _("Average CPU utilization"),
}),
("vcpus", {
'label': '',
'description': _("Number of VCPUs"),
}),
("disk.read.requests", {
'label': '',
'description': _("Number of read requests"),
}),
("disk.write.requests", {
'label': '',
'description': _("Number of write requests"),
}),
("disk.read.bytes", {
'label': '',
'description': _("Volume of reads"),
}),
("disk.write.bytes", {
'label': '',
'description': _("Volume of writes"),
}),
("disk.read.requests.rate", {
'label': '',
'description': _("Average rate of read requests"),
}),
("disk.write.requests.rate", {
'label': '',
'description': _("Average rate of write requests"),
}),
("disk.read.bytes.rate", {
'label': '',
'description': _("Average rate of reads"),
}),
("disk.write.bytes.rate", {
'label': '',
'description': _("Average volume of writes"),
}),
("disk.root.size", {
'label': '',
'description': _("Size of root disk"),
}),
("disk.ephemeral.size", {
'label': '',
'description': _("Size of ephemeral disk"),
}),
("network.incoming.bytes", {
'label': '',
'description': _("Number of incoming bytes "
"on the network for a VM interface"),
}),
("network.outgoing.bytes", {
'label': '',
'description': _("Number of outgoing bytes "
"on the network for a VM interface"),
}),
("network.incoming.packets", {
'label': '',
'description': _("Number of incoming "
"packets for a VM interface"),
}),
("network.outgoing.packets", {
'label': '',
'description': _("Number of outgoing "
"packets for a VM interface"),
}),
("network.incoming.bytes.rate", {
'label': '',
'description': _("Average rate per sec of incoming "
"bytes on a VM network interface"),
}),
("network.outgoing.bytes.rate", {
'label': '',
'description': _("Average rate per sec of outgoing "
"bytes on a VM network interface"),
}),
("network.incoming.packets.rate", {
'label': '',
'description': _("Average rate per sec of incoming "
"packets on a VM network interface"),
}),
("network.outgoing.packets.rate", {
'label': '',
'description': _("Average rate per sec of outgoing "
"packets on a VM network interface"),
}),
])
# Adding flavor based meters into meters_info dict
        # TODO(lsmola) this kind of meter will probably be deprecated
# https://bugs.launchpad.net/ceilometer/+bug/1208365 . Delete it then.
for flavor in get_flavor_names(self._request):
name = 'instance:%s' % flavor
meters_info[name] = dict(meters_info["instance:<type>"])
meters_info[name]['description'] = (
_('Duration of instance type %s (openstack flavor)') %
flavor)
# TODO(lsmola) allow to set specific in local_settings. For all meters
# because users can have their own agents and meters.
return meters_info
def _get_neutron_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
        # TODO(lsmola) Unless Ceilometer provides the information
        # below, I need to define it statically here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters; hopefully
        # some day it will all be supported.
return datastructures.SortedDict([
('network', {
'label': '',
'description': _("Existence of network"),
}),
('network.create', {
'label': '',
'description': _("Creation requests for this network"),
}),
('network.update', {
'label': '',
'description': _("Update requests for this network"),
}),
('subnet', {
'label': '',
'description': _("Existence of subnet"),
}),
('subnet.create', {
'label': '',
'description': _("Creation requests for this subnet"),
}),
('subnet.update', {
'label': '',
'description': _("Update requests for this subnet"),
}),
('port', {
'label': '',
'description': _("Existence of port"),
}),
('port.create', {
'label': '',
'description': _("Creation requests for this port"),
}),
('port.update', {
'label': '',
'description': _("Update requests for this port"),
}),
('router', {
'label': '',
'description': _("Existence of router"),
}),
('router.create', {
'label': '',
'description': _("Creation requests for this router"),
}),
('router.update', {
'label': '',
'description': _("Update requests for this router"),
}),
('ip.floating', {
'label': '',
'description': _("Existence of floating ip"),
}),
('ip.floating.create', {
'label': '',
'description': _("Creation requests for this floating ip"),
}),
('ip.floating.update', {
'label': '',
'description': _("Update requests for this floating ip"),
}),
])
def _get_glance_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
        # TODO(lsmola) Unless Ceilometer provides the information
        # below, I need to define it statically here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters; hopefully
        # some day it will all be supported.
return datastructures.SortedDict([
('image', {
'label': '',
'description': _("Image existence check"),
}),
('image.size', {
'label': '',
'description': _("Uploaded image size"),
}),
('image.update', {
'label': '',
'description': _("Number of image updates"),
}),
('image.upload', {
'label': '',
'description': _("Number of image uploads"),
}),
('image.delete', {
'label': '',
'description': _("Number of image deletions"),
}),
('image.download', {
'label': '',
'description': _("Image is downloaded"),
}),
('image.serve', {
'label': '',
'description': _("Image is served out"),
}),
])
def _get_cinder_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
        # TODO(lsmola) Unless Ceilometer provides the information
        # below, I need to define it statically here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters; hopefully
        # some day it will all be supported.
return datastructures.SortedDict([
('volume', {
'label': '',
'description': _("Existence of volume"),
}),
('volume.size', {
'label': '',
'description': _("Size of volume"),
}),
])
def _get_swift_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
        # TODO(lsmola) Unless Ceilometer provides the information
        # below, I need to define it statically here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters; hopefully
        # some day it will all be supported.
return datastructures.SortedDict([
('storage.objects', {
'label': '',
'description': _("Number of objects"),
}),
('storage.objects.size', {
'label': '',
'description': _("Total size of stored objects"),
}),
('storage.objects.containers', {
'label': '',
'description': _("Number of containers"),
}),
('storage.objects.incoming.bytes', {
'label': '',
'description': _("Number of incoming bytes"),
}),
('storage.objects.outgoing.bytes', {
'label': '',
'description': _("Number of outgoing bytes"),
}),
('storage.api.request', {
'label': '',
'description': _("Number of API requests against swift"),
}),
])
def _get_kwapi_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
        # TODO(lsmola) Unless Ceilometer provides the information
        # below, I need to define it statically here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters; hopefully
        # some day it will all be supported.
return datastructures.SortedDict([
('energy', {
'label': '',
'description': _("Amount of energy"),
}),
('power', {
'label': '',
'description': _("Power consumption"),
}),
])
def _get_ipmi_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
        # TODO(lsmola) Unless Ceilometer provides the information
        # below, I need to define it statically here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters; hopefully
        # some day it will all be supported.
return datastructures.SortedDict([
('hardware.ipmi.node.power', {
'label': '',
'description': _("System Current Power"),
}),
('hardware.ipmi.fan', {
'label': '',
'description': _("Fan RPM"),
}),
('hardware.ipmi.temperature', {
'label': '',
'description': _("Sensor Temperature Reading"),
}),
('hardware.ipmi.current', {
'label': '',
'description': _("Sensor Current Reading"),
}),
('hardware.ipmi.voltage', {
'label': '',
'description': _("Sensor Voltage Reading"),
}),
('hardware.ipmi.node.inlet_temperature', {
'label': '',
'description': _("System Inlet Temperature Reading"),
}),
('hardware.ipmi.node.outlet_temperature', {
'label': '',
'description': _("System Outlet Temperature Reading"),
}),
('hardware.ipmi.node.airflow', {
'label': '',
'description': _("System Airflow Reading"),
}),
('hardware.ipmi.node.cups', {
'label': '',
'description': _("System CUPS Reading"),
}),
('hardware.ipmi.node.cpu_util', {
'label': '',
'description': _("System CPU Utility Reading"),
}),
('hardware.ipmi.node.mem_util', {
'label': '',
'description': _("System Memory Utility Reading"),
}),
('hardware.ipmi.node.io_util', {
'label': '',
'description': _("System IO Utility Reading"),
}),
])
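# A minimal usage sketch (meter names are made up):
#   meters = Meters(request)
#   nova_meters = meters.list_nova(except_meters=["vcpus"])
#   everything_else = meters.list_all(except_meters=["energy", "power"])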
| apache-2.0 | 922,808,792,004,095,200 | 36.588821 | 79 | 0.539203 | false |
vstoykov/django-cms | cms/migrations/0002_auto_start.py | 525 | 20033 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
# Dummy migration
pass
def backwards(self, orm):
# Dummy migration
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'})
},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'codename': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['contenttypes.ContentType']"}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [],
{'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False',
'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'password': (
'django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': (
'django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True',
'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['sites.Site']",
'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')",
'object_name': 'Page'},
'changed_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'created_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'limit_visibility_in_menu': (
'django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True',
'blank': 'True'}),
'login_required': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80',
'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'",
'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']",
'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'published': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'publisher_public': (
'django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True',
'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True',
'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'template': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')",
'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [],
{'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [],
{'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': (
'django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'",
'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['%s']" % user_orm_label, 'unique': 'True',
'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'",
'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.Group']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': (
'django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)",
'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200',
'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': (
'django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': (
'django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"},
'app_label': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site',
'db_table': "'django_site'"},
'domain': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
| bsd-3-clause | -526,735,759,091,465,660 | 54.187328 | 93 | 0.456996 | false |
wevial/bpython | bpython/test/test_line_properties.py | 2 | 10588 | import re
from bpython.test import unittest
from bpython.line import current_word, current_dict_key, current_dict, \
current_string, current_object, current_object_attribute, \
current_from_import_from, current_from_import_import, current_import, \
current_method_definition_name, current_single_word, \
current_string_literal_attr
def cursor(s):
"""'ab|c' -> (2, 'abc')"""
cursor_offset = s.index('|')
line = s[:cursor_offset] + s[cursor_offset+1:]
return cursor_offset, line
def decode(s):
"""'a<bd|c>d' -> ((3, 'abcd'), (1, 3, 'bdc'))"""
if not s.count('|') == 1:
raise ValueError('match helper needs | to occur once')
if s.count('<') != s.count('>') or not s.count('<') in (0, 1):
raise ValueError('match helper needs <, and > to occur just once')
matches = list(re.finditer(r'[<>|]', s))
assert len(matches) in [1, 3], [m.group() for m in matches]
d = {}
for i, m in enumerate(matches):
d[m.group(0)] = m.start() - i
s = s[:m.start() - i] + s[m.end() - i:]
assert len(d) in [1, 3], 'need all the parts just once! %r' % d
if '<' in d:
return (d['|'], s), (d['<'], d['>'], s[d['<']:d['>']])
else:
return (d['|'], s), None
def line_with_cursor(cursor_offset, line):
return line[:cursor_offset] + '|' + line[cursor_offset:]
def encode(cursor_offset, line, result):
"""encode(3, 'abdcd', (1, 3, 'bdc')) -> a<bd|c>d'
Written for prettier assert error messages
"""
encoded_line = line_with_cursor(cursor_offset, line)
if result is None:
return encoded_line
start, end, value = result
assert line[start:end] == value
if start < cursor_offset:
encoded_line = encoded_line[:start] + '<' + encoded_line[start:]
else:
encoded_line = encoded_line[:start+1] + '<' + encoded_line[start+1:]
if end < cursor_offset:
encoded_line = encoded_line[:end+1] + '>' + encoded_line[end+1:]
else:
encoded_line = encoded_line[:end+2] + '>' + encoded_line[end+2:]
return encoded_line
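# Illustrative round-trip of the little language, matching the tests below:
#   decode('a<bd|c>d')                 # -> ((3, 'abdcd'), (1, 4, 'bdc'))
#   encode(3, 'abdcd', (1, 4, 'bdc'))  # -> 'a<bd|c>d'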
class LineTestCase(unittest.TestCase):
def assertAccess(self, s):
r"""Asserts that self.func matches as described
by s, which uses a little language to describe matches:
        abcd<efg>hijklmnopqrstuvwx|yz
             /|\                 /|\
              |                   |
        the function should    the current cursor position
        match this "efg"       is between the x and y
"""
(cursor_offset, line), match = decode(s)
result = self.func(cursor_offset, line)
self.assertEqual(
result, match,
"%s(%r) result\n%r (%r) doesn't match expected\n%r (%r)" % (
self.func.__name__, line_with_cursor(cursor_offset, line),
encode(cursor_offset, line, result), result, s, match))
class TestHelpers(LineTestCase):
def test_I(self):
self.assertEqual(cursor('asd|fgh'), (3, 'asdfgh'))
def test_decode(self):
self.assertEqual(decode('a<bd|c>d'), ((3, 'abdcd'), (1, 4, 'bdc')))
self.assertEqual(decode('a|<bdc>d'), ((1, 'abdcd'), (1, 4, 'bdc')))
self.assertEqual(decode('a<bdc>d|'), ((5, 'abdcd'), (1, 4, 'bdc')))
def test_encode(self):
self.assertEqual(encode(3, 'abdcd', (1, 4, 'bdc')), 'a<bd|c>d')
self.assertEqual(encode(1, 'abdcd', (1, 4, 'bdc')), 'a|<bdc>d')
self.assertEqual(encode(4, 'abdcd', (1, 4, 'bdc')), 'a<bdc|>d')
self.assertEqual(encode(5, 'abdcd', (1, 4, 'bdc')), 'a<bdc>d|')
def test_assert_access(self):
def dumb_func(cursor_offset, line):
return (0, 2, 'ab')
self.func = dumb_func
self.assertAccess('<a|b>d')
class TestCurrentWord(LineTestCase):
def setUp(self):
self.func = current_word
def test_simple(self):
self.assertAccess('|')
self.assertAccess('|asdf')
self.assertAccess('<a|sdf>')
self.assertAccess('<asdf|>')
self.assertAccess('<asdfg|>')
self.assertAccess('asdf + <asdfg|>')
self.assertAccess('<asdfg|> + asdf')
def test_inside(self):
self.assertAccess('<asd|>')
self.assertAccess('<asd|fg>')
def test_dots(self):
self.assertAccess('<Object.attr1|>')
self.assertAccess('<Object.attr1.attr2|>')
self.assertAccess('<Object.att|r1.attr2>')
self.assertAccess('stuff[stuff] + {123: 456} + <Object.attr1.attr2|>')
self.assertAccess('stuff[<asd|fg>]')
self.assertAccess('stuff[asdf[<asd|fg>]')
def test_open_paren(self):
self.assertAccess('<foo(|>')
# documenting current behavior - TODO is this intended?
class TestCurrentDictKey(LineTestCase):
def setUp(self):
self.func = current_dict_key
def test_simple(self):
self.assertAccess('asdf|')
self.assertAccess('asdf|')
self.assertAccess('asdf[<>|')
self.assertAccess('asdf[<>|]')
self.assertAccess('object.dict[<abc|>')
self.assertAccess('asdf|')
self.assertAccess('asdf[<(>|]')
self.assertAccess('asdf[<(1>|]')
self.assertAccess('asdf[<(1,>|]')
self.assertAccess('asdf[<(1, >|]')
self.assertAccess('asdf[<(1, 2)>|]')
# TODO self.assertAccess('d[d[<12|>')
self.assertAccess("d[<'a>|")
class TestCurrentDict(LineTestCase):
def setUp(self):
self.func = current_dict
def test_simple(self):
self.assertAccess('asdf|')
self.assertAccess('asdf|')
self.assertAccess('<asdf>[|')
self.assertAccess('<asdf>[|]')
self.assertAccess('<object.dict>[abc|')
self.assertAccess('asdf|')
class TestCurrentString(LineTestCase):
def setUp(self):
self.func = current_string
def test_closed(self):
self.assertAccess('"<as|df>"')
self.assertAccess('"<asdf|>"')
self.assertAccess('"<|asdf>"')
self.assertAccess("'<asdf|>'")
self.assertAccess("'<|asdf>'")
self.assertAccess("'''<asdf|>'''")
self.assertAccess('"""<asdf|>"""')
self.assertAccess('asdf.afd("a") + "<asdf|>"')
def test_open(self):
self.assertAccess('"<as|df>')
self.assertAccess('"<asdf|>')
self.assertAccess('"<|asdf>')
self.assertAccess("'<asdf|>")
self.assertAccess("'<|asdf>")
self.assertAccess("'''<asdf|>")
self.assertAccess('"""<asdf|>')
self.assertAccess('asdf.afd("a") + "<asdf|>')
class TestCurrentObject(LineTestCase):
def setUp(self):
self.func = current_object
def test_simple(self):
self.assertAccess('<Object>.attr1|')
self.assertAccess('<Object>.|')
self.assertAccess('Object|')
self.assertAccess('Object|.')
self.assertAccess('<Object>.|')
self.assertAccess('<Object.attr1>.attr2|')
self.assertAccess('<Object>.att|r1.attr2')
self.assertAccess('stuff[stuff] + {123: 456} + <Object.attr1>.attr2|')
self.assertAccess('stuff[asd|fg]')
self.assertAccess('stuff[asdf[asd|fg]')
class TestCurrentAttribute(LineTestCase):
def setUp(self):
self.func = current_object_attribute
def test_simple(self):
self.assertAccess('Object.<attr1|>')
self.assertAccess('Object.attr1.<attr2|>')
self.assertAccess('Object.<att|r1>.attr2')
self.assertAccess('stuff[stuff] + {123: 456} + Object.attr1.<attr2|>')
self.assertAccess('stuff[asd|fg]')
self.assertAccess('stuff[asdf[asd|fg]')
self.assertAccess('Object.attr1.<|attr2>')
self.assertAccess('Object.<attr1|>.attr2')
class TestCurrentFromImportFrom(LineTestCase):
def setUp(self):
self.func = current_from_import_from
def test_simple(self):
self.assertAccess('from <sys|> import path')
self.assertAccess('from <sys> import path|')
self.assertAccess('if True|: from sys import path')
self.assertAccess('if True: |from sys import path')
self.assertAccess('if True: from <sys> import p|ath')
self.assertAccess('if True: from sys imp|ort path')
self.assertAccess('if True: from sys import |path')
self.assertAccess('if True: from sys import path.stu|ff')
self.assertAccess('if True: from <sys.path> import sep|')
self.assertAccess('from <os.p|>')
class TestCurrentFromImportImport(LineTestCase):
def setUp(self):
self.func = current_from_import_import
def test_simple(self):
self.assertAccess('from sys import <path|>')
self.assertAccess('from sys import <p|ath>')
self.assertAccess('from sys import |path')
self.assertAccess('from sys| import path')
self.assertAccess('from s|ys import path')
self.assertAccess('from |sys import path')
self.assertAccess('from xml.dom import <N|ode>')
# because syntax error
self.assertAccess('from xml.dom import Node.as|d')
class TestCurrentImport(LineTestCase):
def setUp(self):
self.func = current_import
def test_simple(self):
self.assertAccess('import <path|>')
self.assertAccess('import <p|ath>')
self.assertAccess('import |path')
self.assertAccess('import path, <another|>')
self.assertAccess('import path another|')
self.assertAccess('if True: import <path|>')
self.assertAccess('if True: import <xml.dom.minidom|>')
self.assertAccess('if True: import <xml.do|m.minidom>')
self.assertAccess('if True: import <xml.do|m.minidom> as something')
class TestMethodDefinitionName(LineTestCase):
def setUp(self):
self.func = current_method_definition_name
def test_simple(self):
self.assertAccess('def <foo|>')
self.assertAccess(' def bar(x, y)|:')
self.assertAccess(' def <bar|>(x, y)')
class TestSingleWord(LineTestCase):
def setUp(self):
self.func = current_single_word
def test_simple(self):
self.assertAccess('foo.bar|')
self.assertAccess('.foo|')
self.assertAccess(' <foo|>')
class TestCurrentStringLiteral(LineTestCase):
def setUp(self):
self.func = current_string_literal_attr
def test_simple(self):
self.assertAccess('"hey".<a|>')
self.assertAccess('"hey"|')
self.assertAccess('"hey"|.a')
self.assertAccess('"hey".<a|b>')
self.assertAccess('"hey".asdf d|')
self.assertAccess('"hey".<|>')
if __name__ == '__main__':
unittest.main()
| mit | 3,930,925,309,451,489,300 | 33.154839 | 78 | 0.588402 | false |
waytai/p2pool | wstools/MIMEAttachment.py | 294 | 3379 | #TODO add the license
#I had to rewrite this class because the python MIME email.mime modules
#(version 2.5) are buggy: they use \n instead of \r\n for new lines, which
#is not compliant with the standard!
# http://bugs.python.org/issue5525
#TODO do not load the whole message in memory; stream it from the disk
import re
import random
import sys
#new line
NL='\r\n'
_width = len(repr(sys.maxint-1))
_fmt = '%%0%dd' % _width
class MIMEMessage:
def __init__(self):
self._files = []
self._xmlMessage = ""
self._startCID = ""
self._boundary = ""
def makeBoundary(self):
#create the boundary
msgparts = []
msgparts.append(self._xmlMessage)
for i in self._files:
msgparts.append(i.read())
#this sucks, all in memory
alltext = NL.join(msgparts)
self._boundary = _make_boundary(alltext)
#maybe I can save some memory
del alltext
del msgparts
self._startCID = "<" + (_fmt % random.randrange(sys.maxint)) + (_fmt % random.randrange(sys.maxint)) + ">"
def toString(self):
        '''it returns a string with the MIME message'''
if len(self._boundary) == 0:
#the makeBoundary hasn't been called yet
self.makeBoundary()
#ok we have everything let's start to spit the message out
#first the XML
returnstr = NL + "--" + self._boundary + NL
returnstr += "Content-Type: text/xml; charset=\"us-ascii\"" + NL
returnstr += "Content-Transfer-Encoding: 7bit" + NL
returnstr += "Content-Id: " + self._startCID + NL + NL
returnstr += self._xmlMessage + NL
#then the files
for file in self._files:
returnstr += "--" + self._boundary + NL
returnstr += "Content-Type: application/octet-stream" + NL
returnstr += "Content-Transfer-Encoding: binary" + NL
returnstr += "Content-Id: <" + str(id(file)) + ">" + NL + NL
file.seek(0)
returnstr += file.read() + NL
#closing boundary
returnstr += "--" + self._boundary + "--" + NL
return returnstr
def attachFile(self, file):
'''
it adds a file to this attachment
'''
self._files.append(file)
def addXMLMessage(self, xmlMessage):
'''
it adds the XML message. we can have only one XML SOAP message
'''
self._xmlMessage = xmlMessage
def getBoundary(self):
'''
this function returns the string used in the mime message as a
        boundary. The toString (or makeBoundary) method has to be called first.
'''
return self._boundary
def getStartCID(self):
'''
This function returns the CID of the XML message
'''
return self._startCID
def _make_boundary(text=None):
#some code taken from python stdlib
# Craft a random boundary. If text is given, ensure that the chosen
# boundary doesn't appear in the text.
token = random.randrange(sys.maxint)
boundary = ('=' * 10) + (_fmt % token) + '=='
if text is None:
return boundary
b = boundary
counter = 0
while True:
cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
b = boundary + '.' + str(counter)
counter += 1
return b
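if __name__ == '__main__':
    # Minimal usage sketch added for illustration (not part of the original
    # module); the SOAP envelope and attachment payload below are made-up
    # placeholder values.
    from StringIO import StringIO
    demo = MIMEMessage()
    demo.addXMLMessage('<soap:Envelope></soap:Envelope>')
    demo.attachFile(StringIO('attachment payload'))
    print demo.toString()
    print 'boundary used:', demo.getBoundary()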
| gpl-3.0 | -2,123,325,751,531,944,400 | 29.718182 | 115 | 0.572359 | false |
coleifer/irc | bots/markov.py | 3 | 5130 | #!/usr/bin/python
import os
import pickle
import random
import re
import sys
from irc import IRCBot, IRCConnection
class MarkovBot(IRCBot):
"""
Hacking on a markov chain bot - based on:
http://code.activestate.com/recipes/194364-the-markov-chain-algorithm/
http://github.com/ericflo/yourmomdotcom
"""
messages_to_generate = 5
chattiness = .01
max_words = 15
chain_length = 2
stop_word = '\n'
filename = 'markov.db'
last = None
def __init__(self, *args, **kwargs):
super(MarkovBot, self).__init__(*args, **kwargs)
self.load_data()
def load_data(self):
if os.path.exists(self.filename):
fh = open(self.filename, 'rb')
self.word_table = pickle.loads(fh.read())
fh.close()
else:
self.word_table = {}
def save_data(self):
fh = open(self.filename, 'w')
fh.write(pickle.dumps(self.word_table))
fh.close()
def split_message(self, message):
words = message.split()
if len(words) > self.chain_length:
words.extend([self.stop_word] * self.chain_length)
for i in range(len(words) - self.chain_length):
yield (words[i:i + self.chain_length + 1])
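    # Illustrative note (not part of the original bot): with chain_length = 2,
    # split_message('hello there fine folks') yields
    #   ['hello', 'there', 'fine'], ['there', 'fine', 'folks'],
    #   ['fine', 'folks', '\n'], ['folks', '\n', '\n']
    # i.e. each chunk is a two-word key followed by the word that comes next.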
def generate_message(self, person, size=15, seed_key=None):
person_words = len(self.word_table.get(person, {}))
if person_words < size:
return
if not seed_key:
seed_key = random.choice(self.word_table[person].keys())
message = []
for i in xrange(self.messages_to_generate):
words = seed_key
gen_words = []
for i in xrange(size):
if words[0] == self.stop_word:
break
gen_words.append(words[0])
try:
words = words[1:] + (random.choice(self.word_table[person][words]),)
except KeyError:
break
if len(gen_words) > len(message):
message = list(gen_words)
return ' '.join(message)
def imitate(self, sender, message, channel):
person = message.replace('imitate ', '').strip()[:10]
if person != self.conn.nick:
return self.generate_message(person)
def cite(self, sender, message, channel):
if self.last:
return self.last
def sanitize_message(self, message):
"""Convert to lower-case and strip out all quotation marks"""
return re.sub('[\"\']', '', message.lower())
def log(self, sender, message, channel):
sender = sender[:10]
self.word_table.setdefault(sender, {})
if message.startswith('/'):
return
try:
say_something = self.is_ping(message) or sender != self.conn.nick and random.random() < self.chattiness
except AttributeError:
say_something = False
messages = []
seed_key = None
if self.is_ping(message):
message = self.fix_ping(message)
for words in self.split_message(self.sanitize_message(message)):
key = tuple(words[:-1])
            if key in self.word_table[sender]:
self.word_table[sender][key].append(words[-1])
else:
self.word_table[sender][key] = [words[-1]]
if self.stop_word not in key and say_something:
for person in self.word_table:
if person == sender:
continue
if key in self.word_table[person]:
generated = self.generate_message(person, seed_key=key)
if generated:
messages.append((person, generated))
if len(messages):
self.last, message = random.choice(messages)
return message
def load_log_file(self, filename):
fh = open(filename, 'r')
logline_re = re.compile('<\s*(\w+)>[^\]]+\]\s([^\r\n]+)[\r\n]')
for line in fh.readlines():
match = logline_re.search(line)
if match:
sender, message = match.groups()
                self.log(sender, message, '')
def load_text_file(self, filename, sender):
fh = open(filename, 'r')
for line in fh.readlines():
            self.log(sender, line, '')
def command_patterns(self):
return (
self.ping('^imitate \S+', self.imitate),
self.ping('^cite', self.cite),
('.*', self.log),
)
host = 'irc.freenode.net'
port = 6667
nick = 'whatyousay'
conn = IRCConnection(host, port, nick)
markov_bot = MarkovBot(conn)
if len(sys.argv) > 1 and sys.argv[1] == '-log':
if len(sys.argv) == 3:
markov_bot.load_log_file(sys.argv[2])
    elif len(sys.argv) == 4:
markov_bot.load_text_file(sys.argv[2], sys.argv[3])
else:
conn.connect()
conn.join('#botwars')
try:
conn.enter_event_loop()
except:
pass
markov_bot.save_data()
| mit | 5,246,617,683,997,780,000 | 29.35503 | 115 | 0.534113 | false |
simonolander/euler | euler-126.py | 1 | 1057 | from itertools import count
def layer(x, y, z, n):
return 2*(x*y + y*z + x*z) + 4*(x + y + z + n - 2) * (n - 1)
print(layer(3, 2, 1, 1)) # 22
print(layer(3, 2, 1, 2)) # 46
print(layer(3, 2, 1, 3)) # 78
print(layer(3, 2, 1, 4)) # 118
print(layer(5, 1, 1, 1)) # 22
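# Explanatory note (added for clarity): layer(x, y, z, n) is the number of unit
# cubes in the n-th covering layer around an x*y*z cuboid, so the loops below
# enumerate every cuboid/layer combination whose layer size stays within
# `limit`, and memo maps each layer size to the (x, y, z, n) tuples producing
# it; the final scan then finds the smallest layer size produced exactly
# `search` (1000) times, which is what Project Euler problem 126 asks for.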
limit = 30000
memo = {}
for x in count(1):
if layer(x, x, x, 1) > limit:
break
for y in count(x):
if layer(x, y, y, 1) > limit:
break
for z in count(y):
if layer(x, y, z, 1) > limit:
break
for n in count(1):
l = layer(x, y, z, n)
if l > limit:
break
if l not in memo:
memo[l] = [(x, y, z, n)]
else:
memo[l].append((x, y, z, n))
search = 1000
smallest = None
lst = None
for layer_size, combos in memo.items():
    if len(combos) == search:
        if smallest is None or layer_size < smallest:
            smallest = layer_size
            lst = combos
print(smallest, lst)
| mit | 8,148,229,678,060,357,000 | 23.581395 | 64 | 0.449385 | false |
licode/scikit-xray | skbeam/core/fitting/models.py | 4 | 5983 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# @author: Li Li ([email protected]) #
# created on 09/10/2014 #
# #
# Original code: #
# @author: Mirna Lerotic, 2nd Look Consulting #
# http://www.2ndlookconsulting.com/ #
# Copyright (c) 2013, Stefan Vogt, Argonne National Laboratory #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import inspect
import logging
from lmfit import Model
from .lineshapes import (elastic, compton, lorentzian2)
from .base.parameter_data import get_para
logger = logging.getLogger(__name__)
def set_default(model_name, func_name):
"""
Set values and bounds to Model parameters in lmfit.
Parameters
----------
model_name : class object
Model class object from lmfit
func_name : function
function name of physics peak
"""
paras = inspect.getargspec(func_name)
# the first argument is independent variable, also ignored
# default values are not considered for fitting in this function
my_args = paras.args[1:]
para_dict = get_para()
for name in my_args:
if name not in para_dict.keys():
continue
my_dict = para_dict[name]
if my_dict['bound_type'] == 'none':
model_name.set_param_hint(name, vary=True)
elif my_dict['bound_type'] == 'fixed':
model_name.set_param_hint(name, vary=False, value=my_dict['value'])
elif my_dict['bound_type'] == 'lo':
model_name.set_param_hint(name, value=my_dict['value'], vary=True,
min=my_dict['min'])
elif my_dict['bound_type'] == 'hi':
model_name.set_param_hint(name, value=my_dict['value'], vary=True,
max=my_dict['max'])
elif my_dict['bound_type'] == 'lohi':
model_name.set_param_hint(name, value=my_dict['value'], vary=True,
min=my_dict['min'], max=my_dict['max'])
else:
raise TypeError("Boundary type {0} can't be "
"used".format(my_dict['bound_type']))
def _gen_class_docs(func):
"""
Parameters
----------
func : function
function of peak profile
Returns
-------
str :
documentation of the function
"""
return ("Wrap the {} function for fitting within lmfit "
"framework\n".format(func.__name__) + func.__doc__)
# DEFINE NEW MODELS
class ElasticModel(Model):
__doc__ = _gen_class_docs(elastic)
def __init__(self, *args, **kwargs):
super(ElasticModel, self).__init__(elastic, *args, **kwargs)
self.set_param_hint('epsilon', value=2.96, vary=False)
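# Illustrative sketch (not part of the original module; the instance/function
# pairing below is an assumption for demonstration only): given a model
# instance, set_default applies the stored bounds for every argument that
# get_para() knows about, e.g.
#   m = ElasticModel()
#   set_default(m, elastic)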
class ComptonModel(Model):
__doc__ = _gen_class_docs(compton)
def __init__(self, *args, **kwargs):
super(ComptonModel, self).__init__(compton, *args, **kwargs)
self.set_param_hint('epsilon', value=2.96, vary=False)
class Lorentzian2Model(Model):
__doc__ = _gen_class_docs(lorentzian2)
def __init__(self, *args, **kwargs):
super(Lorentzian2Model, self).__init__(lorentzian2, *args, **kwargs)
| bsd-3-clause | 1,957,733,177,105,586,200 | 42.671533 | 79 | 0.520976 | false |
DanielleQuinn/studyGroup | scripts/updateCalendar.py | 27 | 6851 | #######################################################################
# date: 2015-07-28
# author: Thea Van Rossum [email protected]
# functionality:
# 1. Creates a Google Calendar API service object
# 2. Deletes all events in the calendar in case changes have been
# made to existing events
# 3. Create events based on all the posts in
# "_posts" (POSTS_DIRECTORY)
# Commented out: 4. Print next 10 events
#
# Will not add an event if it is missing one of the REQUIRED_FIELDS
#
# To modify and use:
# 1. See google docs to get setup with credentials:
# https://developers.google.com/google-apps/calendar/quickstart/python
# 2. Update the variables indicated below (APPLICATION_NAME,
# CALENDAR_ID, TIME_ZONE_SRT
# 3. run from scripts/ using:
# python updateCalendar.py --noauth_local_webserver
########################################################################
import httplib2
import os
import glob
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
import datetime
import pytz
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# Modify these variables in step 2 above -------------------
# APPLICATION_NAME: app name you created in step one above:
APPLICATION_NAME = 'test'
# CALENDAR_ID: google account name you created for your calendar:
CALENDAR_ID = '[email protected]'
# TIME_ZONE_STR: check here:
# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
TIME_ZONE_STR = 'America/Vancouver'
# -----------------------------------------------------------
SCOPES = 'https://www.googleapis.com/auth/calendar'
CLIENT_SECRET_FILE = 'client_secret.json'
DEFAULT_START_TIME = "15:30" # will be overridden by startTime in _posts
DEFAULT_END_TIME = "16:30" # will be overridden by endTime in _posts
REQUIRED_FIELDS = ['title', 'location', 'text', 'link', 'date']
POSTS_DIRECTORY = "../_posts"
def main():
"""
1. Creates a Google Calendar API service object
2. Deletes all events in the calendar in case
changes have been made to existing events
3. Create events based on all the posts in "_posts" (POSTS_DIRECTORY)
Commented out: 4. Print next 10 events
Will not add an event if it is missing one of the REQUIRED_FIELDS
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http)
# clear the calendar
service.calendars().clear(calendarId=CALENDAR_ID).execute()
# create events
for inputPath in glob.glob(os.path.join(POSTS_DIRECTORY, '*.markdown')):
eventDict = parseEventPost(inputPath)
events = getAllEvents(service)
if not isEventComplete(eventDict, inputPath):
print 'Event is incomplete'
else:
event = createEvent(eventDict)
event = service.events().insert(calendarId=CALENDAR_ID, body=event).execute()
print 'Event created: %s' % (event.get('summary'))
def printNextEvents(service, numEvents):
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
print 'Getting the upcoming %d events' % numEvents
eventsResult = service.events().list(
calendarId=CALENDAR_ID, timeMin=now, maxResults=numEvents, singleEvents=True,
orderBy='startTime').execute()
events = eventsResult.get('items', [])
if not events:
print 'No upcoming events found.'
for event in events:
start = event['start'].get('dateTime', event['start'].get('date'))
print start, event['summary']
def getAllEvents(service):
eventsResult = service.events().list(
calendarId=CALENDAR_ID, singleEvents=True, orderBy='startTime').execute()
events = eventsResult.get('items', [])
return events
def parseEventPost(inputPath):
eventDict = {}
eventDict['startTime'] = DEFAULT_START_TIME
eventDict['endTime'] = DEFAULT_END_TIME
f = open(inputPath, 'r')
for line in f:
listedline = line.strip().split(':', 1) # split around the : sign
        if len(listedline) > 1: # we have the : sign in there
eventDict[listedline[0].strip()] = listedline[1].strip()
return eventDict
def isEventComplete(eventDict, sourcePath):
    isComplete = True
    for field in REQUIRED_FIELDS:
        if field not in eventDict:
            print "Error: event missing %s (%s)" % (field, sourcePath)
            isComplete = False
    return isComplete
def makeDateTime(dateStr, hourMinStr):
# date like "2014-07-25"
# hourMinStr like "15:30"
timeStr = hourMinStr[1:-1]
date = dateStr.split('-')
TIME_ZONE_HR = ':00'+pytz.timezone(TIME_ZONE_STR).localize(datetime.datetime(int(date[0]), int(date[1]), int(date[2]))).strftime('%z')
TIME_ZONE_HR = TIME_ZONE_HR[:-2] + ':' + TIME_ZONE_HR[-2:]
return dateStr + "T" + timeStr + TIME_ZONE_HR
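# Illustrative note (not part of the original script): the [1:-1] slice above
# assumes times arrive quoted from the post front matter, e.g.
#   makeDateTime('2015-07-28', '"15:30"') -> '2015-07-28T15:30:00-07:00'
# (offset shown for America/Vancouver during daylight saving time).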
def createEvent(eventDict):
event = {
'summary': eventDict['title'],
'location': eventDict['location'],
'description': eventDict['text']+"\n"+eventDict['link'],
'start': {
'dateTime': makeDateTime(eventDict['date'], eventDict['startTime']),
'timeZone': TIME_ZONE_STR
},
'end': {
'dateTime': makeDateTime(eventDict['date'], eventDict['endTime']),
'timeZone': TIME_ZONE_STR
},
'reminders': {
'useDefault': False,
'overrides': [
{'method': 'email', 'minutes': 60 * 24 * 2}, # 2 days
],
},
}
return event
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'google-sfuStudyGroupCalendar.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
        else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print 'Storing credentials to ' + credential_path
return credentials
if __name__ == '__main__':
main()
| apache-2.0 | -5,042,752,143,507,849,000 | 34.133333 | 138 | 0.628813 | false |
Thraxis/SickRage | sickbeard/providers/torrentbytes.py | 1 | 7783 | # Author: Idan Gutman
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import urllib
import traceback
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.providers.TorrentProvider import TorrentProvider
class TorrentBytesProvider(TorrentProvider):
def __init__(self):
TorrentProvider.__init__(self, "TorrentBytes")
self.username = None
self.password = None
self.ratio = None
self.minseed = None
self.minleech = None
self.freeleech = False
self.urls = {'base_url': 'https://www.torrentbytes.net',
'login': 'https://www.torrentbytes.net/takelogin.php',
'detail': 'https://www.torrentbytes.net/details.php?id=%s',
'search': 'https://www.torrentbytes.net/browse.php?search=%s%s',
'download': 'https://www.torrentbytes.net/download.php?id=%s&name=%s'}
self.url = self.urls['base_url']
self.categories = "&c41=1&c33=1&c38=1&c32=1&c37=1"
self.proper_strings = ['PROPER', 'REPACK']
self.cache = TorrentBytesCache(self)
def login(self):
login_params = {'username': self.username,
'password': self.password,
'login': 'Log in!'}
response = self.get_url(self.urls['login'], post_data=login_params, timeout=30)
if not response:
logger.log(u"Unable to connect to provider", logger.WARNING)
return False
if re.search('Username or password incorrect', response):
logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def search(self, search_params, age=0, ep_obj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
if not self.login():
return results
for mode in search_params.keys():
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_params[mode]:
if mode != 'RSS':
logger.log(u"Search string: %s " % search_string, logger.DEBUG)
searchURL = self.urls['search'] % (urllib.quote(search_string.encode('utf-8')), self.categories)
logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
data = self.get_url(searchURL)
if not data:
continue
try:
with BS4Parser(data, 'html5lib') as html:
# Continue only if one Release is found
empty = html.find('Nothing found!')
if empty:
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
torrent_table = html.find('table', attrs={'border': '1'})
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
for result in torrent_rows[1:]:
cells = result.find_all('td')
size = None
link = cells[1].find('a', attrs={'class': 'index'})
full_id = link['href'].replace('details.php?id=', '')
torrent_id = full_id.split("&")[0]
# Free leech torrents are marked with green [F L] in the title (i.e. <font color=green>[F L]</font>)
freeleechTag = cells[1].find('font', attrs={'color': 'green'})
if freeleechTag and freeleechTag.text == u'[F\xa0L]':
isFreeleechTorrent = True
else:
isFreeleechTorrent = False
if self.freeleech and not isFreeleechTorrent:
continue
try:
if link.has_key('title'):
title = cells[1].find('a', {'class': 'index'})['title']
else:
title = link.contents[0]
download_url = self.urls['download'] % (torrent_id, link.contents[0])
seeders = int(cells[8].find('span').contents[0])
leechers = int(cells[9].find('span').contents[0])
# Need size for failed downloads handling
if size is None:
if re.match(r'[0-9]+,?\.?[0-9]*[KkMmGg]+[Bb]+', cells[6].text):
size = self._convertSize(cells[6].text)
if not size:
size = -1
except (AttributeError, TypeError):
continue
if not all([title, download_url]):
continue
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
items[mode].append(item)
except Exception, e:
logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)
# For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def seed_ratio(self):
return self.ratio
def _convertSize(self, sizeString):
size = sizeString[:-2]
modifier = sizeString[-2:]
size = float(size)
if modifier in 'KB':
size = size * 1024
elif modifier in 'MB':
size = size * 1024**2
elif modifier in 'GB':
size = size * 1024**3
elif modifier in 'TB':
size = size * 1024**4
return int(size)
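    # Illustrative note (not part of the original provider): sizes arrive as
    # strings such as '1.40 GB'; the last two characters are the unit and the
    # rest is the number, so _convertSize('1.40 GB') == int(1.40 * 1024**3)
    # == 1503238553.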
class TorrentBytesCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
# only poll TorrentBytes every 20 minutes max
self.minTime = 20
def _getRSSData(self):
search_params = {'RSS': ['']}
return {'entries': self.provider.search(search_params)}
provider = TorrentBytesProvider()
| gpl-3.0 | -579,820,408,553,699,700 | 38.100503 | 191 | 0.510603 | false |
kokogaga/arducopter | mk/PX4/Tools/genmsg/test/test_genmsg_msg_loader.py | 215 | 29225 | # Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import random
def get_test_dir():
return os.path.abspath(os.path.join(os.path.dirname(__file__), 'files'))
def test_exceptions():
from genmsg import MsgNotFound
try:
raise MsgNotFound('hello')
except MsgNotFound:
pass
def test__convert_constant_value():
from genmsg.msg_loader import convert_constant_value
from genmsg import InvalidMsgSpec
assert 0. == convert_constant_value('float32', '0.0')
assert 0. == convert_constant_value('float64', '0.0')
assert 'fo o' == convert_constant_value('string', ' fo o ')
assert 1 == convert_constant_value('byte', '1')
assert 1 == convert_constant_value('char', '1')
assert 1 == convert_constant_value('int8', '1')
assert 12 == convert_constant_value('int16', '12')
assert -13 == convert_constant_value('int32', '-13')
assert 14 == convert_constant_value('int64', '14')
assert 0 == convert_constant_value('uint8', '0')
assert 18 == convert_constant_value('uint16', '18')
assert 19 == convert_constant_value('uint32', '19')
assert 20 == convert_constant_value('uint64', '20')
assert True == convert_constant_value('bool', '1')
assert False == convert_constant_value('bool', '0')
width_fail = [('int8', '129'), ('uint8', '256'),
('int16', '35536'), ('uint16', '-1'),('uint16', '65536'),
('int32', '3000000000'),('int32', '-2700000000'),
('uint32', '-1'),('uint32', '41000000000'),
('uint64', '-1')]
for t, v in width_fail:
try:
convert_constant_value(t, v)
assert False, "should have failed width check: %s, %s"%(t, v)
except InvalidMsgSpec:
pass
type_fail = [('int32', 'f'), ('float32', 'baz')]
for t, v in type_fail:
try:
convert_constant_value(t, v)
assert False, "should have failed type check: %s, %s"%(t, v)
except ValueError:
pass
try:
convert_constant_value('foo', '1')
assert False, "should have failed invalid type"
except InvalidMsgSpec:
pass
def test__load_constant_line():
from genmsg.msgs import Constant, InvalidMsgSpec
from genmsg.msg_loader import _load_constant_line
try:
_load_constant_line("int8 field=alpha")
assert False, "should have raised"
except InvalidMsgSpec:
pass
try:
_load_constant_line("int8 field=")
assert False, "should have raised"
except InvalidMsgSpec:
pass
try:
_load_constant_line("faketype field=1")
assert False, "should have raised"
except InvalidMsgSpec:
pass
c = _load_constant_line("int8 field=1")
assert c == Constant('int8', 'field', 1, '1')
c = _load_constant_line("string val=hello #world")
assert c == Constant('string', 'val', 'hello #world', 'hello #world')
def test__load_field_line():
from genmsg.msgs import InvalidMsgSpec, Field
from genmsg.msg_loader import _load_field_line, InvalidMsgSpec, Field, is_valid_msg_field_name
try:
_load_field_line("string", 'foo')
assert False, "should have raised"
except InvalidMsgSpec:
pass
assert not is_valid_msg_field_name('string[')
try:
_load_field_line("string data!", 'foo')
assert False, "should have raised"
except InvalidMsgSpec:
pass
try:
_load_field_line("string[ data", 'foo')
assert False, "should have raised"
except InvalidMsgSpec:
pass
f =_load_field_line("string str", 'foo')
assert f == ('string', 'str')
f =_load_field_line("string str #nonsense", 'foo')
assert f == ('string', 'str')
f =_load_field_line("String str #nonsense", '')
assert f == ('String', 'str')
f =_load_field_line("String str #nonsense", 'foo')
assert f == ('foo/String', 'str')
# make sure Header is mapped
f =_load_field_line("Header header #nonsense", 'somewhere')
assert f == ('std_msgs/Header', 'header'), f
f =_load_field_line("Header header #nonsense", '')
assert f == ('std_msgs/Header', 'header'), f
def test_load_msg_from_string():
# make sure Header -> std_msgs/Header conversion works
from genmsg.msgs import Constant
from genmsg.msg_loader import load_msg_from_string, MsgContext
context = MsgContext.create_default()
msgspec = load_msg_from_string(context, "Header header", 'test_pkg/HeaderTest')
print(msgspec)
assert msgspec.has_header()
assert msgspec.types == ['std_msgs/Header']
assert msgspec.names == ['header']
assert msgspec.constants == []
assert msgspec.short_name == 'HeaderTest'
assert msgspec.package == 'test_pkg'
assert msgspec.full_name == 'test_pkg/HeaderTest'
msgspec = load_msg_from_string(context, "int8 c=1\nHeader header\nint64 data", 'test_pkg/HeaderValsTest')
assert msgspec.has_header()
assert msgspec.types == ['std_msgs/Header', 'int64']
assert msgspec.names == ['header', 'data']
assert msgspec.constants == [Constant('int8', 'c', 1, '1')]
assert msgspec.short_name == 'HeaderValsTest'
assert msgspec.package == 'test_pkg'
assert msgspec.full_name == 'test_pkg/HeaderValsTest'
msgspec = load_msg_from_string(context, "string data\nint64 data2", 'test_pkg/ValsTest')
assert not msgspec.has_header()
assert msgspec.types == ['string', 'int64']
assert msgspec.names == ['data', 'data2']
assert msgspec.constants == []
assert msgspec.short_name == 'ValsTest'
assert msgspec.full_name == 'test_pkg/ValsTest'
def _validate_TestString(msgspec):
assert ['caller_id', 'orig_caller_id', 'data'] == msgspec.names, msgspec.names
assert ['string', 'string', 'string'] == msgspec.types, msgspec.types
def test_load_msg_from_file():
from genmsg.msgs import InvalidMsgSpec
from genmsg.msg_loader import load_msg_from_file, MsgContext
test_d = get_test_dir()
test_ros_dir = os.path.join(test_d, 'test_ros', 'msg')
test_string_path = os.path.join(test_ros_dir, 'TestString.msg')
msg_context = MsgContext.create_default()
spec = load_msg_from_file(msg_context, test_string_path, 'test_ros/TestString')
assert spec.full_name == 'test_ros/TestString'
assert spec.package == 'test_ros'
assert spec.short_name == 'TestString'
_validate_TestString(spec)
# test repeat
spec_2 = load_msg_from_file(msg_context, test_string_path, 'test_ros/TestString')
assert spec == spec_2
assert spec.package == spec_2.package
assert spec.short_name == spec_2.short_name
# test w/ bad file
test_bad_path = os.path.join(test_ros_dir, 'Bad.msg')
try:
load_msg_from_file(msg_context, test_bad_path, 'test_ros/Bad')
assert False, "should have raised"
except InvalidMsgSpec:
pass
# supposed to register
assert msg_context.is_registered('test_ros/TestString'), msg_context
def test_load_msg_from_string_TestString():
from genmsg.msg_loader import load_msg_from_string, MsgContext
test_d = get_test_dir()
test_ros_dir = os.path.join(test_d, 'test_ros', 'msg')
test_string_path = os.path.join(test_ros_dir, 'TestString.msg')
with open(test_string_path) as f:
text = f.read()
msg_context = MsgContext.create_default()
_validate_TestString(load_msg_from_string(msg_context, text, 'test_ros/TestString'))
# supposed to register
assert msg_context.is_registered('test_ros/TestString'), msg_context
def test_load_msg_by_type():
from genmsg.msg_loader import load_msg_by_type, MsgContext, MsgNotFound
test_d = get_test_dir()
geometry_d = os.path.join(test_d, 'geometry_msgs', 'msg')
test_ros_dir = os.path.join(test_d, 'test_ros', 'msg')
test_string_path = os.path.join(test_ros_dir, 'TestString.msg')
search_path = {
'test_ros': [ test_ros_dir ],
'geometry_msgs': [ geometry_d ],
}
msg_context = MsgContext.create_default()
msgspec = load_msg_by_type(msg_context, 'test_ros/TestString', search_path)
_validate_TestString(msgspec)
# supposed to register
assert msg_context.is_registered('test_ros/TestString'), msg_context
# test invalid search path
try:
load_msg_by_type(msg_context, 'test_ros/TestString', [test_string_path])
assert False, "should have raised"
except ValueError:
pass
# test not found
try:
load_msg_by_type(msg_context, 'test_ros/Fake', search_path)
assert False, "should have raised"
except MsgNotFound:
pass
# test all the known geometry msgs
test_d = get_test_dir()
for f in os.listdir(geometry_d):
if f.endswith('.msg'):
short = f[:-4]
msg_type = 'geometry_msgs/%s'%short
spec = load_msg_by_type(msg_context, msg_type, search_path)
assert spec is not None
assert spec.package == 'geometry_msgs'
assert spec.full_name == msg_type
assert spec.short_name == short
with open(os.path.join(geometry_d, f)) as file_h:
assert spec.text == file_h.read()
# all types with 'Stamped' in name have headers
if 'Stamped' in f:
assert spec.has_header(), msg_type
def test_get_msg_file():
from genmsg import MsgNotFound
from genmsg.msg_loader import get_msg_file
test_d = get_test_dir()
test_ros_dir = os.path.join(test_d, 'test_ros', 'msg')
test_string_path = os.path.join(test_ros_dir, 'TestString.msg')
search_path = {
'test_ros': [ test_ros_dir ],
}
assert test_string_path == get_msg_file('test_ros', 'TestString', search_path)
try:
get_msg_file('test_ros', 'DNE', search_path)
assert False, "should have raised"
except MsgNotFound:
pass
try:
get_msg_file('bad_pkg', 'TestString', search_path)
assert False, "should have raised"
except MsgNotFound:
pass
# test with invalid search path
try:
get_msg_file('test_ros', 'TestString', [test_string_path])
assert False, "should have raised"
except ValueError:
pass
def test_get_srv_file():
from genmsg import MsgNotFound
from genmsg.msg_loader import get_srv_file
test_d = get_test_dir()
test_ros_dir = os.path.join(test_d, 'test_ros', 'srv')
std_srvs_dir = os.path.join(test_d, 'std_srvs', 'srv')
empty_path = os.path.join(std_srvs_dir, 'Empty.srv')
search_path = {
'test_ros': [ test_ros_dir ],
'std_srvs': [ std_srvs_dir ],
}
assert empty_path == get_srv_file('std_srvs', 'Empty', search_path)
try:
get_srv_file('test_ros', 'DNE', search_path)
assert False, "should have raised"
except MsgNotFound:
pass
try:
get_srv_file('bad_pkg', 'TestString', search_path)
assert False, "should have raised"
except MsgNotFound:
pass
# test with invalid search path
try:
get_srv_file('std_srvs', 'Empty', [std_srvs_dir])
assert False, "should have raised"
except ValueError:
pass
def test_MsgContext():
from genmsg.msg_loader import MsgContext, load_msg_from_file
msg_context = MsgContext()
assert not msg_context.is_registered('time')
assert not msg_context.is_registered('duration')
msg_context = MsgContext.create_default()
# tripwires
repr(msg_context)
str(msg_context)
assert msg_context.is_registered('time'), msg_context._registered_packages
assert msg_context.is_registered('duration')
assert not msg_context.is_registered('test_ros/TestString')
assert not msg_context.is_registered('Header')
# start loading stuff into context
test_d = get_test_dir()
test_ros_dir = os.path.join(test_d, 'test_ros', 'msg')
test_string_path = os.path.join(test_ros_dir, 'TestString.msg')
spec = load_msg_from_file(msg_context, test_string_path, 'test_ros/TestString')
msg_context.register('test_ros/TestString', spec)
assert msg_context.get_registered('test_ros/TestString') == spec
try:
msg_context.get_registered('bad/TestString')
assert False, 'should have raised'
except KeyError:
pass
assert msg_context.is_registered('test_ros/TestString')
# test Header
assert not msg_context.is_registered('Header')
assert not msg_context.is_registered('std_msgs/Header')
msg_context.register('std_msgs/Header', spec)
assert msg_context.is_registered('std_msgs/Header')
def test_load_srv_from_file():
from genmsg.msg_loader import MsgContext, load_srv_from_file
msg_context = MsgContext.create_default()
d = get_test_dir()
filename = os.path.join(d, 'test_ros', 'srv', 'AddTwoInts.srv')
with open(filename, 'r') as f:
text = f.read()
full_name = 'test_ros/AddTwoInts'
spec = load_srv_from_file(msg_context, filename, full_name)
assert spec == load_srv_from_file(msg_context, filename, full_name)
assert ['int64', 'int64'] == spec.request.types, spec.request.types
assert ['a', 'b'] == spec.request.names
assert text == spec.text
assert full_name == spec.full_name
def test_load_msg_depends():
#TODO: should there just be a 'load_msg, implicit=True?'
from genmsg.msg_loader import MsgContext, load_msg_by_type, load_msg_depends, MsgNotFound
test_d = get_test_dir()
search_path = {
'test_ros': [ os.path.join(test_d, 'test_ros', 'msg') ],
'std_msgs': [ os.path.join(test_d, 'std_msgs', 'msg') ],
'geometry_msgs': [ os.path.join(test_d, 'geometry_msgs', 'msg') ],
'sensor_msgs': [ os.path.join(test_d, 'sensor_msgs', 'msg') ],
'invalid': [ os.path.join(test_d, 'invalid', 'msg') ],
}
# Test not found
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'invalid/BadDepend', search_path)
try:
load_msg_depends(msg_context, root_spec, search_path)
assert False, "should have raised MsgNotFound"
except MsgNotFound:
pass
root_spec = load_msg_by_type(msg_context, 'invalid/BadLocalDepend', search_path)
try:
load_msg_depends(msg_context, root_spec, search_path)
assert False, "should have raised MsgNotFound"
except MsgNotFound:
pass
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'std_msgs/Int32', search_path)
load_msg_depends(msg_context, root_spec, search_path)
file_p = os.path.join(test_d, 'std_msgs', 'msg', 'Int32.msg')
assert file_p == msg_context.get_file('std_msgs/Int32')
assert [] == msg_context.get_depends('std_msgs/Int32')
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'std_msgs/Header', search_path)
load_msg_depends(msg_context, root_spec, search_path)
file_p = os.path.join(test_d, 'std_msgs', 'msg', 'Header.msg')
assert file_p == msg_context.get_file('std_msgs/Header')
assert [] == msg_context.get_depends('std_msgs/Header')
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'Header', search_path)
load_msg_depends(msg_context, root_spec, search_path)
file_p = os.path.join(test_d, 'std_msgs', 'msg', 'Header.msg')
assert file_p == msg_context.get_file('std_msgs/Header')
assert [] == msg_context.get_depends('std_msgs/Header')
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'std_msgs/Int32MultiArray', search_path)
load_msg_depends(msg_context, root_spec, search_path)
file_p = os.path.join(test_d, 'std_msgs', 'msg', 'Int32MultiArray.msg')
assert file_p == msg_context.get_file('std_msgs/Int32MultiArray')
val = msg_context.get_all_depends('std_msgs/Int32MultiArray')
assert set(['std_msgs/MultiArrayLayout', 'std_msgs/MultiArrayDimension']) == set(val), val
assert 2 == len(val), val
val = msg_context.get_depends('std_msgs/Int32MultiArray')
assert set(['std_msgs/MultiArrayLayout']) == set(val), val
for s in ['MultiArrayLayout', 'MultiArrayDimension']:
file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
assert file_p == msg_context.get_file('std_msgs/%s'%s)
def test_load_msg_depends_stamped():
#TODO: should there just be a 'load_msg, implicit=True?'
from genmsg.msg_loader import MsgContext, load_msg_by_type, load_msg_depends
test_d = get_test_dir()
geometry_d = os.path.join(test_d, 'geometry_msgs', 'msg')
search_path = {
'test_ros': [ os.path.join(test_d, 'test_ros', 'msg') ],
'std_msgs': [ os.path.join(test_d, 'std_msgs', 'msg') ],
'geometry_msgs': [ geometry_d ],
'sensor_msgs': [ os.path.join(test_d, 'sensor_msgs', 'msg') ],
}
# Test with Stamped and deeper hierarchies, Header
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'geometry_msgs/PoseStamped', search_path)
load_msg_depends(msg_context, root_spec, search_path)
file_p = os.path.join(test_d, 'geometry_msgs', 'msg', 'PoseStamped.msg')
assert file_p == msg_context.get_file('geometry_msgs/PoseStamped')
val = msg_context.get_all_depends('geometry_msgs/PoseStamped')
assert set(['std_msgs/Header', 'geometry_msgs/Pose', 'geometry_msgs/Point', 'geometry_msgs/Quaternion']) == set(val), val
val = msg_context.get_depends('geometry_msgs/PoseStamped')
assert set(['std_msgs/Header', 'geometry_msgs/Pose']) == set(val), val
for s in ['Header']:
file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
assert file_p == msg_context.get_file('std_msgs/%s'%s)
for s in ['Pose', 'Point', 'Quaternion']:
file_p = os.path.join(geometry_d, '%s.msg'%s)
assert file_p == msg_context.get_file('geometry_msgs/%s'%s)
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'geometry_msgs/TwistWithCovarianceStamped', search_path)
load_msg_depends(msg_context, root_spec, search_path)
file_p = os.path.join(test_d, 'geometry_msgs', 'msg', 'TwistWithCovarianceStamped.msg')
assert file_p == msg_context.get_file('geometry_msgs/TwistWithCovarianceStamped')
val = msg_context.get_all_depends('geometry_msgs/TwistWithCovarianceStamped')
assert set(['std_msgs/Header', 'geometry_msgs/TwistWithCovariance', 'geometry_msgs/Twist', 'geometry_msgs/Vector3']) == set(val), val
val = msg_context.get_depends('geometry_msgs/TwistWithCovarianceStamped')
assert set(['std_msgs/Header', 'geometry_msgs/TwistWithCovariance']) == set(val), val
for s in ['Header']:
file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
assert file_p == msg_context.get_file('std_msgs/%s'%s)
for s in ['TwistWithCovariance', 'Twist', 'Vector3']:
file_p = os.path.join(geometry_d, '%s.msg'%s)
assert file_p == msg_context.get_file('geometry_msgs/%s'%s)
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'sensor_msgs/Imu', search_path)
load_msg_depends(msg_context, root_spec, search_path)
file_p = os.path.join(test_d, 'sensor_msgs', 'msg', 'Imu.msg')
assert file_p == msg_context.get_file('sensor_msgs/Imu')
val = msg_context.get_all_depends('sensor_msgs/Imu')
assert set(['std_msgs/Header', 'geometry_msgs/Quaternion', 'geometry_msgs/Vector3']) == set(val), val
val = msg_context.get_depends('sensor_msgs/Imu')
assert set(['std_msgs/Header', 'geometry_msgs/Quaternion', 'geometry_msgs/Vector3']) == set(val), val
for s in ['Header']:
file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
assert file_p == msg_context.get_file('std_msgs/%s'%s)
for s in ['Quaternion', 'Vector3']:
file_p = os.path.join(geometry_d, '%s.msg'%s)
assert file_p == msg_context.get_file('geometry_msgs/%s'%s)
def test_load_depends_msg():
from genmsg.msg_loader import MsgContext, load_msg_by_type, load_depends, MsgNotFound, load_srv_by_type
test_d = get_test_dir()
geometry_d = os.path.join(test_d, 'geometry_msgs', 'msg')
msg_search_path = {
'test_ros': [ os.path.join(test_d, 'test_ros', 'msg') ],
'std_msgs': [ os.path.join(test_d, 'std_msgs', 'msg') ],
'geometry_msgs': [ geometry_d ],
'sensor_msgs': [ os.path.join(test_d, 'sensor_msgs', 'msg') ],
'invalid': [ os.path.join(test_d, 'invalid', 'msg') ],
}
# Test not found
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'invalid/BadDepend', msg_search_path)
try:
load_depends(msg_context, root_spec, msg_search_path)
assert False, "should have raised MsgNotFound"
except MsgNotFound:
pass
root_spec = load_msg_by_type(msg_context, 'invalid/BadLocalDepend', msg_search_path)
try:
load_depends(msg_context, root_spec, msg_search_path)
assert False, "should have raised MsgNotFound"
except MsgNotFound:
pass
# Test with msgs
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'geometry_msgs/PoseStamped', msg_search_path)
load_depends(msg_context, root_spec, msg_search_path)
file_p = os.path.join(test_d, 'geometry_msgs', 'msg', 'PoseStamped.msg')
assert file_p == msg_context.get_file('geometry_msgs/PoseStamped')
val = msg_context.get_all_depends('geometry_msgs/PoseStamped')
assert set(['std_msgs/Header', 'geometry_msgs/Pose', 'geometry_msgs/Point', 'geometry_msgs/Quaternion']) == set(val), val
val = msg_context.get_depends('geometry_msgs/PoseStamped')
assert set(['std_msgs/Header', 'geometry_msgs/Pose']) == set(val), val
for s in ['Header']:
file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
assert file_p == msg_context.get_file('std_msgs/%s'%s)
for s in ['Pose', 'Point', 'Quaternion']:
file_p = os.path.join(geometry_d, '%s.msg'%s)
assert file_p == msg_context.get_file('geometry_msgs/%s'%s)
msg_context = MsgContext.create_default()
root_spec = load_msg_by_type(msg_context, 'sensor_msgs/Imu', msg_search_path)
load_depends(msg_context, root_spec, msg_search_path)
file_p = os.path.join(test_d, 'sensor_msgs', 'msg', 'Imu.msg')
assert file_p == msg_context.get_file('sensor_msgs/Imu')
val = msg_context.get_depends('sensor_msgs/Imu')
assert set(['std_msgs/Header', 'geometry_msgs/Quaternion', 'geometry_msgs/Vector3']) == set(val), val
for s in ['Header']:
file_p = os.path.join(test_d, 'std_msgs', 'msg', '%s.msg'%s)
assert file_p == msg_context.get_file('std_msgs/%s'%s)
for s in ['Quaternion', 'Vector3']:
file_p = os.path.join(geometry_d, '%s.msg'%s)
assert file_p == msg_context.get_file('geometry_msgs/%s'%s)
def test_load_depends_srv():
from genmsg.msg_loader import MsgContext, load_msg_by_type, load_depends, MsgNotFound, load_srv_by_type
test_d = get_test_dir()
geometry_d = os.path.join(test_d, 'geometry_msgs', 'msg')
msg_search_path = {
'test_ros': [ os.path.join(test_d, 'test_ros', 'msg') ],
'std_msgs': [ os.path.join(test_d, 'std_msgs', 'msg') ],
'geometry_msgs': [ geometry_d ],
'sensor_msgs': [ os.path.join(test_d, 'sensor_msgs', 'msg') ],
'invalid': [ os.path.join(test_d, 'invalid', 'msg') ],
}
# Test with srvs
srv_search_path = {
'test_ros': [ os.path.join(test_d, 'test_ros', 'srv') ],
'std_srvs': [ os.path.join(test_d, 'std_srvs', 'srv') ],
}
msg_context = MsgContext.create_default()
root_spec = load_srv_by_type(msg_context, 'test_ros/AddTwoInts', srv_search_path)
load_depends(msg_context, root_spec, msg_search_path)
val = msg_context.get_depends('test_ros/AddTwoIntsRequest')
assert val == [], val
val = msg_context.get_depends('test_ros/AddTwoIntsResponse')
assert val == [], val
# test with srv that has depends
msg_context = MsgContext.create_default()
response_deps = ['std_msgs/Header', 'geometry_msgs/Pose', 'geometry_msgs/PoseStamped', 'geometry_msgs/Point', 'geometry_msgs/Quaternion']
root_spec = load_srv_by_type(msg_context, 'test_ros/GetPoseStamped', srv_search_path)
load_depends(msg_context, root_spec, msg_search_path)
for d in response_deps:
assert msg_context.is_registered(d)
val = msg_context.get_depends('test_ros/GetPoseStampedRequest')
assert val == [], val
val = msg_context.get_depends('test_ros/GetPoseStampedResponse')
assert val == ['geometry_msgs/PoseStamped']
# Test with nonsense
class Foo(object): pass
try:
load_depends(msg_context, Foo(), msg_search_path)
assert False, "should have raised"
except ValueError:
pass
def test_load_srv_by_type():
from genmsg.msg_loader import load_srv_by_type, MsgContext, MsgNotFound
test_d = get_test_dir()
test_ros_dir = os.path.join(test_d, 'test_ros', 'srv')
std_srvs_dir = os.path.join(test_d, 'std_srvs', 'srv')
empty_path = os.path.join(std_srvs_dir, 'Empty.srv')
a2i_path = os.path.join(std_srvs_dir, 'AddTwoInts.srv')
search_path = {
'test_ros': [ test_ros_dir ],
'std_srvs': [ std_srvs_dir ],
}
msg_context = MsgContext.create_default()
spec = load_srv_by_type(msg_context, 'std_srvs/Empty', search_path)
assert msg_context.is_registered('std_srvs/EmptyRequest')
assert msg_context.is_registered('std_srvs/EmptyResponse')
assert msg_context.get_registered('std_srvs/EmptyRequest') == spec.request
assert msg_context.get_registered('std_srvs/EmptyResponse') == spec.response
assert msg_context.get_file('std_srvs/EmptyRequest') == empty_path, msg_context.get_file('std_srvs/EmptyRequest')
assert msg_context.get_file('std_srvs/EmptyResponse') == empty_path,msg_context.get_file('std_srvs/EmptyResponse')
assert spec.request.full_name == 'std_srvs/EmptyRequest'
assert spec.response.full_name == 'std_srvs/EmptyResponse'
assert spec.request.short_name == 'EmptyRequest'
assert spec.response.short_name == 'EmptyResponse'
assert spec.request.package == 'std_srvs'
assert spec.response.package == 'std_srvs'
for f in [spec.request.names, spec.request.types, spec.response.names, spec.response.types]:
assert [] == f
spec = load_srv_by_type(msg_context, 'test_ros/AddTwoInts', search_path)
assert msg_context.is_registered('test_ros/AddTwoIntsRequest')
assert msg_context.is_registered('test_ros/AddTwoIntsResponse')
assert msg_context.get_registered('test_ros/AddTwoIntsRequest') == spec.request
assert msg_context.get_registered('test_ros/AddTwoIntsResponse') == spec.response
assert spec.request.types == ['int64', 'int64'], spec.request.types
assert spec.request.names == ['a', 'b'], spec.request.names
assert spec.response.types == ['int64'], spec.response.types
assert spec.response.names == ['sum'], spec.response.names
# test invalid search path
try:
load_srv_by_type(msg_context, 'test_ros/AddTwoInts', [std_srvs_dir])
assert False, "should have raised"
except ValueError:
pass
# test not found
try:
load_srv_by_type(msg_context, 'test_ros/Fake', search_path)
assert False, "should have raised"
except MsgNotFound:
pass
| gpl-3.0 | -7,631,231,549,216,794,000 | 41.726608 | 141 | 0.64479 | false |
ARCCN/elt | server/pox/openflow/libopenflow_01.py | 1 | 126604 | # Copyright 2011,2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
# This file was originally based on pyopenflow.py from NOX, which was
# autogenerated from openflow.h via a program by KK Yap. It has been
# substantially altered since then.
from __future__ import print_function
import struct
import operator
import collections
from itertools import chain, repeat
import sys
from pox.lib.packet.packet_base import packet_base
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.vlan import vlan
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.udp import udp
from pox.lib.packet.tcp import tcp
from pox.lib.packet.icmp import icmp
from pox.lib.packet.arp import arp
from pox.lib.addresses import *
from pox.lib.util import assert_type
from pox.lib.util import initHelper
from pox.lib.util import hexdump
EMPTY_ETH = EthAddr(None)
# ----------------------------------------------------------------------
# XID Management
# ----------------------------------------------------------------------
MAX_XID = 0x7fFFffFF
def XIDGenerator (start = 1, stop = MAX_XID):
i = start
while True:
yield i
i += 1
if i > stop:
i = start
def xid_generator (start = 1, stop = MAX_XID):
return XIDGenerator(start, stop).next
def user_xid_generator ():
return xid_generator(0x80000000, 0xffFFffFF)
generate_xid = xid_generator()
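# Illustrative usage (not part of the original file): the xid generators are
# plain callables returning successive transaction ids, e.g.
#   next_xid = xid_generator()       # next_xid() -> 1, 2, 3, ... wraps at MAX_XID
#   generate_xid()                   # module-wide default counter
#   user_xid = user_xid_generator()  # user_xid() draws from the high, reserved range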
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Packing / Unpacking
# ----------------------------------------------------------------------
_PAD = b'\x00'
_PAD2 = _PAD*2
_PAD3 = _PAD*3
_PAD4 = _PAD*4
_PAD6 = _PAD*6
class UnderrunError (RuntimeError):
"""
Raised when one tries to unpack more data than is available
"""
pass
def _read (data, offset, length):
if (len(data)-offset) < length:
raise UnderrunError("wanted %s bytes but only have %s"
% (length, len(data)-offset))
return (offset+length, data[offset:offset+length])
def _unpack (fmt, data, offset):
size = struct.calcsize(fmt)
if (len(data)-offset) < size: raise UnderrunError()
return (offset+size, struct.unpack_from(fmt, data, offset))
def _skip (data, offset, num):
offset += num
if offset > len(data): raise UnderrunError()
return offset
def _unpad (data, offset, num):
(offset, o) = _read(data, offset, num)
assert len(o.replace("\x00", "")) == 0
return offset
def _readzs (data, offset, length):
(offset, d) = _read(data, offset, length)
d = d.split("\x00", 1)
#if len(d[1].replace("\x00", "")) > 0:
# raise RuntimeError("Non-zero string padding")
assert True if (len(d) == 1) else (len(d[1].replace("\x00", "")) == 0)
return (offset, d[0])
def _readether (data, offset):
(offset, d) = _read(data, offset, 6)
return (offset, EthAddr(d))
def _readip (data, offset, networkOrder = True):
(offset, d) = _read(data, offset, 4)
return (offset, IPAddr(d, networkOrder = networkOrder))
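# Illustrative note (not part of the original file): all of the readers above
# share the (new_offset, value) convention, so calls chain naturally while
# walking a buffer, e.g.
#   offset, (version, msg_type) = _unpack("!BB", data, offset)
#   offset, dl_src = _readether(data, offset)
#   offset, nw_src = _readip(data, offset)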
# ----------------------------------------------------------------------
def _format_body (body, prefix):
if hasattr(body, 'show'):
#TODO: Check this (spacing may well be wrong)
return body.show(prefix + ' ')
else:
return prefix + hexdump(body).replace("\n", "\n" + prefix)
TABLE_ALL = 0xff
TABLE_EMERGENCY = 0xfe
class _ofp_meta (type):
"""
Metaclass for ofp messages/structures
This takes care of making len() work as desired.
"""
def __len__ (cls):
try:
return cls.__len__()
except:
return cls._MIN_LENGTH
class ofp_base (object):
"""
Base class for OpenFlow messages/structures
You should implement a __len__ method. If your length is fixed, it
should be a static method. If your length is not fixed, you should
implement a __len__ instance method and set a class level _MIN_LENGTH
attribute to your minimum length.
"""
__metaclass__ = _ofp_meta
def _assert (self):
r = self._validate()
if r is not None:
raise RuntimeError(r)
return False # Never reached
return True
def _validate (self):
return None
def __ne__ (self, other):
return not self.__eq__(other)
@classmethod
def unpack_new (cls, raw, offset=0):
"""
Unpacks wire format into the appropriate message object.
Returns newoffset,object
"""
o = cls()
r,length = o.unpack(raw, offset)
assert (r-offset) == length, o
return (r, o)
# ----------------------------------------------------------------------
# Class decorators
# ----------------------------------------------------------------------
_message_type_to_class = {}
_message_class_to_types = {} # Do we need this?
#_message_type_to_name = {}
#_message_name_to_type = {}
ofp_type_rev_map = {}
ofp_type_map = {}
def openflow_message (ofp_type, type_val, reply_to=None,
request_for=None, switch=False, controller=False):
#TODO: Reply stuff, switch/controller stuff
#_message_name_to_type[ofp_type] = type_val
#_message_type_to_name[type_val] = ofp_type
ofp_type_rev_map[ofp_type] = type_val
ofp_type_map[type_val] = ofp_type
def f (c):
c.header_type = type_val
c._from_switch = switch
c._from_controller = controller
_message_type_to_class[type_val] = c
_message_class_to_types.setdefault(c, set()).add(type_val)
return c
return f
def openflow_sc_message (*args, **kw):
return openflow_message(switch=True, controller=True, *args, **kw)
def openflow_c_message (*args, **kw):
return openflow_message(controller=True, *args, **kw)
def openflow_s_message (*args, **kw):
return openflow_message(switch=True, *args, **kw)
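# Illustrative sketch (hypothetical class name and type value, not part of the
# original file) of how the decorators above are applied to concrete message
# types later in this file:
#   @openflow_s_message("OFPT_EXAMPLE", 99)
#   class ofp_example (ofp_base):
#     ...
# which records the class in _message_type_to_class and the name/value pair in
# ofp_type_map / ofp_type_rev_map.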
_queue_prop_type_to_class = {}
_queue_prop_class_to_types = {} # Do we need this?
ofp_queue_prop_type_rev_map = {}
ofp_queue_prop_type_map = {}
def openflow_queue_prop (queue_prop_type, type_val):
ofp_queue_prop_type_rev_map[queue_prop_type] = type_val
ofp_queue_prop_type_map[type_val] = queue_prop_type
def f (c):
c.property = type_val
_queue_prop_type_to_class[type_val] = c
_queue_prop_class_to_types.setdefault(c, set()).add(type_val)
return c
return f
_action_type_to_class = {}
_action_class_to_types = {} # Do we need this?
ofp_action_type_rev_map = {}
ofp_action_type_map = {}
def openflow_action (action_type, type_val):
ofp_action_type_rev_map[action_type] = type_val
ofp_action_type_map[type_val] = action_type
def f (c):
c.type = type_val
_action_type_to_class[type_val] = c
_action_class_to_types.setdefault(c, set()).add(type_val)
return c
return f
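# Illustrative sketch (not part of the original file): action classes register
# the same way; for example, OpenFlow 1.0's output action uses type value 0:
#   @openflow_action('OFPAT_OUTPUT', 0)
#   class ofp_action_output (...):
#     ...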
class _StatsClassInfo (object):
__slots__ = 'request reply reply_is_list'.split()
def __init__ (self, **kw):
self.request = None
self.reply = None
self.reply_is_list = False
initHelper(self, kw)
def __str__ (self):
r = str(self.reply)
if self.reply_is_list: r = "[%s]" % (r,)
return "request:%s reply:%s" % (self.request, r)
_stats_type_to_class_info = {}
_stats_class_to_type = {}
ofp_stats_type_rev_map = {}
ofp_stats_type_map = {}
def openflow_stats_request (stats_type, type_val=None, is_list=None,
is_reply = False):
if type_val is not None:
ofp_stats_type_rev_map[stats_type] = type_val
ofp_stats_type_map[type_val] = stats_type
else:
type_val = ofp_stats_type_rev_map.get(stats_type)
def f (c):
if type_val is not None:
ti = _stats_type_to_class_info.get(stats_type)
if ti is not None:
_stats_type_to_class_info[type_val] = ti
del _stats_type_to_class_info[stats_type]
else:
ti = _stats_type_to_class_info.setdefault(type_val,
_StatsClassInfo())
_stats_class_to_type[c] = type_val
else:
ti = _stats_type_to_class_info.setdefault(stats_type,
_StatsClassInfo())
if is_list is not None:
ti.reply_is_list = is_list
if is_reply:
ti.reply = c
else:
ti.request = c
if type_val is not None:
if ti.reply and issubclass(ti.reply, ofp_stats_body_base):
ti.reply._type = type_val
if ti.request and issubclass(ti.request, ofp_stats_body_base):
ti.request._type = type_val
return c
return f
def openflow_stats_reply (stats_type, type_val=None, is_list=None,
is_reply = True):
return openflow_stats_request(stats_type, type_val, is_list, is_reply)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Constants, etc.
# ----------------------------------------------------------------------
ofp_error_type_rev_map = {
'OFPET_HELLO_FAILED' : 0,
'OFPET_BAD_REQUEST' : 1,
'OFPET_BAD_ACTION' : 2,
'OFPET_FLOW_MOD_FAILED' : 3,
'OFPET_PORT_MOD_FAILED' : 4,
'OFPET_QUEUE_OP_FAILED' : 5,
}
ofp_hello_failed_code_rev_map = {
'OFPHFC_INCOMPATIBLE' : 0,
'OFPHFC_EPERM' : 1,
}
ofp_bad_request_code_rev_map = {
'OFPBRC_BAD_VERSION' : 0,
'OFPBRC_BAD_TYPE' : 1,
'OFPBRC_BAD_STAT' : 2,
'OFPBRC_BAD_VENDOR' : 3,
'OFPBRC_BAD_SUBTYPE' : 4,
'OFPBRC_EPERM' : 5,
'OFPBRC_BAD_LEN' : 6,
'OFPBRC_BUFFER_EMPTY' : 7,
'OFPBRC_BUFFER_UNKNOWN' : 8,
}
ofp_bad_action_code_rev_map = {
'OFPBAC_BAD_TYPE' : 0,
'OFPBAC_BAD_LEN' : 1,
'OFPBAC_BAD_VENDOR' : 2,
'OFPBAC_BAD_VENDOR_TYPE' : 3,
'OFPBAC_BAD_OUT_PORT' : 4,
'OFPBAC_BAD_ARGUMENT' : 5,
'OFPBAC_EPERM' : 6,
'OFPBAC_TOO_MANY' : 7,
'OFPBAC_BAD_QUEUE' : 8,
}
ofp_flow_mod_failed_code_rev_map = {
'OFPFMFC_ALL_TABLES_FULL' : 0,
'OFPFMFC_OVERLAP' : 1,
'OFPFMFC_EPERM' : 2,
'OFPFMFC_BAD_EMERG_TIMEOUT' : 3,
'OFPFMFC_BAD_COMMAND' : 4,
'OFPFMFC_UNSUPPORTED' : 5,
}
ofp_port_mod_failed_code_rev_map = {
'OFPPMFC_BAD_PORT' : 0,
'OFPPMFC_BAD_HW_ADDR' : 1,
}
ofp_queue_op_failed_code_rev_map = {
'OFPQOFC_BAD_PORT' : 0,
'OFPQOFC_BAD_QUEUE' : 1,
'OFPQOFC_EPERM' : 2,
}
ofp_port_config_rev_map = {
'OFPPC_PORT_DOWN' : 1,
'OFPPC_NO_STP' : 2,
'OFPPC_NO_RECV' : 4,
'OFPPC_NO_RECV_STP' : 8,
'OFPPC_NO_FLOOD' : 16,
'OFPPC_NO_FWD' : 32,
'OFPPC_NO_PACKET_IN' : 64,
}
ofp_port_state_rev_map = {
'OFPPS_STP_LISTEN' : 0,
'OFPPS_LINK_DOWN' : 1,
'OFPPS_STP_LEARN' : 256,
'OFPPS_STP_FORWARD' : 512,
'OFPPS_STP_BLOCK' : 768,
}
OFPPS_STP_MASK = 768
ofp_port_features_rev_map = {
'OFPPF_10MB_HD' : 1,
'OFPPF_10MB_FD' : 2,
'OFPPF_100MB_HD' : 4,
'OFPPF_100MB_FD' : 8,
'OFPPF_1GB_HD' : 16,
'OFPPF_1GB_FD' : 32,
'OFPPF_10GB_FD' : 64,
'OFPPF_COPPER' : 128,
'OFPPF_FIBER' : 256,
'OFPPF_AUTONEG' : 512,
'OFPPF_PAUSE' : 1024,
'OFPPF_PAUSE_ASYM' : 2048,
}
ofp_queue_properties_rev_map = {
'OFPQT_MIN_RATE' : 0,
}
OFPQT_NONE = 0
ofp_capabilities_rev_map = {
'OFPC_FLOW_STATS' : 1,
'OFPC_TABLE_STATS' : 2,
'OFPC_PORT_STATS' : 4,
'OFPC_STP' : 8,
'OFPC_RESERVED' : 16,
'OFPC_IP_REASM' : 32,
'OFPC_QUEUE_STATS' : 64,
'OFPC_ARP_MATCH_IP' : 128,
}
ofp_config_flags_rev_map = {
'OFPC_FRAG_NORMAL' : 0,
'OFPC_FRAG_DROP' : 1,
'OFPC_FRAG_REASM' : 2,
'OFPC_FRAG_MASK' : 3,
}
ofp_flow_mod_command_rev_map = {
'OFPFC_ADD' : 0,
'OFPFC_MODIFY' : 1,
'OFPFC_MODIFY_STRICT' : 2,
'OFPFC_DELETE' : 3,
'OFPFC_DELETE_STRICT' : 4,
}
ofp_flow_mod_flags_rev_map = {
'OFPFF_SEND_FLOW_REM' : 1,
'OFPFF_CHECK_OVERLAP' : 2,
'OFPFF_EMERG' : 4,
}
ofp_stats_reply_flags_rev_map = {
'OFPSF_REPLY_MORE' : 1,
}
ofp_packet_in_reason_rev_map = {
'OFPR_NO_MATCH' : 0,
'OFPR_ACTION' : 1,
}
ofp_flow_removed_reason_rev_map = {
'OFPRR_IDLE_TIMEOUT' : 0,
'OFPRR_HARD_TIMEOUT' : 1,
'OFPRR_DELETE' : 2,
}
ofp_port_reason_rev_map = {
'OFPPR_ADD' : 0,
'OFPPR_DELETE' : 1,
'OFPPR_MODIFY' : 2,
}
ofp_port_rev_map = {
'OFPP_MAX' : 65280,
'OFPP_IN_PORT' : 65528,
'OFPP_TABLE' : 65529,
'OFPP_NORMAL' : 65530,
'OFPP_FLOOD' : 65531,
'OFPP_ALL' : 65532,
'OFPP_CONTROLLER' : 65533,
'OFPP_LOCAL' : 65534,
'OFPP_NONE' : 65535,
}
ofp_flow_wildcards_rev_map = {
'OFPFW_IN_PORT' : 1,
'OFPFW_DL_VLAN' : 2,
'OFPFW_DL_SRC' : 4,
'OFPFW_DL_DST' : 8,
'OFPFW_DL_TYPE' : 16,
'OFPFW_NW_PROTO' : 32,
'OFPFW_TP_SRC' : 64,
'OFPFW_TP_DST' : 128,
'OFPFW_DL_VLAN_PCP' : 1048576,
'OFPFW_NW_TOS' : 1<<21,
}
OFPFW_NW_DST_BITS = 6
OFPFW_NW_SRC_BITS = 6
OFPFW_NW_SRC_SHIFT = 8
OFPFW_NW_DST_SHIFT = 14
OFPFW_NW_SRC_ALL = 8192
OFPFW_NW_SRC_MASK = 16128
OFPFW_NW_DST_ALL = 524288
OFPFW_NW_DST_MASK = 1032192
# Note: The packet handling methods (especially ofp_match.from_packet) need
# to handle every flag that is set in this glob-all mask; otherwise packets
# will not be matched as they should be.
OFPFW_ALL = ((1 << 22) - 1)
NO_BUFFER = 4294967295
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Structure definitions
# ----------------------------------------------------------------------
#1. Openflow Header
class ofp_header (ofp_base):
_MIN_LENGTH = 8
def __init__ (self, **kw):
self.version = OFP_VERSION
#self.header_type = None # Set via class decorator
self._xid = None
if 'header_type' in kw:
self.header_type = kw.pop('header_type')
initHelper(self, kw)
@property
def xid (self):
if self._xid is None:
self._xid = generate_xid()
return self._xid
@xid.setter
def xid (self, val):
self._xid = val
def _validate (self):
if self.header_type not in ofp_type_map:
return "type is not a known message type"
return None
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!BBHL", self.version, self.header_type,
len(self), self.xid)
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
return offset,length
def _unpack_header (self, raw, offset):
offset,(self.version, self.header_type, length, self.xid) = \
_unpack("!BBHL", raw, offset)
return offset,length
def __eq__ (self, other):
if type(self) != type(other): return False
if self.version != other.version: return False
if self.header_type != other.header_type: return False
if len(self) != len(other): return False
if self.xid != other.xid: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'version: ' + str(self.version) + '\n'
outstr += prefix + 'type: ' + str(self.header_type)# + '\n'
outstr += " (" + ofp_type_map.get(self.header_type, "Unknown") + ")\n"
try:
outstr += prefix + 'length: ' + str(len(self)) + '\n'
except:
pass
outstr += prefix + 'xid: ' + str(self.xid) + '\n'
return outstr
def __str__ (self):
return self.__class__.__name__ + "\n " + self.show(' ').strip()
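# Example (illustrative): every message derived from ofp_header gets a lazy
# transaction id; the first read of .xid calls generate_xid().  It can also
# be pinned explicitly, e.g. to correlate a reply with its request:
#
#   msg = ofp_flow_mod()   # defined further below
#   msg.xid                # auto-generated on first access
#   msg.xid = 42           # or set it yourself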
class ofp_stats_body_base (ofp_base):
"""
Base class for stats bodies
"""
# Stats bodies don't actually have a type field in OpenFlow --
# the type information is in the request or reply. It's really
# convenient, though, so we add it. Note that you generally
# don't need to set this yourself -- the openflow_stats_XXX
# decorator will do it for you.
_type = None
"""
def unpack (self, data, offset=0, avail=None):
"""
class ofp_action_base (ofp_base):
"""
Base class for actions
This is sort of the equivalent of ofp_action_header in the spec.
However, ofp_action_header as the spec defines it is not super
useful for us, as it has the padding in it.
"""
type = None
class ofp_queue_prop_base (ofp_base):
"""
Base class for queue properties
This is sort of the equivalent of ofp_queue_prop_header in the spec.
However, ofp_queue_prop_header as the spec defines it is not super
useful for us, as it has the padding in it.
"""
property = None
#2. Common Structures
##2.1 Port Structures
class ofp_phy_port (ofp_base):
def __init__ (self, **kw):
self.port_no = 0
self.hw_addr = EMPTY_ETH
self.name = ""
self.config = 0
self.state = 0
self.curr = 0
self.advertised = 0
self.supported = 0
self.peer = 0
initHelper(self, kw)
def enable_config (self, mask):
"""
Turn on selected config bits
"""
return self.set_config(0xffFFffFF, mask)
def disable_config (self, mask):
"""
Turn off selected config bits
"""
return self.set_config(0, mask)
def set_config (self, config, mask):
"""
Updates the specified config bits
Returns which bits were changed
"""
old = self.config
self.config &= ~mask
    self.config |= config & mask
return old ^ self.config
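  # Example (illustrative; 16 is OFPPC_NO_FLOOD per ofp_port_config_rev_map
  # above):
  #
  #   p = ofp_phy_port(port_no=1)
  #   changed = p.enable_config(16)   # set OFPPC_NO_FLOOD
  #   p.disable_config(16)            # and clear it again
  #
  # Both helpers return the bits that actually changed.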
def __str__ (self):
return "%s:%i" % (self.name, self.port_no)
def _validate (self):
if isinstance(self.hw_addr, bytes) and len(self.hw_addr) == 6:
pass
elif not isinstance(self.hw_addr, EthAddr):
return "hw_addr is not a valid format"
if len(self.name) > OFP_MAX_PORT_NAME_LEN:
return "name is too long"
return None
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!H", self.port_no)
packed += (self.hw_addr if isinstance(self.hw_addr, bytes) else
self.hw_addr.toRaw())
packed += self.name.ljust(OFP_MAX_PORT_NAME_LEN,'\0')
packed += struct.pack("!LLLLLL", self.config, self.state, self.curr,
self.advertised, self.supported, self.peer)
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.port_no,) = _unpack("!H", raw, offset)
offset,self.hw_addr = _readether(raw, offset)
offset,self.name = _readzs(raw, offset, OFP_MAX_PORT_NAME_LEN)
offset,(self.config, self.state, self.curr, self.advertised,
self.supported, self.peer) = _unpack("!LLLLLL", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 48
def __eq__ (self, other):
if type(self) != type(other): return False
if self.port_no != other.port_no: return False
if self.hw_addr != other.hw_addr: return False
if self.name != other.name: return False
if self.config != other.config: return False
if self.state != other.state: return False
if self.curr != other.curr: return False
if self.advertised != other.advertised: return False
if self.supported != other.supported: return False
if self.peer != other.peer: return False
return True
def __cmp__ (self, other):
if type(other) != type(self): return id(self)-id(other)
if self.port_no < other.port_no: return -1
if self.port_no > other.port_no: return 1
if self == other: return 0
return id(self)-id(other)
def __hash__(self, *args, **kwargs):
return hash(self.port_no) ^ hash(self.hw_addr) ^ \
hash(self.name) ^ hash(self.config) ^ \
hash(self.state) ^ hash(self.curr) ^ \
           hash(self.advertised) ^ hash(self.supported) ^ \
hash(self.peer)
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
outstr += prefix + 'hw_addr: ' + str(EthAddr(self.hw_addr)) + '\n'
outstr += prefix + 'name: ' + str(self.name) + '\n'
outstr += prefix + 'config: ' + str(self.config) + '\n'
outstr += prefix + 'state: ' + str(self.state) + '\n'
outstr += prefix + 'curr: ' + str(self.curr) + '\n'
outstr += prefix + 'advertised: ' + str(self.advertised) + '\n'
outstr += prefix + 'supported: ' + str(self.supported) + '\n'
outstr += prefix + 'peer: ' + str(self.peer) + '\n'
return outstr
def __repr__(self):
return self.show()
##2.2 Queue Structures
class ofp_packet_queue (ofp_base):
_MIN_LENGTH = 8
def __init__ (self, **kw):
self.queue_id = 0
self.properties = []
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!LH", self.queue_id, len(self))
packed += _PAD2 # Pad
for i in self.properties:
packed += i.pack()
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.queue_id, length) = _unpack("!LH", raw, offset)
offset = _skip(raw, offset, 2)
length -= (4 + 2 + 2)
offset,self.properties = _unpack_queue_props(raw, length, offset)
assert offset - _offset == len(self)
return offset
def __len__ (self):
l = 8
for i in self.properties:
l += len(i)
return l
def __eq__ (self, other):
if type(self) != type(other): return False
if self.queue_id != other.queue_id: return False
if len(self) != len(other): return False
if self.properties != other.properties: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'queue_id: ' + str(self.queue_id) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'properties: \n'
for obj in self.properties:
outstr += obj.show(prefix + ' ')
return outstr
class ofp_queue_prop_generic (ofp_queue_prop_base):
_MIN_LENGTH = 8
def __init__ (self, **kw):
self.property = None # Purposely bad
self.data = _PAD4
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HH", self.property, len(self))
packed += self.data
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.property, length) = _unpack("!HH", raw, offset)
offset,self.data = _read(raw, offset, length-4)
assert offset - _offset == len(self)
return offset
  def __len__ (self):
    return 4 + len(self.data)
def __eq__ (self, other):
if type(self) != type(other): return False
if self.property != other.property: return False
if len(self) != len(other): return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'property: ' + str(self.property) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
return outstr
@openflow_queue_prop('OFPQT_NONE', 0)
class ofp_queue_prop_none (ofp_queue_prop_generic):
pass
@openflow_queue_prop('OFPQT_MIN_RATE', 1)
class ofp_queue_prop_min_rate (ofp_queue_prop_base):
def __init__ (self, **kw):
self.rate = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HH", self.property, len(self))
packed += _PAD4
packed += struct.pack("!H", self.rate)
packed += _PAD6
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.property, length, pad) = \
_unpack("!HHL", raw, offset)
offset,(self.rate,) = _unpack("!H", raw, offset)
offset = _skip(raw, offset, 6)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 16
def __eq__ (self, other):
if type(self) != type(other): return False
if self.property != other.property: return False
if self.rate != other.rate: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'property: ' + str(self.property) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'rate: ' + str(self.rate) + '\n'
return outstr
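# Example (illustrative): a queue description carrying one min-rate
# property; per the OpenFlow 1.0 spec the rate is in tenths of a percent:
#
#   q = ofp_packet_queue(queue_id=1,
#                        properties=[ofp_queue_prop_min_rate(rate=500)])
#   raw = q.pack()   # 8 bytes of queue header + 16 bytes of property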
##2.3 Flow Match Structures
class ofp_match (ofp_base):
adjust_wildcards = True # Set to true to "fix" outgoing wildcards
@classmethod
def from_packet (cls, packet, in_port = None):
"""
Constructs an exact match for the given packet
@param in_port The switch port the packet arrived on if you want
the resulting match to have its in_port set.
If "packet" is a packet_in, this is ignored.
@param packet A pox.packet.ethernet instance or a packet_in
"""
if isinstance(packet, ofp_packet_in):
in_port = packet.in_port
packet = ethernet(packet.data)
assert assert_type("packet", packet, ethernet, none_ok=False)
match = cls()
if in_port is not None:
match.in_port = in_port
match.dl_src = packet.src
match.dl_dst = packet.dst
match.dl_type = packet.type
p = packet.next
if isinstance(p, vlan):
match.dl_type = p.eth_type
match.dl_vlan = p.id
match.dl_vlan_pcp = p.pcp
p = p.next
else:
match.dl_vlan = OFP_VLAN_NONE
match.dl_vlan_pcp = 0
if isinstance(p, ipv4):
match.nw_src = p.srcip
match.nw_dst = p.dstip
match.nw_proto = p.protocol
match.nw_tos = p.tos
p = p.next
if isinstance(p, udp) or isinstance(p, tcp):
match.tp_src = p.srcport
match.tp_dst = p.dstport
elif isinstance(p, icmp):
match.tp_src = p.type
match.tp_dst = p.code
elif isinstance(p, arp):
if p.opcode <= 255:
match.nw_proto = p.opcode
match.nw_src = p.protosrc
match.nw_dst = p.protodst
return match
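  # Example (illustrative; assumes "eth" is a pox.packet.ethernet instance
  # parsed from a received frame):
  #
  #   m = ofp_match.from_packet(eth, in_port=1)
  #
  # yields an exact match for that packet; it can then be relaxed with
  # optimize() or by assigning None to a field to wildcard it again.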
def optimize (self):
"""
Reduce the number of wildcards used.
"""
#TODO: Fix for optional cases (i.e. ARP)
if self.dl_vlan == OFP_VLAN_NONE:
self.dl_vlan_pcp = 0
#TODO: What do we do when something is "behind" a wildcard?
# e.g., does nw_src count if dl_type is wild or only if it's 0x0800?
if self.dl_type is not None:
if self.dl_type != 0x0800:
# Not IP
if self.dl_type != 0x0806:
# Not IP or ARP
self.nw_src = IPAddr(0)
self.nw_dst = IPAddr(0)
self.nw_proto = 0
self.nw_tos = 0
self.tp_src = 0
self.tp_dst = 0
else:
# It's IP
if (self.nw_proto != 6 and self.nw_proto != 17
and self.nw_proto != 1):
# Not TCP, UDP, or ICMP
self.tp_src = 0
self.tp_dst = 0
self.wildcards = self._normalize_wildcards(self.wildcards)
return self # for chaining
def clone (self):
n = ofp_match()
for k,v in ofp_match_data.iteritems():
setattr(n, '_' + k, getattr(self, '_' + k))
n.wildcards = self.wildcards
return n
def flip (self):
"""
Return version of this match with src and dst fields swapped
"""
reversed = self.clone()
for field in ('dl','nw','tp'):
setattr(reversed, field + '_src', getattr(self, field + '_dst'))
setattr(reversed, field + '_dst', getattr(self, field + '_src'))
return reversed
def __init__ (self, **kw):
for k,v in ofp_match_data.iteritems():
setattr(self, '_' + k, v[0])
self.wildcards = self._normalize_wildcards(OFPFW_ALL)
# This is basically initHelper(), but tweaked slightly since this
# class does some magic of its own.
for k,v in kw.iteritems():
if not hasattr(self, '_'+k):
raise TypeError(self.__class__.__name__ + " constructor got "
+ "unexpected keyword argument '" + k + "'")
setattr(self, k, v)
def get_nw_dst (self):
if (self.wildcards & OFPFW_NW_DST_ALL) == OFPFW_NW_DST_ALL:
return (None, 0)
w = (self.wildcards & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT
return (self._nw_dst,32-w if w <= 32 else 0)
def get_nw_src (self):
if (self.wildcards & OFPFW_NW_SRC_ALL) == OFPFW_NW_SRC_ALL:
return (None, 0)
w = (self.wildcards & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT
return (self._nw_src,32-w if w <= 32 else 0)
def set_nw_dst (self, *args, **kw):
a = self._make_addr(*args, **kw)
if a == None:
self._nw_dst = ofp_match_data['nw_dst'][0]
self.wildcards &= ~OFPFW_NW_DST_MASK
self.wildcards |= ofp_match_data['nw_dst'][1]
return
self._nw_dst = a[0]
self.wildcards &= ~OFPFW_NW_DST_MASK
self.wildcards |= ((32-a[1]) << OFPFW_NW_DST_SHIFT)
def set_nw_src (self, *args, **kw):
a = self._make_addr(*args, **kw)
if a == None:
self._nw_src = ofp_match_data['nw_src'][0]
self.wildcards &= ~OFPFW_NW_SRC_MASK
self.wildcards |= ofp_match_data['nw_src'][1]
return
self._nw_src = a[0]
self.wildcards &= ~OFPFW_NW_SRC_MASK
self.wildcards |= ((32-a[1]) << OFPFW_NW_SRC_SHIFT)
def _make_addr (self, ipOrIPAndBits, bits=None):
if ipOrIPAndBits == None: return None
b = None
if type(ipOrIPAndBits) is tuple:
ip = ipOrIPAndBits[0]
b = int(ipOrIPAndBits[1])
if (type(ipOrIPAndBits) is str) and (len(ipOrIPAndBits) != 4):
if ipOrIPAndBits.find('/') != -1:
#s = ipOrIPAndBits.split('/')
s = parse_cidr(ipOrIPAndBits, infer=False)
ip = s[0]
b = int(s[1]) if b is None else b
else:
ip = ipOrIPAndBits
b = 32 if b is None else b
else:
ip = ipOrIPAndBits
b = 32 if b is None else b
if type(ip) is str:
ip = IPAddr(ip)
if bits != None: b = bits
if b > 32: b = 32
elif b < 0: b = 0
return (ip, b)
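  # Example (illustrative): nw_src/nw_dst accept CIDR-style values, which
  # are stored via the wildcard bits rather than as separate masks:
  #
  #   m = ofp_match()
  #   m.nw_src = "10.0.0.0/8"    # or m.set_nw_src(IPAddr("10.0.0.0"), 8)
  #   m.get_nw_src()             # -> (IPAddr('10.0.0.0'), 8)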
def __setattr__ (self, name, value):
if name not in ofp_match_data:
self.__dict__[name] = value
return
if name == 'nw_dst' or name == 'nw_src':
# Special handling
getattr(self, 'set_' + name)(value)
return value
if value is None:
setattr(self, '_' + name, ofp_match_data[name][0])
self.wildcards |= ofp_match_data[name][1]
else:
setattr(self, '_' + name, value)
self.wildcards = self.wildcards & ~ofp_match_data[name][1]
return value
def __getattr__ (self, name):
if name in ofp_match_data:
if ( (self.wildcards & ofp_match_data[name][1])
== ofp_match_data[name][1] ):
# It's wildcarded -- always return None
return None
if name == 'nw_dst' or name == 'nw_src':
# Special handling
return getattr(self, 'get_' + name)()[0]
return self.__dict__['_' + name]
raise AttributeError("attribute not found: "+name)
def _validate (self):
# TODO
return None
def pack (self, flow_mod=False):
assert self._assert()
packed = b""
if self.adjust_wildcards and flow_mod:
wc = self._wire_wildcards(self.wildcards)
else:
wc = self.wildcards
packed += struct.pack("!LH", wc, self.in_port or 0)
if self.dl_src == None:
packed += EMPTY_ETH.toRaw()
elif type(self.dl_src) is bytes:
packed += self.dl_src
else:
packed += self.dl_src.toRaw()
if self.dl_dst == None:
packed += EMPTY_ETH.toRaw()
elif type(self.dl_dst) is bytes:
packed += self.dl_dst
else:
packed += self.dl_dst.toRaw()
def check_ip(val):
return (val or 0) if self.dl_type == 0x0800 else 0
def check_ip_or_arp(val):
return (val or 0) if self.dl_type == 0x0800 \
or self.dl_type == 0x0806 else 0
def check_tp(val):
return (val or 0) if self.dl_type == 0x0800 \
and self.nw_proto in (1,6,17) else 0
packed += struct.pack("!HB", self.dl_vlan or 0, self.dl_vlan_pcp or 0)
packed += _PAD # Hardcode padding
packed += struct.pack("!HBB", self.dl_type or 0,
check_ip(self.nw_tos), check_ip_or_arp(self.nw_proto))
packed += _PAD2 # Hardcode padding
def fix (addr):
if addr is None: return 0
if type(addr) is int: return addr & 0xffFFffFF
if type(addr) is long: return addr & 0xffFFffFF
return addr.toUnsigned()
packed += struct.pack("!LLHH", check_ip_or_arp(fix(self.nw_src)),
check_ip_or_arp(fix(self.nw_dst)),
check_tp(self.tp_src), check_tp(self.tp_dst))
return packed
def _normalize_wildcards (self, wildcards):
"""
nw_src and nw_dst values greater than 32 mean the same thing as 32.
We normalize them here just to be clean and so that comparisons act
as you'd want them to.
"""
if ((wildcards & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT) > 32:
wildcards &= ~OFPFW_NW_SRC_MASK
wildcards |= (32 << OFPFW_NW_SRC_SHIFT)
if ((wildcards & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT) > 32:
wildcards &= ~OFPFW_NW_DST_MASK
wildcards |= (32 << OFPFW_NW_DST_SHIFT)
return wildcards
def _wire_wildcards(self, wildcards):
"""
Normalize the wildcard bits to the openflow wire representation.
Note this atrocity from the OF1.1 spec:
Protocol-specific fields within ofp_match will be ignored within
a single table when the corresponding protocol is not specified in the
match. The IP header and transport header fields
will be ignored unless the Ethertype is specified as either IPv4 or
ARP. The tp_src and tp_dst fields will be ignored unless the network
      protocol specified is TCP, UDP or SCTP. Fields that are ignored
don't need to be wildcarded and should be set to 0.
"""
if self.dl_type == 0x0800:
# IP
if self.nw_proto not in (1,6,17):
# not TCP/UDP/ICMP -> Clear TP wildcards for the wire
return wildcards & ~(OFPFW_TP_SRC | OFPFW_TP_DST)
else:
return wildcards
elif self.dl_type == 0x0806:
# ARP: clear NW_TOS / TP wildcards for the wire
return wildcards & ~( OFPFW_NW_TOS | OFPFW_TP_SRC | OFPFW_TP_DST)
else:
# not even IP. Clear NW/TP wildcards for the wire
return wildcards & ~( OFPFW_NW_TOS | OFPFW_NW_PROTO
| OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK
| OFPFW_TP_SRC | OFPFW_TP_DST)
def _unwire_wildcards(self, wildcards):
"""
Normalize the wildcard bits from the openflow wire representation.
Note this atrocity from the OF1.1 spec:
Protocol-specific fields within ofp_match will be ignored within
a single table when the corresponding protocol is not specified in the
match. The IP header and transport header fields
will be ignored unless the Ethertype is specified as either IPv4 or
ARP. The tp_src and tp_dst fields will be ignored unless the network
      protocol specified is TCP, UDP or SCTP. Fields that are ignored
don't need to be wildcarded and should be set to 0.
"""
if self._dl_type == 0x0800:
# IP
if self._nw_proto not in (1,6,17):
# not TCP/UDP/ICMP -> Set TP wildcards for the object
return wildcards | (OFPFW_TP_SRC | OFPFW_TP_DST)
else:
return wildcards
elif self._dl_type == 0x0806:
# ARP: Set NW_TOS / TP wildcards for the object
return wildcards | ( OFPFW_NW_TOS | OFPFW_TP_SRC | OFPFW_TP_DST)
else:
# not even IP. Set NW/TP wildcards for the object
return wildcards | ( OFPFW_NW_TOS | OFPFW_NW_PROTO
| OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK
| OFPFW_TP_SRC | OFPFW_TP_DST)
@property
def is_wildcarded (self):
return self.wildcards & OFPFW_ALL != 0
@property
def is_exact (self):
return not self.is_wildcarded
def unpack (self, raw, offset=0, flow_mod=False):
_offset = offset
offset,(wildcards, self._in_port) = _unpack("!LH",raw, offset)
offset,self._dl_src = _readether(raw, offset)
offset,self._dl_dst = _readether(raw, offset)
offset,(self._dl_vlan, self._dl_vlan_pcp) = \
_unpack("!HB", raw, offset)
offset = _skip(raw, offset, 1)
offset,(self._dl_type, self._nw_tos, self._nw_proto) = \
_unpack("!HBB", raw, offset)
offset = _skip(raw, offset, 2)
offset,self._nw_src = _readip(raw, offset)
offset,self._nw_dst = _readip(raw, offset)
offset,(self._tp_src, self._tp_dst) = _unpack("!HH", raw, offset)
# Only unwire wildcards for flow_mod
self.wildcards = self._normalize_wildcards(
self._unwire_wildcards(wildcards) if flow_mod else wildcards)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 40
def hash_code (self):
'''
ofp_match is not properly hashable since it is mutable, but it can
still be useful to easily generate a hash code.
'''
h = self.wildcards
for f in ofp_match_data:
v = getattr(self, f)
if type(v) is int:
h ^= v
elif type(v) is long:
h ^= v
return int(h & 0x7fFFffFF)
def matches_with_wildcards (self, other, consider_other_wildcards=True):
"""
Test whether /this/ match completely encompasses the other match.
Important for non-strict modify flow_mods etc.
"""
assert assert_type("other", other, ofp_match, none_ok=False)
# short cut for equal matches
if(self == other): return True
# only candidate if all wildcard bits in the *other* match are also
# set in this match (i.e., a submatch)
# first compare the bitmask part
if(consider_other_wildcards):
self_bits = self.wildcards&~(OFPFW_NW_SRC_MASK|OFPFW_NW_DST_MASK)
other_bits = other.wildcards&~(OFPFW_NW_SRC_MASK|OFPFW_NW_DST_MASK)
if( self_bits | other_bits != self_bits): return False
def match_fail(mine, others):
return mine != None and mine != others
if match_fail(self.in_port, other.in_port): return False
if match_fail(self.dl_vlan, other.dl_vlan): return False
if match_fail(self.dl_src, other.dl_src): return False
if match_fail(self.dl_dst, other.dl_dst): return False
if match_fail(self.dl_type, other.dl_type): return False
if match_fail(self.nw_proto, other.nw_proto): return False
if match_fail(self.tp_src, other.tp_src): return False
if match_fail(self.tp_dst, other.tp_dst): return False
if match_fail(self.dl_vlan_pcp, other.dl_vlan_pcp): return False
if match_fail(self.nw_tos, other.nw_tos): return False
self_nw_src = self.get_nw_src()
if(self_nw_src[0] != None):
other_nw_src = other.get_nw_src()
if self_nw_src[1] > other_nw_src[1]: return False
if not IPAddr(other_nw_src[0]).inNetwork(
(self_nw_src[0], self_nw_src[1])): return False
self_nw_dst = self.get_nw_dst()
if(self_nw_dst[0] != None):
other_nw_dst = other.get_nw_dst()
if self_nw_dst[1] > other_nw_dst[1]: return False
if not IPAddr(other_nw_dst[0]).inNetwork(
(self_nw_dst[0], self_nw_dst[1])): return False
return True
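  # Example (illustrative): a broader match encompasses a narrower one, but
  # not the other way around:
  #
  #   broad  = ofp_match(dl_type=0x0800)                # IPv4 only
  #   narrow = ofp_match(dl_type=0x0800, nw_proto=6)    # IPv4 + TCP
  #   broad.matches_with_wildcards(narrow)    # -> True
  #   narrow.matches_with_wildcards(broad)    # -> False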
def __eq__ (self, other):
if type(self) != type(other): return False
if self.wildcards != other.wildcards: return False
if self.in_port != other.in_port: return False
if self.dl_src != other.dl_src: return False
if self.dl_dst != other.dl_dst: return False
if self.dl_vlan != other.dl_vlan: return False
if self.dl_vlan_pcp != other.dl_vlan_pcp: return False
if self.dl_type != other.dl_type: return False
if self.nw_tos != other.nw_tos: return False
if self.nw_proto != other.nw_proto: return False
if self.nw_src != other.nw_src: return False
if self.nw_dst != other.nw_dst: return False
if self.tp_src != other.tp_src: return False
if self.tp_dst != other.tp_dst: return False
return True
def __str__ (self):
return self.__class__.__name__ + "\n " + self.show(' ').strip()
def show (self, prefix=''):
def binstr (n):
s = ''
while True:
s = ('1' if n & 1 else '0') + s
n >>= 1
if n == 0: break
return s
def safehex(n):
if n == None:
return "(None)"
else:
return hex(n)
def show_wildcards(w):
parts = [ k.lower()[len("OFPFW_"):]
for (k,v) in ofp_flow_wildcards_rev_map.iteritems()
if v & w == v ]
nw_src_bits = (w & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT
if nw_src_bits > 0:
parts.append("nw_src(/%d)" % (32 - nw_src_bits))
nw_dst_bits = (w & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT
if nw_dst_bits > 0:
parts.append("nw_dst(/%d)" % (32 - nw_dst_bits))
return "|".join(parts)
outstr = ''
outstr += prefix + 'wildcards: '
outstr += show_wildcards(self.wildcards)
outstr += ' (%s = %x)\n' % (binstr(self.wildcards), self.wildcards)
def append (f, formatter=str):
v = self.__getattr__(f)
if v is None: return ''
return prefix + f + ": " + formatter(v) + "\n"
outstr += append('in_port')
outstr += append('dl_src')
outstr += append('dl_dst')
outstr += append('dl_vlan')
outstr += append('dl_vlan_pcp')
outstr += append('dl_type', safehex)
outstr += append('nw_tos')
outstr += append('nw_proto')
outstr += append('nw_src')
outstr += append('nw_dst')
outstr += append('tp_src')
outstr += append('tp_dst')
return outstr
class ofp_action_generic (ofp_action_base):
_MIN_LENGTH = 8
def __init__ (self, **kw):
self.type = None # Purposely bad
self.data = _PAD4
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HH", self.type, len(self))
packed += self.data
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length) = _unpack("!HH", raw, offset)
offset,self.data = _read(raw, offset, length-4)
assert offset - _offset == len(self)
return offset
def __len__ (self):
return 4 + len(self.data)
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
return outstr
@openflow_action('OFPAT_OUTPUT', 0)
class ofp_action_output (ofp_action_base):
def __init__ (self, **kw):
self.port = None # Purposely bad -- require specification
self.max_len = 0xffFF
initHelper(self, kw)
def pack (self):
if self.port != OFPP_CONTROLLER:
self.max_len = 0
assert self._assert()
packed = b""
packed += struct.pack("!HHHH", self.type, len(self), self.port,
self.max_len)
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.port, self.max_len) = \
_unpack("!HHHH", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.port != other.port: return False
if self.max_len != other.max_len: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'port: ' + str(self.port) + '\n'
outstr += prefix + 'max_len: ' + str(self.max_len) + '\n'
return outstr
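# Example (illustrative): forward out a specific port, or punt to the
# controller (max_len caps how many bytes of the packet are sent up):
#
#   ofp_action_output(port=2)
#   ofp_action_output(port=OFPP_CONTROLLER, max_len=128)
#
# Note that pack() zeroes max_len for any port other than OFPP_CONTROLLER.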
@openflow_action('OFPAT_ENQUEUE', 11)
class ofp_action_enqueue (ofp_action_base):
def __init__ (self, **kw):
self.port = None # Require user to set
self.queue_id = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHH", self.type, len(self), self.port)
packed += _PAD6 # Pad
packed += struct.pack("!L", self.queue_id)
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.port) = _unpack("!HHH", raw, offset)
offset = _skip(raw, offset, 6)
offset,(self.queue_id,) = _unpack("!L", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 16
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.port != other.port: return False
if self.queue_id != other.queue_id: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'port: ' + str(self.port) + '\n'
outstr += prefix + 'queue_id: ' + str(self.queue_id) + '\n'
return outstr
@openflow_action('OFPAT_STRIP_VLAN', 3)
class ofp_action_strip_vlan (ofp_action_base):
def __init__ (self):
pass
def pack (self):
packed = struct.pack("!HHi", self.type, len(self), 0)
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length) = _unpack("!HH", raw, offset)
offset = _skip(raw, offset, 4)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
return outstr
@openflow_action('OFPAT_SET_VLAN_VID', 1)
class ofp_action_vlan_vid (ofp_action_base):
def __init__ (self, **kw):
self.vlan_vid = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHH", self.type, len(self), self.vlan_vid)
packed += _PAD2 # Pad
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.vlan_vid) = \
_unpack("!HHH", raw, offset)
offset = _skip(raw, offset, 2)
#TODO: check length for this and other actions
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.vlan_vid != other.vlan_vid: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'vlan_vid: ' + str(self.vlan_vid) + '\n'
return outstr
@openflow_action('OFPAT_SET_VLAN_PCP', 2)
class ofp_action_vlan_pcp (ofp_action_base):
def __init__ (self, **kw):
self.vlan_pcp = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHB", self.type, len(self), self.vlan_pcp)
packed += _PAD3 # Pad
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.vlan_pcp) = \
_unpack("!HHB", raw, offset)
offset = _skip(raw, offset, 3)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.vlan_pcp != other.vlan_pcp: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'vlan_pcp: ' + str(self.vlan_pcp) + '\n'
return outstr
@openflow_action('OFPAT_SET_DL_DST', 5)
@openflow_action('OFPAT_SET_DL_SRC', 4)
class ofp_action_dl_addr (ofp_action_base):
@classmethod
def set_dst (cls, dl_addr = None):
return cls(OFPAT_SET_DL_DST, dl_addr)
@classmethod
def set_src (cls, dl_addr = None):
return cls(OFPAT_SET_DL_SRC, dl_addr)
def __init__ (self, type = None, dl_addr = None):
"""
'type' should be OFPAT_SET_DL_SRC or OFPAT_SET_DL_DST.
"""
self.type = type
self.dl_addr = EMPTY_ETH
if dl_addr is not None:
self.dl_addr = EthAddr(dl_addr)
def _validate (self):
if (not isinstance(self.dl_addr, EthAddr)
and not isinstance(self.dl_addr, bytes)):
return "dl_addr is not string or EthAddr"
if isinstance(self.dl_addr, bytes) and len(self.dl_addr) != 6:
return "dl_addr is not of size 6"
return None
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HH", self.type, len(self))
if isinstance(self.dl_addr, EthAddr):
packed += self.dl_addr.toRaw()
else:
packed += self.dl_addr
packed += _PAD6
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length) = _unpack("!HH", raw, offset)
offset,self.dl_addr = _readether(raw, offset)
offset = _skip(raw, offset, 6)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 16
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.dl_addr != other.dl_addr: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'dl_addr: ' + str(self.dl_addr) + '\n'
return outstr
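# Example (illustrative): rewrite the Ethernet destination of matching
# packets; the classmethods just save spelling out the action type:
#
#   ofp_action_dl_addr.set_dst(EthAddr("00:11:22:33:44:55"))
#   # equivalent to:
#   ofp_action_dl_addr(OFPAT_SET_DL_DST, "00:11:22:33:44:55")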
@openflow_action('OFPAT_SET_NW_DST', 7)
@openflow_action('OFPAT_SET_NW_SRC', 6)
class ofp_action_nw_addr (ofp_action_base):
@classmethod
def set_dst (cls, nw_addr = None):
return cls(OFPAT_SET_NW_DST, nw_addr)
@classmethod
def set_src (cls, nw_addr = None):
return cls(OFPAT_SET_NW_SRC, nw_addr)
def __init__ (self, type = None, nw_addr = None):
"""
'type' should be OFPAT_SET_NW_SRC or OFPAT_SET_NW_DST
"""
self.type = type
if nw_addr is not None:
self.nw_addr = IPAddr(nw_addr)
else:
self.nw_addr = IPAddr(0)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHl", self.type, len(self),
self.nw_addr.toSigned())
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length) = _unpack("!HH", raw, offset)
offset,self.nw_addr = _readip(raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.nw_addr != other.nw_addr: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'nw_addr: ' + str(self.nw_addr) + '\n'
return outstr
@openflow_action('OFPAT_SET_NW_TOS', 8)
class ofp_action_nw_tos (ofp_action_base):
def __init__ (self, nw_tos = 0):
self.nw_tos = nw_tos
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHB", self.type, len(self), self.nw_tos)
packed += _PAD3
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.nw_tos) = _unpack("!HHB", raw, offset)
offset = _skip(raw, offset, 3)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.nw_tos != other.nw_tos: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'nw_tos: ' + str(self.nw_tos) + '\n'
return outstr
@openflow_action('OFPAT_SET_TP_DST', 10)
@openflow_action('OFPAT_SET_TP_SRC', 9)
class ofp_action_tp_port (ofp_action_base):
@classmethod
def set_dst (cls, tp_port = None):
return cls(OFPAT_SET_TP_DST, tp_port)
@classmethod
def set_src (cls, tp_port = None):
return cls(OFPAT_SET_TP_SRC, tp_port)
def __init__ (self, type=None, tp_port = 0):
"""
'type' is OFPAT_SET_TP_SRC/DST
"""
self.type = type
self.tp_port = tp_port
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHH", self.type, len(self), self.tp_port)
packed += _PAD2
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.tp_port) = \
_unpack("!HHH", raw, offset)
offset = _skip(raw, offset, 2)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.tp_port != other.tp_port: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'tp_port: ' + str(self.tp_port) + '\n'
return outstr
class ofp_action_vendor_base (ofp_action_base):
"""
Base class for vendor actions
"""
type = 65535 # OFPAT_VENDOR
def _eq (self, other):
"""
Return True if equal
    Override this.
"""
return True
def _init (self, kw):
"""
Initialize fields
    Override this.
"""
pass
def _pack_body (self):
"""
Pack body.
"""
return b""
def _unpack_body (self, raw, offset, avail):
"""
Unpack body in raw starting at offset.
Return new offset
"""
return offset
def _body_length (self):
"""
Return length of body.
Optionally override this.
"""
return len(self._pack_body())
def _show (self, prefix):
"""
Format additional fields as text
"""
return ""
def __init__ (self, **kw):
self._init(kw)
assert hasattr(self, 'vendor')
#self.vendor = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
body = self._pack_body()
packed = b""
packed += struct.pack("!HHL", self.type, 8 + len(body), self.vendor)
packed += body
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.vendor) = _unpack("!HHL", raw, offset)
offset = self._unpack_body(raw, offset, length - 8)
assert offset - _offset == len(self)
return offset
def __len__ (self):
return 8 + self._body_length()
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.vendor != other.vendor: return False
return self._eq(other)
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'vendor: ' + str(self.vendor) + '\n'
outstr += self._show(prefix)
return outstr
@openflow_action('OFPAT_VENDOR', 65535)
class ofp_action_vendor_generic (ofp_action_base):
def __init__ (self, **kw):
self.vendor = 0
self.body = b""
initHelper(self, kw)
def _pack_body (self):
if hasattr(self.body, 'pack'):
return self.body.pack()
else:
return bytes(self.body)
def pack (self):
assert self._assert()
body = self._pack_body()
packed = b""
packed += struct.pack("!HHL", self.type, 8 + len(body), self.vendor)
packed += body
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.vendor) = _unpack("!HHL", raw, offset)
offset,self.body = _read(raw, offset, length - 8)
assert offset - _offset == len(self)
return offset
def __len__ (self):
return 8 + len(self._pack_body())
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.vendor != other.vendor: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'vendor: ' + str(self.vendor) + '\n'
return outstr
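# Example (illustrative sketch): a custom vendor action built on
# ofp_action_vendor_base only overrides the underscore-prefixed hooks.  The
# vendor id, "subtype" field, and wire layout below are made up for
# illustration; they are not part of this file:
#
#   class my_vendor_action (ofp_action_vendor_base):
#     def _init (self, kw):
#       self.vendor = 0x00002320   # hypothetical vendor/experimenter id
#       self.subtype = 0
#     def _eq (self, other):
#       return self.subtype == other.subtype
#     def _pack_body (self):
#       return struct.pack("!H", self.subtype) + _PAD6  # pad body to 8 bytes
#     def _unpack_body (self, raw, offset, avail):
#       offset,(self.subtype,) = _unpack("!H", raw, offset)
#       return _skip(raw, offset, 6)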
#3. Controller-to-Switch Messages
##3.1 Handshake
@openflow_s_message("OFPT_FEATURES_REPLY", 6,
reply_to="ofp_features_request")
class ofp_features_reply (ofp_header):
_MIN_LENGTH = 32
def __init__ (self, **kw):
ofp_header.__init__(self)
self.datapath_id = 0
self.n_buffers = 0
self.n_tables = 0
self.capabilities = 0
self.actions = 0
self.ports = []
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!QLB", self.datapath_id, self.n_buffers,
self.n_tables)
packed += _PAD3
packed += struct.pack("!LL", self.capabilities, self.actions)
for i in self.ports:
packed += i.pack()
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.datapath_id, self.n_buffers, self.n_tables) = \
_unpack("!QLB", raw, offset)
offset = _skip(raw, offset, 3)
offset,(self.capabilities, self.actions) = _unpack("!LL", raw, offset)
portCount = (length - 32) / len(ofp_phy_port)
self.ports = []
for i in xrange(0, portCount):
p = ofp_phy_port()
offset = p.unpack(raw, offset)
self.ports.append(p)
assert length == len(self)
return offset,length
def __len__ (self):
return 32 + len(self.ports) * len(ofp_phy_port)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.datapath_id != other.datapath_id: return False
if self.n_buffers != other.n_buffers: return False
if self.n_tables != other.n_tables: return False
if self.capabilities != other.capabilities: return False
if self.actions != other.actions: return False
if self.ports != other.ports: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'datapath_id: ' + str(self.datapath_id) + '\n'
outstr += prefix + 'n_buffers: ' + str(self.n_buffers) + '\n'
outstr += prefix + 'n_tables: ' + str(self.n_tables) + '\n'
outstr += prefix + 'capabilities: ' + str(self.capabilities) + '\n'
outstr += prefix + 'actions: ' + str(self.actions) + '\n'
outstr += prefix + 'ports: \n'
for obj in self.ports:
outstr += obj.show(prefix + ' ')
return outstr
ofp_switch_features = ofp_features_reply
##3.2 Switch Configuration
@openflow_c_message("OFPT_SET_CONFIG", 9)
class ofp_set_config (ofp_header): # uses ofp_switch_config
def __init__ (self, **kw):
ofp_header.__init__(self)
self.flags = 0
self.miss_send_len = OFP_DEFAULT_MISS_SEND_LEN
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!HH", self.flags, self.miss_send_len)
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.flags, self.miss_send_len) = _unpack("!HH", raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 12
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.flags != other.flags: return False
if self.miss_send_len != other.miss_send_len: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'miss_send_len: ' + str(self.miss_send_len) + '\n'
return outstr
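# Example (illustrative): ask the switch to send up to 0xffff bytes of each
# table-miss packet to the controller instead of the default:
#
#   ofp_set_config(miss_send_len=0xffff).pack()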
##3.3 Modify State Messages
@openflow_c_message("OFPT_FLOW_MOD", 14)
class ofp_flow_mod (ofp_header):
_MIN_LENGTH = 72
def __init__ (self, **kw):
ofp_header.__init__(self)
if 'match' in kw:
self.match = None
else:
self.match = ofp_match()
self.cookie = 0
self.command = OFPFC_ADD
self.idle_timeout = 0
self.hard_timeout = 0
self.priority = OFP_DEFAULT_PRIORITY
self._buffer_id = NO_BUFFER
self.out_port = OFPP_NONE
self.flags = 0
self.actions = []
self.data = None # Not in the spec! Special magic! Can be packet_in.
# ofp_flow_mod/ofp_packet_out do some special handling of 'actions'...
# Allow "action" as a synonym for "actions"
if 'action' in kw and 'actions' not in kw:
kw['actions'] = kw['action']
del kw['action']
initHelper(self, kw)
# Allow use of actions=<a single action> for kw args.
if not hasattr(self.actions, '__getitem__'):
self.actions = [self.actions]
@property
def buffer_id (self):
if self._buffer_id == NO_BUFFER: return None
return self._buffer_id
@buffer_id.setter
def buffer_id (self, val):
if val is None: val = NO_BUFFER
self._buffer_id = val
def _validate (self):
if not isinstance(self.match, ofp_match):
return "match is not class ofp_match"
return None
def pack (self):
"""
Packs this object into its wire format.
May normalize fields.
NOTE: If "data" has been specified, this method may actually return
*more than just a single ofp_flow_mod* in packed form.
Specifically, it may also have a barrier and an ofp_packet_out.
"""
po = None
if self.data:
      #TODO: It'd be nice to log and then ignore if not data_is_complete.
      #      Unfortunately, we currently have no logging in here, so we
      #      assert instead, which is either too drastic or too quiet.
assert self.data.is_complete
assert self.buffer_id is None
self.buffer_id = self.data.buffer_id
if self.buffer_id is None:
po = ofp_packet_out(data=self.data)
po.in_port = self.data.in_port
po.actions.append(ofp_action_output(port = OFPP_TABLE))
# Should maybe check that packet hits the new entry...
# Or just duplicate the actions? (I think that's the best idea)
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += self.match.pack(flow_mod=True)
packed += struct.pack("!QHHHHLHH", self.cookie, self.command,
self.idle_timeout, self.hard_timeout,
self.priority, self._buffer_id, self.out_port,
self.flags)
for i in self.actions:
packed += i.pack()
if po:
packed += ofp_barrier_request().pack()
packed += po.pack()
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset = self.match.unpack(raw, offset, flow_mod=True)
offset,(self.cookie, self.command, self.idle_timeout,
self.hard_timeout, self.priority, self._buffer_id,
self.out_port, self.flags) = \
_unpack("!QHHHHLHH", raw, offset)
offset,self.actions = _unpack_actions(raw,
length-(32 + len(self.match)), offset)
assert length == len(self)
return offset,length
def __len__ (self):
l = 32 + len(self.match)
for i in self.actions:
l += len(i)
return l
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.match != other.match: return False
if self.cookie != other.cookie: return False
if self.command != other.command: return False
if self.idle_timeout != other.idle_timeout: return False
if self.hard_timeout != other.hard_timeout: return False
if self.priority != other.priority: return False
if self.buffer_id != other.buffer_id: return False
if self.out_port != other.out_port: return False
if self.flags != other.flags: return False
if self.actions != other.actions: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
outstr += prefix + 'command: ' + str(self.command) + '\n'
outstr += prefix + 'idle_timeout: ' + str(self.idle_timeout) + '\n'
outstr += prefix + 'hard_timeout: ' + str(self.hard_timeout) + '\n'
outstr += prefix + 'priority: ' + str(self.priority) + '\n'
outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
outstr += prefix + 'out_port: ' + str(self.out_port) + '\n'
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'actions: \n'
for obj in self.actions:
outstr += obj.show(prefix + ' ')
return outstr
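# Example (illustrative sketch; "connection" is assumed to be a POX switch
# connection object with a send() method, not something defined in this
# file):
#
#   fm = ofp_flow_mod()
#   fm.match.in_port = 1
#   fm.match.dl_type = 0x0800
#   fm.idle_timeout = 10
#   fm.actions.append(ofp_action_output(port=2))
#   connection.send(fm)
#
# command defaults to OFPFC_ADD and priority to OFP_DEFAULT_PRIORITY, so
# this installs a new entry matching IPv4 traffic from port 1.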
@openflow_c_message("OFPT_PORT_MOD", 15)
class ofp_port_mod (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
self.port_no = 0
self.hw_addr = EMPTY_ETH
self.config = 0
self.mask = 0
self.advertise = 0
initHelper(self, kw)
def _validate (self):
if (not isinstance(self.hw_addr, bytes)
and not isinstance(self.hw_addr, EthAddr)):
return "hw_addr is not bytes or EthAddr"
if len(self.hw_addr) != 6:
return "hw_addr is not of size 6"
return None
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!H", self.port_no)
if isinstance(self.hw_addr, bytes):
packed += self.hw_addr
else:
packed += self.hw_addr.toRaw()
packed += struct.pack("!LLL", self.config, self.mask, self.advertise)
packed += _PAD4
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.port_no,) = _unpack("!H", raw, offset)
offset,self.hw_addr = _readether(raw, offset)
offset,(self.config, self.mask, self.advertise) = \
_unpack("!LLL", raw, offset)
offset = _skip(raw, offset, 4)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 32
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.port_no != other.port_no: return False
if self.hw_addr != other.hw_addr: return False
if self.config != other.config: return False
if self.mask != other.mask: return False
if self.advertise != other.advertise: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
outstr += prefix + 'hw_addr: ' + str(EthAddr(self.hw_addr)) + '\n'
outstr += prefix + 'config: ' + str(self.config) + '\n'
outstr += prefix + 'mask: ' + str(self.mask) + '\n'
outstr += prefix + 'advertise: ' + str(self.advertise) + '\n'
return outstr
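# Example (illustrative; assumes OFPPC_NO_FLOOD from ofp_port_config_rev_map
# is available as a module-level constant and "port" is an ofp_phy_port
# taken from a features reply):
#
#   pm = ofp_port_mod(port_no=port.port_no, hw_addr=port.hw_addr,
#                     config=OFPPC_NO_FLOOD, mask=OFPPC_NO_FLOOD)
#
# mask selects which config bits the switch should touch; config supplies
# their new values.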
##3.4 Queue Configuration Messages
@openflow_c_message("OFPT_QUEUE_GET_CONFIG_REQUEST", 20)
class ofp_queue_get_config_request (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
self.port = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!H", self.port)
packed += _PAD2
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.port,) = _unpack("!H", raw, offset)
offset = _skip(raw, offset, 2)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 12
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.port != other.port: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'port: ' + str(self.port) + '\n'
return outstr
@openflow_s_message("OFPT_QUEUE_GET_CONFIG_REPLY", 21)
class ofp_queue_get_config_reply (ofp_header):
_MIN_LENGTH = 16
def __init__ (self, **kw):
ofp_header.__init__(self)
self.port = 0
self.queues = []
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!H", self.port)
packed += _PAD6
for i in self.queues:
packed += i.pack()
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.port,) = _unpack("!H", raw, offset)
offset = _skip(raw, offset, 6)
    remaining = length - 6 - 2 - 8 # 8 is the size of the ofp_header
del self.queues[:]
# Not tested; probably buggy
while remaining > 0:
q = ofp_packet_queue()
_offset = q.unpack(raw, offset)
l = _offset - offset
offset = _offset
if l < 1: raise RuntimeError("Can't parse")
remaining -= l
self.queues.append(q)
assert length == len(self)
return offset,length
def __len__ (self):
l = 16
for i in self.queues:
l += len(i)
return l
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.port != other.port: return False
if self.queues != other.queues: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'port: ' + str(self.port) + '\n'
outstr += prefix + 'queues: \n'
for obj in self.queues:
outstr += obj.show(prefix + ' ')
return outstr
@openflow_c_message("OFPT_STATS_REQUEST", 16)
class ofp_stats_request (ofp_header):
_MIN_LENGTH = 12
def __init__ (self, **kw):
ofp_header.__init__(self)
self.type = None # Try to guess
self.flags = 0
self._body = b''
self._body_packed = None # Cache
initHelper(self, kw)
def pack (self):
if self.type is None:
if isinstance(self.body, ofp_stats_body_base):
self.type = self.body._type
else:
raise RuntimeError("Can't determine body type; specify it "
+ "explicitly")
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!HH", self.type, self.flags)
packed += self._pack_body()
return packed
def _pack_body (self):
if self._body_packed is None:
if hasattr(self.body, 'pack'):
self._body_packed = self._body.pack()
else:
self._body_packed = self._body
return self._body_packed
@property
def body (self):
return self._body
@body.setter
def body (self, data):
self._body = data
    self._body_packed = None # Invalidate cached packed body
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.type, self.flags) = _unpack("!HH", raw, offset)
offset,body = _read(raw, offset, length - 12)
si = _stats_type_to_class_info.get(self.type)
if si is None:
self.body = ofp_generic_stats_body()
self.body.unpack(body, 0, len(body))
else:
if si.request is None:
raise RuntimeError("No request for " + str(si))
self.body = si.request()
self.body.unpack(body, 0, len(body))
#TODO: assert entire body is unpacked
assert length == len(self)
return offset,length
def __len__ (self):
return 12 + len(self._pack_body())
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.type != other.type: return False
if self.flags != other.flags: return False
if self._pack_body() != other._pack_body(): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'body:\n'
outstr += _format_body(self.body, prefix + ' ') + '\n'
return outstr
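# Example (illustrative): a description-stats request carries an empty body,
# so only the type needs to be set (0 is OFPST_DESC, registered further
# below; a module-level OFPST_DESC constant is assumed to exist as well):
#
#   req = ofp_stats_request(type=0)   # OFPST_DESC
#   raw = req.pack()
#
# For stats types that do carry a body, assign an ofp_stats_body_base
# subclass to .body and pack() will infer .type from it.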
@openflow_s_message("OFPT_STATS_REPLY", 17,
reply_to="ofp_stats_request")
class ofp_stats_reply (ofp_header):
_MIN_LENGTH = 12
def __init__ (self, **kw):
ofp_header.__init__(self)
self.type = None # Guess
self.flags = 0
self.body = b''
self._body_data = (None, None)
initHelper(self, kw)
@property
def is_last_reply (self):
return (self.flags & 1) == 0
@is_last_reply.setter
def is_last_reply (self, value):
self.flags = self.flags & 0xfffe
if not value:
self.flags |= 1
@property
def body_data (self):
if self._body_data[0] is not self.body:
def _pack(b):
return b.pack() if hasattr(b, 'pack') else b
data = b''
if isinstance(self.body, collections.Iterable):
for b in self.body:
data += _pack(b)
else:
data = _pack(self.body)
self._body_data = (self.body, data)
return self._body_data[1]
def pack (self):
if self.type is None:
if isinstance(self.body, ofp_stats_body_base):
self.type = self.body._type
else:
raise RuntimeError("Can't determine body type; specify it "
+ "explicitly")
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!HH", self.type, self.flags)
packed += self.body_data
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.type, self.flags) = _unpack("!HH", raw, offset)
offset,packed = _read(raw, offset, length - 12)
t = _stats_type_to_class_info.get(self.type)
if t is None:
#FIXME: Put in a generic container?
self.body = packed
else:
if t.reply is None:
#FIXME: Put in a generic container?
self.body = packed
else:
if not t.reply_is_list:
self.body = t.reply()
self.body.unpack(packed, 0, len(packed))
else:
prev_len = len(packed)
self.body = []
while len(packed):
part = t.reply()
off = part.unpack(packed, 0, len(packed))
packed = packed[off:]
assert len(packed) != prev_len
prev_len = len(packed)
self.body.append(part)
assert length == len(self)
return offset,length
def __len__ (self):
if isinstance(self.body, list):
return 12 + sum(len(part) for part in self.body)
return 12 + len(self.body)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.type != other.type: return False
if self.flags != other.flags: return False
if self.body != other.body: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'body:\n'
outstr += _format_body(self.body, prefix + ' ') + '\n'
return outstr
@openflow_stats_reply("OFPST_DESC", 0)
class ofp_desc_stats (ofp_stats_body_base):
def __init__ (self, **kw):
self.mfr_desc= ""
self.hw_desc= ""
self.sw_desc= ""
self.serial_num= ""
self.dp_desc= ""
initHelper(self, kw)
def _validate (self):
    if not isinstance(self.mfr_desc, str):
      return "mfr_desc is not a string"
    if len(self.mfr_desc) > DESC_STR_LEN:
      return "mfr_desc is longer than DESC_STR_LEN (256)"
    if not isinstance(self.hw_desc, str):
      return "hw_desc is not a string"
    if len(self.hw_desc) > DESC_STR_LEN:
      return "hw_desc is longer than DESC_STR_LEN (256)"
    if not isinstance(self.sw_desc, str):
      return "sw_desc is not a string"
    if len(self.sw_desc) > DESC_STR_LEN:
      return "sw_desc is longer than DESC_STR_LEN (256)"
    if not isinstance(self.serial_num, str):
      return "serial_num is not a string"
    if len(self.serial_num) > SERIAL_NUM_LEN:
      return "serial_num is longer than SERIAL_NUM_LEN (32)"
    if not isinstance(self.dp_desc, str):
      return "dp_desc is not a string"
    if len(self.dp_desc) > DESC_STR_LEN:
      return "dp_desc is longer than DESC_STR_LEN (256)"
return None
def pack (self):
assert self._assert()
packed = b""
packed += self.mfr_desc.ljust(DESC_STR_LEN,'\0')
packed += self.hw_desc.ljust(DESC_STR_LEN,'\0')
packed += self.sw_desc.ljust(DESC_STR_LEN,'\0')
packed += self.serial_num.ljust(SERIAL_NUM_LEN,'\0')
packed += self.dp_desc.ljust(DESC_STR_LEN,'\0')
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,self.mfr_desc = _readzs(raw, offset, DESC_STR_LEN)
offset,self.hw_desc = _readzs(raw, offset, DESC_STR_LEN)
offset,self.sw_desc = _readzs(raw, offset, DESC_STR_LEN)
offset,self.serial_num = _readzs(raw, offset, SERIAL_NUM_LEN)
offset,self.dp_desc = _readzs(raw, offset, DESC_STR_LEN)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 1056
def __eq__ (self, other):
if type(self) != type(other): return False
if self.mfr_desc != other.mfr_desc: return False
if self.hw_desc != other.hw_desc: return False
if self.sw_desc != other.sw_desc: return False
if self.serial_num != other.serial_num: return False
if self.dp_desc != other.dp_desc: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'mfr_desc: ' + str(self.mfr_desc) + '\n'
outstr += prefix + 'hw_desc: ' + str(self.hw_desc) + '\n'
outstr += prefix + 'sw_desc: ' + str(self.sw_desc) + '\n'
outstr += prefix + 'serial_num: ' + str(self.serial_num) + '\n'
outstr += prefix + 'dp_desc: ' + str(self.dp_desc) + '\n'
return outstr
ofp_desc_stats_reply = ofp_desc_stats
# This next one is weird. It only exists so that the type-guessing
# will work for requests. I don't think it's really needed, though.
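# A hedged usage sketch (added for illustration; not in the original source):
# because this empty body class is registered for OFPST_DESC, a request's
# 'type' field can be guessed from its body during pack(), e.g.:
#
#   req = ofp_stats_request(body=ofp_desc_stats_request())
#   req.pack()  # type is inferred as OFPST_DESC from the body
#
# The names above are the classes defined in this file.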
@openflow_stats_request('OFPST_DESC', 0)
class ofp_desc_stats_request (ofp_stats_body_base):
def __init__ (self, **kw):
pass
def pack (self):
return b""
def unpack (self, raw, offset, avail):
if avail != 0:
raise RuntimeError("Expected empty body")
return offset
@staticmethod
def __len__ ():
return 0
def __eq__ (self, other):
if type(self) != type(other): return False
return True
def show (self, prefix=''):
return "<empty>"
@openflow_stats_request('OFPST_FLOW', 1)
class ofp_flow_stats_request (ofp_stats_body_base):
def __init__ (self, **kw):
self.match = ofp_match()
self.table_id = TABLE_ALL
self.out_port = OFPP_NONE
initHelper(self, kw)
def _validate (self):
if not isinstance(self.match, ofp_match):
return "match is not class ofp_match"
return None
def pack (self):
assert self._assert()
packed = b""
packed += self.match.pack()
packed += struct.pack("!BBH", self.table_id, 0, self.out_port)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset = self.match.unpack(raw, offset)
offset,(self.table_id, pad, self.out_port) = \
_unpack("!BBH", raw, offset)
assert pad == 0
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 4 + len(ofp_match)
def __eq__ (self, other):
if type(self) != type(other): return False
if self.match != other.match: return False
if self.table_id != other.table_id: return False
if self.out_port != other.out_port: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
outstr += prefix + 'out_port: ' + str(self.out_port) + '\n'
return outstr
@openflow_stats_reply('OFPST_FLOW', is_list = True)
class ofp_flow_stats (ofp_stats_body_base):
_MIN_LENGTH = 88
def __init__ (self, **kw):
self.table_id = 0
self.match = ofp_match()
self.duration_sec = 0
self.duration_nsec = 0
self.priority = OFP_DEFAULT_PRIORITY
self.idle_timeout = 0
self.hard_timeout = 0
self.cookie = 0
self.packet_count = 0
self.byte_count = 0
self.actions = []
initHelper(self, kw)
def _validate (self):
if not isinstance(self.match, ofp_match):
return "match is not class ofp_match"
return None
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HBB", len(self), self.table_id, 0)
packed += self.match.pack()
packed += struct.pack("!LLHHH", self.duration_sec,
self.duration_nsec, self.priority,
self.idle_timeout, self.hard_timeout)
packed += _PAD6 # Pad
packed += struct.pack("!QQQ", self.cookie, self.packet_count,
self.byte_count)
for i in self.actions:
packed += i.pack()
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(length, self.table_id, pad) = _unpack("!HBB", raw, offset)
assert pad == 0
offset = self.match.unpack(raw, offset)
offset,(self.duration_sec, self.duration_nsec, self.priority,
self.idle_timeout, self.hard_timeout) = \
_unpack("!LLHHH", raw, offset)
offset = _skip(raw, offset, 6)
offset,(self.cookie, self.packet_count, self.byte_count) = \
_unpack("!QQQ", raw, offset)
assert (offset - _offset) == 48 + len(self.match)
offset,self.actions = _unpack_actions(raw,
length - (48 + len(self.match)), offset)
assert offset - _offset == len(self)
return offset
def __len__ (self):
l = 48 + len(self.match)
for i in self.actions:
l += len(i)
return l
def __eq__ (self, other):
if type(self) != type(other): return False
if len(self) != len(other): return False
if self.table_id != other.table_id: return False
if self.match != other.match: return False
if self.duration_sec != other.duration_sec: return False
if self.duration_nsec != other.duration_nsec: return False
if self.priority != other.priority: return False
if self.idle_timeout != other.idle_timeout: return False
if self.hard_timeout != other.hard_timeout: return False
if self.cookie != other.cookie: return False
if self.packet_count != other.packet_count: return False
if self.byte_count != other.byte_count: return False
if self.actions != other.actions: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'length: ' + str(len(self)) + '\n'
outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'duration_sec: ' + str(self.duration_sec) + '\n'
outstr += prefix + 'duration_nsec: ' + str(self.duration_nsec) + '\n'
outstr += prefix + 'priority: ' + str(self.priority) + '\n'
outstr += prefix + 'idle_timeout: ' + str(self.idle_timeout) + '\n'
outstr += prefix + 'hard_timeout: ' + str(self.hard_timeout) + '\n'
outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
outstr += prefix + 'packet_count: ' + str(self.packet_count) + '\n'
outstr += prefix + 'byte_count: ' + str(self.byte_count) + '\n'
outstr += prefix + 'actions: \n'
for obj in self.actions:
outstr += obj.show(prefix + ' ')
return outstr
ofp_flow_stats_reply = ofp_flow_stats
@openflow_stats_request('OFPST_AGGREGATE', 2)
class ofp_aggregate_stats_request (ofp_stats_body_base):
def __init__ (self, **kw):
self.match = ofp_match()
self.table_id = TABLE_ALL
self.out_port = OFPP_NONE
initHelper(self, kw)
def _validate (self):
if not isinstance(self.match, ofp_match):
return "match is not class ofp_match"
return None
def pack (self):
assert self._assert()
packed = b""
packed += self.match.pack()
packed += struct.pack("!BBH", self.table_id, 0, self.out_port)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset = self.match.unpack(raw, offset)
offset,(self.table_id, pad, self.out_port) = \
_unpack("!BBH", raw, offset)
assert pad == 0
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 44
def __eq__ (self, other):
if type(self) != type(other): return False
if self.match != other.match: return False
if self.table_id != other.table_id: return False
if self.out_port != other.out_port: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
outstr += prefix + 'out_port: ' + str(self.out_port) + '\n'
return outstr
@openflow_stats_reply('OFPST_AGGREGATE')
class ofp_aggregate_stats (ofp_stats_body_base):
def __init__ (self, **kw):
self.packet_count = 0
self.byte_count = 0
self.flow_count = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!QQL", self.packet_count, self.byte_count,
self.flow_count)
packed += _PAD4 # Pad
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.packet_count, self.byte_count, self.flow_count) = \
_unpack("!QQL", raw, offset)
offset = _skip(raw, offset, 4)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 24
def __eq__ (self, other):
if type(self) != type(other): return False
if self.packet_count != other.packet_count: return False
if self.byte_count != other.byte_count: return False
if self.flow_count != other.flow_count: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'packet_count: ' + str(self.packet_count) + '\n'
outstr += prefix + 'byte_count: ' + str(self.byte_count) + '\n'
outstr += prefix + 'flow_count: ' + str(self.flow_count) + '\n'
return outstr
ofp_aggregate_stats_reply = ofp_aggregate_stats
@openflow_stats_reply('OFPST_TABLE', 3, is_list = True)
class ofp_table_stats (ofp_stats_body_base):
def __init__ (self, **kw):
self.table_id = 0
self.name = ""
self.wildcards = 0
self.max_entries = 0
self.active_count = 0
self.lookup_count = 0
self.matched_count = 0
initHelper(self, kw)
def _validate (self):
if not isinstance(self.name, str):
return "name is not string"
if len(self.name) > OFP_MAX_TABLE_NAME_LEN:
return "name is too long"
return None
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!B", self.table_id)
packed += _PAD3
packed += self.name.ljust(OFP_MAX_TABLE_NAME_LEN,'\0')
packed += struct.pack("!LLLQQ", self.wildcards, self.max_entries,
self.active_count, self.lookup_count,
self.matched_count)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.table_id,) = _unpack("!B", raw, offset)
offset = _skip(raw, offset, 3)
offset,self.name = _readzs(raw, offset, OFP_MAX_TABLE_NAME_LEN)
offset,(self.wildcards, self.max_entries, self.active_count,
self.lookup_count, self.matched_count) = \
_unpack("!LLLQQ", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 64
def __eq__ (self, other):
if type(self) != type(other): return False
if self.table_id != other.table_id: return False
if self.name != other.name: return False
if self.wildcards != other.wildcards: return False
if self.max_entries != other.max_entries: return False
if self.active_count != other.active_count: return False
if self.lookup_count != other.lookup_count: return False
if self.matched_count != other.matched_count: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
outstr += prefix + 'name: ' + str(self.name) + '\n'
outstr += prefix + 'wildcards: ' + str(self.wildcards) + '\n'
outstr += prefix + 'max_entries: ' + str(self.max_entries) + '\n'
outstr += prefix + 'active_count: ' + str(self.active_count) + '\n'
outstr += prefix + 'lookup_count: ' + str(self.lookup_count) + '\n'
outstr += prefix + 'matched_count: ' + str(self.matched_count) + '\n'
return outstr
ofp_table_stats_reply = ofp_table_stats
@openflow_stats_request("OFPST_PORT", 4)
class ofp_port_stats_request (ofp_stats_body_base):
def __init__ (self, **kw):
self.port_no = OFPP_NONE
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!H", self.port_no)
packed += _PAD6
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.port_no,) = _unpack("!H", raw, offset)
offset = _skip(raw, offset, 6)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.port_no != other.port_no: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
return outstr
@openflow_stats_reply("OFPST_PORT", is_list = True)
class ofp_port_stats (ofp_stats_body_base):
def __init__ (self, **kw):
self.port_no = OFPP_NONE
self.rx_packets = 0
self.tx_packets = 0
self.rx_bytes = 0
self.tx_bytes = 0
self.rx_dropped = 0
self.tx_dropped = 0
self.rx_errors = 0
self.tx_errors = 0
self.rx_frame_err = 0
self.rx_over_err = 0
self.rx_crc_err = 0
self.collisions = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!H", self.port_no)
packed += _PAD6
packed += struct.pack("!QQQQQQQQQQQQ", self.rx_packets,
self.tx_packets, self.rx_bytes, self.tx_bytes,
self.rx_dropped, self.tx_dropped,
self.rx_errors, self.tx_errors,
self.rx_frame_err, self.rx_over_err,
self.rx_crc_err, self.collisions)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.port_no,) = _unpack("!H", raw, offset)
offset = _skip(raw, offset, 6)
offset,(self.rx_packets, self.tx_packets, self.rx_bytes,
self.tx_bytes, self.rx_dropped, self.tx_dropped,
self.rx_errors, self.tx_errors, self.rx_frame_err,
self.rx_over_err, self.rx_crc_err, self.collisions) = \
_unpack("!QQQQQQQQQQQQ", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 104
def __eq__ (self, other):
if type(self) != type(other): return False
if self.port_no != other.port_no: return False
if self.rx_packets != other.rx_packets: return False
if self.tx_packets != other.tx_packets: return False
if self.rx_bytes != other.rx_bytes: return False
if self.tx_bytes != other.tx_bytes: return False
if self.rx_dropped != other.rx_dropped: return False
if self.tx_dropped != other.tx_dropped: return False
if self.rx_errors != other.rx_errors: return False
if self.tx_errors != other.tx_errors: return False
if self.rx_frame_err != other.rx_frame_err: return False
if self.rx_over_err != other.rx_over_err: return False
if self.rx_crc_err != other.rx_crc_err: return False
if self.collisions != other.collisions: return False
return True
def __add__(self, other):
    if type(self) != type(other): return NotImplemented
port_no = OFPP_NONE
if self.port_no == other.port_no:
port_no = self.port_no
return ofp_port_stats(
port_no=port_no,
rx_packets = self.rx_packets + other.rx_packets,
tx_packets = self.tx_packets + other.tx_packets,
rx_bytes = self.rx_bytes + other.rx_bytes,
tx_bytes = self.tx_bytes + other.tx_bytes,
rx_dropped = self.rx_dropped + other.rx_dropped,
tx_dropped = self.tx_dropped + other.tx_dropped,
rx_errors = self.rx_errors + other.rx_errors,
tx_errors = self.tx_errors + other.tx_errors,
rx_frame_err = self.rx_frame_err + other.rx_frame_err,
rx_over_err = self.rx_over_err + other.rx_over_err,
rx_crc_err = self.rx_crc_err + other.rx_crc_err,
collisions = self.collisions + other.collisions)
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
outstr += prefix + 'rx_packets: ' + str(self.rx_packets) + '\n'
outstr += prefix + 'tx_packets: ' + str(self.tx_packets) + '\n'
outstr += prefix + 'rx_bytes: ' + str(self.rx_bytes) + '\n'
outstr += prefix + 'tx_bytes: ' + str(self.tx_bytes) + '\n'
outstr += prefix + 'rx_dropped: ' + str(self.rx_dropped) + '\n'
outstr += prefix + 'tx_dropped: ' + str(self.tx_dropped) + '\n'
outstr += prefix + 'rx_errors: ' + str(self.rx_errors) + '\n'
outstr += prefix + 'tx_errors: ' + str(self.tx_errors) + '\n'
outstr += prefix + 'rx_frame_err: ' + str(self.rx_frame_err) + '\n'
outstr += prefix + 'rx_over_err: ' + str(self.rx_over_err) + '\n'
outstr += prefix + 'rx_crc_err: ' + str(self.rx_crc_err) + '\n'
outstr += prefix + 'collisions: ' + str(self.collisions) + '\n'
return outstr
ofp_port_stats_reply = ofp_port_stats
@openflow_stats_request("OFPST_QUEUE", 5)
class ofp_queue_stats_request (ofp_stats_body_base):
def __init__ (self, **kw):
self.port_no = OFPP_ALL
self.queue_id = OFPQ_ALL
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!H", self.port_no)
packed += _PAD2
packed += struct.pack("!L", self.queue_id)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.port_no,pad,self.queue_id) = _unpack("!HHL", raw, offset)
assert pad == 0
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.port_no != other.port_no: return False
if self.queue_id != other.queue_id: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
outstr += prefix + 'queue_id: ' + str(self.queue_id) + '\n'
return outstr
@openflow_stats_reply("OFPST_QUEUE", is_list = True)
class ofp_queue_stats (ofp_stats_body_base):
def __init__ (self, **kw):
self.port_no = 0
self.queue_id = 0
self.tx_bytes = 0
self.tx_packets = 0
self.tx_errors = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!H", self.port_no)
packed += _PAD2
packed += struct.pack("!LQQQ", self.queue_id, self.tx_bytes,
self.tx_packets, self.tx_errors)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.port_no, pad, self.queue_id, self.tx_bytes,
self.tx_packets, self.tx_errors) = \
_unpack("!HHLQQQ", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 32
def __eq__ (self, other):
if type(self) != type(other): return False
if self.port_no != other.port_no: return False
if self.queue_id != other.queue_id: return False
if self.tx_bytes != other.tx_bytes: return False
if self.tx_packets != other.tx_packets: return False
if self.tx_errors != other.tx_errors: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
outstr += prefix + 'queue_id: ' + str(self.queue_id) + '\n'
outstr += prefix + 'tx_bytes: ' + str(self.tx_bytes) + '\n'
outstr += prefix + 'tx_packets: ' + str(self.tx_packets) + '\n'
outstr += prefix + 'tx_errors: ' + str(self.tx_errors) + '\n'
return outstr
ofp_queue_stats_reply = ofp_queue_stats
@openflow_stats_request("OFPST_VENDOR", 65535, is_list = False)
@openflow_stats_reply("OFPST_VENDOR", 65535, is_list = False)
class ofp_vendor_stats_generic (ofp_stats_body_base):
_MIN_LENGTH = 4
def __init__ (self, **kw):
self.vendor = None
self.data = b""
initHelper(self, kw)
def _pack_body (self):
if hasattr(self.data, "pack"):
return self.data.pack()
else:
return self.data
def pack (self):
assert self._assert()
packed = struct.pack("!L", self.vendor)
packed += self._pack_body()
return packed
def unpack (self, raw, offset, avail):
    if avail is None: raise RuntimeError("Requires length")
_offset = offset
offset,(self.vendor,) = _unpack("!L", raw, offset)
offset,self.data = _read(raw, offset, avail-4)
return offset
  def __len__ (self):
    return 4 + len(self._pack_body())
def __eq__ (self, other):
if type(self) != type(other): return False
if self.vendor != other.vendor: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'vendor id: ' + str(self.vendor) + '\n'
outstr += prefix + 'data len: ' + str(len(self.data)) + '\n'
return outstr
class ofp_generic_stats_body (ofp_stats_body_base):
_MIN_LENGTH = 0
def __init__ (self, **kw):
self.data = b""
initHelper(self, kw)
def _pack_body (self):
if hasattr(self.data, "pack"):
return self.data.pack()
else:
return self.data
def pack (self):
assert self._assert()
    packed = b""
    packed += self._pack_body()
return packed
def unpack (self, raw, offset, avail):
    if avail is None: raise RuntimeError("Requires length")
_offset = offset
offset,self.data = _read(raw, offset, avail)
return offset
  def __len__ (self):
    return len(self._pack_body())
def __eq__ (self, other):
if type(self) != type(other): return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'data len: ' + str(len(self.data)) + '\n'
return outstr
@openflow_c_message("OFPT_PACKET_OUT", 13)
class ofp_packet_out (ofp_header):
_MIN_LENGTH = 16
def __init__ (self, **kw):
ofp_header.__init__(self)
self._buffer_id = NO_BUFFER
self.in_port = OFPP_NONE
self.actions = []
self._data = b''
# ofp_flow_mod & ofp_packet_out do some special handling of 'actions'
# Allow "action" as a synonym for "actions"
if 'action' in kw and 'actions' not in kw:
kw['actions'] = kw['action']
del kw['action']
initHelper(self, kw)
# Allow use of actions=<a single action> for kw args.
if not hasattr(self.actions, '__getitem__'):
self.actions = [self.actions]
@property
def buffer_id (self):
if self._buffer_id == NO_BUFFER: return None
return self._buffer_id
@buffer_id.setter
def buffer_id (self, val):
if val is None: val = NO_BUFFER
self._buffer_id = val
@property
def data (self):
return self._data
@data.setter
def data (self, data):
if data is None:
self._data = b''
elif isinstance(data, packet_base):
self._data = data.pack()
elif isinstance(data, ofp_packet_in):
# Enable you to easily resend a packet
self._data = b''
self.buffer_id = data.buffer_id
if self.buffer_id is None:
#TODO: It'd be nice to log and then ignore if data is incomplete
# Unfortunately, we currently have no logging in here, so we
        # assert instead, which is either too drastic or too quiet.
assert data.is_complete
self._data = data._data
self.in_port = data.in_port
elif isinstance(data, bytes):
self._data = data
assert assert_type("data", self._data, (bytes,))
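  # Illustrative sketch (assumed usage, not part of the original file): a
  # packet_out can be built from scratch or used to resend a packet_in, e.g.:
  #
  #   po = ofp_packet_out(action=ofp_action_output(port=OFPP_FLOOD),
  #                       data=some_packet_in)  # 'some_packet_in' is assumed
  #
  # A single action passed via 'action' is normalized to 'actions' in
  # __init__, and assigning an ofp_packet_in to 'data' copies its buffer_id
  # (or raw data) and in_port via the setter above.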
def _validate (self):
if self.buffer_id is not None and self.data != b'':
return "can not have both buffer_id and data set"
return None
def pack (self):
assert self._assert()
actions = b''.join((i.pack() for i in self.actions))
actions_len = len(actions)
if self.data is not None:
return b''.join((ofp_header.pack(self),
struct.pack("!LHH", self._buffer_id, self.in_port, actions_len),
actions, self.data))
else:
return b''.join((ofp_header.pack(self),
struct.pack("!LHH", self._buffer_id, self.in_port, actions_len),
actions))
def unpack (self, raw, offset=0):
_offset = offset
offset,length = self._unpack_header(raw, offset)
offset,(self._buffer_id, self.in_port, actions_len) = \
_unpack("!LHH", raw, offset)
offset,self.actions = _unpack_actions(raw, actions_len, offset)
remaining = length - (offset - _offset)
if remaining <= 0:
self.data = None
else:
offset,self.data = _read(raw, offset, remaining)
assert length == len(self)
return offset,length
def __len__ (self):
return 16 + reduce(operator.add, (len(a) for a in self.actions),
0) + (len(self.data) if self.data else 0)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.buffer_id != other.buffer_id: return False
if self.in_port != other.in_port: return False
if self.actions != other.actions: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
outstr += prefix + 'in_port: ' + str(self.in_port) + '\n'
outstr += prefix + 'actions_len: ' + str(len(self.actions)) + '\n'
outstr += prefix + 'actions: \n'
for obj in self.actions:
if obj is None:
raise RuntimeError("An element of self.actions was None! "
+ "Bad formatting...")
outstr += obj.show(prefix + ' ')
return outstr
##3.7 Barrier Message
@openflow_s_message("OFPT_BARRIER_REPLY", 19,
reply_to="ofp_barrier_request")
class ofp_barrier_reply (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
return packed
#def unpack (self, raw, offset=0):
# offset,length = self._unpack_header(raw, offset)
# assert length == len(self)
# return offset,length
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
return outstr
@openflow_c_message("OFPT_BARRIER_REQUEST", 18,
request_for="ofp_barrier_reply")
class ofp_barrier_request (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
return packed
#def unpack (self, raw, offset=0):
# offset,length = self._unpack_header(raw, offset)
# assert length == len(self)
# return offset,length
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
return outstr
#4 Asynchronous Messages
@openflow_s_message("OFPT_PACKET_IN", 10)
class ofp_packet_in (ofp_header):
_MIN_LENGTH = 18
def __init__ (self, **kw):
ofp_header.__init__(self)
self.in_port = OFPP_NONE
self._buffer_id = NO_BUFFER
self.reason = 0
self.data = None
self._total_len = None
if 'total_len' in kw:
self._total_len = kw.pop('total_len')
initHelper(self, kw)
def _validate (self):
if self.data and (self.total_len < len(self.data)):
return "total len less than data len"
@property
def total_len (self):
if self._total_len is None:
return len(self.data) if self.data else 0
return self._total_len
@total_len.setter
def total_len (self, value):
self._total_len = value
@property
def buffer_id (self):
if self._buffer_id == NO_BUFFER: return None
return self._buffer_id
@buffer_id.setter
def buffer_id (self, val):
if val is None: val = NO_BUFFER
self._buffer_id = val
@property
def data (self):
return self._data
@data.setter
def data (self, data):
assert assert_type("data", data, (packet_base, str))
if data is None:
self._data = ''
elif isinstance(data, packet_base):
self._data = data.pack()
else:
self._data = data
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!LHHBB", self._buffer_id, self.total_len,
self.in_port, self.reason, 0)
packed += self.data
#TODO: Padding? See __len__
return packed
@property
def is_complete (self):
if self.buffer_id is not None: return True
return len(self.data) == self.total_len
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self._buffer_id, self._total_len, self.in_port, self.reason,
pad) = _unpack("!LHHBB", raw, offset)
offset,self.data = _read(raw, offset, length-18)
assert length == len(self)
return offset,length
def __len__ (self):
#FIXME: This is probably wrong, but it's not clear from the
# spec what's supposed to be going on here.
#if len(self.data) < 2:
# return 20 + len(self.data)
return 18 + len(self.data)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.buffer_id != other.buffer_id: return False
if self.total_len != other.total_len: return False
if self.in_port != other.in_port: return False
if self.reason != other.reason: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
outstr += prefix + 'total_len: ' + str(self._total_len) + '\n'
outstr += prefix + 'in_port: ' + str(self.in_port) + '\n'
outstr += prefix + 'reason: ' + str(self.reason) + '\n'
outstr += prefix + 'data: ' + str(self.data) + '\n'
return outstr
@openflow_s_message("OFPT_FLOW_REMOVED", 11)
class ofp_flow_removed (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
self.match = ofp_match()
self.cookie = 0
self.priority = 0
self.reason = 0
self.duration_sec = 0
self.duration_nsec = 0
self.idle_timeout = 0
self.packet_count = 0
self.byte_count = 0
initHelper(self, kw)
def _validate (self):
if not isinstance(self.match, ofp_match):
return "match is not class ofp_match"
return None
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += self.match.pack()
packed += struct.pack("!QHB", self.cookie, self.priority, self.reason)
packed += _PAD
packed += struct.pack("!LLH", self.duration_sec, self.duration_nsec,
self.idle_timeout)
packed += _PAD2
packed += struct.pack("!QQ", self.packet_count, self.byte_count)
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset = self.match.unpack(raw, offset)
offset,(self.cookie, self.priority, self.reason) = \
_unpack("!QHB", raw, offset)
offset = _skip(raw, offset, 1)
offset,(self.duration_sec, self.duration_nsec, self.idle_timeout) = \
_unpack("!LLH", raw, offset)
offset = _skip(raw, offset, 2)
offset,(self.packet_count, self.byte_count) = \
_unpack("!QQ", raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 48 + len(ofp_match)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.match != other.match: return False
if self.cookie != other.cookie: return False
if self.priority != other.priority: return False
if self.reason != other.reason: return False
if self.duration_sec != other.duration_sec: return False
if self.duration_nsec != other.duration_nsec: return False
if self.idle_timeout != other.idle_timeout: return False
if self.packet_count != other.packet_count: return False
if self.byte_count != other.byte_count: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
outstr += prefix + 'priority: ' + str(self.priority) + '\n'
outstr += prefix + 'reason: ' + str(self.reason) + '\n'
outstr += prefix + 'duration_sec: ' + str(self.duration_sec) + '\n'
outstr += prefix + 'duration_nsec: ' + str(self.duration_nsec) + '\n'
outstr += prefix + 'idle_timeout: ' + str(self.idle_timeout) + '\n'
outstr += prefix + 'packet_count: ' + str(self.packet_count) + '\n'
outstr += prefix + 'byte_count: ' + str(self.byte_count) + '\n'
return outstr
@openflow_s_message("OFPT_PORT_STATUS", 12)
class ofp_port_status (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
self.reason = 0
self.desc = ofp_phy_port()
initHelper(self, kw)
def _validate (self):
if not isinstance(self.desc, ofp_phy_port):
return "desc is not class ofp_phy_port"
return None
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!B", self.reason)
packed += _PAD * 7 # Pad
packed += self.desc.pack()
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.reason,) = _unpack("!B", raw, offset)
offset = _skip(raw, offset, 7)
offset = self.desc.unpack(raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 64
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.reason != other.reason: return False
if self.desc != other.desc: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'reason: ' + str(self.reason) + '\n'
outstr += prefix + 'desc: \n'
outstr += self.desc.show(prefix + ' ')
return outstr
@openflow_s_message("OFPT_ERROR", 1)
class ofp_error (ofp_header):
_MIN_LENGTH = 12
def __init__ (self, **kw):
ofp_header.__init__(self)
self.type = 0
self.code = 0
self.data = b''
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!HH", self.type, self.code)
packed += self.data
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.type, self.code) = _unpack("!HH", raw, offset)
offset,self.data = _read(raw, offset, length - 12)
assert length == len(self)
return offset,length
def __len__ (self):
return 12 + len(self.data)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.type != other.type: return False
if self.code != other.code: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
t = self.type
c = self.code
if t < len(ofp_error_type):
n = ofp_error_type_map[t]
t = "%s (%i)" % (n, t)
n = 'ofp' + n.lower()[5:] + '_code_map'
if n in sys.modules[__name__].__dict__:
if c in sys.modules[__name__].__dict__[n]:
c = "%s (%i)" % (sys.modules[__name__].__dict__[n][c], c)
outstr += prefix + 'type: ' + str(t) + '\n'
outstr += prefix + 'code: ' + str(c) + '\n'
if len(self.data):
outstr += prefix + 'datalen: %s\n' % (len(self.data),)
outstr += prefix + hexdump(self.data).replace("\n", "\n" + prefix)
return outstr.strip()
#5. Symmetric Messages
@openflow_sc_message("OFPT_HELLO", 0)
class ofp_hello (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
return packed
#def unpack (self, raw, offset=0):
# offset,length = self._unpack_header(raw, offset)
# assert length == len(self)
# return offset,length
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
return outstr
@openflow_sc_message("OFPT_ECHO_REQUEST", 2,
request_for="ofp_echo_reply")
class ofp_echo_request (ofp_header):
_MIN_LENGTH = 8
def __init__ (self, **kw):
ofp_header.__init__(self)
self.body = b''
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += self.body
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,self.body = _read(raw, offset, length - 8)
assert length == len(self)
return offset,length
def __len__ (self):
return 8 + len(self.body)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.body != other.body: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'body:\n'
outstr += _format_body(self.body, prefix + ' ') + '\n'
return outstr
@openflow_sc_message("OFPT_ECHO_REPLY", 3,
reply_to="ofp_echo_request")
class ofp_echo_reply (ofp_header):
_MIN_LENGTH = 8
def __init__ (self, **kw):
ofp_header.__init__(self)
self.body = b''
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += self.body
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,self.body = _read(raw, offset, length - 8)
assert length == len(self)
return offset,length
def __len__ (self):
return 8 + len(self.body)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.body != other.body: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'body:\n'
outstr += _format_body(self.body, prefix + ' ') + '\n'
return outstr
class ofp_vendor_base (ofp_header):
header_type = 4 # OFPT_VENDOR
"""
Base class for vendor messages
"""
pass
@openflow_sc_message("OFPT_VENDOR", 4)
class ofp_vendor_generic (ofp_vendor_base):
_MIN_LENGTH = 12
_collect_raw = False
def __init__ (self, **kw):
ofp_header.__init__(self)
self.vendor = 0
self.data = b''
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!L", self.vendor)
if hasattr(self.data, "pack"):
packed += self.data.pack()
else:
packed += self.data
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,length = self._unpack_header(raw, offset)
offset,(self.vendor,) = _unpack("!L", raw, offset)
offset,self.data = _read(raw, offset, length-12)
if self._collect_raw:
      self.raw = raw[_offset:_offset+length]
return offset,length
def __len__ (self):
return 12 + len(self.data)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.vendor != other.vendor: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'vendor: ' + str(self.vendor) + '\n'
outstr += prefix + 'datalen: ' + str(len(self.data)) + '\n'
#outstr += prefix + hexdump(self.data).replace("\n", "\n" + prefix)
return outstr
@openflow_c_message("OFPT_FEATURES_REQUEST", 5,
request_for="ofp_features_reply")
class ofp_features_request (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
return outstr
@openflow_c_message("OFPT_GET_CONFIG_REQUEST", 7,
request_for="ofp_get_config_reply")
class ofp_get_config_request (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
return packed
#def unpack (self, raw, offset=0):
# offset,length = self._unpack_header(raw, offset)
# assert length == len(self)
# return offset,length
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
return outstr
@openflow_s_message("OFPT_GET_CONFIG_REPLY", 8,
reply_to="ofp_get_config_request")
class ofp_get_config_reply (ofp_header): # uses ofp_switch_config
def __init__ (self, **kw):
ofp_header.__init__(self)
self.flags = 0
self.miss_send_len = OFP_DEFAULT_MISS_SEND_LEN
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!HH", self.flags, self.miss_send_len)
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.flags, self.miss_send_len) = \
_unpack("!HH", raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 12
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.flags != other.flags: return False
if self.miss_send_len != other.miss_send_len: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'miss_send_len: ' + str(self.miss_send_len) + '\n'
return outstr
def _unpack_queue_props (b, length, offset=0):
"""
Parses queue props from a buffer
b is a buffer (bytes)
offset, if specified, is where in b to start decoding
  returns (next_offset, [Props])
"""
if (len(b) - offset) < length: raise UnderrunError
props = []
end = length + offset
while offset < end:
(t,l) = struct.unpack_from("!HH", b, offset)
if (len(b) - offset) < l: raise UnderrunError
a = _queue_prop_type_to_class.get(t)
if a is None:
# Use generic prop header for unknown type
a = ofp_queue_prop_generic()
else:
a = a()
a.unpack(b[offset:offset+l])
assert len(a) == l
props.append(a)
offset += l
return (offset, props)
def _unpack_actions (b, length, offset=0):
"""
Parses actions from a buffer
b is a buffer (bytes)
offset, if specified, is where in b to start decoding
returns (next_offset, [Actions])
"""
if (len(b) - offset) < length: raise UnderrunError
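  # Rough usage sketch (added for illustration; not in the original source):
  #
  #   raw = action1.pack() + action2.pack()   # hypothetical packed actions
  #   next_off, actions = _unpack_actions(raw, len(raw))
  #
  # Unknown action types fall back to ofp_action_generic below, so callers
  # get one object per action header found in the buffer.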
actions = []
end = length + offset
while offset < end:
(t,l) = struct.unpack_from("!HH", b, offset)
if (len(b) - offset) < l: raise UnderrunError
a = _action_type_to_class.get(t)
if a is None:
# Use generic action header for unknown type
a = ofp_action_generic()
else:
a = a()
a.unpack(b[offset:offset+l])
assert len(a) == l
actions.append(a)
offset += l
return (offset, actions)
def _init ():
def formatMap (name, m):
o = name + " = {\n"
vk = sorted([(v,k) for k,v in m.iteritems()])
maxlen = 2 + len(reduce(lambda a,b: a if len(a)>len(b) else b,
(v for k,v in vk)))
fstr = " %-" + str(maxlen) + "s : %s,\n"
for v,k in vk:
o += fstr % ("'" + k + "'",v)
o += "}"
return o
"""
maps = []
for k,v in globals().iteritems():
if k.startswith("ofp_") and k.endswith("_map") and type(v) == dict:
maps.append((k,v))
for name,m in maps:
rev = {}
name = name[:-4]
names = globals()[name]
for n in names:
rev[n] = globals()[n]
globals()[name + '_rev_map'] = rev
print(formatMap(name + "_rev_map", rev))
return
"""
maps = []
for k,v in globals().iteritems():
if (k.startswith("ofp_") and k.endswith("_rev_map")
and type(v) == dict):
maps.append((k[:-8],v))
for name,m in maps:
# Try to generate forward maps
forward = dict(((v,k) for k,v in m.iteritems()))
if len(forward) == len(m):
if name + "_map" not in globals():
globals()[name + "_map"] = forward
else:
print(name + "_rev_map is not a map")
# Try to generate lists
v = m.values()
v.sort()
if v[-1] != len(v)-1:
# Allow ones where the last value is a special value (e.g., VENDOR)
del v[-1]
if len(v) > 0 and v[0] == 0 and v[-1] == len(v)-1:
globals()[name] = v
    # Generate globals
for k,v in m.iteritems():
globals()[k] = v
_init()
# Values from macro definitions
OFP_FLOW_PERMANENT = 0
OFP_DL_TYPE_ETH2_CUTOFF = 0x0600
DESC_STR_LEN = 256
OFPFW_ICMP_CODE = OFPFW_TP_DST
OFPQ_MIN_RATE_UNCFG = 0xffff
OFP_VERSION = 0x01
OFP_MAX_TABLE_NAME_LEN = 32
OFP_DL_TYPE_NOT_ETH_TYPE = 0x05ff
OFP_DEFAULT_MISS_SEND_LEN = 128
OFP_MAX_PORT_NAME_LEN = 16
OFP_SSL_PORT = 6633
OFPFW_ICMP_TYPE = OFPFW_TP_SRC
OFP_TCP_PORT = 6633
SERIAL_NUM_LEN = 32
OFP_DEFAULT_PRIORITY = 0x8000
OFP_VLAN_NONE = 0xffff
OFPQ_ALL = 0xffffffff
ofp_match_data = {
'in_port' : (0, OFPFW_IN_PORT),
'dl_src' : (EMPTY_ETH, OFPFW_DL_SRC),
'dl_dst' : (EMPTY_ETH, OFPFW_DL_DST),
'dl_vlan' : (0, OFPFW_DL_VLAN),
'dl_vlan_pcp' : (0, OFPFW_DL_VLAN_PCP),
'dl_type' : (0, OFPFW_DL_TYPE),
'nw_tos' : (0, OFPFW_NW_TOS),
'nw_proto' : (0, OFPFW_NW_PROTO),
'nw_src' : (0, OFPFW_NW_SRC_ALL),
'nw_dst' : (0, OFPFW_NW_DST_ALL),
'tp_src' : (0, OFPFW_TP_SRC),
'tp_dst' : (0, OFPFW_TP_DST),
}
| bsd-3-clause | 7,203,568,719,793,936,000 | 28.367664 | 74 | 0.595384 | false |
was4444/chromium.src | tools/perf/profile_creators/fast_navigation_profile_extender.py | 14 | 8768 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from profile_creators import profile_extender
from telemetry.core import exceptions
from telemetry.core import util
class FastNavigationProfileExtender(profile_extender.ProfileExtender):
"""Extends a Chrome profile.
  This class creates a new profile, or extends an existing one, by performing
  tab navigations in large batches. This is accomplished by opening a large
  number of tabs, simultaneously navigating all the tabs, and then waiting for
  all the tabs to load. This provides two benefits:
- Takes advantage of the high number of logical cores on modern CPUs.
- The total time spent waiting for navigations to time out scales linearly
with the number of batches, but does not scale with the size of the
batch.
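  A hedged usage sketch (assumed, not from the original file) - subclasses
  supply the URLs and the exit condition:
    class MyExtender(FastNavigationProfileExtender):
      def GetUrlIterator(self):
        return iter(['http://example.com/%d' % i for i in xrange(100)])
      def ShouldExitAfterBatchNavigation(self):
        return False  # run until the URL iterator is exhausted
    MyExtender(finder_options, maximum_batch_size=10).Run()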
"""
def __init__(self, finder_options, maximum_batch_size):
"""Initializer.
Args:
maximum_batch_size: A positive integer indicating the number of tabs to
simultaneously perform navigations.
"""
super(FastNavigationProfileExtender, self).__init__(finder_options)
# The instance keeps a list of Tabs that can be navigated successfully.
# This means that the Tab is not crashed, and is processing JavaScript in a
# timely fashion.
self._navigation_tabs = []
# The number of tabs to use.
self._NUM_TABS = maximum_batch_size
# The amount of additional time to wait for a batch of pages to finish
# loading for each page in the batch.
self._BATCH_TIMEOUT_PER_PAGE_IN_SECONDS = 20
# The amount of time to wait for a page to quiesce. Some pages will never
# quiesce.
self._TIME_TO_WAIT_FOR_PAGE_TO_QUIESCE_IN_SECONDS = 10
def Run(self):
"""Superclass override."""
try:
self.SetUpBrowser()
self._PerformNavigations()
finally:
self.TearDownBrowser()
# When there hasn't been an exception, verify that the profile was
# correctly extended.
# TODO(erikchen): I've intentionally omitted my implementation of
# VerifyProfileWasExtended() in small_profile_extender, since the profile
# is not being correctly extended. http://crbug.com/484833
# http://crbug.com/484880
self.VerifyProfileWasExtended()
def VerifyProfileWasExtended(self):
"""Verifies that the profile was correctly extended.
Can be overridden by subclasses.
"""
pass
def GetUrlIterator(self):
"""Gets URLs for the browser to navigate to.
Intended for subclass override.
Returns:
An iterator whose elements are urls to be navigated to.
"""
raise NotImplementedError()
def ShouldExitAfterBatchNavigation(self):
"""Returns a boolean indicating whether profile extension is finished.
Intended for subclass override.
"""
raise NotImplementedError()
def CleanUpAfterBatchNavigation(self):
"""A hook for subclasses to perform cleanup after each batch of
navigations.
Can be overridden by subclasses.
"""
pass
def _RefreshNavigationTabs(self):
"""Updates the member self._navigation_tabs to contain self._NUM_TABS
elements, each of which is not crashed. The crashed tabs are intentionally
leaked, since Telemetry doesn't have a good way of killing crashed tabs.
It is also possible for a tab to be stalled in an infinite JavaScript loop.
These tabs will be in self.browser.tabs, but not in self._navigation_tabs.
There is no way to kill these tabs, so they are also leaked. This method is
careful to only use tabs in self._navigation_tabs, or newly created tabs.
"""
live_tabs = [tab for tab in self._navigation_tabs if tab.IsAlive()]
self._navigation_tabs = live_tabs
while len(self._navigation_tabs) < self._NUM_TABS:
self._navigation_tabs.append(self._browser.tabs.New())
  def _RemoveNavigationTab(self, tab):
    """Removes a tab which is no longer in a usable state from
self._navigation_tabs. The tab is not removed from self.browser.tabs,
since there is no guarantee that the tab can be safely removed."""
self._navigation_tabs.remove(tab)
  def _RetrieveTabUrl(self, tab, timeout):
    """Retrieves the URL of the tab."""
# TODO(erikchen): Use tab.url instead, which talks to the browser process
# instead of the renderer process. http://crbug.com/486119
return tab.EvaluateJavaScript('document.URL', timeout)
def _WaitForUrlToChange(self, tab, initial_url, end_time):
"""Waits for the tab to navigate away from its initial url.
If time.time() is larger than end_time, the function does nothing.
Otherwise, the function tries to return no later than end_time.
"""
while True:
seconds_to_wait = end_time - time.time()
if seconds_to_wait <= 0:
break
current_url = self._RetrieveTabUrl(tab, seconds_to_wait)
if current_url != initial_url and current_url != '':
break
# Retrieving the current url is a non-trivial operation. Add a small
# sleep here to prevent this method from contending with the actual
# navigation.
time.sleep(0.01)
def _WaitForTabToBeReady(self, tab, end_time):
"""Waits for the tab to be ready.
If time.time() is larger than end_time, the function does nothing.
Otherwise, the function tries to return no later than end_time.
"""
seconds_to_wait = end_time - time.time()
if seconds_to_wait <= 0:
return
tab.WaitForDocumentReadyStateToBeComplete(seconds_to_wait)
# Wait up to 10 seconds for the page to quiesce. If the page hasn't
# quiesced in 10 seconds, it will probably never quiesce.
seconds_to_wait = end_time - time.time()
seconds_to_wait = max(0, seconds_to_wait)
try:
util.WaitFor(tab.HasReachedQuiescence, seconds_to_wait)
except exceptions.TimeoutException:
pass
def _BatchNavigateTabs(self, batch):
"""Performs a batch of tab navigations with minimal delay.
Args:
batch: A list of tuples (tab, url).
Returns:
A list of tuples (tab, initial_url). |initial_url| is the url of the
|tab| prior to a navigation command being sent to it.
"""
# Attempting to pass in a timeout of 0 seconds results in a synchronous
# socket error from the websocket library. Pass in a very small timeout
# instead so that the websocket library raises a Timeout exception. This
# prevents the logic from accidentally catching different socket
# exceptions.
timeout_in_seconds = 0.01
queued_tabs = []
for tab, url in batch:
initial_url = self._RetrieveTabUrl(tab, 20)
try:
tab.Navigate(url, None, timeout_in_seconds)
except exceptions.TimeoutException:
# We expect to receive a timeout exception, since we're not waiting for
# the navigation to complete.
pass
queued_tabs.append((tab, initial_url))
return queued_tabs
def _WaitForQueuedTabsToLoad(self, queued_tabs):
"""Waits for all the batch navigated tabs to finish loading.
Args:
queued_tabs: A list of tuples (tab, initial_url). Each tab is guaranteed
to have already been sent a navigation command.
"""
total_batch_timeout = (len(queued_tabs) *
self._BATCH_TIMEOUT_PER_PAGE_IN_SECONDS)
end_time = time.time() + total_batch_timeout
for tab, initial_url in queued_tabs:
# Since we didn't wait any time for the tab url navigation to commit, it's
# possible that the tab hasn't started navigating yet.
self._WaitForUrlToChange(tab, initial_url, end_time)
self._WaitForTabToBeReady(tab, end_time)
def _GetUrlsToNavigate(self, url_iterator):
"""Returns an array of urls to navigate to, given a url_iterator."""
urls = []
for _ in xrange(self._NUM_TABS):
try:
urls.append(url_iterator.next())
except StopIteration:
break
return urls
def _PerformNavigations(self):
"""Repeatedly fetches a batch of urls, and navigates to those urls. This
will run until an empty batch is returned, or
ShouldExitAfterBatchNavigation() returns True.
"""
url_iterator = self.GetUrlIterator()
while True:
self._RefreshNavigationTabs()
urls = self._GetUrlsToNavigate(url_iterator)
if len(urls) == 0:
break
batch = []
for i in range(len(urls)):
url = urls[i]
tab = self._navigation_tabs[i]
batch.append((tab, url))
queued_tabs = self._BatchNavigateTabs(batch)
self._WaitForQueuedTabsToLoad(queued_tabs)
self.CleanUpAfterBatchNavigation()
if self.ShouldExitAfterBatchNavigation():
break
| bsd-3-clause | -6,631,524,169,817,016,000 | 34.787755 | 80 | 0.690237 | false |
Telrik/komimport-2.0 | vendor/guzzlehttp/guzzle/docs/conf.py | 100 | 2995 | import sys, os
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
primary_domain = 'php'
# -- General configuration -----------------------------------------------------
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Guzzle'
copyright = u'2012, Michael Dowling'
version = '3.0.0'
release = '3.0.0'
exclude_patterns = ['_build']
# -- Options for HTML output ---------------------------------------------------
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Guzzle documentation"
html_short_title = "Guzzle"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html', 'searchbox.html']
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'Guzzledoc'
# -- Guzzle Sphinx theme setup ------------------------------------------------
sys.path.insert(0, '/Users/dowling/projects/guzzle_sphinx_theme')
import guzzle_sphinx_theme
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
# Guzzle theme options (see theme.conf for more information)
html_theme_options = {
"project_nav_name": "Guzzle",
"github_user": "guzzle",
"github_repo": "guzzle",
"disqus_comments_shortname": "guzzle",
"google_analytics_account": "UA-22752917-1"
}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Guzzle.tex', u'Guzzle Documentation',
u'Michael Dowling', 'manual'),
]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'guzzle', u'Guzzle Documentation',
[u'Michael Dowling'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Guzzle', u'Guzzle Documentation',
u'Michael Dowling', 'Guzzle', 'One line description of project.',
'Miscellaneous'),
]
| bsd-3-clause | -4,678,375,126,345,400,000 | 31.204301 | 80 | 0.641402 | false |
kewisch/bedrock | bedrock/newsletter/tests/test_footer_form.py | 3 | 2110 | from funfactory.urlresolvers import reverse
from mock import patch
from nose.tools import eq_
from pyquery import PyQuery as pq
from bedrock.mozorg.tests import TestCase
@patch('bedrock.newsletter.utils.get_languages_for_newsletters',
lambda *x: set(['en', 'fr', 'pt']))
@patch('lib.l10n_utils.template_is_active', lambda *x: True)
class TestNewsletterFooter(TestCase):
def setUp(self):
self.view_name = 'newsletter.mozilla-and-you'
def test_country_selected(self):
"""
The correct country for the locale should be initially selected.
"""
with self.activate('en-US'):
resp = self.client.get(reverse(self.view_name))
doc = pq(resp.content)
eq_(doc('#id_country option[selected="selected"]').val(), 'us')
# no country in locale, no country selected
with self.activate('fr'):
resp = self.client.get(reverse(self.view_name))
doc = pq(resp.content)
eq_(doc('#id_country option[selected="selected"]').val(), '')
with self.activate('pt-BR'):
resp = self.client.get(reverse(self.view_name))
doc = pq(resp.content)
eq_(doc('#id_country option[selected="selected"]').val(), 'br')
def test_language_selected(self):
"""
The correct language for the locale should be initially selected or
'en' if it's not an option.
"""
with self.activate('fr'):
resp = self.client.get(reverse(self.view_name))
doc = pq(resp.content)
eq_(doc('#id_lang option[selected="selected"]').val(), 'fr')
# with hyphenated regional locale, should have only lang
with self.activate('pt-BR'):
resp = self.client.get(reverse(self.view_name))
doc = pq(resp.content)
eq_(doc('#id_lang option[selected="selected"]').val(), 'pt')
# not supported. should default to ''
with self.activate('ak'):
resp = self.client.get(reverse(self.view_name))
doc = pq(resp.content)
eq_(doc('#id_lang option[selected="selected"]').val(), '')
| mpl-2.0 | -8,319,709,939,350,413,000 | 36.678571 | 75 | 0.608531 | false |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/isapi/simple.py | 23 | 2490 | """Simple base-classes for extensions and filters.
None of the filter and extension functions are considered 'optional' by the
framework. These base-classes provide simple implementations for the
Initialize and Terminate functions, allowing you to omit them.
It is not necessary to use these base-classes - but if you don't, you
must ensure each of the required methods is implemented.
"""
class SimpleExtension:
"Base class for a simple ISAPI extension"
def __init__(self):
pass
def GetExtensionVersion(self, vi):
"""Called by the ISAPI framework to get the extension version
The default implementation uses the classes docstring to
set the extension description."""
# nod to our reload capability - vi is None when we are reloaded.
if vi is not None:
vi.ExtensionDesc = self.__doc__
def HttpExtensionProc(self, control_block):
"""Called by the ISAPI framework for each extension request.
sub-classes must provide an implementation for this method.
"""
raise NotImplementedError("sub-classes should override HttpExtensionProc")
def TerminateExtension(self, status):
"""Called by the ISAPI framework as the extension terminates.
"""
pass
class SimpleFilter:
"Base class for a a simple ISAPI filter"
filter_flags = None
def __init__(self):
pass
def GetFilterVersion(self, fv):
"""Called by the ISAPI framework to get the extension version
The default implementation uses the classes docstring to
set the extension description, and uses the classes
filter_flags attribute to set the ISAPI filter flags - you
must specify filter_flags in your class.
"""
if self.filter_flags is None:
raise RuntimeError("You must specify the filter flags")
# nod to our reload capability - fv is None when we are reloaded.
if fv is not None:
fv.Flags = self.filter_flags
fv.FilterDesc = self.__doc__
def HttpFilterProc(self, fc):
"""Called by the ISAPI framework for each filter request.
sub-classes must provide an implementation for this method.
"""
raise NotImplementedError("sub-classes should override HttpExtensionProc")
def TerminateFilter(self, status):
"""Called by the ISAPI framework as the filter terminates.
"""
pass
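# --- Illustrative sketch (an assumption, not part of the original module) ---
# A hypothetical extension built on SimpleExtension: GetExtensionVersion and
# TerminateExtension are inherited, so only HttpExtensionProc needs a body.
# The class name and response text below are invented for illustration only.
class _ExampleHelloExtension(SimpleExtension):
    "Example ISAPI extension that returns a fixed plain-text response"
    def HttpExtensionProc(self, ecb):
        from isapi import isapicon  # pywin32 ISAPI status constants
        # Send headers, write the body, then tell IIS the request is complete.
        ecb.SendResponseHeaders("200 OK", "Content-Type: text/plain\r\n\r\n", False)
        ecb.WriteClient("Hello from a SimpleExtension subclass")
        ecb.DoneWithSession()
        return isapicon.HSE_STATUS_SUCCESS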
| apache-2.0 | -4,035,626,619,538,003,000 | 35.617647 | 82 | 0.66506 | false |
datalogistics/libdlt | tools/dlt_xfer.py | 1 | 4008 | #!/usr/bin/env python3
import os
import argparse
import json
import libdlt
from unis.exceptions import CollectionIndexError
from libdlt.util.common import print_progress
SYS_PATH="/etc/periscope"
USER_DEPOTS=os.path.join(SYS_PATH, "depots.conf")
UNIS_URL = "http://unis.crest.iu.edu:8890"
XFER_TOTAL = 0
def progress(depot, name, total, size, offset):
global XFER_TOTAL
if not size:
XFER_TOTAL = 0
else:
XFER_TOTAL += size
print_progress(XFER_TOTAL, total, name)
def main():
parser = argparse.ArgumentParser(description="DLT File Transfer Tool")
parser.add_argument('files', metavar='FILES', type=str, nargs='+',
help='Files to transfer')
parser.add_argument('-u', '--upload', action='store_true',
help='Perform file upload (default is download)')
parser.add_argument('-H', '--host', type=str, default=UNIS_URL,
help='UNIS instance for uploading eXnode metadata')
parser.add_argument('-b', '--bs', type=str, default='20m',
help='Block size')
parser.add_argument('-d', '--depot-file', type=str, default=None,
help='Depots in a JSON dict used for upload')
parser.add_argument('-o', '--output', type=str, default=None,
help='Output file')
parser.add_argument('-V', '--visualize', type=str, default=None,
help='Periscope URL for visualization')
parser.add_argument('-D', '--debug', type=str, default=None,
help='Include verbose logging output')
parser.add_argument('-t', '--threads', type=int, default=5,
help='Number of threads for operation')
parser.add_argument('-r', '--recursive', action='store_true',
help='Recurse into subdirectories')
parser.add_argument('-c', '--cert', type=str, default=None,
help='SSL Cert/Key for HTTPS endpoints')
args = parser.parse_args()
bs = args.bs
df = args.depot_file
if args.debug in ['TRACE', 'DEBUG']:
import logging as plogging
from lace import logging
plogging.basicConfig(format='%(color)s[%(asctime)-15s] [%(levelname)s] %(name)s%(reset)s %(message)s')
log = logging.getLogger('libdlt')
log.setLevel(logging.DEBUG)
if args.debug == 'TRACE':
from lace.logging import trace
trace.setLevel(logging.DEBUG, True)
depots = None
if df:
try:
f = open(df, "r")
depots = json.loads(f.read())
except Exception as e:
print ("{}, trying {}".format(e, USER_DEPOTS))
try:
f = open(USER_DEPOTS, "r")
                depots = json.loads(f.read())
except:
print ("ERROR: No default depot file: {}".format(USER_DEPOTS))
exit(1)
sess = libdlt.Session([{"default": True, "url": args.host, "ssl": args.cert}],
bs=bs, depots=depots, threads=args.threads,
**{"viz_url": args.visualize})
xfer = sess.upload if args.upload else sess.download
flist = []
for f in args.files:
if args.recursive and os.path.isdir(f):
for dirpath, dirnames, files in os.walk(f):
for n in files:
flist.append(os.path.join(dirpath, n))
else:
flist.append(f)
for f in flist:
try:
result = xfer(f, folder=args.output, progress_cb=progress)
diff, res = result.time, result.exnode
except CollectionIndexError as e:
print ("ERROR: invalid file or URL: {}".format(e))
exit(1)
print ("{0} ({1} {2:.2f} MB/s) {3}".format(res.name, res.size,
res.size/1e6/diff,
res.selfRef))
if __name__ == "__main__":
main()
| bsd-3-clause | 8,300,174,638,794,628,000 | 37.912621 | 110 | 0.545409 | false |
whitzhu/kolibri | kolibri/auth/test/test_permissions_classes.py | 10 | 5747 | from __future__ import absolute_import, print_function, unicode_literals
from django.test import TestCase
from mock import Mock
from ..models import FacilityUser, DeviceOwner, Facility, KolibriAnonymousUser
from ..api import KolibriAuthPermissions
from ..permissions.base import BasePermissions
from ..permissions.general import AllowAll, DenyAll
class BasePermissionsThrowExceptionsTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create()
self.object = object() # shouldn't matter what the object is, for these tests
self.facility_user = FacilityUser.objects.create(username="qqq", facility=self.facility)
self.device_owner = DeviceOwner.objects.create(username="zzz")
self.anon_user = KolibriAnonymousUser()
self.permissions = BasePermissions()
def test_user_cannot_create(self):
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_create_object(self.facility_user, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_create_object(self.device_owner, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_create_object(self.anon_user, self.object))
def test_user_cannot_read(self):
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_read_object(self.facility_user, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_read_object(self.device_owner, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_read_object(self.anon_user, self.object))
def test_user_cannot_update(self):
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_update_object(self.facility_user, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_update_object(self.device_owner, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_update_object(self.anon_user, self.object))
def test_user_cannot_delete(self):
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_delete_object(self.facility_user, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_delete_object(self.device_owner, self.object))
with self.assertRaises(NotImplementedError):
self.assertFalse(self.permissions.user_can_delete_object(self.anon_user, self.object))
class TestBooleanOperationsOnPermissionClassesTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create()
self.obj = object()
self.user = FacilityUser.objects.create(username='dummyuser', facility=self.facility)
self.queryset = FacilityUser.objects.all()
def assertAllowAll(self, perms, test_filtering=True):
self.assertTrue(perms.user_can_create_object(self.user, self.obj))
self.assertTrue(perms.user_can_read_object(self.user, self.obj))
self.assertTrue(perms.user_can_update_object(self.user, self.obj))
self.assertTrue(perms.user_can_delete_object(self.user, self.obj))
if test_filtering:
self.assertSetEqual(set(self.queryset), set(perms.readable_by_user_filter(self.user, self.queryset)))
def assertDenyAll(self, perms, test_filtering=True):
self.assertFalse(perms.user_can_create_object(self.user, self.obj))
self.assertFalse(perms.user_can_read_object(self.user, self.obj))
self.assertFalse(perms.user_can_update_object(self.user, self.obj))
self.assertFalse(perms.user_can_delete_object(self.user, self.obj))
if test_filtering:
self.assertEqual(len(perms.readable_by_user_filter(self.user, self.queryset)), 0)
def test_allow_or_allow(self):
self.assertAllowAll(AllowAll() | AllowAll())
def test_allow_or_deny(self):
self.assertAllowAll(AllowAll() | DenyAll())
def test_deny_or_allow(self):
self.assertAllowAll(DenyAll() | AllowAll())
def test_deny_or_deny(self):
self.assertDenyAll(DenyAll() | DenyAll())
def test_allow_and_allow(self):
self.assertAllowAll(AllowAll() & AllowAll())
def test_allow_and_deny(self):
self.assertDenyAll(AllowAll() & DenyAll())
def test_deny_and_allow(self):
self.assertDenyAll(DenyAll() & AllowAll())
def test_deny_and_deny(self):
self.assertDenyAll(DenyAll() & DenyAll())
def test_or_is_shortcircuited_for_efficiency(self):
self.assertAllowAll(AllowAll() | BasePermissions(), test_filtering=False)
def test_and_is_shortcircuited_for_efficiency(self):
self.assertDenyAll(DenyAll() & BasePermissions(), test_filtering=False)
def test_or_is_not_shortcircuited_inappropriately(self):
with self.assertRaises(NotImplementedError):
self.assertAllowAll(BasePermissions() | AllowAll())
def test_and_is_not_shortcircuited_inappropriately(self):
with self.assertRaises(NotImplementedError):
self.assertDenyAll(BasePermissions() & DenyAll())
class KolibriAuthPermissionsTestCase(TestCase):
def test_bad_request_method(self):
request = Mock(method="BADWOLF")
view = Mock()
obj = Mock()
perm_obj = KolibriAuthPermissions()
self.assertFalse(perm_obj.has_object_permission(request, view, obj))
| mit | 8,803,652,868,043,418,000 | 45.346774 | 113 | 0.708022 | false |
FedeDR/django-oscar-paypal | paypal/payflow/models.py | 9 | 3364 | from __future__ import unicode_literals
import re
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from paypal.payflow import codes
from paypal import base
@python_2_unicode_compatible
class PayflowTransaction(base.ResponseModel):
# This is the linking parameter between the merchant and PayPal. It is
# normally set to the order number
comment1 = models.CharField(_("Comment 1"), max_length=128, db_index=True)
trxtype = models.CharField(_("Transaction type"), max_length=12)
tender = models.CharField(_("Bankcard or PayPal"), max_length=12, null=True)
amount = models.DecimalField(max_digits=12, decimal_places=2, null=True,
blank=True)
# Response params
pnref = models.CharField(_("Payflow transaction ID"), max_length=32,
null=True)
ppref = models.CharField(_("Payment transaction ID"), max_length=32,
unique=True, null=True)
result = models.CharField(max_length=32, null=True, blank=True)
respmsg = models.CharField(_("Response message"), max_length=512)
authcode = models.CharField(_("Auth code"), max_length=32, null=True,
blank=True)
# Fraud/risk params
cvv2match = models.CharField(_("CVV2 check"), null=True, blank=True,
max_length=12)
avsaddr = models.CharField(_("House number check"), null=True, blank=True,
max_length=1)
avszip = models.CharField(_("Zip/Postcode check"), null=True, blank=True,
max_length=1)
class Meta:
ordering = ('-date_created',)
app_label = 'paypal'
def save(self, *args, **kwargs):
self.raw_request = re.sub(r'PWD=.+?&', 'PWD=XXXXXX&', self.raw_request)
        self.raw_request = re.sub(r'ACCT=\d+(\d{4})&', r'ACCT=XXXXXXXXXXXX\1&', self.raw_request)
self.raw_request = re.sub(r'CVV2=\d+&', 'CVV2=XXX&', self.raw_request)
return super(PayflowTransaction, self).save(*args, **kwargs)
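    # Illustrative note (an addition, not from the original source): the
    # substitutions above mask credentials before the raw request is stored.
    # The ACCT replacement uses a backreference so the last four digits are
    # kept, e.g. (invented values)
    #   'PWD=secret&ACCT=4111111111111111&CVV2=123&'
    # becomes
    #   'PWD=XXXXXX&ACCT=XXXXXXXXXXXX1111&CVV2=XXX&'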
def get_trxtype_display(self):
return ugettext(codes.trxtype_map.get(self.trxtype, self.trxtype))
get_trxtype_display.short_description = _("Transaction type")
def get_tender_display(self):
return ugettext(codes.tender_map.get(self.tender, ''))
get_tender_display.short_description = _("Tender")
@property
def is_approved(self):
return self.result in ('0', '126')
def is_address_verified(self):
        return self.avsaddr == 'Y' and self.avszip == 'Y'
def __str__(self):
return self.pnref
@property
def can_be_voided(self):
if self.trxtype != codes.AUTHORIZATION:
return False
return self.is_approved
@property
def can_be_credited(self):
"""
Test if this txn can be credited
"""
if self.trxtype not in (codes.SALE, codes.DELAYED_CAPTURE):
return False
return self.is_approved
@property
def can_be_captured(self):
"""
Test if this txn can be captured
"""
if self.trxtype != codes.AUTHORIZATION:
return False
return self.is_approved
| bsd-3-clause | -1,132,983,771,953,467,100 | 35.172043 | 96 | 0.616825 | false |
Ban3/Limnoria | plugins/Dict/__init__.py | 4 | 2289 | ###
# Copyright (c) 2004, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Commands that use the dictd protocol to define word.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you\'re keeping the plugin in CVS or some similar system.
__version__ = "%%VERSION%%"
__author__ = supybot.authors.jemfinch
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
from . import config
from . import plugin
from imp import reload
reload(plugin) # In case we're being reloaded.
if world.testing:
from . import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause | -3,868,354,150,566,165,000 | 37.79661 | 79 | 0.760594 | false |
kennedyshead/home-assistant | homeassistant/components/smarthab/config_flow.py | 2 | 2392 | """SmartHab configuration flow."""
import logging
import pysmarthab
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
class SmartHabConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""SmartHab config flow."""
VERSION = 1
def _show_setup_form(self, user_input=None, errors=None):
"""Show the setup form to the user."""
if user_input is None:
user_input = {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(
CONF_EMAIL, default=user_input.get(CONF_EMAIL, "")
): str,
vol.Required(CONF_PASSWORD): str,
}
),
errors=errors or {},
)
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
errors = {}
if user_input is None:
return self._show_setup_form(user_input, None)
username = user_input[CONF_EMAIL]
password = user_input[CONF_PASSWORD]
# Check if already configured
if self.unique_id is None:
await self.async_set_unique_id(username)
self._abort_if_unique_id_configured()
# Setup connection with SmartHab API
hub = pysmarthab.SmartHab()
try:
await hub.async_login(username, password)
# Verify that passed in configuration works
if hub.is_logged_in():
return self.async_create_entry(
title=username, data={CONF_EMAIL: username, CONF_PASSWORD: password}
)
errors["base"] = "invalid_auth"
except pysmarthab.RequestFailedException:
_LOGGER.exception("Error while trying to reach SmartHab API")
errors["base"] = "service"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected error during login")
errors["base"] = "unknown"
return self._show_setup_form(user_input, errors)
async def async_step_import(self, import_info):
"""Handle import from legacy config."""
return await self.async_step_user(import_info)
| apache-2.0 | 9,158,663,498,396,323,000 | 29.666667 | 88 | 0.58194 | false |
sql-machine-learning/sqlflow | python/runtime/pai/submitter_evaluate.py | 1 | 4277 | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import runtime.temp_file as temp_file
from runtime import db
from runtime.diagnostics import SQLFlowDiagnostic
from runtime.model import EstimatorType
from runtime.pai import cluster_conf, pai_model, table_ops
from runtime.pai.get_pai_tf_cmd import (ENTRY_FILE, JOB_ARCHIVE_FILE,
PARAMS_FILE, get_pai_tf_cmd)
from runtime.pai.prepare_archive import prepare_archive
from runtime.pai.submit_pai_task import submit_pai_task
from runtime.pai_local.try_run import try_pai_local_run
from runtime.step.create_result_table import create_evaluate_table
def submit_pai_evaluate(datasource,
original_sql,
select,
label_name,
model,
model_params,
result_table,
user=""):
"""Submit a PAI evaluation task
Args:
datasource: string
Like: maxcompute://ak:[email protected]/api?
curr_project=test_ci&scheme=http
original_sql: string
Original "TO PREDICT" statement.
select: string
SQL statement to get prediction data set.
model: string
Model to load and do prediction.
label_name: string
The label name to evaluate.
model_params: dict
Params for training, crossponding to WITH clause.
result_table: string
The table name to save prediction result.
user: string
A string to identify the user, used to load model from the user's
directory.
"""
params = dict(locals())
project = table_ops.get_project(datasource)
if result_table.count(".") == 0:
result_table = "%s.%s" % (project, result_table)
params["result_table"] = result_table
oss_model_path = pai_model.get_oss_model_save_path(datasource,
model,
user=user)
model_type, estimator = pai_model.get_saved_model_type_and_estimator(
datasource, model)
if model_type == EstimatorType.PAIML:
raise SQLFlowDiagnostic("PAI model evaluation is not supported yet.")
if model_type == EstimatorType.XGBOOST:
params["entry_type"] = "evaluate_xgb"
validation_metrics = model_params.get("validation.metrics",
"accuracy_score")
else:
params["entry_type"] = "evaluate_tf"
validation_metrics = model_params.get("validation.metrics", "Accuracy")
validation_metrics = [m.strip() for m in validation_metrics.split(",")]
with db.connect_with_data_source(datasource) as conn:
result_column_names = create_evaluate_table(conn, result_table,
validation_metrics)
with table_ops.create_tmp_tables_guard(select, datasource) as data_table:
params["pai_table"] = data_table
params["result_column_names"] = result_column_names
if try_pai_local_run(params, oss_model_path):
return
conf = cluster_conf.get_cluster_config(model_params)
with temp_file.TemporaryDirectory(prefix="sqlflow", dir="/tmp") as cwd:
prepare_archive(cwd, estimator, oss_model_path, params)
cmd = get_pai_tf_cmd(
conf, "file://" + os.path.join(cwd, JOB_ARCHIVE_FILE),
"file://" + os.path.join(cwd, PARAMS_FILE), ENTRY_FILE, model,
oss_model_path, data_table, "", result_table, project)
submit_pai_task(cmd, datasource)
| apache-2.0 | 8,518,427,084,068,251,000 | 40.931373 | 79 | 0.611644 | false |
hgrimelid/feincms | feincms/views/base.py | 1 | 3756 | from django.contrib.auth.decorators import permission_required
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.utils.cache import add_never_cache_headers
try:
from django.template.response import TemplateResponse
except ImportError:
TemplateResponse = None
from feincms.module.page.models import Page
class Handler(object):
"""
This is the default handler for feincms page content.
It isn't a class-based-view like those in Django's generic view framework.
State should not be stored on the ``Handler`` class, because of thread-safety
and cross polination issues.
"""
def __call__(self, request, path=None):
return self.build_response(request,
Page.objects.page_for_path_or_404(path or request.path))
def build_response(self, request, page):
"""
Calls `prepare`, `render` and `finalize`, in this order.
"""
response = self.prepare(request, page)
if response:
return response
response = self.render(request, page)
return self.finalize(request, response, page)
def prepare(self, request, page):
"""
Prepare / pre-process content types. If this method returns anything,
it is treated as a ``HttpResponse`` and handed back to the visitor.
"""
response = page.setup_request(request)
if response:
return response
for content in page.content.all_of_type(tuple(page._feincms_content_types_with_process)):
r = content.process(request)
if r:
return r
def render(self, request, page):
"""
The render step. Must return a HttpResponse.
"""
# This facility can be used by request processors to add values
# to the context.
context = request._feincms_extra_context
context['feincms_page'] = page
if TemplateResponse:
return TemplateResponse(request, page.template.path, context)
else:
return render_to_response(page.template.path,
context_instance=RequestContext(request, context))
def finalize(self, request, response, page):
"""
Runs finalize() on content types having such a method, adds headers and
returns the final response.
"""
for content in page.content.all_of_type(tuple(page._feincms_content_types_with_finalize)):
r = content.finalize(request, response)
if r:
return r
page.finalize_response(request, response)
# Add never cache headers in case frontend editing is active
if hasattr(request, "session") and request.session.get('frontend_editing', False):
add_never_cache_headers(response)
return response
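# Illustrative sketch (an assumption, not part of the original file): a project
# can subclass Handler to customise one of the three steps; the response header
# name below is an invented example.
#
#   class CustomHandler(Handler):
#       def finalize(self, request, response, page):
#           response = super(CustomHandler, self).finalize(request, response, page)
#           response['X-FeinCMS-Page'] = str(page.pk)
#           return response
#
#   handler = CustomHandler()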
#: Default handler
handler = Handler()
class PreviewHandler(Handler):
"""
This handler is for previewing site content; it takes a page_id so
the page is uniquely identified and does not care whether the page
is active or expired. To balance that, it requires a logged in user.
"""
def __call__(self, request, page_id):
page = get_object_or_404(Page, pk=page_id)
return self.build_response(request, page)
def finalize(self, request, response, page):
"""
Do (nearly) nothing. Do not call any ``finalize`` methods,
because those might add stuff to the cache, set ETags etc.
all of which we cannot use in a preview handler.
"""
add_never_cache_headers(response)
return response
#: Preview handler
preview_handler = permission_required('page.change_page')(PreviewHandler())
| bsd-3-clause | -1,696,824,816,090,143,000 | 31.66087 | 98 | 0.647764 | false |
charlesbastos/ArduPilotMega_demo | Tools/LogAnalyzer/LogAnalyzer.py | 74 | 12240 | #!/usr/bin/env python
#
# A module to analyze and identify any common problems which can be determined from log files
#
# Initial code by Andrew Chapman ([email protected]), 16th Jan 2014
#
# some logging oddities noticed while doing this, to be followed up on:
# - tradheli MOT labels Mot1,Mot2,Mot3,Mot4,GGain
# - Pixhawk doesn't output one of the FMT labels... forget which one
# - MAG offsets seem to be constant (only seen data on Pixhawk)
# - MAG offsets seem to be cast to int before being output? (param is -84.67, logged as -84)
# - copter+plane use 'V' in their vehicle type/version/build line, rover uses lower case 'v'. Copter+Rover give a build number, plane does not
# - CTUN.ThrOut on copter is 0-1000, on plane+rover it is 0-100
# TODO: add test for noisy baro values
# TODO: support loading binary log files (use Tridge's mavlogdump?)
import DataflashLog
import pprint # temp
import imp
import glob
import inspect
import os, sys
import argparse
import datetime
import time
from xml.sax.saxutils import escape
class TestResult(object):
'''all tests return a standardized result type'''
class StatusType:
# NA means not applicable for this log (e.g. copter tests against a plane log), UNKNOWN means it is missing data required for the test
GOOD, FAIL, WARN, UNKNOWN, NA = range(5)
status = None
statusMessage = "" # can be multi-line
class Test(object):
'''base class to be inherited by log tests. Each test should be quite granular so we have lots of small tests with clear results'''
def __init__(self):
self.name = ""
self.result = None # will be an instance of TestResult after being run
self.execTime = None
self.enable = True
def run(self, logdata, verbose=False):
pass
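# --- Illustrative sketch (an assumption, not part of the original file) ---
# A hypothetical minimal Test subclass of the kind TestSuite loads from the
# 'tests' folder; the parameter name checked here is an invented example.
class _ExampleParamCheck(Test):
    '''example test: reports whether the log contains a THR_MIN parameter'''
    def __init__(self):
        Test.__init__(self)
        self.name = "Example Param Check"
    def run(self, logdata, verbose=False):
        self.result = TestResult()
        if "THR_MIN" in logdata.parameters:
            self.result.status = TestResult.StatusType.GOOD
        else:
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "THR_MIN parameter not found in log"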
class TestSuite(object):
    '''registers test classes, loaded using a basic plugin architecture, and can run them all in one run() operation'''
def __init__(self):
self.tests = []
self.logfile = None
self.logdata = None
# dynamically load in Test subclasses from the 'tests' folder
# to prevent one being loaded, move it out of that folder, or set that test's .enable attribute to False
dirName = os.path.dirname(os.path.abspath(__file__))
testScripts = glob.glob(dirName + '/tests/*.py')
testClasses = []
for script in testScripts:
m = imp.load_source("m",script)
for name, obj in inspect.getmembers(m, inspect.isclass):
if name not in testClasses and inspect.getsourcefile(obj) == script:
testClasses.append(name)
self.tests.append(obj())
# and here's an example of explicitly loading a Test class if you wanted to do that
# m = imp.load_source("m", dirName + '/tests/TestBadParams.py')
# self.tests.append(m.TestBadParams())
def run(self, logdata, verbose):
'''run all registered tests in a single call, gathering execution timing info'''
self.logdata = logdata
self.logfile = logdata.filename
for test in self.tests:
# run each test in turn, gathering timing info
if test.enable:
startTime = time.time()
test.run(self.logdata, verbose) # RUN THE TEST
endTime = time.time()
test.execTime = 1000 * (endTime-startTime)
def outputPlainText(self, outputStats):
'''output test results in plain text'''
print 'Dataflash log analysis report for file: ' + self.logfile
print 'Log size: %.2fmb (%d lines)' % (self.logdata.filesizeKB / 1024.0, self.logdata.lineCount)
print 'Log duration: %s' % str(datetime.timedelta(seconds=self.logdata.durationSecs)) + '\n'
if self.logdata.vehicleType == "ArduCopter" and self.logdata.getCopterType():
print 'Vehicle Type: %s (%s)' % (self.logdata.vehicleType, self.logdata.getCopterType())
else:
print 'Vehicle Type: %s' % self.logdata.vehicleType
print 'Firmware Version: %s (%s)' % (self.logdata.firmwareVersion, self.logdata.firmwareHash)
print 'Hardware: %s' % self.logdata.hardwareType
print 'Free RAM: %s' % self.logdata.freeRAM
if self.logdata.skippedLines:
print "\nWARNING: %d malformed log lines skipped during read" % self.logdata.skippedLines
print '\n'
print "Test Results:"
for test in self.tests:
if not test.enable:
continue
statusMessageFirstLine = test.result.statusMessage.strip('\n\r').split('\n')[0]
statusMessageExtra = test.result.statusMessage.strip('\n\r').split('\n')[1:]
execTime = ""
if outputStats:
execTime = " (%6.2fms)" % (test.execTime)
if test.result.status == TestResult.StatusType.GOOD:
print " %20s: GOOD %-55s%s" % (test.name, statusMessageFirstLine, execTime)
elif test.result.status == TestResult.StatusType.FAIL:
print " %20s: FAIL %-55s%s [GRAPH]" % (test.name, statusMessageFirstLine, execTime)
elif test.result.status == TestResult.StatusType.WARN:
print " %20s: WARN %-55s%s [GRAPH]" % (test.name, statusMessageFirstLine, execTime)
elif test.result.status == TestResult.StatusType.NA:
# skip any that aren't relevant for this vehicle/hardware/etc
continue
else:
print " %20s: UNKNOWN %-55s%s" % (test.name, statusMessageFirstLine, execTime)
#if statusMessageExtra:
for line in statusMessageExtra:
print " %29s %s" % ("",line)
print '\n'
print 'The Log Analyzer is currently BETA code.\nFor any support or feedback on the log analyzer please email Andrew Chapman ([email protected])'
print '\n'
def outputXML(self, xmlFile):
'''output test results to an XML file'''
# open the file for writing
xml = None
try:
if xmlFile == '-':
xml = sys.stdout
else:
xml = open(xmlFile, 'w')
except:
sys.stderr.write("Error opening output xml file: %s" % xmlFile)
sys.exit(1)
# output header info
print >>xml, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
print >>xml, "<loganalysis>"
print >>xml, "<header>"
print >>xml, " <logfile>" + escape(self.logfile) + "</logfile>"
print >>xml, " <sizekb>" + escape(`self.logdata.filesizeKB`) + "</sizekb>"
print >>xml, " <sizelines>" + escape(`self.logdata.lineCount`) + "</sizelines>"
print >>xml, " <duration>" + escape(str(datetime.timedelta(seconds=self.logdata.durationSecs))) + "</duration>"
print >>xml, " <vehicletype>" + escape(self.logdata.vehicleType) + "</vehicletype>"
if self.logdata.vehicleType == "ArduCopter" and self.logdata.getCopterType():
print >>xml, " <coptertype>" + escape(self.logdata.getCopterType()) + "</coptertype>"
print >>xml, " <firmwareversion>" + escape(self.logdata.firmwareVersion) + "</firmwareversion>"
print >>xml, " <firmwarehash>" + escape(self.logdata.firmwareHash) + "</firmwarehash>"
print >>xml, " <hardwaretype>" + escape(self.logdata.hardwareType) + "</hardwaretype>"
print >>xml, " <freemem>" + escape(`self.logdata.freeRAM`) + "</freemem>"
print >>xml, " <skippedlines>" + escape(`self.logdata.skippedLines`) + "</skippedlines>"
print >>xml, "</header>"
# output parameters
print >>xml, "<params>"
for param, value in self.logdata.parameters.items():
print >>xml, " <param name=\"%s\" value=\"%s\" />" % (param,escape(`value`))
print >>xml, "</params>"
# output test results
print >>xml, "<results>"
for test in self.tests:
if not test.enable:
continue
print >>xml, " <result>"
if test.result.status == TestResult.StatusType.GOOD:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>GOOD</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
elif test.result.status == TestResult.StatusType.FAIL:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>FAIL</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
print >>xml, " <data>(test data will be embeded here at some point)</data>"
elif test.result.status == TestResult.StatusType.WARN:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>WARN</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
print >>xml, " <data>(test data will be embeded here at some point)</data>"
elif test.result.status == TestResult.StatusType.NA:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>NA</status>"
else:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>UNKNOWN</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
print >>xml, " </result>"
print >>xml, "</results>"
print >>xml, "</loganalysis>"
xml.close()
def main():
dirName = os.path.dirname(os.path.abspath(__file__))
# deal with command line arguments
parser = argparse.ArgumentParser(description='Analyze an APM Dataflash log for known issues')
parser.add_argument('logfile', type=argparse.FileType('r'), help='path to Dataflash log file (or - for stdin)')
parser.add_argument('-f', '--format', metavar='', type=str, action='store', choices=['bin','log','auto'], default='auto', help='log file format: \'bin\',\'log\' or \'auto\'')
parser.add_argument('-q', '--quiet', metavar='', action='store_const', const=True, help='quiet mode, do not print results')
parser.add_argument('-p', '--profile', metavar='', action='store_const', const=True, help='output performance profiling data')
parser.add_argument('-s', '--skip_bad', metavar='', action='store_const', const=True, help='skip over corrupt dataflash lines')
parser.add_argument('-e', '--empty', metavar='', action='store_const', const=True, help='run an initial check for an empty log')
parser.add_argument('-x', '--xml', type=str, metavar='XML file', nargs='?', const='', default='', help='write output to specified XML file (or - for stdout)')
parser.add_argument('-v', '--verbose', metavar='', action='store_const', const=True, help='verbose output')
args = parser.parse_args()
# load the log
startTime = time.time()
logdata = DataflashLog.DataflashLog(args.logfile.name, format=args.format, ignoreBadlines=args.skip_bad) # read log
endTime = time.time()
if args.profile:
print "Log file read time: %.2f seconds" % (endTime-startTime)
# check for empty log if requested
if args.empty:
emptyErr = DataflashLog.DataflashLogHelper.isLogEmpty(logdata)
if emptyErr:
sys.stderr.write("Empty log file: %s, %s" % (logdata.filename, emptyErr))
sys.exit(1)
#run the tests, and gather timings
testSuite = TestSuite()
startTime = time.time()
testSuite.run(logdata, args.verbose) # run tests
endTime = time.time()
if args.profile:
print "Test suite run time: %.2f seconds" % (endTime-startTime)
# deal with output
if not args.quiet:
testSuite.outputPlainText(args.profile)
if args.xml:
testSuite.outputXML(args.xml)
if not args.quiet:
print "XML output written to file: %s\n" % args.xml
if __name__ == "__main__":
main()
| gpl-3.0 | -2,479,926,188,697,281,000 | 46.8125 | 179 | 0.600572 | false |
MoamerEncsConcordiaCa/tensorflow | tensorflow/python/saved_model/builder.py | 126 | 1271 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel builder.
Builds a SavedModel that can be saved to storage, is language neutral, and
enables systems to produce, consume, or transform TensorFlow Models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.saved_model.builder_impl import SavedModelBuilder
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"SavedModelBuilder",
]
remove_undocumented(__name__, _allowed_symbols)
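# Illustrative usage sketch (an assumption, not part of the original file); the
# export directory and tag below are invented placeholders.
#
#   builder = SavedModelBuilder('/tmp/example_saved_model')
#   with tf.Session(graph=tf.Graph()) as sess:
#       ...  # build the graph and initialize variables
#       builder.add_meta_graph_and_variables(sess, ['serve'])
#   builder.save()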
| apache-2.0 | -1,713,068,632,878,346,500 | 35.314286 | 80 | 0.722266 | false |
mpharrigan/mdtraj | mdtraj/geometry/distance.py | 1 | 11815 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2015 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors: Kyle A Beauchamp, Jason Swails
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import numpy as np
from mdtraj.utils import ensure_type
from mdtraj.utils.six.moves import range
from mdtraj.geometry import _geometry
__all__ = ['compute_distances', 'compute_displacements',
'compute_center_of_mass', 'find_closest_contact']
##############################################################################
# Functions
##############################################################################
def compute_distances(traj, atom_pairs, periodic=True, opt=True):
"""Compute the distances between pairs of atoms in each frame.
Parameters
----------
traj : Trajectory
        An mdtraj trajectory.
atom_pairs : np.ndarray, shape=(num_pairs, 2), dtype=int
Each row gives the indices of two atoms involved in the interaction.
periodic : bool, default=True
If `periodic` is True and the trajectory contains unitcell
information, we will compute distances under the minimum image
convention.
opt : bool, default=True
Use an optimized native library to calculate distances. Our optimized
SSE minimum image convention calculation implementation is over 1000x
faster than the naive numpy implementation.
Returns
-------
distances : np.ndarray, shape=(n_frames, num_pairs), dtype=float
The distance, in each frame, between each pair of atoms.
"""
xyz = ensure_type(traj.xyz, dtype=np.float32, ndim=3, name='traj.xyz', shape=(None, None, 3), warn_on_cast=False)
pairs = ensure_type(atom_pairs, dtype=np.int32, ndim=2, name='atom_pairs', shape=(None, 2), warn_on_cast=False)
if not np.all(np.logical_and(pairs < traj.n_atoms, pairs >= 0)):
raise ValueError('atom_pairs must be between 0 and %d' % traj.n_atoms)
if len(pairs) == 0:
return np.zeros((len(xyz), 0), dtype=np.float32)
if periodic and traj._have_unitcell:
box = ensure_type(traj.unitcell_vectors, dtype=np.float32, ndim=3, name='unitcell_vectors', shape=(len(xyz), 3, 3),
warn_on_cast=False)
orthogonal = np.allclose(traj.unitcell_angles, 90)
if opt:
out = np.empty((xyz.shape[0], pairs.shape[0]), dtype=np.float32)
_geometry._dist_mic(xyz, pairs, box.transpose(0, 2, 1).copy(), out, orthogonal)
return out
else:
return _distance_mic(xyz, pairs, box.transpose(0, 2, 1), orthogonal)
# either there are no unitcell vectors or they dont want to use them
if opt:
out = np.empty((xyz.shape[0], pairs.shape[0]), dtype=np.float32)
_geometry._dist(xyz, pairs, out)
return out
else:
return _distance(xyz, pairs)
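# Illustrative usage sketch (an assumption, not part of the original module);
# the trajectory file name and atom indices below are invented placeholders.
#
#   import numpy as np
#   import mdtraj as md
#   traj = md.load('example.pdb')
#   pairs = np.array([[0, 10], [0, 20]])
#   dists = compute_distances(traj, pairs)   # shape (n_frames, 2), in nm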
def compute_displacements(traj, atom_pairs, periodic=True, opt=True):
"""Compute the displacement vector between pairs of atoms in each frame of a trajectory.
Parameters
----------
traj : Trajectory
Trajectory to compute distances in
atom_pairs : np.ndarray, shape[num_pairs, 2], dtype=int
Each row gives the indices of two atoms.
periodic : bool, default=True
If `periodic` is True and the trajectory contains unitcell
information, we will compute distances under the minimum image
convention.
opt : bool, default=True
Use an optimized native library to calculate distances. Our
optimized minimum image convention calculation implementation is
over 1000x faster than the naive numpy implementation.
Returns
-------
displacements : np.ndarray, shape=[n_frames, n_pairs, 3], dtype=float32
        The displacement vector, in each frame, between each pair of atoms.
"""
xyz = ensure_type(traj.xyz, dtype=np.float32, ndim=3, name='traj.xyz', shape=(None, None, 3), warn_on_cast=False)
pairs = ensure_type(np.asarray(atom_pairs), dtype=np.int32, ndim=2, name='atom_pairs', shape=(None, 2), warn_on_cast=False)
if not np.all(np.logical_and(pairs < traj.n_atoms, pairs >= 0)):
raise ValueError('atom_pairs must be between 0 and %d' % traj.n_atoms)
if periodic and traj._have_unitcell:
box = ensure_type(traj.unitcell_vectors, dtype=np.float32, ndim=3, name='unitcell_vectors', shape=(len(xyz), 3, 3),
warn_on_cast=False)
orthogonal = np.allclose(traj.unitcell_angles, 90)
if opt:
out = np.empty((xyz.shape[0], pairs.shape[0], 3), dtype=np.float32)
_geometry._dist_mic_displacement(xyz, pairs, box.transpose(0, 2, 1).copy(), out, orthogonal)
return out
else:
return _displacement_mic(xyz, pairs, box.transpose(0, 2, 1), orthogonal)
# either there are no unitcell vectors or they dont want to use them
if opt:
out = np.empty((xyz.shape[0], pairs.shape[0], 3), dtype=np.float32)
_geometry._dist_displacement(xyz, pairs, out)
return out
return _displacement(xyz, pairs)
def compute_center_of_mass(traj):
"""Compute the center of mass for each frame.
Parameters
----------
traj : Trajectory
Trajectory to compute center of mass for
Returns
-------
com : np.ndarray, shape=(n_frames, 3)
Coordinates of the center of mass for each frame
"""
com = np.zeros((traj.n_frames, 3))
masses = np.array([a.element.mass for a in traj.top.atoms])
masses /= masses.sum()
for i, x in enumerate(traj.xyz):
com[i, :] = x.astype('float64').T.dot(masses)
return com
def find_closest_contact(traj, group1, group2, frame=0, periodic=True):
"""Find the closest contact between two groups of atoms.
Given a frame of a Trajectory and two groups of atoms, identify the pair of
atoms (one from each group) that form the closest contact between the two groups.
Parameters
----------
traj : Trajectory
        An mdtraj trajectory.
group1 : np.ndarray, shape=(num_atoms), dtype=int
The indices of atoms in the first group.
group2 : np.ndarray, shape=(num_atoms), dtype=int
The indices of atoms in the second group.
frame : int, default=0
The frame of the Trajectory to take positions from
periodic : bool, default=True
If `periodic` is True and the trajectory contains unitcell
information, we will compute distances under the minimum image
convention.
Returns
-------
result : tuple (int, int, float)
The indices of the two atoms forming the closest contact, and the distance between them.
"""
xyz = ensure_type(traj.xyz, dtype=np.float32, ndim=3, name='traj.xyz', shape=(None, None, 3), warn_on_cast=False)[frame]
atoms1 = ensure_type(group1, dtype=np.int32, ndim=1, name='group1', warn_on_cast=False)
atoms2 = ensure_type(group2, dtype=np.int32, ndim=1, name='group2', warn_on_cast=False)
if periodic and traj._have_unitcell:
box = ensure_type(traj.unitcell_vectors, dtype=np.float32, ndim=3, name='unitcell_vectors', shape=(len(traj.xyz), 3, 3),
warn_on_cast=False)[frame]
else:
box = None
return _geometry._find_closest_contact(xyz, atoms1, atoms2, box)
##############################################################################
# pure python implementation of the core routines
##############################################################################
def _distance(xyz, pairs):
"Distance between pairs of points in each frame"
delta = np.diff(xyz[:, pairs], axis=2)[:, :, 0]
return (delta ** 2.).sum(-1) ** 0.5
def _displacement(xyz, pairs):
"Displacement vector between pairs of points in each frame"
value = np.diff(xyz[:, pairs], axis=2)[:, :, 0]
assert value.shape == (xyz.shape[0], pairs.shape[0], 3), 'v.shape %s, xyz.shape %s, pairs.shape %s' % (str(value.shape), str(xyz.shape), str(pairs.shape))
return value
def _distance_mic(xyz, pairs, box_vectors, orthogonal):
"""Distance between pairs of points in each frame under the minimum image
convention for periodic boundary conditions.
    The computation follows scheme B.9 in Tuckerman, M. "Statistical
Mechanics: Theory and Molecular Simulation", 2010.
This is a slow pure python implementation, mostly for testing.
"""
out = np.empty((xyz.shape[0], pairs.shape[0]), dtype=np.float32)
for i in range(len(xyz)):
hinv = np.linalg.inv(box_vectors[i])
bv1, bv2, bv3 = box_vectors[i].T
for j, (a,b) in enumerate(pairs):
s1 = np.dot(hinv, xyz[i,a,:])
s2 = np.dot(hinv, xyz[i,b,:])
s12 = s2 - s1
s12 = s12 - np.round(s12)
r12 = np.dot(box_vectors[i], s12)
dist = np.linalg.norm(r12)
if not orthogonal:
for ii in range(-1, 2):
v1 = bv1*ii
for jj in range(-1, 2):
v12 = bv2*jj + v1
for kk in range(-1, 2):
new_r12 = r12 + v12 + bv3*kk
dist = min(dist, np.linalg.norm(new_r12))
out[i, j] = dist
return out
def _displacement_mic(xyz, pairs, box_vectors, orthogonal):
"""Displacement vector between pairs of points in each frame under the
minimum image convention for periodic boundary conditions.
    The computation follows scheme B.9 in Tuckerman, M. "Statistical
Mechanics: Theory and Molecular Simulation", 2010.
This is a very slow pure python implementation, mostly for testing.
"""
out = np.empty((xyz.shape[0], pairs.shape[0], 3), dtype=np.float32)
for i in range(len(xyz)):
hinv = np.linalg.inv(box_vectors[i])
bv1, bv2, bv3 = box_vectors[i].T
for j, (a,b) in enumerate(pairs):
s1 = np.dot(hinv, xyz[i,a,:])
s2 = np.dot(hinv, xyz[i,b,:])
s12 = s2 - s1
s12 = s12 - np.round(s12)
disp = np.dot(box_vectors[i], s12)
min_disp = disp
dist2 = (disp*disp).sum()
if not orthogonal:
for ii in range(-1, 2):
v1 = bv1*ii
for jj in range(-1, 2):
v12 = bv2*jj+v1
for kk in range(-1, 2):
tmp = disp + v12 + bv3*kk
new_dist2 = (tmp*tmp).sum()
if new_dist2 < dist2:
dist2 = new_dist2
min_disp = tmp
out[i, j] = min_disp
return out
| lgpl-2.1 | -2,654,320,538,677,725,700 | 40.024306 | 158 | 0.588489 | false |