repo_name | path | copies | size | content | license
---|---|---|---|---|---
abgworrall/test-infra | gubernator/third_party/defusedxml/sax.py | 53 | 1462 | # defusedxml
#
# Copyright (c) 2013 by Christian Heimes <[email protected]>
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
"""Defused xml.sax
"""
from __future__ import print_function, absolute_import
from xml.sax import InputSource as _InputSource
from xml.sax import ErrorHandler as _ErrorHandler
from . import expatreader
__origin__ = "xml.sax"
def parse(source, handler, errorHandler=_ErrorHandler(), forbid_dtd=False,
forbid_entities=True, forbid_external=True):
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
parser.forbid_dtd = forbid_dtd
parser.forbid_entities = forbid_entities
parser.forbid_external = forbid_external
parser.parse(source)
def parseString(string, handler, errorHandler=_ErrorHandler(),
forbid_dtd=False, forbid_entities=True,
forbid_external=True):
from io import BytesIO
if errorHandler is None:
errorHandler = _ErrorHandler()
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
parser.forbid_dtd = forbid_dtd
parser.forbid_entities = forbid_entities
parser.forbid_external = forbid_external
inpsrc = _InputSource()
inpsrc.setByteStream(BytesIO(string))
parser.parse(inpsrc)
def make_parser(parser_list=[]):
return expatreader.create_parser()
| apache-2.0 |
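A minimal usage sketch of the hardened helpers above (assuming the standard `defusedxml` package layout; the handler class and sample payloads are illustrative assumptions, not part of the module):

```python
from xml.sax.handler import ContentHandler
from defusedxml import EntitiesForbidden
from defusedxml.sax import parseString

class NameCollector(ContentHandler):
    """Toy handler: record every element name seen while parsing."""
    def __init__(self):
        ContentHandler.__init__(self)
        self.names = []
    def startElement(self, name, attrs):
        self.names.append(name)

# Ordinary documents parse exactly as with xml.sax ...
parseString(b"<doc><title>ok</title></doc>", NameCollector())

# ... but entity definitions are rejected by default (forbid_entities=True),
# so a "billion laughs"-style payload raises instead of expanding.
try:
    parseString(b'<!DOCTYPE d [<!ENTITY e "boom">]><d>&e;</d>', NameCollector())
except EntitiesForbidden:
    pass
```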
ess-dmsc/do-ess-data-simulator | DonkiPlayer/scripts/mcstas-generator/src/flatbuffers/compat.py | 19 | 1465 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" A tiny version of `six` to help with backwards compability. """
import sys
PY2 = sys.version_info[0] == 2
PY26 = sys.version_info[0:2] == (2, 6)
PY27 = sys.version_info[0:2] == (2, 7)
PY275 = sys.version_info[0:3] >= (2, 7, 5)
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = (str,)
binary_types = (bytes,bytearray)
range_func = range
memoryview_type = memoryview
struct_bool_decl = "?"
else:
string_types = (unicode,)
if PY26 or PY27:
binary_types = (str,bytearray)
else:
binary_types = (str,)
range_func = xrange
if PY26 or (PY27 and not PY275):
memoryview_type = buffer
struct_bool_decl = "<b"
else:
memoryview_type = memoryview
struct_bool_decl = "?"
# NOTE: Future Jython support may require code here (look at `six`).
| bsd-2-clause |
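A small, hypothetical illustration of how downstream 2/3-compatible code typically consumes these shims (the `describe` helper below is not part of flatbuffers):

```python
from flatbuffers import compat

def describe(value):
    # string_types / binary_types expand to the right tuple for the running
    # interpreter, so one isinstance() check covers both Python 2 and 3.
    if isinstance(value, compat.string_types):
        return "text"
    if isinstance(value, compat.binary_types):
        return "binary"
    return type(value).__name__

# range_func is range on Python 3 and xrange on Python 2, so this loop never
# materialises a full list on either interpreter.
total = sum(i for i in compat.range_func(1000))
```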
YinongLong/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 160 | 6028 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is a feature that is
# zero across all samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
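For context, a self-contained sketch of the estimator exercised by these tests; the shapes mirror `test_correct_shapes` above.

```python
import numpy as np
from sklearn.decomposition import SparsePCA

rng = np.random.RandomState(0)
X = rng.randn(12, 10)                     # 12 samples, 10 features
spca = SparsePCA(n_components=8, alpha=1, random_state=0)
codes = spca.fit_transform(X)             # loadings, shape (12, 8)
assert codes.shape == (12, 8)
assert spca.components_.shape == (8, 10)  # sparse dictionary atoms
```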
isyippee/ceilometer | ceilometer/hardware/discovery.py | 7 | 3617 | # -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from ceilometer.agent import plugin_base
from ceilometer.i18n import _
from ceilometer import nova_client
LOG = log.getLogger(__name__)
OPTS = [
cfg.StrOpt('url_scheme',
default='snmp://',
help='URL scheme to use for hardware nodes.'),
cfg.StrOpt('readonly_user_name',
default='ro_snmp_user',
help='SNMPd user name of all nodes running in the cloud.'),
cfg.StrOpt('readonly_user_password',
default='password',
help='SNMPd password of all the nodes running in the cloud.',
secret=True),
]
cfg.CONF.register_opts(OPTS, group='hardware')
class NodesDiscoveryTripleO(plugin_base.DiscoveryBase):
def __init__(self):
super(NodesDiscoveryTripleO, self).__init__()
self.nova_cli = nova_client.Client()
self.last_run = None
self.instances = {}
@staticmethod
def _address(instance, field):
return instance.addresses['ctlplane'][0].get(field)
def discover(self, manager, param=None):
"""Discover resources to monitor.
instance_get_all will return all instances if last_run is None,
and will return only the instances changed since the last_run time.
"""
try:
instances = self.nova_cli.instance_get_all(self.last_run)
except Exception:
# NOTE(zqfan): instance_get_all is wrapped and will log the exception
# when there is any error. There is no need to raise it again or
# print it one more time.
return []
for instance in instances:
if getattr(instance, 'OS-EXT-STS:vm_state', None) in ['deleted',
'error']:
self.instances.pop(instance.id, None)
else:
self.instances[instance.id] = instance
self.last_run = timeutils.utcnow(True).isoformat()
resources = []
for instance in self.instances.values():
try:
ip_address = self._address(instance, 'addr')
final_address = (
cfg.CONF.hardware.url_scheme +
cfg.CONF.hardware.readonly_user_name + ':' +
cfg.CONF.hardware.readonly_user_password + '@' +
ip_address)
resource = {
'resource_id': instance.id,
'resource_url': final_address,
'mac_addr': self._address(instance,
'OS-EXT-IPS-MAC:mac_addr'),
'image_id': instance.image['id'],
'flavor_id': instance.flavor['id']
}
resources.append(resource)
except KeyError:
LOG.error(_("Couldn't obtain IP address of "
"instance %s") % instance.id)
return resources
| apache-2.0 |
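Illustrative only: with the default options registered above, a node whose `ctlplane` address is 192.0.2.10 would be reported with a `resource_url` of the form below (the address and credentials are placeholders, not real values).

```python
# Mirrors the string concatenation in discover(); not an executable snippet
# taken from the project itself.
url_scheme = 'snmp://'            # cfg.CONF.hardware.url_scheme
user = 'ro_snmp_user'             # cfg.CONF.hardware.readonly_user_name
password = 'password'             # cfg.CONF.hardware.readonly_user_password
ip_address = '192.0.2.10'         # instance.addresses['ctlplane'][0]['addr']
resource_url = url_scheme + user + ':' + password + '@' + ip_address
# -> 'snmp://ro_snmp_user:[email protected]'
```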
pizzathief/scipy | benchmarks/benchmarks/sparse.py | 5 | 13402 | """
Simple benchmarks for the sparse module
"""
import warnings
import time
import timeit
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy
import numpy as np
from numpy import ones, array, asarray, empty, random
try:
from scipy import sparse
from scipy.sparse import (csr_matrix, coo_matrix, dia_matrix, lil_matrix,
dok_matrix, rand, SparseEfficiencyWarning)
except ImportError:
pass
from .common import Benchmark
def random_sparse(m, n, nnz_per_row):
rows = numpy.arange(m).repeat(nnz_per_row)
cols = numpy.random.randint(0, n, size=nnz_per_row*m)
vals = numpy.random.random_sample(m*nnz_per_row)
return coo_matrix((vals, (rows, cols)), (m, n)).tocsr()
# TODO move this to a matrix gallery and add unittests
def poisson2d(N, dtype='d', format=None):
"""
Return a sparse matrix for the 2D Poisson problem
with standard 5-point finite difference stencil on a
square N-by-N grid.
"""
if N == 1:
diags = asarray([[4]], dtype=dtype)
return dia_matrix((diags, [0]), shape=(1, 1)).asformat(format)
offsets = array([0, -N, N, -1, 1])
diags = empty((5, N**2), dtype=dtype)
diags[0] = 4 # main diagonal
diags[1:] = -1 # all offdiagonals
diags[3, N-1::N] = 0 # first lower diagonal
diags[4, N::N] = 0 # first upper diagonal
return dia_matrix((diags, offsets), shape=(N**2, N**2)).asformat(format)
class Arithmetic(Benchmark):
param_names = ['format', 'XY', 'op']
params = [
['csr'],
['AA', 'AB', 'BA', 'BB'],
['__add__', '__sub__', 'multiply', '__mul__']
]
def setup(self, format, XY, op):
matrices = dict(A=poisson2d(250, format=format),
B=poisson2d(250, format=format)**2)
x = matrices[XY[0]]
self.y = matrices[XY[1]]
self.fn = getattr(x, op)
self.fn(self.y) # warmup
def time_arithmetic(self, format, XY, op):
self.fn(self.y)
class Sort(Benchmark):
params = ['Rand10', 'Rand25', 'Rand50', 'Rand100', 'Rand200']
param_names = ['matrix']
def setup(self, matrix):
n = 10000
if matrix.startswith('Rand'):
k = int(matrix[4:])
self.A = random_sparse(n, n, k)
self.A.has_sorted_indices = False
self.A.indices[:2] = 2, 1
else:
raise NotImplementedError()
def time_sort(self, matrix):
"""sort CSR column indices"""
self.A.sort_indices()
class Matvec(Benchmark):
params = [
['Identity', 'Poisson5pt', 'Block2x2', 'Block3x3'],
['dia', 'csr', 'csc', 'dok', 'lil', 'coo', 'bsr']
]
param_names = ['matrix', 'format']
def setup(self, matrix, format):
if matrix == 'Identity':
if format in ('lil', 'dok'):
raise NotImplementedError()
self.A = sparse.eye(10000, 10000, format=format)
elif matrix == 'Poisson5pt':
self.A = poisson2d(300, format=format)
elif matrix == 'Block2x2':
if format not in ('csr', 'bsr'):
raise NotImplementedError()
b = (2, 2)
self.A = sparse.kron(poisson2d(150),
ones(b)).tobsr(blocksize=b).asformat(format)
elif matrix == 'Block3x3':
if format not in ('csr', 'bsr'):
raise NotImplementedError()
b = (3, 3)
self.A = sparse.kron(poisson2d(100),
ones(b)).tobsr(blocksize=b).asformat(format)
else:
raise NotImplementedError()
self.x = ones(self.A.shape[1], dtype=float)
def time_matvec(self, matrix, format):
self.A * self.x
class Matvecs(Benchmark):
params = ['dia', 'coo', 'csr', 'csc', 'bsr']
param_names = ["format"]
def setup(self, format):
self.A = poisson2d(300, format=format)
self.x = ones((self.A.shape[1], 10), dtype=self.A.dtype)
def time_matvecs(self, format):
self.A * self.x
class Matmul(Benchmark):
def setup(self):
H1, W1 = 1, 100000
H2, W2 = W1, 1000
C1 = 10
C2 = 1000000
random.seed(0)
i = random.randint(H1, size=C1)
j = random.randint(W1, size=C1)
data = random.rand(C1)
self.matrix1 = coo_matrix((data, (i, j)), shape=(H1, W1)).tocsr()
i = random.randint(H2, size=C2)
j = random.randint(W2, size=C2)
data = random.rand(C2)
self.matrix2 = coo_matrix((data, (i, j)), shape=(H2, W2)).tocsr()
def time_large(self):
for i in range(100):
self.matrix1 * self.matrix2
# Retain old benchmark results (remove this if changing the benchmark)
time_large.version = "33aee08539377a7cb0fabaf0d9ff9d6d80079a428873f451b378c39f6ead48cb"
class Construction(Benchmark):
params = [
['Empty', 'Identity', 'Poisson5pt'],
['lil', 'dok']
]
param_names = ['matrix', 'format']
def setup(self, name, format):
if name == 'Empty':
self.A = coo_matrix((10000, 10000))
elif name == 'Identity':
self.A = sparse.eye(10000, format='coo')
else:
self.A = poisson2d(100, format='coo')
formats = {'lil': lil_matrix, 'dok': dok_matrix}
self.cls = formats[format]
def time_construction(self, name, format):
T = self.cls(self.A.shape)
for i, j, v in zip(self.A.row, self.A.col, self.A.data):
T[i, j] = v
class Conversion(Benchmark):
params = [
['csr', 'csc', 'coo', 'dia', 'lil', 'dok', 'bsr'],
['csr', 'csc', 'coo', 'dia', 'lil', 'dok', 'bsr'],
]
param_names = ['from_format', 'to_format']
def setup(self, fromfmt, tofmt):
base = poisson2d(100, format=fromfmt)
try:
self.fn = getattr(base, 'to' + tofmt)
except Exception:
def fn():
raise RuntimeError()
self.fn = fn
def time_conversion(self, fromfmt, tofmt):
self.fn()
class Getset(Benchmark):
params = [
[1, 10, 100, 1000, 10000],
['different', 'same'],
['csr', 'csc', 'lil', 'dok']
]
param_names = ['N', 'sparsity pattern', 'format']
unit = "seconds"
def setup(self, N, sparsity_pattern, format):
if format == 'dok' and N > 500:
raise NotImplementedError()
self.A = rand(1000, 1000, density=1e-5)
A = self.A
N = int(N)
# indices to assign to
i, j = [], []
while len(i) < N:
n = N - len(i)
ip = numpy.random.randint(0, A.shape[0], size=n)
jp = numpy.random.randint(0, A.shape[1], size=n)
i = numpy.r_[i, ip]
j = numpy.r_[j, jp]
v = numpy.random.rand(n)
if N == 1:
i = int(i)
j = int(j)
v = float(v)
base = A.asformat(format)
self.m = base.copy()
self.i = i
self.j = j
self.v = v
def _timeit(self, kernel, recopy):
min_time = 1e99
if not recopy:
kernel(self.m, self.i, self.j, self.v)
number = 1
start = time.time()
while time.time() - start < 0.1:
if recopy:
m = self.m.copy()
else:
m = self.m
while True:
duration = timeit.timeit(
lambda: kernel(m, self.i, self.j, self.v), number=number)
if duration > 1e-5:
break
else:
number *= 10
min_time = min(min_time, duration/number)
return min_time
def track_fancy_setitem(self, N, sparsity_pattern, format):
def kernel(A, i, j, v):
A[i, j] = v
with warnings.catch_warnings():
warnings.simplefilter('ignore', SparseEfficiencyWarning)
return self._timeit(kernel, sparsity_pattern == 'different')
def time_fancy_getitem(self, N, sparsity_pattern, format):
self.m[self.i, self.j]
class NullSlice(Benchmark):
params = [[0.05, 0.01], ['csr', 'csc', 'lil']]
param_names = ['density', 'format']
def _setup(self, density, format):
n = 100000
k = 1000
# faster version of rand(n, k, format=format, density=density),
# with non-exact nnz
nz = int(n*k * density)
row = np.random.randint(0, n, size=nz)
col = np.random.randint(0, k, size=nz)
data = np.ones(nz, dtype=np.float64)
X = coo_matrix((data, (row, col)), shape=(n, k))
X.sum_duplicates()
X = X.asformat(format)
with open('{}-{}.pck'.format(density, format), 'wb') as f:
pickle.dump(X, f, protocol=pickle.HIGHEST_PROTOCOL)
def setup_cache(self):
for density in self.params[0]:
for fmt in self.params[1]:
self._setup(density, fmt)
setup_cache.timeout = 120
def setup(self, density, format):
# Unpickling is faster than computing the random matrix...
with open('{}-{}.pck'.format(density, format), 'rb') as f:
self.X = pickle.load(f)
def time_getrow(self, density, format):
self.X.getrow(100)
def time_getcol(self, density, format):
self.X.getcol(100)
def time_3_rows(self, density, format):
self.X[[0, 100, 105], :]
def time_10000_rows(self, density, format):
self.X[np.arange(10000), :]
def time_3_cols(self, density, format):
self.X[:, [0, 100, 105]]
def time_100_cols(self, density, format):
self.X[:, np.arange(100)]
# Retain old benchmark results (remove this if changing the benchmark)
time_10000_rows.version = "dc19210b894d5fd41d4563f85b7459ef5836cddaf77154b539df3ea91c5d5c1c"
time_100_cols.version = "8d43ed52084cdab150018eedb289a749a39f35d4dfa31f53280f1ef286a23046"
time_3_cols.version = "93e5123910772d62b3f72abff56c2732f83d217221bce409b70e77b89c311d26"
time_3_rows.version = "a9eac80863a0b2f4b510269955041930e5fdd15607238257eb78244f891ebfe6"
time_getcol.version = "291388763b355f0f3935db9272a29965d14fa3f305d3306059381e15300e638b"
time_getrow.version = "edb9e4291560d6ba8dd58ef371b3a343a333bc10744496adb3ff964762d33c68"
class Diagonal(Benchmark):
params = [[0.01, 0.1, 0.5], ['csr', 'csc', 'coo', 'lil', 'dok', 'dia']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 1000
if format == 'dok' and n * density >= 500:
raise NotImplementedError()
warnings.simplefilter('ignore', SparseEfficiencyWarning)
self.X = sparse.rand(n, n, format=format, density=density)
def time_diagonal(self, density, format):
self.X.diagonal()
# Retain old benchmark results (remove this if changing the benchmark)
time_diagonal.version = "d84f53fdc6abc208136c8ce48ca156370f6803562f6908eb6bd1424f50310cf1"
class Sum(Benchmark):
params = [[0.01, 0.1, 0.5], ['csr', 'csc', 'coo', 'lil', 'dok', 'dia']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 1000
if format == 'dok' and n * density >= 500:
raise NotImplementedError()
warnings.simplefilter('ignore', SparseEfficiencyWarning)
self.X = sparse.rand(n, n, format=format, density=density)
def time_sum(self, density, format):
self.X.sum()
def time_sum_axis0(self, density, format):
self.X.sum(axis=0)
def time_sum_axis1(self, density, format):
self.X.sum(axis=1)
# Retain old benchmark results (remove this if changing the benchmark)
time_sum.version = "05c305857e771024535e546360203b17f5aca2b39b023a49ab296bd746d6cdd3"
time_sum_axis0.version = "8aca682fd69aa140c69c028679826bdf43c717589b1961b4702d744ed72effc6"
time_sum_axis1.version = "1a6e05244b77f857c61f8ee09ca3abd006a10ba07eff10b1c5f9e0ac20f331b2"
class Iteration(Benchmark):
params = [[0.05, 0.01], ['csr', 'csc', 'lil']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 500
k = 1000
self.X = sparse.rand(n, k, format=format, density=density)
def time_iteration(self, density, format):
for row in self.X:
pass
class Densify(Benchmark):
params = [
['dia', 'csr', 'csc', 'dok', 'lil', 'coo', 'bsr'],
['C', 'F'],
]
param_names = ['format', 'order']
def setup(self, format, order):
warnings.simplefilter('ignore', SparseEfficiencyWarning)
self.X = sparse.rand(1000, 1000, format=format, density=0.01)
def time_toarray(self, format, order):
self.X.toarray(order=order)
# Retain old benchmark results (remove this if changing the benchmark)
time_toarray.version = "2fbf492ec800b982946a62785beda803460b913cc80080043a5d407025893b2b"
class Random(Benchmark):
params = [
np.arange(0, 1.1, 0.1).tolist()
]
param_names = ['density']
def setup(self, density):
warnings.simplefilter('ignore', SparseEfficiencyWarning)
self.nrows = 1000
self.ncols = 1000
self.format = 'csr'
def time_rand(self, density):
sparse.rand(self.nrows, self.ncols,
format=self.format, density=density)
| bsd-3-clause |
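The `setup`/`time_*`/`track_*` methods and `params` attributes follow the airspeed velocity (asv) benchmark convention used by SciPy. A quick standalone check of the `poisson2d` helper defined above (assuming SciPy is installed) might look like:

```python
import numpy as np
from scipy.sparse.linalg import spsolve

A = poisson2d(50, format='csr')   # 2500 x 2500 five-point Laplacian
b = np.ones(A.shape[0])
x = spsolve(A, b)                 # solve the discrete Poisson problem
assert np.allclose(A.dot(x), b)
```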
chemelnucfin/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/mvn_full_covariance_test.py | 25 | 7365 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormalFullCovariance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
ds = distributions
rng = np.random.RandomState(42)
class MultivariateNormalFullCovarianceTest(test.TestCase):
def _random_pd_matrix(self, *shape):
mat = rng.rand(*shape)
chol = ds.matrix_diag_transform(mat, transform=nn_ops.softplus)
chol = array_ops.matrix_band_part(chol, -1, 0)
return math_ops.matmul(chol, chol, adjoint_b=True).eval()
def testRaisesIfInitializedWithNonSymmetricMatrix(self):
with self.cached_session():
mu = [1., 2.]
sigma = [[1., 0.], [1., 1.]] # Nonsingular, but not symmetric
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
with self.assertRaisesOpError("not symmetric"):
mvn.covariance().eval()
def testNamePropertyIsSetByInitArg(self):
with self.cached_session():
mu = [1., 2.]
sigma = [[1., 0.], [0., 1.]]
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, name="Billy")
self.assertEqual(mvn.name, "Billy/")
def testDoesNotRaiseIfInitializedWithSymmetricMatrix(self):
with self.cached_session():
mu = rng.rand(10)
sigma = self._random_pd_matrix(10, 10)
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
# Should not raise
mvn.covariance().eval()
def testLogPDFScalarBatch(self):
with self.cached_session():
mu = rng.rand(2)
sigma = self._random_pd_matrix(2, 2)
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
x = rng.rand(2)
log_pdf = mvn.log_prob(x)
pdf = mvn.prob(x)
scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)
expected_log_pdf = scipy_mvn.logpdf(x)
expected_pdf = scipy_mvn.pdf(x)
self.assertEqual((), log_pdf.get_shape())
self.assertEqual((), pdf.get_shape())
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(expected_pdf, pdf.eval())
def testLogPDFScalarBatchCovarianceNotProvided(self):
with self.cached_session():
mu = rng.rand(2)
mvn = ds.MultivariateNormalFullCovariance(
mu, covariance_matrix=None, validate_args=True)
x = rng.rand(2)
log_pdf = mvn.log_prob(x)
pdf = mvn.prob(x)
# Initialize a scipy_mvn with the default covariance.
scipy_mvn = stats.multivariate_normal(mean=mu, cov=np.eye(2))
expected_log_pdf = scipy_mvn.logpdf(x)
expected_pdf = scipy_mvn.pdf(x)
self.assertEqual((), log_pdf.get_shape())
self.assertEqual((), pdf.get_shape())
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(expected_pdf, pdf.eval())
def testShapes(self):
with self.cached_session():
mu = rng.rand(3, 5, 2)
covariance = self._random_pd_matrix(3, 5, 2, 2)
mvn = ds.MultivariateNormalFullCovariance(
mu, covariance, validate_args=True)
# Shapes known at graph construction time.
self.assertEqual((2,), tuple(mvn.event_shape.as_list()))
self.assertEqual((3, 5), tuple(mvn.batch_shape.as_list()))
# Shapes known at runtime.
self.assertEqual((2,), tuple(mvn.event_shape_tensor().eval()))
self.assertEqual((3, 5), tuple(mvn.batch_shape_tensor().eval()))
def _random_mu_and_sigma(self, batch_shape, event_shape):
# This ensures sigma is positive definite.
mat_shape = batch_shape + event_shape + event_shape
mat = rng.randn(*mat_shape)
perm = np.arange(mat.ndim)
perm[-2:] = [perm[-1], perm[-2]]
sigma = np.matmul(mat, np.transpose(mat, perm))
mu_shape = batch_shape + event_shape
mu = rng.randn(*mu_shape)
return mu, sigma
def testKLBatch(self):
batch_shape = [2]
event_shape = [3]
with self.cached_session():
mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)
mvn_a = ds.MultivariateNormalFullCovariance(
loc=mu_a,
covariance_matrix=sigma_a,
validate_args=True)
mvn_b = ds.MultivariateNormalFullCovariance(
loc=mu_b,
covariance_matrix=sigma_b,
validate_args=True)
kl = ds.kl_divergence(mvn_a, mvn_b)
self.assertEqual(batch_shape, kl.get_shape())
kl_v = kl.eval()
expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],
mu_b[0, :], sigma_b[0, :])
expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],
mu_b[1, :], sigma_b[1, :])
self.assertAllClose(expected_kl_0, kl_v[0])
self.assertAllClose(expected_kl_1, kl_v[1])
def testKLBatchBroadcast(self):
batch_shape = [2]
event_shape = [3]
with self.cached_session():
mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
# No batch shape.
mu_b, sigma_b = self._random_mu_and_sigma([], event_shape)
mvn_a = ds.MultivariateNormalFullCovariance(
loc=mu_a,
covariance_matrix=sigma_a,
validate_args=True)
mvn_b = ds.MultivariateNormalFullCovariance(
loc=mu_b,
covariance_matrix=sigma_b,
validate_args=True)
kl = ds.kl_divergence(mvn_a, mvn_b)
self.assertEqual(batch_shape, kl.get_shape())
kl_v = kl.eval()
expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],
mu_b, sigma_b)
expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],
mu_b, sigma_b)
self.assertAllClose(expected_kl_0, kl_v[0])
self.assertAllClose(expected_kl_1, kl_v[1])
def _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b):
"""Non-batch KL for N(mu_a, sigma_a), N(mu_b, sigma_b)."""
# Check using numpy operations
# This mostly repeats the tensorflow code _kl_mvn_mvn(), but in numpy.
# So it is important to also check that KL(mvn, mvn) = 0.
sigma_b_inv = np.linalg.inv(sigma_b)
t = np.trace(sigma_b_inv.dot(sigma_a))
q = (mu_b - mu_a).dot(sigma_b_inv).dot(mu_b - mu_a)
k = mu_a.shape[0]
l = np.log(np.linalg.det(sigma_b) / np.linalg.det(sigma_a))
return 0.5 * (t + q - k + l)
if __name__ == "__main__":
test.main()
| apache-2.0 |
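`_compute_non_batch_kl` above is the standard closed form for the KL divergence between two multivariate normals; written out, with the terms matching the variables `t`, `q`, `k`, `l` in the code (k is the event dimension):

```latex
\mathrm{KL}\big(\mathcal{N}(\mu_a,\Sigma_a)\,\|\,\mathcal{N}(\mu_b,\Sigma_b)\big)
  = \tfrac{1}{2}\Big(\operatorname{tr}(\Sigma_b^{-1}\Sigma_a)
  + (\mu_b-\mu_a)^{\top}\Sigma_b^{-1}(\mu_b-\mu_a)
  - k
  + \ln\tfrac{\det\Sigma_b}{\det\Sigma_a}\Big)
```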
katiecheng/Bombolone | env/lib/python2.7/site-packages/PIL/MicImagePlugin.py | 40 | 2160 | #
# The Python Imaging Library.
# $Id$
#
# Microsoft Image Composer support for PIL
#
# Notes:
# uses TiffImagePlugin.py to read the actual image streams
#
# History:
# 97-01-20 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.1"
import Image, TiffImagePlugin
from OleFileIO import *
#
# --------------------------------------------------------------------
def _accept(prefix):
return prefix[:8] == MAGIC
##
# Image plugin for Microsoft's Image Composer file format.
class MicImageFile(TiffImagePlugin.TiffImageFile):
format = "MIC"
format_description = "Microsoft Image Composer"
def _open(self):
# read the OLE directory and see if this is a likely
# to be a Microsoft Image Composer file
try:
self.ole = OleFileIO(self.fp)
except IOError:
raise SyntaxError, "not an MIC file; invalid OLE file"
# find ACI subfiles with Image members (maybe not the
# best way to identify MIC files, but what the... ;-)
self.images = []
for file in self.ole.listdir():
if file[1:] and file[0][-4:] == ".ACI" and file[1] == "Image":
self.images.append(file)
# if we didn't find any images, this is probably not
# an MIC file.
if not self.images:
raise SyntaxError, "not an MIC file; no image entries"
self.__fp = self.fp
self.frame = 0
if len(self.images) > 1:
self.category = Image.CONTAINER
self.seek(0)
def seek(self, frame):
try:
filename = self.images[frame]
except IndexError:
raise EOFError, "no such frame"
self.fp = self.ole.openstream(filename)
TiffImagePlugin.TiffImageFile._open(self)
self.frame = frame
def tell(self):
return self.frame
#
# --------------------------------------------------------------------
Image.register_open("MIC", MicImageFile, _accept)
Image.register_extension("MIC", ".mic")
| bsd-3-clause |
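A hypothetical usage sketch of the plugin above, written in the same old-PIL, Python 2 style as the module ("composition.mic" is a placeholder filename):

```python
import Image                     # old flat PIL namespace, as used by the plugin
import MicImagePlugin            # importing registers the "MIC" format handlers

im = Image.open("composition.mic")
print im.format, im.size         # "MIC" plus the size of the first image stream
if len(im.images) > 1:
    im.seek(1)                   # jump to the second ACI image entry
    print im.tell()
```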
nipy/nireg | examples/affine_registration.py | 2 | 4047 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
This script requires the nipy-data package to run. It is an example of
inter-subject affine registration using two MR-T1 images from the
sulcal 2000 database acquired at CEA, SHFJ, Orsay, France. The source
is 'ammon' and the target is 'anubis'. Running it will result in a
resampled ammon image being created in the current directory.
"""
from __future__ import print_function # Python 2/3 compatibility
from optparse import OptionParser
import time
import numpy as np
import nibabel as nb
from nireg import HistogramRegistration, resample
from nipy.utils import example_data
print('Scanning data directory...')
# Input images are provided with the nipy-data package
source = 'ammon'
target = 'anubis'
source_file = example_data.get_filename('neurospin', 'sulcal2000',
'nobias_' + source + '.nii.gz')
target_file = example_data.get_filename('neurospin', 'sulcal2000',
'nobias_' + target + '.nii.gz')
# Parse arguments
parser = OptionParser(description=__doc__)
doc_similarity = 'similarity measure: cc (correlation coefficient), \
cr (correlation ratio), crl1 (correlation ratio in L1 norm), \
mi (mutual information), nmi (normalized mutual information), \
pmi (Parzen mutual information), dpmi (discrete Parzen mutual \
information). Default is crl1.'
doc_renormalize = 'similarity renormalization: default, ml or nml.'
doc_interp = 'interpolation method: tri (trilinear), pv (partial volume), \
rand (random). Default is pv.'
doc_optimizer = 'optimization method: simplex, powell, steepest, cg, bfgs. \
Default is powell.'
doc_tol = 'numerical tolerance on similarity values: default is 0.01'
parser.add_option('-s', '--similarity', dest='similarity',
help=doc_similarity)
parser.add_option('-r', '--renormalize', dest='renormalize',
help=doc_renormalize)
parser.add_option('-i', '--interp', dest='interp',
help=doc_interp)
parser.add_option('-o', '--optimizer', dest='optimizer',
help=doc_optimizer)
parser.add_option('-t', '--tol', dest='tol',
help=doc_tol)
opts, args = parser.parse_args()
# Optional arguments
similarity = 'crl1'
renormalize = 'default'
interp = 'pv'
optimizer = 'powell'
if opts.similarity is not None:
similarity = opts.similarity
if opts.renormalize is not None:
renormalize = bool(int(opts.renormalize))
if opts.interp is not None:
interp = opts.interp
if opts.optimizer is not None:
optimizer = opts.optimizer
if not opts.tol == None:
tol = float(opts.tol)
else:
tol = 1e-2
# Print messages
print('Source brain: %s' % source)
print('Target brain: %s' % target)
print('Similarity measure: %s' % similarity)
print('Renormalization: %s' % renormalize)
print('Interpolation: %s' % interp)
print('Optimizer: %s' % optimizer)
print('Tolerance: %f' % tol)
# Get data
print('Fetching image data...')
I = nb.load(source_file)
J = nb.load(target_file)
# Perform affine registration
# The output is an array-like object such that
# np.asarray(T) is a customary 4x4 matrix
print('Setting up registration...')
tic = time.time()
R = HistogramRegistration(I, J, similarity=similarity, interp=interp,
renormalize=renormalize)
T = R.optimize('affine', optimizer=optimizer, xtol=tol, ftol=tol)
toc = time.time()
print(' Registration time: %f sec' % (toc - tic))
# Resample source image
print('Resampling source image...')
tic = time.time()
#It = resample2(I, J.coordmap, T.inv(), J.shape)
It = resample(I, T.inv(), reference=J)
toc = time.time()
print(' Resampling time: %f sec' % (toc - tic))
# Save resampled source
outroot = source + '_TO_' + target
outimg = outroot + '.nii.gz'
print ('Saving resampled source in: %s' % outimg)
nb.save(It, outimg)
# Save transformation matrix
"""
outparams = outroot + '.npy'
np.save(outparams, np.asarray(T))
"""
| bsd-3-clause |
tdipisa/mapstore | mapcomposer/app/static/externals/proj4js/tools/pjjs.py | 250 | 3061 | #!/usr/bin/env python
#
# TODO explain
#
# -- Copyright 2007 IGN France / Geoportail project --
#
import sys
import os
import re
SUFFIX_JAVASCRIPT = ".js"
def _pjcat2js_remove(rezDirectory,catName,targetDirectory):
pjCatFilename = os.path.join(rezDirectory, catName)
pjCat = open(pjCatFilename,'r')
comment_re = re.compile("^#")
srsdef_re = re.compile("^<([^>]*)>.* <>$")
l = pjCat.readline()
while len(l) != 0:
if comment_re.search(l) is None:
srsdef_mo = srsdef_re.match(l)
srsdef_fn = os.path.join(targetDirectory, catName+srsdef_mo.group(1)+".js")
if os.path.exists(srsdef_fn):
os.remove(srsdef_fn)
l = pjCat.readline()
pjCat.close()
def _pjcat2js_make(rezDirectory,catName,targetDirectory):
pjCatFilename = os.path.join(rezDirectory, catName)
pjCat = open(pjCatFilename,'r')
comment_re = re.compile("^#")
srsdef_re = re.compile("^<([^>]*)> *(.*) <>$")
l = pjCat.readline()
while len(l) != 0:
if comment_re.search(l) is None:
srsdef_mo = srsdef_re.match(l)
srsdef_fn = os.path.join(targetDirectory, catName+srsdef_mo.group(1)+".js")
srsdef = 'Proj4js.defs["'+catName+':'+srsdef_mo.group(1)+'"]="'+srsdef_mo.group(2)+'";'
file(srsdef_fn,'w').write(srsdef)
l = pjCat.readline()
pjCat.close()
def pjcat2js_clean(rezDirectory,targetDirectory):
if not os.path.isdir(rezDirectory):
return
if not os.path.isdir(targetDirectory):
return
if os.path.abspath(rezDirectory) == '/':
return
if os.path.abspath(targetDirectory) == '/':
return
rezDirectory_name_len = len(rezDirectory)
for root, dirs, filenames in os.walk(rezDirectory):
if 'CVS' in dirs:
dirs.remove('CVS')
if '.svn' in dirs:
dirs.remove('.svn')
for filename in filenames:
if not filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[rezDirectory_name_len+1:]
filepath = filepath.replace("\\", "/")
_pjcat2js_remove(rezDirectory,filepath,targetDirectory)
def pjcat2js_run(rezDirectory,targetDirectory):
if not os.path.isdir(rezDirectory):
return
if not os.path.isdir(targetDirectory):
return
if os.path.abspath(rezDirectory) == '/':
return
if os.path.abspath(targetDirectory) == '/':
return
rezDirectory_name_len = len(rezDirectory)
for root, dirs, filenames in os.walk(rezDirectory):
if 'CVS' in dirs:
dirs.remove('CVS')
if '.svn' in dirs:
dirs.remove('.svn')
for filename in filenames:
if not filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[rezDirectory_name_len+1:]
filepath = filepath.replace("\\", "/")
_pjcat2js_make(rezDirectory,filepath,targetDirectory)
| gpl-3.0 |
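Illustrative only: given a PROJ catalogue file named `epsg` in the resource directory containing the (hypothetical) line `<4326> +proj=longlat +datum=WGS84 +no_defs <>`, running the converter would emit a file `epsg4326.js` holding a single Proj4js definition. The directory names below are placeholders.

```python
# Output written by _pjcat2js_make() for the example entry above:
#   Proj4js.defs["epsg:4326"]="+proj=longlat +datum=WGS84 +no_defs";
pjcat2js_run('nad', 'lib/defs')   # resource dir -> one .js def per SRS code
```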
HiroIshikawa/21playground | flask-rethink/env/lib/python3.5/site-packages/pip/commands/list.py | 269 | 7251 | from __future__ import absolute_import
import logging
from pip._vendor import pkg_resources
from pip.basecommand import Command
from pip.exceptions import DistributionNotFound
from pip.index import FormatControl, fmt_ctl_formats, PackageFinder, Search
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions, dist_is_editable
from pip.wheel import WheelCache
from pip.cmdoptions import make_option_group, index_group
logger = logging.getLogger(__name__)
class ListCommand(Command):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
name = 'list'
usage = """
%prog [options]"""
summary = 'List installed packages.'
def __init__(self, *args, **kw):
super(ListCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-o', '--outdated',
action='store_true',
default=False,
help='List outdated packages (excluding editables)')
cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
help='List uptodate packages (excluding editables)')
cmd_opts.add_option(
'-e', '--editable',
action='store_true',
default=False,
help='List editable projects.')
cmd_opts.add_option(
'-l', '--local',
action='store_true',
default=False,
help=('If in a virtualenv that has global access, do not list '
'globally-installed packages.'),
)
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
index_opts = make_option_group(index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
process_dependency_links=options.process_dependency_links,
session=session,
)
def run(self, options, args):
if options.outdated:
self.run_outdated(options)
elif options.uptodate:
self.run_uptodate(options)
elif options.editable:
self.run_editables(options)
else:
self.run_listing(options)
def run_outdated(self, options):
for dist, version, typ in self.find_packages_latest_versions(options):
if version > dist.parsed_version:
logger.info(
'%s (Current: %s Latest: %s [%s])',
dist.project_name, dist.version, version, typ,
)
def find_packages_latest_versions(self, options):
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
dependency_links = []
for dist in get_installed_distributions(local_only=options.local,
user_only=options.user):
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt'),
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
finder.add_dependency_links(dependency_links)
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
include_editables=False,
)
format_control = FormatControl(set(), set())
wheel_cache = WheelCache(options.cache_dir, format_control)
for dist in installed_packages:
req = InstallRequirement.from_line(
dist.key, None, isolated=options.isolated_mode,
wheel_cache=wheel_cache
)
typ = 'unknown'
try:
link = finder.find_requirement(req, True)
# If link is None, means installed version is most
# up-to-date
if link is None:
continue
except DistributionNotFound:
continue
else:
canonical_name = pkg_resources.safe_name(req.name).lower()
formats = fmt_ctl_formats(format_control, canonical_name)
search = Search(
req.name,
canonical_name,
formats)
remote_version = finder._link_package_versions(
link, search).version
if link.is_wheel:
typ = 'wheel'
else:
typ = 'sdist'
yield dist, remote_version, typ
def run_listing(self, options):
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
)
self.output_package_listing(installed_packages)
def run_editables(self, options):
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=True,
)
self.output_package_listing(installed_packages)
def output_package_listing(self, installed_packages):
installed_packages = sorted(
installed_packages,
key=lambda dist: dist.project_name.lower(),
)
for dist in installed_packages:
if dist_is_editable(dist):
line = '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
else:
line = '%s (%s)' % (dist.project_name, dist.version)
logger.info(line)
def run_uptodate(self, options):
uptodate = []
for dist, version, typ in self.find_packages_latest_versions(options):
if dist.parsed_version == version:
uptodate.append(dist)
self.output_package_listing(uptodate)
| mit |
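For reference, the command implemented by this class; the package names and versions below are made up, but the line format matches the `logger.info` call in `run_outdated`.

```
$ pip list --outdated
requests (Current: 2.5.1 Latest: 2.7.0 [wheel])
six (Current: 1.8.0 Latest: 1.9.0 [sdist])
```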
lomocoin/lomocoin-qt | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
liyonghelpme/myBlog | pelican/tests/test_utils.py | 1 | 16159 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import logging
import shutil
import os
import datetime
import time
import locale
from sys import platform, version_info
from tempfile import mkdtemp
import pytz
from pelican.generators import TemplatePagesGenerator
from pelican.writers import Writer
from pelican.settings import read_settings
from pelican import utils
from .support import get_article, LoggedTestCase, locale_available, unittest
class TestUtils(LoggedTestCase):
_new_attribute = 'new_value'
@utils.deprecated_attribute(
old='_old_attribute', new='_new_attribute',
since=(3, 1, 0), remove=(4, 1, 3))
def _old_attribute():
return None
def test_deprecated_attribute(self):
value = self._old_attribute
self.assertEqual(value, self._new_attribute)
self.assertLogCountEqual(
count=1,
msg=('_old_attribute has been deprecated since 3.1.0 and will be '
'removed by version 4.1.3. Use _new_attribute instead'),
level=logging.WARNING)
def test_get_date(self):
# valid ones
date = datetime.datetime(year=2012, month=11, day=22)
date_hour = datetime.datetime(
year=2012, month=11, day=22, hour=22, minute=11)
date_hour_sec = datetime.datetime(
year=2012, month=11, day=22, hour=22, minute=11, second=10)
date_hour_sec_z = datetime.datetime(
year=2012, month=11, day=22, hour=22, minute=11, second=10,
tzinfo=pytz.timezone('UTC'))
date_hour_sec_est = datetime.datetime(
year=2012, month=11, day=22, hour=22, minute=11, second=10,
tzinfo=pytz.timezone('EST'))
date_hour_sec_frac_z = datetime.datetime(
year=2012, month=11, day=22, hour=22, minute=11, second=10,
microsecond=123000, tzinfo=pytz.timezone('UTC'))
dates = {
'2012-11-22': date,
'2012/11/22': date,
'2012-11-22 22:11': date_hour,
'2012/11/22 22:11': date_hour,
'22-11-2012': date,
'22/11/2012': date,
'22.11.2012': date,
'22.11.2012 22:11': date_hour,
'2012-11-22 22:11:10': date_hour_sec,
'2012-11-22T22:11:10Z': date_hour_sec_z,
'2012-11-22T22:11:10-0500': date_hour_sec_est,
'2012-11-22T22:11:10.123Z': date_hour_sec_frac_z,
}
# invalid ones
invalid_dates = ['2010-110-12', 'yay']
if version_info < (3, 2):
dates.pop('2012-11-22T22:11:10-0500')
invalid_dates.append('2012-11-22T22:11:10-0500')
for value, expected in dates.items():
self.assertEqual(utils.get_date(value), expected, value)
for item in invalid_dates:
self.assertRaises(ValueError, utils.get_date, item)
def test_slugify(self):
samples = (('this is a test', 'this-is-a-test'),
('this is a test', 'this-is-a-test'),
('this → is ← a ↑ test', 'this-is-a-test'),
('this--is---a test', 'this-is-a-test'),
('unicode測試許功蓋,你看到了嗎?',
'unicodece-shi-xu-gong-gai-ni-kan-dao-liao-ma'),
('大飯原発4号機、18日夜起動へ',
'da-fan-yuan-fa-4hao-ji-18ri-ye-qi-dong-he'),)
for value, expected in samples:
self.assertEqual(utils.slugify(value), expected)
def test_slugify_substitute(self):
samples = (('C++ is based on C', 'cpp-is-based-on-c'),
('C+++ test C+ test', 'cpp-test-c-test'),
('c++, c#, C#, C++', 'cpp-c-sharp-c-sharp-cpp'),
('c++-streams', 'cpp-streams'),)
subs = (('C++', 'CPP'), ('C#', 'C-SHARP'))
for value, expected in samples:
self.assertEqual(utils.slugify(value, subs), expected)
def test_get_relative_path(self):
samples = ((os.path.join('test', 'test.html'), os.pardir),
(os.path.join('test', 'test', 'test.html'),
os.path.join(os.pardir, os.pardir)),
('test.html', os.curdir),
(os.path.join('/test', 'test.html'), os.pardir),
(os.path.join('/test', 'test', 'test.html'),
os.path.join(os.pardir, os.pardir)),
('/test.html', os.curdir),)
for value, expected in samples:
self.assertEqual(utils.get_relative_path(value), expected)
def test_process_translations(self):
# create a bunch of articles
# 1: no translation metadata
fr_article1 = get_article(lang='fr', slug='yay', title='Un titre',
content='en français')
en_article1 = get_article(lang='en', slug='yay', title='A title',
content='in english')
# 2: reverse which one is the translation thanks to metadata
fr_article2 = get_article(lang='fr', slug='yay2', title='Un titre',
content='en français')
en_article2 = get_article(lang='en', slug='yay2', title='A title',
content='in english',
extra_metadata={'translation': 'true'})
# 3: back to default language detection if all items have the
# translation metadata
fr_article3 = get_article(lang='fr', slug='yay3', title='Un titre',
content='en français',
extra_metadata={'translation': 'yep'})
en_article3 = get_article(lang='en', slug='yay3', title='A title',
content='in english',
extra_metadata={'translation': 'yes'})
articles = [fr_article1, en_article1, fr_article2, en_article2,
fr_article3, en_article3]
index, trans = utils.process_translations(articles)
self.assertIn(en_article1, index)
self.assertIn(fr_article1, trans)
self.assertNotIn(en_article1, trans)
self.assertNotIn(fr_article1, index)
self.assertIn(fr_article2, index)
self.assertIn(en_article2, trans)
self.assertNotIn(fr_article2, trans)
self.assertNotIn(en_article2, index)
self.assertIn(en_article3, index)
self.assertIn(fr_article3, trans)
self.assertNotIn(en_article3, trans)
self.assertNotIn(fr_article3, index)
def test_watchers(self):
# Test if file changes are correctly detected
# Make sure to handle not getting any files correctly.
dirname = os.path.join(os.path.dirname(__file__), 'content')
folder_watcher = utils.folder_watcher(dirname, ['rst'])
path = os.path.join(dirname, 'article_with_metadata.rst')
file_watcher = utils.file_watcher(path)
# first check returns True
self.assertEqual(next(folder_watcher), True)
self.assertEqual(next(file_watcher), True)
# next check without modification returns False
self.assertEqual(next(folder_watcher), False)
self.assertEqual(next(file_watcher), False)
# after modification, returns True
t = time.time()
os.utime(path, (t, t))
self.assertEqual(next(folder_watcher), True)
self.assertEqual(next(file_watcher), True)
# file watcher with None or empty path should return None
self.assertEqual(next(utils.file_watcher('')), None)
self.assertEqual(next(utils.file_watcher(None)), None)
empty_path = os.path.join(os.path.dirname(__file__), 'empty')
try:
os.mkdir(empty_path)
os.mkdir(os.path.join(empty_path, "empty_folder"))
shutil.copy(__file__, empty_path)
# if no files of interest, returns None
watcher = utils.folder_watcher(empty_path, ['rst'])
self.assertEqual(next(watcher), None)
except OSError:
self.fail("OSError Exception in test_files_changed test")
finally:
shutil.rmtree(empty_path, True)
def test_clean_output_dir(self):
retention = ()
test_directory = os.path.join(os.path.dirname(__file__),
'clean_output')
content = os.path.join(os.path.dirname(__file__), 'content')
shutil.copytree(content, test_directory)
utils.clean_output_dir(test_directory, retention)
self.assertTrue(os.path.isdir(test_directory))
self.assertListEqual([], os.listdir(test_directory))
shutil.rmtree(test_directory)
def test_clean_output_dir_not_there(self):
retention = ()
test_directory = os.path.join(os.path.dirname(__file__),
'does_not_exist')
utils.clean_output_dir(test_directory, retention)
self.assertFalse(os.path.exists(test_directory))
def test_clean_output_dir_is_file(self):
retention = ()
test_directory = os.path.join(os.path.dirname(__file__),
'this_is_a_file')
f = open(test_directory, 'w')
f.write('')
f.close()
utils.clean_output_dir(test_directory, retention)
self.assertFalse(os.path.exists(test_directory))
def test_strftime(self):
d = datetime.date(2012, 8, 29)
# simple formatting
self.assertEqual(utils.strftime(d, '%d/%m/%y'), '29/08/12')
self.assertEqual(utils.strftime(d, '%d/%m/%Y'), '29/08/2012')
# % escaped
self.assertEqual(utils.strftime(d, '%d%%%m%%%y'), '29%08%12')
self.assertEqual(utils.strftime(d, '%d %% %m %% %y'), '29 % 08 % 12')
# not valid % formatter
self.assertEqual(utils.strftime(d, '10% reduction in %Y'),
'10% reduction in 2012')
self.assertEqual(utils.strftime(d, '%10 reduction in %Y'),
'%10 reduction in 2012')
# with text
self.assertEqual(utils.strftime(d, 'Published in %d-%m-%Y'),
'Published in 29-08-2012')
# with non-ascii text
self.assertEqual(utils.strftime(d, '%d/%m/%Y Øl trinken beim Besäufnis'),
'29/08/2012 Øl trinken beim Besäufnis')
# test the output of utils.strftime in a different locale
# Turkish locale
@unittest.skipUnless(locale_available('tr_TR.UTF-8') or
locale_available('Turkish'),
'Turkish locale needed')
def test_strftime_locale_dependent_turkish(self):
# store current locale
old_locale = locale.setlocale(locale.LC_TIME)
if platform == 'win32':
locale.setlocale(locale.LC_TIME, str('Turkish'))
else:
locale.setlocale(locale.LC_TIME, str('tr_TR.UTF-8'))
d = datetime.date(2012, 8, 29)
# simple
self.assertEqual(utils.strftime(d, '%d %B %Y'), '29 Ağustos 2012')
self.assertEqual(utils.strftime(d, '%A, %d %B %Y'),
'Çarşamba, 29 Ağustos 2012')
# with text
self.assertEqual(utils.strftime(d, 'Yayınlanma tarihi: %A, %d %B %Y'),
'Yayınlanma tarihi: Çarşamba, 29 Ağustos 2012')
# non-ascii format candidate (someone might pass it... for some reason)
self.assertEqual(utils.strftime(d, '%Y yılında %üretim artışı'),
'2012 yılında %üretim artışı')
# restore locale back
locale.setlocale(locale.LC_TIME, old_locale)
# test the output of utils.strftime in a different locale
# French locale
@unittest.skipUnless(locale_available('fr_FR.UTF-8') or
locale_available('French'),
'French locale needed')
def test_strftime_locale_dependent_french(self):
# store current locale
old_locale = locale.setlocale(locale.LC_TIME)
if platform == 'win32':
locale.setlocale(locale.LC_TIME, str('French'))
else:
locale.setlocale(locale.LC_TIME, str('fr_FR.UTF-8'))
d = datetime.date(2012, 8, 29)
# simple
self.assertEqual(utils.strftime(d, '%d %B %Y'), '29 août 2012')
# depending on OS, the first letter is m or M
self.assertTrue(utils.strftime(d, '%A') in ('mercredi', 'Mercredi'))
# with text
self.assertEqual(utils.strftime(d, 'Écrit le %d %B %Y'),
'Écrit le 29 août 2012')
# non-ascii format candidate (someone might pass it... for some reason)
self.assertEqual(utils.strftime(d, '%écrits en %Y'),
'%écrits en 2012')
# restore locale back
locale.setlocale(locale.LC_TIME, old_locale)
class TestDateFormatter(unittest.TestCase):
'''Tests that the output of the DateFormatter jinja filter is the same as
utils.strftime'''
def setUp(self):
# prepare a temp content and output folder
self.temp_content = mkdtemp(prefix='pelicantests.')
self.temp_output = mkdtemp(prefix='pelicantests.')
# prepare a template file
template_dir = os.path.join(self.temp_content, 'template')
template_path = os.path.join(template_dir, 'source.html')
os.makedirs(template_dir)
with open(template_path, 'w') as template_file:
template_file.write('date = {{ date|strftime("%A, %d %B %Y") }}')
self.date = datetime.date(2012, 8, 29)
def tearDown(self):
shutil.rmtree(self.temp_content)
shutil.rmtree(self.temp_output)
# reset locale to default
locale.setlocale(locale.LC_ALL, '')
@unittest.skipUnless(locale_available('fr_FR.UTF-8') or
locale_available('French'),
'French locale needed')
def test_french_locale(self):
settings = read_settings(
override = {'LOCALE': locale.normalize('fr_FR.UTF-8'),
'TEMPLATE_PAGES': {'template/source.html':
'generated/file.html'}})
generator = TemplatePagesGenerator({'date': self.date}, settings,
self.temp_content, '', self.temp_output, None)
generator.env.filters.update({'strftime': utils.DateFormatter()})
writer = Writer(self.temp_output, settings=settings)
generator.generate_output(writer)
output_path = os.path.join(
self.temp_output, 'generated', 'file.html')
# output file has been generated
self.assertTrue(os.path.exists(output_path))
# output content is correct
with utils.pelican_open(output_path) as output_file:
self.assertEqual(output_file,
utils.strftime(self.date, 'date = %A, %d %B %Y'))
@unittest.skipUnless(locale_available('tr_TR.UTF-8') or
locale_available('Turkish'),
'Turkish locale needed')
def test_turkish_locale(self):
settings = read_settings(
override = {'LOCALE': locale.normalize('tr_TR.UTF-8'),
'TEMPLATE_PAGES': {'template/source.html':
'generated/file.html'}})
generator = TemplatePagesGenerator({'date': self.date}, settings,
self.temp_content, '', self.temp_output, None)
generator.env.filters.update({'strftime': utils.DateFormatter()})
writer = Writer(self.temp_output, settings=settings)
generator.generate_output(writer)
output_path = os.path.join(
self.temp_output, 'generated', 'file.html')
# output file has been generated
self.assertTrue(os.path.exists(output_path))
# output content is correct
with utils.pelican_open(output_path) as output_file:
self.assertEqual(output_file,
utils.strftime(self.date, 'date = %A, %d %B %Y'))
| agpl-3.0 |
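A standalone illustration of the helpers exercised above; the inputs and expected outputs are taken directly from the slugify/strftime test samples, so no new behaviour is claimed.

```python
import datetime
from pelican import utils

subs = (('C++', 'CPP'), ('C#', 'C-SHARP'))
assert utils.slugify('C++ is based on C', subs) == 'cpp-is-based-on-c'

d = datetime.date(2012, 8, 29)
assert utils.strftime(d, 'Published in %d-%m-%Y') == 'Published in 29-08-2012'
```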
playm2mboy/edx-platform | openedx/core/lib/api/authentication.py | 36 | 3458 | """ Common Authentication Handlers used across projects. """
from rest_framework.authentication import SessionAuthentication
from rest_framework_oauth.authentication import OAuth2Authentication
from rest_framework.exceptions import AuthenticationFailed
from rest_framework_oauth.compat import oauth2_provider, provider_now
class SessionAuthenticationAllowInactiveUser(SessionAuthentication):
"""Ensure that the user is logged in, but do not require the account to be active.
We use this in the special case that a user has created an account,
but has not yet activated it. We still want to allow the user to
enroll in courses, so we remove the usual restriction
on session authentication that requires an active account.
You should use this authentication class ONLY for end-points that
it's safe for an un-activated user to access. For example,
we can allow a user to update his/her own enrollments without
activating an account.
"""
def authenticate(self, request):
"""Authenticate the user, requiring a logged-in account and CSRF.
This is exactly the same as the `SessionAuthentication` implementation,
with the `user.is_active` check removed.
Args:
request (HttpRequest)
Returns:
Tuple of `(user, token)`
Raises:
PermissionDenied: The CSRF token check failed.
"""
# Get the underlying HttpRequest object
request = request._request # pylint: disable=protected-access
user = getattr(request, 'user', None)
# Unauthenticated, CSRF validation not required
# This is where regular `SessionAuthentication` checks that the user is active.
# We have removed that check in this implementation.
# But we added a check to prevent anonymous users since we require a logged-in account.
if not user or user.is_anonymous():
return None
self.enforce_csrf(request)
# CSRF passed with authenticated user
return (user, None)
class OAuth2AuthenticationAllowInactiveUser(OAuth2Authentication):
"""
This is a temporary workaround while the is_active field on the user is coupled
with whether or not the user has verified ownership of their claimed email address.
Once is_active is decoupled from verified_email, we will no longer need this
class override.
But until then, this authentication class ensures that the user is logged in,
but does not require that their account "is_active".
This class can be used for an OAuth2-accessible endpoint that allows users to access
that endpoint without having their email verified. For example, this is used
for mobile endpoints.
"""
def authenticate_credentials(self, request, access_token):
"""
Authenticate the request, given the access token.
Override base class implementation to discard failure if user is inactive.
"""
try:
token = oauth2_provider.oauth2.models.AccessToken.objects.select_related('user')
# provider_now switches to timezone aware datetime when
# the oauth2_provider version supports to it.
token = token.get(token=access_token, expires__gt=provider_now())
except oauth2_provider.oauth2.models.AccessToken.DoesNotExist:
raise AuthenticationFailed('Invalid token')
return token.user, token
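# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how these classes could be plugged into a Django REST
# Framework view; the view name and endpoint below are hypothetical examples.
#
#     from rest_framework.views import APIView
#     from rest_framework.response import Response
#
#     class ExampleEnrollmentView(APIView):
#         authentication_classes = (
#             OAuth2AuthenticationAllowInactiveUser,
#             SessionAuthenticationAllowInactiveUser,
#         )
#
#         def get(self, request):
#             # request.user may be an authenticated but not-yet-activated account
#             return Response({'username': request.user.username})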
| agpl-3.0 |
tquilian/exeNext | twisted/web/google.py | 71 | 2101 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""\"I'm Feeling Lucky\" with U{Google<http://google.com>}.
"""
import urllib
from twisted.internet import protocol, reactor, defer
from twisted.web import http
class GoogleChecker(http.HTTPClient):
def connectionMade(self):
self.sendCommand('GET', self.factory.url)
self.sendHeader('Host', self.factory.host)
self.sendHeader('User-Agent', self.factory.agent)
self.endHeaders()
def handleHeader(self, key, value):
key = key.lower()
if key == 'location':
self.factory.gotLocation(value)
def handleStatus(self, version, status, message):
if status != '302':
self.factory.noLocation(ValueError("bad status"))
def handleEndHeaders(self):
self.factory.noLocation(ValueError("no location"))
def handleResponsePart(self, part):
pass
def handleResponseEnd(self):
pass
def connectionLost(self, reason):
self.factory.noLocation(reason)
class GoogleCheckerFactory(protocol.ClientFactory):
protocol = GoogleChecker
def __init__(self, words):
self.url = ('/search?q=%s&btnI=%s' %
(urllib.quote_plus(' '.join(words)),
urllib.quote_plus("I'm Feeling Lucky")))
self.agent="Twisted/GoogleChecker"
self.host = "www.google.com"
self.deferred = defer.Deferred()
def clientConnectionFailed(self, _, reason):
self.noLocation(reason)
def gotLocation(self, location):
if self.deferred:
self.deferred.callback(location)
self.deferred = None
def noLocation(self, error):
if self.deferred:
self.deferred.errback(error)
self.deferred = None
def checkGoogle(words):
"""Check google for a match.
@returns: a Deferred which will callback with a URL or errback with a
Failure.
"""
factory = GoogleCheckerFactory(words)
reactor.connectTCP('www.google.com', 80, factory)
return factory.deferred
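# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming network access; the search words are arbitrary
# examples. checkGoogle returns a Deferred that fires with the redirect URL.
if __name__ == '__main__':
    def _show(location):
        print(location)
        reactor.stop()

    def _fail(failure):
        print(failure)
        reactor.stop()

    checkGoogle(['twisted', 'python']).addCallbacks(_show, _fail)
    reactor.run()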
| gpl-2.0 |
AustereCuriosity/astropy | astropy/io/ascii/tests/test_types.py | 4 | 1749 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
from ....extern.six.moves import cStringIO as StringIO
import numpy as np
from ... import ascii
from ....extern.six.moves import zip
from .common import assert_equal, setup_function, teardown_function
def test_types_from_dat():
converters = {'a': [ascii.convert_numpy(np.float)],
'e': [ascii.convert_numpy(np.str)]}
dat = ascii.read(['a b c d e', '1 1 cat 2.1 4.2'],
Reader=ascii.Basic,
converters=converters)
assert dat['a'].dtype.kind == 'f'
assert dat['b'].dtype.kind == 'i'
assert dat['c'].dtype.kind in ('S', 'U')
assert dat['d'].dtype.kind == 'f'
assert dat['e'].dtype.kind in ('S', 'U')
def test_rdb_write_types():
dat = ascii.read(['a b c d', '1 1.0 cat 2.1'],
Reader=ascii.Basic)
out = StringIO()
ascii.write(dat, out, Writer=ascii.Rdb)
outs = out.getvalue().splitlines()
assert_equal(outs[1], 'N\tN\tS\tN')
def test_ipac_read_types():
table = r"""\
| ra | dec | sai |-----v2---| sptype |
| real | float | l | real | char |
| unit | unit | unit | unit | ergs |
| null | null | null | null | -999 |
2.09708 2956 73765 2.06000 B8IVpMnHg
"""
reader = ascii.get_reader(Reader=ascii.Ipac)
dat = reader.read(table)
types = [ascii.FloatType,
ascii.FloatType,
ascii.IntType,
ascii.FloatType,
ascii.StrType]
for (col, expected_type) in zip(reader.cols, types):
assert_equal(col.type, expected_type)
| bsd-3-clause |
Zhongqilong/mykbengineer | kbe/res/scripts/common/Lib/test/test_email/test_contentmanager.py | 91 | 33538 | import unittest
from test.test_email import TestEmailBase, parameterize
import textwrap
from email import policy
from email.message import EmailMessage
from email.contentmanager import ContentManager, raw_data_manager
@parameterize
class TestContentManager(TestEmailBase):
policy = policy.default
message = EmailMessage
get_key_params = {
'full_type': (1, 'text/plain',),
'maintype_only': (2, 'text',),
'null_key': (3, '',),
}
def get_key_as_get_content_key(self, order, key):
def foo_getter(msg, foo=None):
bar = msg['X-Bar-Header']
return foo, bar
cm = ContentManager()
cm.add_get_handler(key, foo_getter)
m = self._make_message()
m['Content-Type'] = 'text/plain'
m['X-Bar-Header'] = 'foo'
self.assertEqual(cm.get_content(m, foo='bar'), ('bar', 'foo'))
def get_key_as_get_content_key_order(self, order, key):
def bar_getter(msg):
return msg['X-Bar-Header']
def foo_getter(msg):
return msg['X-Foo-Header']
cm = ContentManager()
cm.add_get_handler(key, foo_getter)
for precedence, key in self.get_key_params.values():
if precedence > order:
cm.add_get_handler(key, bar_getter)
m = self._make_message()
m['Content-Type'] = 'text/plain'
m['X-Bar-Header'] = 'bar'
m['X-Foo-Header'] = 'foo'
self.assertEqual(cm.get_content(m), ('foo'))
def test_get_content_raises_if_unknown_mimetype_and_no_default(self):
cm = ContentManager()
m = self._make_message()
m['Content-Type'] = 'text/plain'
with self.assertRaisesRegex(KeyError, 'text/plain'):
cm.get_content(m)
class BaseThing(str):
pass
baseobject_full_path = __name__ + '.' + 'TestContentManager.BaseThing'
class Thing(BaseThing):
pass
testobject_full_path = __name__ + '.' + 'TestContentManager.Thing'
set_key_params = {
'type': (0, Thing,),
'full_path': (1, testobject_full_path,),
'qualname': (2, 'TestContentManager.Thing',),
'name': (3, 'Thing',),
'base_type': (4, BaseThing,),
'base_full_path': (5, baseobject_full_path,),
'base_qualname': (6, 'TestContentManager.BaseThing',),
'base_name': (7, 'BaseThing',),
'str_type': (8, str,),
'str_full_path': (9, 'builtins.str',),
'str_name': (10, 'str',), # str name and qualname are the same
'null_key': (11, None,),
}
def set_key_as_set_content_key(self, order, key):
def foo_setter(msg, obj, foo=None):
msg['X-Foo-Header'] = foo
msg.set_payload(obj)
cm = ContentManager()
cm.add_set_handler(key, foo_setter)
m = self._make_message()
msg_obj = self.Thing()
cm.set_content(m, msg_obj, foo='bar')
self.assertEqual(m['X-Foo-Header'], 'bar')
self.assertEqual(m.get_payload(), msg_obj)
def set_key_as_set_content_key_order(self, order, key):
def foo_setter(msg, obj):
msg['X-FooBar-Header'] = 'foo'
msg.set_payload(obj)
def bar_setter(msg, obj):
msg['X-FooBar-Header'] = 'bar'
cm = ContentManager()
cm.add_set_handler(key, foo_setter)
for precedence, key in self.get_key_params.values():
if precedence > order:
cm.add_set_handler(key, bar_setter)
m = self._make_message()
msg_obj = self.Thing()
cm.set_content(m, msg_obj)
self.assertEqual(m['X-FooBar-Header'], 'foo')
self.assertEqual(m.get_payload(), msg_obj)
def test_set_content_raises_if_unknown_type_and_no_default(self):
cm = ContentManager()
m = self._make_message()
msg_obj = self.Thing()
with self.assertRaisesRegex(KeyError, self.testobject_full_path):
cm.set_content(m, msg_obj)
def test_set_content_raises_if_called_on_multipart(self):
cm = ContentManager()
m = self._make_message()
m['Content-Type'] = 'multipart/foo'
with self.assertRaises(TypeError):
cm.set_content(m, 'test')
def test_set_content_calls_clear_content(self):
m = self._make_message()
m['Content-Foo'] = 'bar'
m['Content-Type'] = 'text/html'
m['To'] = 'test'
m.set_payload('abc')
cm = ContentManager()
cm.add_set_handler(str, lambda *args, **kw: None)
m.set_content('xyz', content_manager=cm)
self.assertIsNone(m['Content-Foo'])
self.assertIsNone(m['Content-Type'])
self.assertEqual(m['To'], 'test')
self.assertIsNone(m.get_payload())
@parameterize
class TestRawDataManager(TestEmailBase):
# Note: these tests are dependent on the order in which headers are added
# to the message objects by the code. There's no defined ordering in
# RFC5322/MIME, so this makes the tests more fragile than the standards
# require. However, if the header order changes it is best to understand
# *why*, and make sure it isn't a subtle bug in whatever change was
# applied.
policy = policy.default.clone(max_line_length=60,
content_manager=raw_data_manager)
message = EmailMessage
def test_get_text_plain(self):
m = self._str_msg(textwrap.dedent("""\
Content-Type: text/plain
Basic text.
"""))
self.assertEqual(raw_data_manager.get_content(m), "Basic text.\n")
def test_get_text_html(self):
m = self._str_msg(textwrap.dedent("""\
Content-Type: text/html
<p>Basic text.</p>
"""))
self.assertEqual(raw_data_manager.get_content(m),
"<p>Basic text.</p>\n")
def test_get_text_plain_latin1(self):
m = self._bytes_msg(textwrap.dedent("""\
Content-Type: text/plain; charset=latin1
Basìc tëxt.
""").encode('latin1'))
self.assertEqual(raw_data_manager.get_content(m), "Basìc tëxt.\n")
def test_get_text_plain_latin1_quoted_printable(self):
m = self._str_msg(textwrap.dedent("""\
Content-Type: text/plain; charset="latin-1"
Content-Transfer-Encoding: quoted-printable
Bas=ECc t=EBxt.
"""))
self.assertEqual(raw_data_manager.get_content(m), "Basìc tëxt.\n")
def test_get_text_plain_utf8_base64(self):
m = self._str_msg(textwrap.dedent("""\
Content-Type: text/plain; charset="utf8"
Content-Transfer-Encoding: base64
QmFzw6xjIHTDq3h0Lgo=
"""))
self.assertEqual(raw_data_manager.get_content(m), "Basìc tëxt.\n")
def test_get_text_plain_bad_utf8_quoted_printable(self):
m = self._str_msg(textwrap.dedent("""\
Content-Type: text/plain; charset="utf8"
Content-Transfer-Encoding: quoted-printable
Bas=c3=acc t=c3=abxt=fd.
"""))
self.assertEqual(raw_data_manager.get_content(m), "Basìc tëxt�.\n")
def test_get_text_plain_bad_utf8_quoted_printable_ignore_errors(self):
m = self._str_msg(textwrap.dedent("""\
Content-Type: text/plain; charset="utf8"
Content-Transfer-Encoding: quoted-printable
Bas=c3=acc t=c3=abxt=fd.
"""))
self.assertEqual(raw_data_manager.get_content(m, errors='ignore'),
"Basìc tëxt.\n")
def test_get_text_plain_utf8_base64_recoverable_bad_CTE_data(self):
m = self._str_msg(textwrap.dedent("""\
Content-Type: text/plain; charset="utf8"
Content-Transfer-Encoding: base64
QmFzw6xjIHTDq3h0Lgo\xFF=
"""))
self.assertEqual(raw_data_manager.get_content(m, errors='ignore'),
"Basìc tëxt.\n")
def test_get_text_invalid_keyword(self):
m = self._str_msg(textwrap.dedent("""\
Content-Type: text/plain
Basic text.
"""))
with self.assertRaises(TypeError):
raw_data_manager.get_content(m, foo='ignore')
def test_get_non_text(self):
template = textwrap.dedent("""\
Content-Type: {}
Content-Transfer-Encoding: base64
Ym9ndXMgZGF0YQ==
""")
for maintype in 'audio image video application'.split():
with self.subTest(maintype=maintype):
m = self._str_msg(template.format(maintype+'/foo'))
self.assertEqual(raw_data_manager.get_content(m), b"bogus data")
def test_get_non_text_invalid_keyword(self):
m = self._str_msg(textwrap.dedent("""\
Content-Type: image/jpg
Content-Transfer-Encoding: base64
Ym9ndXMgZGF0YQ==
"""))
with self.assertRaises(TypeError):
raw_data_manager.get_content(m, errors='ignore')
def test_get_raises_on_multipart(self):
m = self._str_msg(textwrap.dedent("""\
Content-Type: multipart/mixed; boundary="==="
--===
--===--
"""))
with self.assertRaises(KeyError):
raw_data_manager.get_content(m)
def test_get_message_rfc822_and_external_body(self):
template = textwrap.dedent("""\
Content-Type: message/{}
To: [email protected]
From: [email protected]
Subject: example
an example message
""")
for subtype in 'rfc822 external-body'.split():
with self.subTest(subtype=subtype):
m = self._str_msg(template.format(subtype))
sub_msg = raw_data_manager.get_content(m)
self.assertIsInstance(sub_msg, self.message)
self.assertEqual(raw_data_manager.get_content(sub_msg),
"an example message\n")
self.assertEqual(sub_msg['to'], '[email protected]')
self.assertEqual(sub_msg['from'].addresses[0].username, 'bar')
def test_get_message_non_rfc822_or_external_body_yields_bytes(self):
m = self._str_msg(textwrap.dedent("""\
Content-Type: message/partial
To: [email protected]
From: [email protected]
Subject: example
The real body is in another message.
"""))
self.assertEqual(raw_data_manager.get_content(m)[:10], b'To: foo@ex')
def test_set_text_plain(self):
m = self._make_message()
content = "Simple message.\n"
raw_data_manager.set_content(m, content)
self.assertEqual(str(m), textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 7bit
Simple message.
"""))
self.assertEqual(m.get_payload(decode=True).decode('utf-8'), content)
self.assertEqual(m.get_content(), content)
def test_set_text_html(self):
m = self._make_message()
content = "<p>Simple message.</p>\n"
raw_data_manager.set_content(m, content, subtype='html')
self.assertEqual(str(m), textwrap.dedent("""\
Content-Type: text/html; charset="utf-8"
Content-Transfer-Encoding: 7bit
<p>Simple message.</p>
"""))
self.assertEqual(m.get_payload(decode=True).decode('utf-8'), content)
self.assertEqual(m.get_content(), content)
def test_set_text_charset_latin_1(self):
m = self._make_message()
content = "Simple message.\n"
raw_data_manager.set_content(m, content, charset='latin-1')
self.assertEqual(str(m), textwrap.dedent("""\
Content-Type: text/plain; charset="iso-8859-1"
Content-Transfer-Encoding: 7bit
Simple message.
"""))
self.assertEqual(m.get_payload(decode=True).decode('utf-8'), content)
self.assertEqual(m.get_content(), content)
def test_set_text_short_line_minimal_non_ascii_heuristics(self):
m = self._make_message()
content = "et là il est monté sur moi et il commence à m'éto.\n"
raw_data_manager.set_content(m, content)
self.assertEqual(bytes(m), textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
et là il est monté sur moi et il commence à m'éto.
""").encode('utf-8'))
self.assertEqual(m.get_payload(decode=True).decode('utf-8'), content)
self.assertEqual(m.get_content(), content)
def test_set_text_long_line_minimal_non_ascii_heuristics(self):
m = self._make_message()
content = ("j'ai un problème de python. il est sorti de son"
" vivarium. et là il est monté sur moi et il commence"
" à m'éto.\n")
raw_data_manager.set_content(m, content)
self.assertEqual(bytes(m), textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: quoted-printable
j'ai un probl=C3=A8me de python. il est sorti de son vivari=
um. et l=C3=A0 il est mont=C3=A9 sur moi et il commence =
=C3=A0 m'=C3=A9to.
""").encode('utf-8'))
self.assertEqual(m.get_payload(decode=True).decode('utf-8'), content)
self.assertEqual(m.get_content(), content)
def test_set_text_11_lines_long_line_minimal_non_ascii_heuristics(self):
m = self._make_message()
content = '\n'*10 + (
"j'ai un problème de python. il est sorti de son"
" vivarium. et là il est monté sur moi et il commence"
" à m'éto.\n")
raw_data_manager.set_content(m, content)
self.assertEqual(bytes(m), textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: quoted-printable
""" + '\n'*10 + """
j'ai un probl=C3=A8me de python. il est sorti de son vivari=
um. et l=C3=A0 il est mont=C3=A9 sur moi et il commence =
=C3=A0 m'=C3=A9to.
""").encode('utf-8'))
self.assertEqual(m.get_payload(decode=True).decode('utf-8'), content)
self.assertEqual(m.get_content(), content)
def test_set_text_maximal_non_ascii_heuristics(self):
m = self._make_message()
content = "áàäéèęöő.\n"
raw_data_manager.set_content(m, content)
self.assertEqual(bytes(m), textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
áàäéèęöő.
""").encode('utf-8'))
self.assertEqual(m.get_payload(decode=True).decode('utf-8'), content)
self.assertEqual(m.get_content(), content)
def test_set_text_11_lines_maximal_non_ascii_heuristics(self):
m = self._make_message()
content = '\n'*10 + "áàäéèęöő.\n"
raw_data_manager.set_content(m, content)
self.assertEqual(bytes(m), textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
""" + '\n'*10 + """
áàäéèęöő.
""").encode('utf-8'))
self.assertEqual(m.get_payload(decode=True).decode('utf-8'), content)
self.assertEqual(m.get_content(), content)
def test_set_text_long_line_maximal_non_ascii_heuristics(self):
m = self._make_message()
content = ("áàäéèęöőáàäéèęöőáàäéèęöőáàäéèęöő"
"áàäéèęöőáàäéèęöőáàäéèęöőáàäéèęöő"
"áàäéèęöőáàäéèęöőáàäéèęöőáàäéèęöő.\n")
raw_data_manager.set_content(m, content)
self.assertEqual(bytes(m), textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: base64
w6HDoMOkw6nDqMSZw7bFkcOhw6DDpMOpw6jEmcO2xZHDocOgw6TDqcOoxJnD
tsWRw6HDoMOkw6nDqMSZw7bFkcOhw6DDpMOpw6jEmcO2xZHDocOgw6TDqcOo
xJnDtsWRw6HDoMOkw6nDqMSZw7bFkcOhw6DDpMOpw6jEmcO2xZHDocOgw6TD
qcOoxJnDtsWRw6HDoMOkw6nDqMSZw7bFkcOhw6DDpMOpw6jEmcO2xZHDocOg
w6TDqcOoxJnDtsWRLgo=
""").encode('utf-8'))
self.assertEqual(m.get_payload(decode=True).decode('utf-8'), content)
self.assertEqual(m.get_content(), content)
def test_set_text_11_lines_long_line_maximal_non_ascii_heuristics(self):
# Yes, it chooses "wrong" here. It's a heuristic. So this result
# could change if we come up with a better heuristic.
m = self._make_message()
content = ('\n'*10 +
"áàäéèęöőáàäéèęöőáàäéèęöőáàäéèęöő"
"áàäéèęöőáàäéèęöőáàäéèęöőáàäéèęöő"
"áàäéèęöőáàäéèęöőáàäéèęöőáàäéèęöő.\n")
raw_data_manager.set_content(m, "\n"*10 +
"áàäéèęöőáàäéèęöőáàäéèęöőáàäéèęöő"
"áàäéèęöőáàäéèęöőáàäéèęöőáàäéèęöő"
"áàäéèęöőáàäéèęöőáàäéèęöőáàäéèęöő.\n")
self.assertEqual(bytes(m), textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: quoted-printable
""" + '\n'*10 + """
=C3=A1=C3=A0=C3=A4=C3=A9=C3=A8=C4=99=C3=B6=C5=91=C3=A1=C3=
=A0=C3=A4=C3=A9=C3=A8=C4=99=C3=B6=C5=91=C3=A1=C3=A0=C3=A4=
=C3=A9=C3=A8=C4=99=C3=B6=C5=91=C3=A1=C3=A0=C3=A4=C3=A9=C3=
=A8=C4=99=C3=B6=C5=91=C3=A1=C3=A0=C3=A4=C3=A9=C3=A8=C4=99=
=C3=B6=C5=91=C3=A1=C3=A0=C3=A4=C3=A9=C3=A8=C4=99=C3=B6=C5=
=91=C3=A1=C3=A0=C3=A4=C3=A9=C3=A8=C4=99=C3=B6=C5=91=C3=A1=
=C3=A0=C3=A4=C3=A9=C3=A8=C4=99=C3=B6=C5=91=C3=A1=C3=A0=C3=
=A4=C3=A9=C3=A8=C4=99=C3=B6=C5=91=C3=A1=C3=A0=C3=A4=C3=A9=
=C3=A8=C4=99=C3=B6=C5=91=C3=A1=C3=A0=C3=A4=C3=A9=C3=A8=C4=
=99=C3=B6=C5=91=C3=A1=C3=A0=C3=A4=C3=A9=C3=A8=C4=99=C3=B6=
=C5=91.
""").encode('utf-8'))
self.assertEqual(m.get_payload(decode=True).decode('utf-8'), content)
self.assertEqual(m.get_content(), content)
def test_set_text_non_ascii_with_cte_7bit_raises(self):
m = self._make_message()
with self.assertRaises(UnicodeError):
raw_data_manager.set_content(m,"áàäéèęöő.\n", cte='7bit')
def test_set_text_non_ascii_with_charset_ascii_raises(self):
m = self._make_message()
with self.assertRaises(UnicodeError):
raw_data_manager.set_content(m,"áàäéèęöő.\n", charset='ascii')
def test_set_text_non_ascii_with_cte_7bit_and_charset_ascii_raises(self):
m = self._make_message()
with self.assertRaises(UnicodeError):
raw_data_manager.set_content(m,"áàäéèęöő.\n", cte='7bit', charset='ascii')
def test_set_message(self):
m = self._make_message()
m['Subject'] = "Forwarded message"
content = self._make_message()
content['To'] = '[email protected]'
content['From'] = '[email protected]'
content['Subject'] = "get back in your box"
content.set_content("Or face the comfy chair.")
raw_data_manager.set_content(m, content)
self.assertEqual(str(m), textwrap.dedent("""\
Subject: Forwarded message
Content-Type: message/rfc822
Content-Transfer-Encoding: 8bit
To: [email protected]
From: [email protected]
Subject: get back in your box
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 7bit
MIME-Version: 1.0
Or face the comfy chair.
"""))
payload = m.get_payload(0)
self.assertIsInstance(payload, self.message)
self.assertEqual(str(payload), str(content))
self.assertIsInstance(m.get_content(), self.message)
self.assertEqual(str(m.get_content()), str(content))
def test_set_message_with_non_ascii_and_coercion_to_7bit(self):
m = self._make_message()
m['Subject'] = "Escape report"
content = self._make_message()
content['To'] = '[email protected]'
content['From'] = '[email protected]'
content['Subject'] = "Help"
content.set_content("j'ai un problème de python. il est sorti de son"
" vivarium.")
raw_data_manager.set_content(m, content)
self.assertEqual(bytes(m), textwrap.dedent("""\
Subject: Escape report
Content-Type: message/rfc822
Content-Transfer-Encoding: 8bit
To: [email protected]
From: [email protected]
Subject: Help
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
j'ai un problème de python. il est sorti de son vivarium.
""").encode('utf-8'))
# The choice of base64 for the body encoding is because generator
# doesn't bother with heuristics and uses it unconditionally for utf-8
# text.
# XXX: the first cte should be 7bit, too...that's a generator bug.
# XXX: the line length in the body also looks like a generator bug.
self.assertEqual(m.as_string(maxheaderlen=self.policy.max_line_length),
textwrap.dedent("""\
Subject: Escape report
Content-Type: message/rfc822
Content-Transfer-Encoding: 8bit
To: [email protected]
From: [email protected]
Subject: Help
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: base64
MIME-Version: 1.0
aidhaSB1biBwcm9ibMOobWUgZGUgcHl0aG9uLiBpbCBlc3Qgc29ydGkgZGUgc29uIHZpdmFyaXVt
Lgo=
"""))
self.assertIsInstance(m.get_content(), self.message)
self.assertEqual(str(m.get_content()), str(content))
def test_set_message_invalid_cte_raises(self):
m = self._make_message()
content = self._make_message()
for cte in 'quoted-printable base64'.split():
for subtype in 'rfc822 external-body'.split():
with self.subTest(cte=cte, subtype=subtype):
with self.assertRaises(ValueError) as ar:
m.set_content(content, subtype, cte=cte)
exc = str(ar.exception)
self.assertIn(cte, exc)
self.assertIn(subtype, exc)
subtype = 'external-body'
for cte in '8bit binary'.split():
with self.subTest(cte=cte, subtype=subtype):
with self.assertRaises(ValueError) as ar:
m.set_content(content, subtype, cte=cte)
exc = str(ar.exception)
self.assertIn(cte, exc)
self.assertIn(subtype, exc)
def test_set_image_jpg(self):
for content in (b"bogus content",
bytearray(b"bogus content"),
memoryview(b"bogus content")):
with self.subTest(content=content):
m = self._make_message()
raw_data_manager.set_content(m, content, 'image', 'jpeg')
self.assertEqual(str(m), textwrap.dedent("""\
Content-Type: image/jpeg
Content-Transfer-Encoding: base64
Ym9ndXMgY29udGVudA==
"""))
self.assertEqual(m.get_payload(decode=True), content)
self.assertEqual(m.get_content(), content)
def test_set_audio_aif_with_quoted_printable_cte(self):
# Why you would use qp, I don't know, but it is technically supported.
# XXX: the incorrect line length is because binascii.b2a_qp doesn't
# support a line length parameter, but we must use it to get newline
# encoding.
# XXX: what about that lack of trailing newline? Do we actually handle
# that correctly in all cases? That is, if the *source* has an
# unencoded newline, do we add an extra newline to the returned payload
# or not? And can that actually be disambiguated based on the RFC?
m = self._make_message()
content = b'b\xFFgus\tcon\nt\rent ' + b'z'*100
m.set_content(content, 'audio', 'aif', cte='quoted-printable')
self.assertEqual(bytes(m), textwrap.dedent("""\
Content-Type: audio/aif
Content-Transfer-Encoding: quoted-printable
MIME-Version: 1.0
b=FFgus=09con=0At=0Dent=20zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz""").encode('latin-1'))
self.assertEqual(m.get_payload(decode=True), content)
self.assertEqual(m.get_content(), content)
def test_set_video_mpeg_with_binary_cte(self):
m = self._make_message()
content = b'b\xFFgus\tcon\nt\rent ' + b'z'*100
m.set_content(content, 'video', 'mpeg', cte='binary')
self.assertEqual(bytes(m), textwrap.dedent("""\
Content-Type: video/mpeg
Content-Transfer-Encoding: binary
MIME-Version: 1.0
""").encode('ascii') +
# XXX: the second \n ought to be a \r, but generator gets it wrong.
# THIS MEANS WE DON'T ACTUALLY SUPPORT THE 'binary' CTE.
b'b\xFFgus\tcon\nt\nent zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' +
b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz')
self.assertEqual(m.get_payload(decode=True), content)
self.assertEqual(m.get_content(), content)
def test_set_application_octet_stream_with_8bit_cte(self):
# In 8bit mode, universal line end logic applies. It is up to the
# application to make sure the lines are short enough; we don't check.
m = self._make_message()
content = b'b\xFFgus\tcon\nt\rent\n' + b'z'*60 + b'\n'
m.set_content(content, 'application', 'octet-stream', cte='8bit')
self.assertEqual(bytes(m), textwrap.dedent("""\
Content-Type: application/octet-stream
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
""").encode('ascii') +
b'b\xFFgus\tcon\nt\nent\n' +
b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\n')
self.assertEqual(m.get_payload(decode=True), content)
self.assertEqual(m.get_content(), content)
def test_set_headers_from_header_objects(self):
m = self._make_message()
content = "Simple message.\n"
header_factory = self.policy.header_factory
raw_data_manager.set_content(m, content, headers=(
header_factory("To", "[email protected]"),
header_factory("From", "[email protected]"),
header_factory("Subject", "I'm talking to myself.")))
self.assertEqual(str(m), textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
To: [email protected]
From: [email protected]
Subject: I'm talking to myself.
Content-Transfer-Encoding: 7bit
Simple message.
"""))
def test_set_headers_from_strings(self):
m = self._make_message()
content = "Simple message.\n"
raw_data_manager.set_content(m, content, headers=(
"X-Foo-Header: foo",
"X-Bar-Header: bar",))
self.assertEqual(str(m), textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
X-Foo-Header: foo
X-Bar-Header: bar
Content-Transfer-Encoding: 7bit
Simple message.
"""))
def test_set_headers_with_invalid_duplicate_string_header_raises(self):
m = self._make_message()
content = "Simple message.\n"
with self.assertRaisesRegex(ValueError, 'Content-Type'):
raw_data_manager.set_content(m, content, headers=(
"Content-Type: foo/bar",)
)
def test_set_headers_with_invalid_duplicate_header_header_raises(self):
m = self._make_message()
content = "Simple message.\n"
header_factory = self.policy.header_factory
with self.assertRaisesRegex(ValueError, 'Content-Type'):
raw_data_manager.set_content(m, content, headers=(
header_factory("Content-Type", " foo/bar"),)
)
def test_set_headers_with_defective_string_header_raises(self):
m = self._make_message()
content = "Simple message.\n"
with self.assertRaisesRegex(ValueError, 'a@fairly@@invalid@address'):
raw_data_manager.set_content(m, content, headers=(
'To: a@fairly@@invalid@address',)
)
print(m['To'].defects)
def test_set_headers_with_defective_header_header_raises(self):
m = self._make_message()
content = "Simple message.\n"
header_factory = self.policy.header_factory
with self.assertRaisesRegex(ValueError, 'a@fairly@@invalid@address'):
raw_data_manager.set_content(m, content, headers=(
header_factory('To', 'a@fairly@@invalid@address'),)
)
print(m['To'].defects)
def test_set_disposition_inline(self):
m = self._make_message()
m.set_content('foo', disposition='inline')
self.assertEqual(m['Content-Disposition'], 'inline')
def test_set_disposition_attachment(self):
m = self._make_message()
m.set_content('foo', disposition='attachment')
self.assertEqual(m['Content-Disposition'], 'attachment')
def test_set_disposition_foo(self):
m = self._make_message()
m.set_content('foo', disposition='foo')
self.assertEqual(m['Content-Disposition'], 'foo')
# XXX: we should have a 'strict' policy mode (beyond raise_on_defect) that
# would cause 'foo' above to raise.
def test_set_filename(self):
m = self._make_message()
m.set_content('foo', filename='bar.txt')
self.assertEqual(m['Content-Disposition'],
'attachment; filename="bar.txt"')
def test_set_filename_and_disposition_inline(self):
m = self._make_message()
m.set_content('foo', disposition='inline', filename='bar.txt')
self.assertEqual(m['Content-Disposition'], 'inline; filename="bar.txt"')
def test_set_non_ascii_filename(self):
m = self._make_message()
m.set_content('foo', filename='ábárî.txt')
self.assertEqual(bytes(m), textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment;
filename*=utf-8''%C3%A1b%C3%A1r%C3%AE.txt
MIME-Version: 1.0
foo
""").encode('ascii'))
content_object_params = {
'text_plain': ('content', ()),
'text_html': ('content', ('html',)),
'application_octet_stream': (b'content',
('application', 'octet_stream')),
'image_jpeg': (b'content', ('image', 'jpeg')),
'message_rfc822': (message(), ()),
'message_external_body': (message(), ('external-body',)),
}
def content_object_as_header_receiver(self, obj, mimetype):
m = self._make_message()
m.set_content(obj, *mimetype, headers=(
'To: [email protected]',
'From: [email protected]'))
self.assertEqual(m['to'], '[email protected]')
self.assertEqual(m['from'], '[email protected]')
def content_object_as_disposition_inline_receiver(self, obj, mimetype):
m = self._make_message()
m.set_content(obj, *mimetype, disposition='inline')
self.assertEqual(m['Content-Disposition'], 'inline')
def content_object_as_non_ascii_filename_receiver(self, obj, mimetype):
m = self._make_message()
m.set_content(obj, *mimetype, disposition='inline', filename='bár.txt')
self.assertEqual(m['Content-Disposition'], 'inline; filename="bár.txt"')
self.assertEqual(m.get_filename(), "bár.txt")
self.assertEqual(m['Content-Disposition'].params['filename'], "bár.txt")
def content_object_as_cid_receiver(self, obj, mimetype):
m = self._make_message()
m.set_content(obj, *mimetype, cid='some_random_stuff')
self.assertEqual(m['Content-ID'], 'some_random_stuff')
def content_object_as_params_receiver(self, obj, mimetype):
m = self._make_message()
params = {'foo': 'bár', 'abc': 'xyz'}
m.set_content(obj, *mimetype, params=params)
if isinstance(obj, str):
params['charset'] = 'utf-8'
self.assertEqual(m['Content-Type'].params, params)
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 |
GenericStudent/home-assistant | homeassistant/components/hvv_departures/sensor.py | 9 | 6546 | """Sensor platform for hvv."""
from datetime import timedelta
import logging
from aiohttp import ClientConnectorError
from pygti.exceptions import InvalidAuth
from pytz import timezone
from homeassistant.const import ATTR_ATTRIBUTION, ATTR_ID, DEVICE_CLASS_TIMESTAMP
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.util.dt import utcnow
from .const import ATTRIBUTION, CONF_STATION, DOMAIN, MANUFACTURER
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
MAX_LIST = 20
MAX_TIME_OFFSET = 360
ICON = "mdi:bus"
UNIT_OF_MEASUREMENT = "min"
ATTR_DEPARTURE = "departure"
ATTR_LINE = "line"
ATTR_ORIGIN = "origin"
ATTR_DIRECTION = "direction"
ATTR_TYPE = "type"
ATTR_DELAY = "delay"
ATTR_NEXT = "next"
PARALLEL_UPDATES = 0
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_devices):
"""Set up the sensor platform."""
hub = hass.data[DOMAIN][config_entry.entry_id]
session = aiohttp_client.async_get_clientsession(hass)
sensor = HVVDepartureSensor(hass, config_entry, session, hub)
async_add_devices([sensor], True)
class HVVDepartureSensor(Entity):
"""HVVDepartureSensor class."""
def __init__(self, hass, config_entry, session, hub):
"""Initialize."""
self.config_entry = config_entry
self.station_name = self.config_entry.data[CONF_STATION]["name"]
self.attr = {ATTR_ATTRIBUTION: ATTRIBUTION}
self._available = False
self._state = None
self._name = f"Departures at {self.station_name}"
self._last_error = None
self.gti = hub.gti
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self, **kwargs):
"""Update the sensor."""
departure_time = utcnow() + timedelta(
minutes=self.config_entry.options.get("offset", 0)
)
departure_time_tz_berlin = departure_time.astimezone(timezone("Europe/Berlin"))
payload = {
"station": self.config_entry.data[CONF_STATION],
"time": {
"date": departure_time_tz_berlin.strftime("%d.%m.%Y"),
"time": departure_time_tz_berlin.strftime("%H:%M"),
},
"maxList": MAX_LIST,
"maxTimeOffset": MAX_TIME_OFFSET,
"useRealtime": self.config_entry.options.get("realtime", False),
}
if "filter" in self.config_entry.options:
payload.update({"filter": self.config_entry.options["filter"]})
try:
data = await self.gti.departureList(payload)
except InvalidAuth as error:
if self._last_error != InvalidAuth:
_LOGGER.error("Authentication failed: %r", error)
self._last_error = InvalidAuth
self._available = False
except ClientConnectorError as error:
if self._last_error != ClientConnectorError:
_LOGGER.warning("Network unavailable: %r", error)
self._last_error = ClientConnectorError
self._available = False
except Exception as error: # pylint: disable=broad-except
if self._last_error != error:
_LOGGER.error("Error occurred while fetching data: %r", error)
self._last_error = error
self._available = False
if not (data["returnCode"] == "OK" and data.get("departures")):
self._available = False
return
if self._last_error == ClientConnectorError:
_LOGGER.debug("Network available again")
self._last_error = None
departure = data["departures"][0]
line = departure["line"]
delay = departure.get("delay", 0)
self._available = True
self._state = (
departure_time
+ timedelta(minutes=departure["timeOffset"])
+ timedelta(seconds=delay)
).isoformat()
self.attr.update(
{
ATTR_LINE: line["name"],
ATTR_ORIGIN: line["origin"],
ATTR_DIRECTION: line["direction"],
ATTR_TYPE: line["type"]["shortInfo"],
ATTR_ID: line["id"],
ATTR_DELAY: delay,
}
)
departures = []
for departure in data["departures"]:
line = departure["line"]
delay = departure.get("delay", 0)
departures.append(
{
ATTR_DEPARTURE: departure_time
+ timedelta(minutes=departure["timeOffset"])
+ timedelta(seconds=delay),
ATTR_LINE: line["name"],
ATTR_ORIGIN: line["origin"],
ATTR_DIRECTION: line["direction"],
ATTR_TYPE: line["type"]["shortInfo"],
ATTR_ID: line["id"],
ATTR_DELAY: delay,
}
)
self.attr[ATTR_NEXT] = departures
@property
def unique_id(self):
"""Return a unique ID to use for this sensor."""
station_id = self.config_entry.data[CONF_STATION]["id"]
station_type = self.config_entry.data[CONF_STATION]["type"]
return f"{self.config_entry.entry_id}-{station_id}-{station_type}"
@property
def device_info(self):
"""Return the device info for this sensor."""
return {
"identifiers": {
(
DOMAIN,
self.config_entry.entry_id,
self.config_entry.data[CONF_STATION]["id"],
self.config_entry.data[CONF_STATION]["type"],
)
},
"name": self._name,
"manufacturer": MANUFACTURER,
}
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the icon of the sensor."""
return ICON
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return DEVICE_CLASS_TIMESTAMP
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self.attr
| apache-2.0 |
nsinha17/incubator-metron | metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/0.2.1BETA/package/scripts/mysql_users.py | 1 | 1981 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.resources.system import Execute, File
from resource_management.core.source import StaticFile
from resource_management.libraries.functions.format import format
# Used to add metron access to the needed components
def mysql_adduser():
from params import params
File(params.mysql_adduser_path,
mode=0755,
content=StaticFile('addMysqlUser.sh')
)
add_user_cmd = format("bash -x {mysql_adduser_path} {daemon_name} {metron_user} {enrichment_metron_user_passwd!p} {enrichment_host}")
Execute(add_user_cmd,
tries=3,
try_sleep=5,
logoutput=False,
path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
)
# Removes hive metron from components
def mysql_deluser():
from params import params
File(params.mysql_deluser_path,
mode=0755,
content=StaticFile('removeMysqlUser.sh')
)
del_user_cmd = format("bash -x {mysql_deluser_path} {daemon_name} {metron_user} {enrichment_host}")
Execute(del_user_cmd,
tries=3,
try_sleep=5,
path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
)
| apache-2.0 |
MarioCodes/Desarrollo_Proyectos_Clase | python/convert-java-logs-to-csv-file/logs/Logs.py | 1 | 1687 | """
Created on May 08, 2019
@author: msanchez
"""
from logs.SVWLogs import SVWLogs
from utils.FileUtils import FileUtils
class Logs(object):
""" Sets the main structure to follow, to modify it in a future with other Log structures.
Attributes:
svw_logs: specific, encapsulated solution to clean the logs I needed this time.
"""
def __init__(self):
"""
Constructor
"""
self.svw_logs = SVWLogs()
def __get_grouped_lines(self, input_file):
raw_lines = FileUtils.read_file_lines(input_file)
return self.svw_logs.group_raw_lines(raw_lines)
def __clean_log_lines(self, grouped_raw_lines):
sorted_raw_lines = self.svw_logs.sort_logs(grouped_raw_lines)
return self.svw_logs.clean_lines(sorted_raw_lines)
def convert_into_csv(self, input_file, output_file):
""" Reads a Java log file, groups together the data from the logs, cleans it and gives it back as a .csv file.
This data was obtained from the logs with Linux AWK command and exported into a .txt file so we only have the
data we want without external noise.
:param input_file: .txt file with all raw Java logs.
:param output_file: .csv file with only the data we need to recover.
"""
grouped_raw_lines = self.__get_grouped_lines(input_file)
has_valid_size = self.svw_logs.validate_lists_size(grouped_raw_lines)
if has_valid_size:
cleaned_lines = self.__clean_log_lines(grouped_raw_lines)
csv_content = FileUtils.construct_csv(self.svw_logs.headers, cleaned_lines)
FileUtils.write_file(output_file, csv_content)
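# --- Illustrative usage (not part of the original module) ---
# A minimal sketch; the input and output file names are hypothetical examples.
if __name__ == '__main__':
    logs = Logs()
    logs.convert_into_csv('raw_java_logs.txt', 'cleaned_logs.csv')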
| apache-2.0 |
mutanthost/plexhole | lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/url.py | 713 | 5879 | from __future__ import absolute_import
from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
class Url(namedtuple('Url', url_attrs)):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
__slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
query=None, fragment=None):
if path and not path.startswith('/'):
path = '/' + path
return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:[email protected]:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = ''
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + '://'
if auth is not None:
url += auth + '@'
if host is not None:
url += host
if port is not None:
url += ':' + str(port)
if path is not None:
url += path
if query is not None:
url += '?' + query
if fragment is not None:
url += '#' + fragment
return url
def __str__(self):
return self.url
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx + 1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
# Additionally, this implementation does silly things to be optimal
# on CPython.
if not url:
# Empty
return Url()
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if port:
# If given, ports must be integers.
if not port.isdigit():
raise LocationParseError(url)
port = int(port)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
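# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of parse_url()/Url; the URL below is an arbitrary example.
if __name__ == '__main__':
    u = parse_url('http://user:[email protected]:8080/mail/?q=1#top')
    print(u.scheme)       # 'http'
    print(u.auth)         # 'user:pw'
    print(u.host)         # 'example.com'
    print(u.port)         # 8080
    print(u.request_uri)  # '/mail/?q=1'
    print(get_host('https://example.com/'))  # ('https', 'example.com', None)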
| gpl-2.0 |
MakeHer/edx-platform | common/test/acceptance/tests/lms/test_lms_dashboard_search.py | 74 | 7060 | """
Test dashboard search
"""
import os
import json
from bok_choy.web_app_test import WebAppTest
from ..helpers import generate_course_key
from ...pages.common.logout import LogoutPage
from ...pages.common.utils import click_css
from ...pages.studio.utils import add_html_component, type_in_codemirror
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.container import ContainerPage
from ...pages.lms.dashboard_search import DashboardSearchPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
class DashboardSearchTest(WebAppTest):
"""
Test dashboard search.
"""
USERNAME = 'STUDENT_TESTER'
EMAIL = '[email protected]'
STAFF_USERNAME = "STAFF_TESTER"
STAFF_EMAIL = "[email protected]"
TEST_INDEX_FILENAME = "test_root/index_file.dat"
def setUp(self):
"""
Create the search page and courses to search.
"""
# create test file in which index for this test will live
with open(self.TEST_INDEX_FILENAME, "w+") as index_file:
json.dump({}, index_file)
super(DashboardSearchTest, self).setUp()
self.dashboard = DashboardSearchPage(self.browser)
self.courses = {
'A': {
'org': 'test_org',
'number': self.unique_id,
'run': 'test_run_A',
'display_name': 'Test Course A '
},
'B': {
'org': 'test_org',
'number': self.unique_id,
'run': 'test_run_B',
'display_name': 'Test Course B '
},
'C': {
'org': 'test_org',
'number': self.unique_id,
'run': 'test_run_C',
'display_name': 'Test Course C '
}
}
# generate course fixtures and outline pages
self.course_outlines = {}
self.course_fixtures = {}
for key, course_info in self.courses.iteritems():
course_outline = CourseOutlinePage(
self.browser,
course_info['org'],
course_info['number'],
course_info['run']
)
course_fix = CourseFixture(
course_info['org'],
course_info['number'],
course_info['run'],
course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Section 1').add_children(
XBlockFixtureDesc('sequential', 'Subsection 1').add_children(
XBlockFixtureDesc('problem', 'Test Problem')
)
)
).add_children(
XBlockFixtureDesc('chapter', 'Section 2').add_children(
XBlockFixtureDesc('sequential', 'Subsection 2')
)
).install()
self.course_outlines[key] = course_outline
self.course_fixtures[key] = course_fix
def tearDown(self):
"""
Remove index file
"""
super(DashboardSearchTest, self).tearDown()
os.remove(self.TEST_INDEX_FILENAME)
def _auto_auth(self, username, email, staff):
"""
Logout and login with given credentials.
"""
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, username=username, email=email, staff=staff).visit()
def _studio_add_content(self, course_outline, html_content):
"""
Add content to first section on studio course page.
"""
# create a unit in course outline
course_outline.visit()
subsection = course_outline.section_at(0).subsection_at(0)
subsection.expand_subsection()
subsection.add_unit()
# go to the unit, create an HTML component, and save (not publish)
unit_page = ContainerPage(self.browser, None)
unit_page.wait_for_page()
add_html_component(unit_page, 0)
unit_page.wait_for_element_presence('.edit-button', 'Edit button is visible')
click_css(unit_page, '.edit-button', 0, require_notification=False)
unit_page.wait_for_element_visibility('.modal-editor', 'Modal editor is visible')
type_in_codemirror(unit_page, 0, html_content)
click_css(unit_page, '.action-save', 0)
def _studio_publish_content(self, course_outline):
"""
Publish content in first section on studio course page.
"""
course_outline.visit()
subsection = course_outline.section_at(0).subsection_at(0)
subsection.expand_subsection()
unit = subsection.unit_at(0)
unit.publish()
def test_page_existence(self):
"""
Make sure that the page exists.
"""
self._auto_auth(self.USERNAME, self.EMAIL, False)
self.dashboard.visit()
def test_search(self):
"""
Make sure that you can search courses.
"""
search_string = "dashboard"
html_content = "dashboard search"
# Enroll student in courses A & B, but not C
for course_info in [self.courses['A'], self.courses['B']]:
course_key = generate_course_key(
course_info['org'],
course_info['number'],
course_info['run']
)
AutoAuthPage(
self.browser,
username=self.USERNAME,
email=self.EMAIL,
course_id=course_key
).visit()
# Create content in studio without publishing.
self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
self._studio_add_content(self.course_outlines['A'], html_content)
self._studio_add_content(self.course_outlines['B'], html_content)
self._studio_add_content(self.course_outlines['C'], html_content)
# Do a search, there should be no results shown.
self._auto_auth(self.USERNAME, self.EMAIL, False)
self.dashboard.visit()
self.dashboard.search_for_term(search_string)
assert search_string not in self.dashboard.search_results.html[0]
# Publish in studio to trigger indexing.
self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
self._studio_publish_content(self.course_outlines['A'])
self._studio_publish_content(self.course_outlines['B'])
self._studio_publish_content(self.course_outlines['C'])
# Do the search again, this time we expect results from courses A & B, but not C
self._auto_auth(self.USERNAME, self.EMAIL, False)
self.dashboard.visit()
self.dashboard.search_for_term(search_string)
assert self.dashboard.search_results.html[0].count(search_string) == 2
assert self.dashboard.search_results.html[0].count(self.courses['A']['display_name']) == 1
assert self.dashboard.search_results.html[0].count(self.courses['B']['display_name']) == 1
| agpl-3.0 |
mehtapgundogan/Tellal | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/eucjpprober.py | 2919 | 3678 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCJPSMModel)
self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
self._mContextAnalyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "EUC-JP"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
# PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar, charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
| gpl-2.0 |
enthought/traitsbackendqt | enthought/traits/ui/qt4/ui_editor.py | 1 | 1280 | #------------------------------------------------------------------------------
#
# Copyright (c) 2009, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
#------------------------------------------------------------------------------
""" Defines the BasicUIEditor class, which allows creating editors that define
their function by creating an embedded Traits UI.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from enthought.traits.ui.ui_editor import UIEditor as BaseUIEditor
from editor import Editor
#-------------------------------------------------------------------------------
# 'UIEditor' base class:
#-------------------------------------------------------------------------------
class UIEditor(BaseUIEditor, Editor):
""" An editor that creates an embedded Traits UI.
"""
pass
| gpl-2.0 |
ktan2020/legacy-automation | win/Lib/site-packages/robot/running/timeouts/timeoutsignaling.py | 2 | 1263 | # Copyright 2008-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from signal import setitimer, signal, SIGALRM, ITIMER_REAL
from robot.errors import TimeoutError
class Timeout(object):
def __init__(self, timeout, error):
self._timeout = timeout
self._error = error
def execute(self, runnable):
self._start_timer()
try:
return runnable()
finally:
self._stop_timer()
def _start_timer(self):
signal(SIGALRM, self._raise_timeout_error)
setitimer(ITIMER_REAL, self._timeout)
def _raise_timeout_error(self, signum, frame):
raise TimeoutError(self._error)
def _stop_timer(self):
setitimer(ITIMER_REAL, 0)
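# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming a POSIX platform (SIGALRM/setitimer) and the main
# thread; time.sleep(10) simply stands in for a slow keyword.
if __name__ == '__main__':
    import time
    try:
        Timeout(1.5, 'Example keyword timed out').execute(lambda: time.sleep(10))
    except TimeoutError as err:
        print(err)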
| mit |
viger/docker | proxy/proxy/code/default/python27/1.0/lib/email/header.py | 255 | 22243 | # Copyright (C) 2002-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: [email protected]
"""Header encoding and decoding functionality."""
__all__ = [
'Header',
'decode_header',
'make_header',
]
import re
import binascii
import email.quoprimime
import email.base64mime
from email.errors import HeaderParseError
from email.charset import Charset
NL = '\n'
SPACE = ' '
USPACE = u' '
SPACE8 = ' ' * 8
UEMPTYSTRING = u''
MAXLINELEN = 76
USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')
# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
(?=[ \t]|$) # whitespace or the end of the string
''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822. Character range is from tilde to exclamation mark.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')
# Find a header embedded in a putative header value. Used to check for
# header injection attack.
_embeded_header = re.compile(r'\n[^ \t]+:')
# Helpers
_max_append = email.quoprimime._max_append
def decode_header(header):
"""Decode a message header value without converting charset.
Returns a list of (decoded_string, charset) pairs containing each of the
decoded parts of the header. Charset is None for non-encoded parts of the
header, otherwise a lower-case string containing the name of the character
set specified in the encoded string.
An email.errors.HeaderParseError may be raised when certain decoding error
occurs (e.g. a base64 decoding exception).
"""
# If no encoding, just return the header
header = str(header)
if not ecre.search(header):
return [(header, None)]
decoded = []
dec = ''
for line in header.splitlines():
# This line might not have an encoding in it
if not ecre.search(line):
decoded.append((line, None))
continue
parts = ecre.split(line)
while parts:
unenc = parts.pop(0).strip()
if unenc:
# Should we continue a long line?
if decoded and decoded[-1][1] is None:
decoded[-1] = (decoded[-1][0] + SPACE + unenc, None)
else:
decoded.append((unenc, None))
if parts:
charset, encoding = [s.lower() for s in parts[0:2]]
encoded = parts[2]
dec = None
if encoding == 'q':
dec = email.quoprimime.header_decode(encoded)
elif encoding == 'b':
paderr = len(encoded) % 4 # Postel's law: add missing padding
if paderr:
encoded += '==='[:4 - paderr]
try:
dec = email.base64mime.decode(encoded)
except binascii.Error:
# Turn this into a higher level exception. BAW: Right
# now we throw the lower level exception away but
# when/if we get exception chaining, we'll preserve it.
raise HeaderParseError
if dec is None:
dec = encoded
if decoded and decoded[-1][1] == charset:
decoded[-1] = (decoded[-1][0] + dec, decoded[-1][1])
else:
decoded.append((dec, charset))
del parts[0:3]
return decoded
def make_header(decoded_seq, maxlinelen=None, header_name=None,
continuation_ws=' '):
"""Create a Header from a sequence of pairs as returned by decode_header()
decode_header() takes a header value string and returns a sequence of
pairs of the format (decoded_string, charset) where charset is the string
name of the character set.
This function takes one of those sequence of pairs and returns a Header
instance. Optional maxlinelen, header_name, and continuation_ws are as in
the Header constructor.
"""
h = Header(maxlinelen=maxlinelen, header_name=header_name,
continuation_ws=continuation_ws)
for s, charset in decoded_seq:
# None means us-ascii but we can simply pass it on to h.append()
if charset is not None and not isinstance(charset, Charset):
charset = Charset(charset)
h.append(s, charset)
return h
class Header:
def __init__(self, s=None, charset=None,
maxlinelen=None, header_name=None,
continuation_ws=' ', errors='strict'):
"""Create a MIME-compliant header that can contain many character sets.
Optional s is the initial header value. If None, the initial header
value is not set. You can later append to the header with .append()
method calls. s may be a byte string or a Unicode string, but see the
.append() documentation for semantics.
Optional charset serves two purposes: it has the same meaning as the
charset argument to the .append() method. It also sets the default
character set for all subsequent .append() calls that omit the charset
argument. If charset is not provided in the constructor, the us-ascii
charset is used both as s's initial charset and as the default for
subsequent .append() calls.
The maximum line length can be specified explicit via maxlinelen. For
splitting the first line to a shorter value (to account for the field
header which isn't included in s, e.g. `Subject') pass in the name of
the field in header_name. The default maxlinelen is 76.
continuation_ws must be RFC 2822 compliant folding whitespace (usually
either a space or a hard tab) which will be prepended to continuation
lines.
errors is passed through to the .append() call.
"""
if charset is None:
charset = USASCII
if not isinstance(charset, Charset):
charset = Charset(charset)
self._charset = charset
self._continuation_ws = continuation_ws
cws_expanded_len = len(continuation_ws.replace('\t', SPACE8))
# BAW: I believe `chunks' and `maxlinelen' should be non-public.
self._chunks = []
if s is not None:
self.append(s, charset, errors)
if maxlinelen is None:
maxlinelen = MAXLINELEN
if header_name is None:
# We don't know anything about the field header so the first line
# is the same length as subsequent lines.
self._firstlinelen = maxlinelen
else:
# The first line should be shorter to take into account the field
# header. Also subtract off 2 extra for the colon and space.
self._firstlinelen = maxlinelen - len(header_name) - 2
# Second and subsequent lines should subtract off the length in
# columns of the continuation whitespace prefix.
self._maxlinelen = maxlinelen - cws_expanded_len
def __str__(self):
"""A synonym for self.encode()."""
return self.encode()
def __unicode__(self):
"""Helper for the built-in unicode function."""
uchunks = []
lastcs = None
for s, charset in self._chunks:
# We must preserve spaces between encoded and non-encoded word
# boundaries, which means for us we need to add a space when we go
# from a charset to None/us-ascii, or from None/us-ascii to a
# charset. Only do this for the second and subsequent chunks.
nextcs = charset
if uchunks:
if lastcs not in (None, 'us-ascii'):
if nextcs in (None, 'us-ascii'):
uchunks.append(USPACE)
nextcs = None
elif nextcs not in (None, 'us-ascii'):
uchunks.append(USPACE)
lastcs = nextcs
uchunks.append(unicode(s, str(charset)))
return UEMPTYSTRING.join(uchunks)
# Rich comparison operators for equality only. BAW: does it make sense to
# have or explicitly disable <, <=, >, >= operators?
def __eq__(self, other):
# other may be a Header or a string. Both are fine so coerce
# ourselves to a string, swap the args and do another comparison.
return other == self.encode()
def __ne__(self, other):
return not self == other
def append(self, s, charset=None, errors='strict'):
"""Append a string to the MIME header.
Optional charset, if given, should be a Charset instance or the name
of a character set (which will be converted to a Charset instance). A
value of None (the default) means that the charset given in the
constructor is used.
s may be a byte string or a Unicode string. If it is a byte string
(i.e. isinstance(s, str) is true), then charset is the encoding of
that byte string, and a UnicodeError will be raised if the string
cannot be decoded with that charset. If s is a Unicode string, then
charset is a hint specifying the character set of the characters in
the string. In this case, when producing an RFC 2822 compliant header
using RFC 2047 rules, the Unicode string will be encoded using the
following charsets in order: us-ascii, the charset hint, utf-8. The
first character set not to provoke a UnicodeError is used.
Optional `errors' is passed as the third argument to any unicode() or
ustr.encode() call.
"""
if charset is None:
charset = self._charset
elif not isinstance(charset, Charset):
charset = Charset(charset)
# If the charset is our faux 8bit charset, leave the string unchanged
if charset != '8bit':
# We need to test that the string can be converted to unicode and
# back to a byte string, given the input and output codecs of the
# charset.
if isinstance(s, str):
# Possibly raise UnicodeError if the byte string can't be
# converted to a unicode with the input codec of the charset.
incodec = charset.input_codec or 'us-ascii'
ustr = unicode(s, incodec, errors)
# Now make sure that the unicode could be converted back to a
# byte string with the output codec, which may be different
                # than the input codec. Still, use the original byte string.
outcodec = charset.output_codec or 'us-ascii'
ustr.encode(outcodec, errors)
elif isinstance(s, unicode):
# Now we have to be sure the unicode string can be converted
# to a byte string with a reasonable output codec. We want to
# use the byte string in the chunk.
for charset in USASCII, charset, UTF8:
try:
outcodec = charset.output_codec or 'us-ascii'
s = s.encode(outcodec, errors)
break
except UnicodeError:
pass
else:
assert False, 'utf-8 conversion failed'
self._chunks.append((s, charset))
def _split(self, s, charset, maxlinelen, splitchars):
# Split up a header safely for use with encode_chunks.
splittable = charset.to_splittable(s)
encoded = charset.from_splittable(splittable, True)
elen = charset.encoded_header_len(encoded)
        # If the line's encoded length already fits, just return it
if elen <= maxlinelen:
return [(encoded, charset)]
# If we have undetermined raw 8bit characters sitting in a byte
# string, we really don't know what the right thing to do is. We
# can't really split it because it might be multibyte data which we
# could break if we split it between pairs. The least harm seems to
# be to not split the header at all, but that means they could go out
# longer than maxlinelen.
if charset == '8bit':
return [(s, charset)]
# BAW: I'm not sure what the right test here is. What we're trying to
# do is be faithful to RFC 2822's recommendation that ($2.2.3):
#
# "Note: Though structured field bodies are defined in such a way that
# folding can take place between many of the lexical tokens (and even
# within some of the lexical tokens), folding SHOULD be limited to
# placing the CRLF at higher-level syntactic breaks."
#
# For now, I can only imagine doing this when the charset is us-ascii,
# although it's possible that other charsets may also benefit from the
# higher-level syntactic breaks.
elif charset == 'us-ascii':
return self._split_ascii(s, charset, maxlinelen, splitchars)
# BAW: should we use encoded?
elif elen == len(s):
# We can split on _maxlinelen boundaries because we know that the
# encoding won't change the size of the string
splitpnt = maxlinelen
first = charset.from_splittable(splittable[:splitpnt], False)
last = charset.from_splittable(splittable[splitpnt:], False)
else:
# Binary search for split point
first, last = _binsplit(splittable, charset, maxlinelen)
# first is of the proper length so just wrap it in the appropriate
# chrome. last must be recursively split.
fsplittable = charset.to_splittable(first)
fencoded = charset.from_splittable(fsplittable, True)
chunk = [(fencoded, charset)]
return chunk + self._split(last, charset, self._maxlinelen, splitchars)
def _split_ascii(self, s, charset, firstlen, splitchars):
chunks = _split_ascii(s, firstlen, self._maxlinelen,
self._continuation_ws, splitchars)
return zip(chunks, [charset]*len(chunks))
def _encode_chunks(self, newchunks, maxlinelen):
# MIME-encode a header with many different charsets and/or encodings.
#
# Given a list of pairs (string, charset), return a MIME-encoded
# string suitable for use in a header field. Each pair may have
# different charsets and/or encodings, and the resulting header will
# accurately reflect each setting.
#
# Each encoding can be email.utils.QP (quoted-printable, for
# ASCII-like character sets like iso-8859-1), email.utils.BASE64
# (Base64, for non-ASCII like character sets like KOI8-R and
# iso-2022-jp), or None (no encoding).
#
# Each pair will be represented on a separate line; the resulting
# string will be in the format:
#
# =?charset1?q?Mar=EDa_Gonz=E1lez_Alonso?=\n
# =?charset2?b?SvxyZ2VuIEL2aW5n?="
chunks = []
for header, charset in newchunks:
if not header:
continue
if charset is None or charset.header_encoding is None:
s = header
else:
s = charset.header_encode(header)
# Don't add more folding whitespace than necessary
if chunks and chunks[-1].endswith(' '):
extra = ''
else:
extra = ' '
_max_append(chunks, s, maxlinelen, extra)
joiner = NL + self._continuation_ws
return joiner.join(chunks)
def encode(self, splitchars=';, '):
"""Encode a message header into an RFC-compliant format.
There are many issues involved in converting a given string for use in
an email header. Only certain character sets are readable in most
email clients, and as header strings can only contain a subset of
7-bit ASCII, care must be taken to properly convert and encode (with
Base64 or quoted-printable) header strings. In addition, there is a
75-character length limit on any given encoded header field, so
line-wrapping must be performed, even with double-byte character sets.
This method will do its best to convert the string to the correct
character set used in email, and encode and line wrap it safely with
the appropriate scheme for that character set.
If the given charset is not known or an error occurs during
conversion, this function will return the header untouched.
Optional splitchars is a string containing characters to split long
ASCII lines on, in rough support of RFC 2822's `highest level
syntactic breaks'. This doesn't affect RFC 2047 encoded lines.
"""
newchunks = []
maxlinelen = self._firstlinelen
lastlen = 0
for s, charset in self._chunks:
# The first bit of the next chunk should be just long enough to
# fill the next line. Don't forget the space separating the
# encoded words.
targetlen = maxlinelen - lastlen - 1
if targetlen < charset.encoded_header_len(''):
# Stick it on the next line
targetlen = maxlinelen
newchunks += self._split(s, charset, targetlen, splitchars)
lastchunk, lastcharset = newchunks[-1]
lastlen = lastcharset.encoded_header_len(lastchunk)
value = self._encode_chunks(newchunks, maxlinelen)
if _embeded_header.search(value):
raise HeaderParseError("header value appears to contain "
"an embedded header: {!r}".format(value))
return value
def _split_ascii(s, firstlen, restlen, continuation_ws, splitchars):
lines = []
maxlen = firstlen
for line in s.splitlines():
# Ignore any leading whitespace (i.e. continuation whitespace) already
# on the line, since we'll be adding our own.
line = line.lstrip()
if len(line) < maxlen:
lines.append(line)
maxlen = restlen
continue
# Attempt to split the line at the highest-level syntactic break
# possible. Note that we don't have a lot of smarts about field
# syntax; we just try to break on semi-colons, then commas, then
# whitespace.
for ch in splitchars:
if ch in line:
break
else:
# There's nothing useful to split the line on, not even spaces, so
# just append this line unchanged
lines.append(line)
maxlen = restlen
continue
# Now split the line on the character plus trailing whitespace
cre = re.compile(r'%s\s*' % ch)
if ch in ';,':
eol = ch
else:
eol = ''
joiner = eol + ' '
joinlen = len(joiner)
wslen = len(continuation_ws.replace('\t', SPACE8))
this = []
linelen = 0
for part in cre.split(line):
curlen = linelen + max(0, len(this)-1) * joinlen
partlen = len(part)
onfirstline = not lines
# We don't want to split after the field name, if we're on the
# first line and the field name is present in the header string.
if ch == ' ' and onfirstline and \
len(this) == 1 and fcre.match(this[0]):
this.append(part)
linelen += partlen
elif curlen + partlen > maxlen:
if this:
lines.append(joiner.join(this) + eol)
# If this part is longer than maxlen and we aren't already
# splitting on whitespace, try to recursively split this line
# on whitespace.
if partlen > maxlen and ch != ' ':
subl = _split_ascii(part, maxlen, restlen,
continuation_ws, ' ')
lines.extend(subl[:-1])
this = [subl[-1]]
else:
this = [part]
linelen = wslen + len(this[-1])
maxlen = restlen
else:
this.append(part)
linelen += partlen
# Put any left over parts on a line by themselves
if this:
lines.append(joiner.join(this))
return lines
def _binsplit(splittable, charset, maxlinelen):
i = 0
j = len(splittable)
while i < j:
# Invariants:
# 1. splittable[:k] fits for all k <= i (note that we *assume*,
# at the start, that splittable[:0] fits).
# 2. splittable[:k] does not fit for any k > j (at the start,
# this means we shouldn't look at any k > len(splittable)).
# 3. We don't know about splittable[:k] for k in i+1..j.
# 4. We want to set i to the largest k that fits, with i <= k <= j.
#
m = (i+j+1) >> 1 # ceiling((i+j)/2); i < m <= j
chunk = charset.from_splittable(splittable[:m], True)
chunklen = charset.encoded_header_len(chunk)
if chunklen <= maxlinelen:
# m is acceptable, so is a new lower bound.
i = m
else:
# m is not acceptable, so final i must be < m.
j = m - 1
# i == j. Invariant #1 implies that splittable[:i] fits, and
# invariant #2 implies that splittable[:i+1] does not fit, so i
# is what we're looking for.
first = charset.from_splittable(splittable[:i], False)
last = charset.from_splittable(splittable[i:], False)
return first, last
| mit |
acumb/LatticeDNAOrigami | origamipy/mbar.py | 1 | 5092 | """Functions for carrying out Multi Bennet Acceptance Ration (MBAR) analysis"""
import math
from pymbar import timeseries
import scipy
from origamipy.us_process import *
def calc_rstaple_u(staple_M, lattice_site_volume):
"""Calculate reduced staple chemical potential"""
sites_per_litre = 1e-3 / lattice_site_volume
rstaple_u = math.log(staple_M * scipy.constants.N_A / sites_per_litre)
return rstaple_u
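# Illustrative numbers (assumed units: staple_M in mol/L and
# lattice_site_volume in m^3, so that 1e-3 m^3 = 1 L). With staple_M = 1e-6
# and lattice_site_volume = 1e-27 (one cubic nanometre):
#   sites_per_litre = 1e-3 / 1e-27 = 1e24
#   rstaple_u = log(1e-6 * 6.022e23 / 1e24) = log(6.022e-7) ~ -14.3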
def calc_correlated_rpots(wins, win_enes, win_ops, win_biases, rstaple_u, tags,
min_bias, slope):
correlated_rpots = []
for i in range(len(wins)):
rpots = calc_reduced_potentials(wins[i], win_enes[i], win_ops[i],
win_biases[i], rstaple_u, tags, min_bias, slope)
correlated_rpots.append(rpots)
return correlated_rpots
def calc_reduced_potentials(win, win_enes, win_ops, win_biases, rstaple_u, tags,
min_bias, slope):
"""Calculate reduced potentials as defined in shirts2008"""
min_op1 = win[0][0]
max_op1 = win[1][0]
min_op2 = win[0][1]
max_op2 = win[1][1]
reduced_potentials = []
for i in range(len(win_enes)):
num_staples = win_ops['numstaples'][i]
op1 = win_ops[tags[0]][i]
op2 = win_ops[tags[1]][i]
rchem_pot = num_staples * rstaple_u
bias = 0
if op1 < min_op1:
bias += min_bias + slope*(min_op1 - op1)
if op1 > max_op1:
bias += min_bias + slope*(op1 - max_op1)
if op2 < min_op2:
bias += min_bias + slope*(min_op2 - op2)
if op2 > max_op2:
bias += min_bias + slope*(op2 - max_op2)
point = (int(op1), int(op2))
if point in win_biases:
bias += win_biases[point]
rpot = win_enes[i] + bias + rchem_pot
reduced_potentials.append(rpot)
return reduced_potentials
def calc_no_bias_reduced_potentials(enes, ops, rstaple_u):
"""Calculate reduced potentials as defined in shirts2008"""
reduced_potentials = []
for i in range(len(enes)):
num_staples = ops['numstaples'][i]
rchem_pot = num_staples * rstaple_u
rpot = enes[i] + rchem_pot
reduced_potentials.append(rpot)
return np.array(reduced_potentials)
def subsample_independent_config_set(win_rpots):
print('Window, configs, t0, g, Neff')
win_subsample_indices = []
for i, rpots in enumerate(win_rpots):
        # t is start of equilibrated subset, g is statistical inefficiency,
# Neff is effective sample number
t, g, Neff = timeseries.detectEquilibration(np.array(rpots))
print('{:<7} {:<8} {:<3} {:<4.1f} {:<.1f}'.format(i, len(rpots), t, g, Neff))
prod_indices = timeseries.subsampleCorrelatedData(rpots[t:], g=g)
indices = [i + t for i in prod_indices]
#indices = list(range(len(rpots)))
win_subsample_indices.append(indices)
return win_subsample_indices
win_uncorrelated_enes = []
win_uncorrelated_ops = []
def create_uncorrelated_concatenation(wins, win_subsample_indices, win_obvs):
subsample_indices = win_subsample_indices[0]
win_uncorrelated_obvs = np.array(win_obvs[0])[subsample_indices]
for i in range(1, len(wins)):
subsample_indices = win_subsample_indices[i]
win_subsampled_obvs = np.array(win_obvs[i])[subsample_indices]
win_uncorrelated_obvs = np.concatenate([win_uncorrelated_obvs,
win_subsampled_obvs])
return win_uncorrelated_obvs
def create_uncorrelated_ops_concatenation(wins, win_subsample_indices, win_ops):
win_uncorrelated_ops = {}
tags = win_ops[0].keys()
subsample_indices = win_subsample_indices[0]
for tag in tags:
win_uncorrelated_ops[tag] = np.array(win_ops[0][tag])[subsample_indices]
for i in range(1, len(wins)):
subsample_indices = win_subsample_indices[i]
for tag in tags:
win_subsampled_ops = np.array(win_ops[i][tag])[subsample_indices]
win_uncorrelated_ops[tag] = np.concatenate(
[win_uncorrelated_ops[tag], win_subsampled_ops])
return win_uncorrelated_ops
def calc_uncorrelated_rpots(wins, win_uncorrelated_enes, win_uncorrelated_ops,
win_biases, rstaple_u, tags, min_bias, slope):
uncorrelated_rpots = []
for i in range(len(wins)):
rpots = calc_reduced_potentials(wins[i], win_uncorrelated_enes,
win_uncorrelated_ops, win_biases[i], rstaple_u, tags, min_bias,
slope)
uncorrelated_rpots.append(rpots)
return uncorrelated_rpots
def sort_and_fill_pmfs(bins, pmfs, staple_lims, domain_lims):
bin_pmf = {bins[i]: pmfs[i] for i in range(len(bins))}
for x in range(staple_lims[0], staple_lims[1] + 1):
for y in range(domain_lims[0], domain_lims[1] + 1):
if (x, y) not in bin_pmf.keys():
bin_pmf[(x, y)] = 'nan'
sorted_bin_pmf = sorted(bin_pmf.items(), key=itemgetter(0))
bins = []
pmfs = []
for point, pmf in sorted_bin_pmf:
bins.append(point)
pmfs.append(pmf)
return bins, pmfs
| mit |
zhanqxun/cv_fish | numpy/doc/creation.py | 118 | 5507 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
 >>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, and types
 >>> x
 array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
``>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])``
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
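For example, a CSV file with a single header row can often be loaded directly
(the file name, delimiter and header row are illustrative assumptions): ::
 >>> arr = np.loadtxt('data.csv', delimiter=',', skiprows=1)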
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
read the data, one can wrap that library with a variety of techniques though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
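A minimal sketch of the fromfile()/tofile() approach (the file name, dtype and
shape below are illustrative assumptions): ::
 >>> a = np.arange(6, dtype=np.float64)
 >>> a.tofile('data.bin') # raw bytes; no shape or dtype header is stored
 >>> b = np.fromfile('data.bin', dtype=np.float64).reshape(2, 3)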
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
"""
from __future__ import division, absolute_import, print_function
| apache-2.0 |
S01780/python-social-auth | social/tests/backends/test_amazon.py | 92 | 1317 | import json
from social.tests.backends.oauth import OAuth2Test
class AmazonOAuth2Test(OAuth2Test):
backend_path = 'social.backends.amazon.AmazonOAuth2'
user_data_url = 'https://www.amazon.com/ap/user/profile'
expected_username = 'FooBar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
user_data_body = json.dumps({
'user_id': 'amzn1.account.ABCDE1234',
'email': '[email protected]',
'name': 'Foo Bar'
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
class AmazonOAuth2BrokenServerResponseTest(OAuth2Test):
backend_path = 'social.backends.amazon.AmazonOAuth2'
user_data_url = 'https://www.amazon.com/ap/user/profile'
expected_username = 'FooBar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
user_data_body = json.dumps({
'Request-Id': '02GGTU7CWMNFTV3KH3J6',
'Profile': {
'Name': 'Foo Bar',
'CustomerId': 'amzn1.account.ABCDE1234',
'PrimaryEmail': '[email protected]'
}
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
| bsd-3-clause |
Outernet-Project/librarian-ondd | librarian_ondd/setup.py | 1 | 2550 | import logging
from bottle import request
from bottle_utils.i18n import lazy_gettext as _
from .forms import ONDDForm
def read_ondd_setup():
initial_data = request.app.supervisor.exts.setup.get('ondd')
return {} if isinstance(initial_data, bool) else initial_data
def has_invalid_config():
ondd_client = request.app.supervisor.exts.ondd
ondd_alive = ondd_client.ping()
if not ondd_alive:
# If ondd is not running, skip the step
return False
settings = read_ondd_setup()
if settings is None:
# Settings is None if ONDD configuration has never been performed
return True
if settings == {}:
        # Settings is an empty dict if setup has been performed but no preset
        # is present. This is allowed, as the user is allowed to skip through the
# setup step without setting the tuner settings.
return False
form = ONDDForm(read_ondd_setup())
return not form.is_valid()
def setup_ondd_form():
ondd_client = request.app.supervisor.exts.ondd
snr_min = request.app.config.get('ondd.snr_min', 0.2)
snr_max = request.app.config.get('ondd.snr_max', 0.9)
return dict(status=ondd_client.get_status(), form=ONDDForm(),
SNR_MIN=snr_min, SNR_MAX=snr_max)
def setup_ondd():
ondd_client = request.app.supervisor.exts.ondd
is_test_mode = request.forms.get('mode', 'submit') == 'test'
form = ONDDForm(request.forms)
form_valid = form.is_valid()
snr_min = request.app.config.get('ondd.snr_min', 0.2)
snr_max = request.app.config.get('ondd.snr_max', 0.9)
if form_valid:
# Store full settings
logging.info('ONDD: tuner settings updated')
request.app.supervisor.exts.setup.append({'ondd': form.processed_data})
if is_test_mode:
return dict(successful=False, form=form,
status=ondd_client.get_status(),
# Translators, shown when tuner settings are updated
# during setup wizard step.
message=_('Tuner settings have been updated'),
SNR_MIN=snr_min, SNR_MAX=snr_max)
return dict(successful=True)
# Form is not valid
if is_test_mode:
# We only do something about this in test mode
return dict(successful=False, form=form,
status=ondd_client.get_status(),
SNR_MIN=snr_min, SNR_MAX=snr_max)
request.app.supervisor.exts.setup.append({'ondd': {}})
return dict(successful=True)
| gpl-3.0 |
annapowellsmith/openpresc | openprescribing/pipeline/management/commands/update_smoketests.py | 1 | 1634 | import os
import glob
import json
from django.conf import settings
from django.core.management import BaseCommand
from gcutils.bigquery import Client, results_to_dicts
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('last_imported')
def handle(self, *args, **kwargs):
last_imported = kwargs['last_imported']
prescribing_date = "-".join(last_imported.split('_')) + '-01'
date_condition = ('month > TIMESTAMP(DATE_SUB(DATE "%s", '
'INTERVAL 5 YEAR))' % prescribing_date)
path = os.path.join(settings.PIPELINE_METADATA_DIR, 'smoketests')
for sql_file in glob.glob(os.path.join(path, '*.sql')):
test_name = os.path.splitext(os.path.basename(sql_file))[0]
with open(sql_file, 'rb') as f:
query = f.read().replace(
'{{ date_condition }}', date_condition)
print(query)
client = Client()
results = client.query(query)
quantity = []
cost = []
items = []
for row in results_to_dicts(results):
quantity.append(row['quantity'])
cost.append(row['actual_cost'])
items.append(row['items'])
print("Updating test expectations for %s" % test_name)
json_path = os.path.join(path, '%s.json' % test_name)
with open(json_path, 'wb') as f:
obj = {'cost': cost,
'items': items,
'quantity': quantity}
json.dump(obj, f, indent=2)
| mit |
diagramsoftware/account-financial-tools | account_journal_always_check_date/__openerp__.py | 13 | 2087 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Account Journal Always Check Date module for OpenERP
# Copyright (C) 2013-2014 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Account Journal Always Check Date',
'version': '8.0.0.1.0',
'category': 'Accounting & Finance',
'license': 'AGPL-3',
'summary': 'Option Check Date in Period always active on journals',
'description': """
Check Date in Period always active on Account Journals
======================================================
This module:
* activates the 'Check Date in Period' option on all existing account journals,
* enables the 'Check Date in Period' option on new account journals,
* prevents users from deactivating the 'Check Date in Period' option.
So this module is an additional safeguard for countries where, on an account
move, the date must be inside the period.
Please contact Alexis de Lattre from Akretion <[email protected]>
for any help or question about this module.
""",
'author': "Akretion,Odoo Community Association (OCA)",
'website': 'http://www.akretion.com',
'depends': ['account'],
'data': [],
'installable': True,
'active': False,
}
| agpl-3.0 |
TuXiaokang/pyseeta | pyseeta/model_zoo.py | 1 | 4747 | '''
Copyright (c) 2016- Facebook, Inc (Adam Paszke)
Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (Clement Farabet)
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
'''
import hashlib
import os
import re
import shutil
import sys
import tempfile
try:
from requests.utils import urlparse
    from requests import get as _requests_get
    def urlopen(url):
        # stream=True so that ``.raw`` can be read in chunks further below
        return _requests_get(url, stream=True)
requests_available = True
except ImportError:
requests_available = False
if sys.version_info[0] == 2:
from urlparse import urlparse # noqa f811
from urllib2 import urlopen # noqa f811
else:
from urllib.request import urlopen
from urllib.parse import urlparse
try:
from tqdm import tqdm
except ImportError:
tqdm = None # defined below
# matches bfd8deac from resnet18-bfd8deac.pth
HASH_REGEX = re.compile(r'-([a-f0-9]*)\.')
def load_url(url, model_dir=None, map_location=None):
r"""Loads the Torch serialized object at the given URL.
If the object is already present in `model_dir`, it's deserialized and
returned. The filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of the contents of the file. The hash is used to
ensure unique names and to verify the contents of the file.
    The default value of `model_dir` is ``$SEETA_HOME/models`` where
    ``$SEETA_HOME`` defaults to ``~/.pyseeta``. The default directory can be
    overridden with the ``$SEETA_MODEL_ZOO`` environment variable.
Args:
url (string): URL of the object to download
model_dir (string, optional): directory in which to save the object
map_location (optional): a function or a dict specifying how to remap storage locations (see torch.load)
Example:
        >>> cached_path = load_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
"""
if model_dir is None:
seeta_home = os.path.expanduser(os.getenv('SEETA_HOME', '~/.pyseeta'))
model_dir = os.getenv('SEETA_MODEL_ZOO', os.path.join(seeta_home, 'models'))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
parts = urlparse(url)
filename = os.path.basename(parts.path)
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
hash_prefix = HASH_REGEX.search(filename).group(1)
_download_url_to_file(url, cached_file, hash_prefix)
# return torch.load(cached_file, map_location=map_location)
return cached_file
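# Illustrative call (the URL is hypothetical; it merely follows the required
# ``filename-<sha256 prefix>.ext`` naming convention described above):
#   model_path = load_url('https://example.com/models/detector-0123abcd.pth')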
def _download_url_to_file(url, dst, hash_prefix):
u = urlopen(url)
if requests_available:
file_size = int(u.headers["Content-Length"])
u = u.raw
else:
meta = u.info()
if hasattr(meta, 'getheaders'):
file_size = int(meta.getheaders("Content-Length")[0])
else:
file_size = int(meta.get_all("Content-Length")[0])
f = tempfile.NamedTemporaryFile(delete=False)
try:
sha256 = hashlib.sha256()
with tqdm(total=file_size) as pbar:
while True:
buffer = u.read(8192)
if len(buffer) == 0:
break
f.write(buffer)
sha256.update(buffer)
pbar.update(len(buffer))
f.close()
digest = sha256.hexdigest()
if digest[:len(hash_prefix)] != hash_prefix:
raise RuntimeError('invalid hash value (expected "{}", got "{}")'
.format(hash_prefix, digest))
shutil.move(f.name, dst)
finally:
f.close()
if os.path.exists(f.name):
os.remove(f.name)
if tqdm is None:
# fake tqdm if it's not installed
class tqdm(object):
def __init__(self, total):
self.total = total
self.n = 0
def update(self, n):
self.n += n
sys.stderr.write("\r{0:.1f}%".format(100 * self.n / float(self.total)))
sys.stderr.flush()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stderr.write('\n')
| mit |
nagyistoce/odoo-dev-odoo | addons/l10n_be/wizard/l10n_be_account_vat_declaration.py | 309 | 10685 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Adapted by Noviat to
# - enforce correct vat number
# - support negative balance
# - assign amount of tax code 71-72 correctly to grid 71 or 72
# - support Noviat tax code scheme
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from openerp.osv import fields, osv
from openerp.tools.translate import _
class l10n_be_vat_declaration(osv.osv_memory):
""" Vat Declaration """
_name = "l1on_be.vat.declaration"
_description = "Vat Declaration"
def _get_xml_data(self, cr, uid, context=None):
if context.get('file_save', False):
return base64.encodestring(context['file_save'].encode('utf8'))
return ''
_columns = {
'name': fields.char('File Name'),
'period_id': fields.many2one('account.period','Period', required=True),
'tax_code_id': fields.many2one('account.tax.code', 'Tax Code', domain=[('parent_id', '=', False)], required=True),
'msg': fields.text('File created', readonly=True),
'file_save': fields.binary('Save File'),
'ask_restitution': fields.boolean('Ask Restitution',help='It indicates whether a restitution is to make or not?'),
'ask_payment': fields.boolean('Ask Payment',help='It indicates whether a payment is to make or not?'),
'client_nihil': fields.boolean('Last Declaration, no clients in client listing', help='Tick this case only if it concerns only the last statement on the civil or cessation of activity: ' \
'no clients to be included in the client listing.'),
'comments': fields.text('Comments'),
}
def _get_tax_code(self, cr, uid, context=None):
obj_tax_code = self.pool.get('account.tax.code')
obj_user = self.pool.get('res.users')
company_id = obj_user.browse(cr, uid, uid, context=context).company_id.id
tax_code_ids = obj_tax_code.search(cr, uid, [('company_id', '=', company_id), ('parent_id', '=', False)], context=context)
return tax_code_ids and tax_code_ids[0] or False
_defaults = {
'msg': 'Save the File with '".xml"' extension.',
'file_save': _get_xml_data,
'name': 'vat_declaration.xml',
'tax_code_id': _get_tax_code,
}
def create_xml(self, cr, uid, ids, context=None):
obj_tax_code = self.pool.get('account.tax.code')
obj_acc_period = self.pool.get('account.period')
obj_user = self.pool.get('res.users')
obj_partner = self.pool.get('res.partner')
mod_obj = self.pool.get('ir.model.data')
if context is None:
context = {}
list_of_tags = ['00','01','02','03','44','45','46','47','48','49','54','55','56','57','59','61','62','63','64','71','72','81','82','83','84','85','86','87','88','91']
data_tax = self.browse(cr, uid, ids[0])
if data_tax.tax_code_id:
obj_company = data_tax.tax_code_id.company_id
else:
obj_company = obj_user.browse(cr, uid, uid, context=context).company_id
vat_no = obj_company.partner_id.vat
if not vat_no:
raise osv.except_osv(_('Insufficient Data!'), _('No VAT number associated with your company.'))
vat_no = vat_no.replace(' ','').upper()
vat = vat_no[2:]
tax_code_ids = obj_tax_code.search(cr, uid, [('parent_id','child_of',data_tax.tax_code_id.id), ('company_id','=',obj_company.id)], context=context)
ctx = context.copy()
data = self.read(cr, uid, ids)[0]
ctx['period_id'] = data['period_id'][0]
tax_info = obj_tax_code.read(cr, uid, tax_code_ids, ['code','sum_period'], context=ctx)
default_address = obj_partner.address_get(cr, uid, [obj_company.partner_id.id])
default_address_id = default_address.get("default", obj_company.partner_id.id)
address_id= obj_partner.browse(cr, uid, default_address_id, context)
account_period = obj_acc_period.browse(cr, uid, data['period_id'][0], context=context)
issued_by = vat_no[:2]
comments = data['comments'] or ''
send_ref = str(obj_company.partner_id.id) + str(account_period.date_start[5:7]) + str(account_period.date_stop[:4])
starting_month = account_period.date_start[5:7]
ending_month = account_period.date_stop[5:7]
quarter = str(((int(starting_month) - 1) / 3) + 1)
if not address_id.email:
raise osv.except_osv(_('Insufficient Data!'),_('No email address associated with the company.'))
if not address_id.phone:
raise osv.except_osv(_('Insufficient Data!'),_('No phone associated with the company.'))
file_data = {
'issued_by': issued_by,
'vat_no': vat_no,
'only_vat': vat_no[2:],
'cmpny_name': obj_company.name,
'address': "%s %s"%(address_id.street or "",address_id.street2 or ""),
'post_code': address_id.zip or "",
'city': address_id.city or "",
'country_code': address_id.country_id and address_id.country_id.code or "",
'email': address_id.email or "",
'phone': address_id.phone.replace('.','').replace('/','').replace('(','').replace(')','').replace(' ',''),
'send_ref': send_ref,
'quarter': quarter,
'month': starting_month,
'year': str(account_period.date_stop[:4]),
'client_nihil': (data['client_nihil'] and 'YES' or 'NO'),
'ask_restitution': (data['ask_restitution'] and 'YES' or 'NO'),
'ask_payment': (data['ask_payment'] and 'YES' or 'NO'),
'comments': comments,
}
data_of_file = """<?xml version="1.0"?>
<ns2:VATConsignment xmlns="http://www.minfin.fgov.be/InputCommon" xmlns:ns2="http://www.minfin.fgov.be/VATConsignment" VATDeclarationsNbr="1">
<ns2:Representative>
<RepresentativeID identificationType="NVAT" issuedBy="%(issued_by)s">%(only_vat)s</RepresentativeID>
<Name>%(cmpny_name)s</Name>
<Street>%(address)s</Street>
<PostCode>%(post_code)s</PostCode>
<City>%(city)s</City>
<CountryCode>%(country_code)s</CountryCode>
<EmailAddress>%(email)s</EmailAddress>
<Phone>%(phone)s</Phone>
</ns2:Representative>
<ns2:VATDeclaration SequenceNumber="1" DeclarantReference="%(send_ref)s">
<ns2:Declarant>
<VATNumber xmlns="http://www.minfin.fgov.be/InputCommon">%(only_vat)s</VATNumber>
<Name>%(cmpny_name)s</Name>
<Street>%(address)s</Street>
<PostCode>%(post_code)s</PostCode>
<City>%(city)s</City>
<CountryCode>%(country_code)s</CountryCode>
<EmailAddress>%(email)s</EmailAddress>
<Phone>%(phone)s</Phone>
</ns2:Declarant>
<ns2:Period>
""" % (file_data)
if starting_month != ending_month:
#starting month and ending month of selected period are not the same
#it means that the accounting isn't based on periods of 1 month but on quarters
data_of_file += '\t\t<ns2:Quarter>%(quarter)s</ns2:Quarter>\n\t\t' % (file_data)
else:
data_of_file += '\t\t<ns2:Month>%(month)s</ns2:Month>\n\t\t' % (file_data)
data_of_file += '\t<ns2:Year>%(year)s</ns2:Year>' % (file_data)
data_of_file += '\n\t\t</ns2:Period>\n'
data_of_file += '\t\t<ns2:Data>\t'
cases_list = []
for item in tax_info:
if item['code'] == '91' and ending_month != 12:
#the tax code 91 can only be send for the declaration of December
continue
if item['code'] and item['sum_period']:
if item['code'] == 'VI':
if item['sum_period'] >= 0:
item['code'] = '71'
else:
item['code'] = '72'
if item['code'] in list_of_tags:
cases_list.append(item)
cases_list.sort()
for item in cases_list:
grid_amount_data = {
'code': str(int(item['code'])),
'amount': '%.2f' % abs(item['sum_period']),
}
            data_of_file += '\n\t\t\t<ns2:Amount GridNumber="%(code)s">%(amount)s</ns2:Amount>' % (grid_amount_data)
data_of_file += '\n\t\t</ns2:Data>'
data_of_file += '\n\t\t<ns2:ClientListingNihil>%(client_nihil)s</ns2:ClientListingNihil>' % (file_data)
data_of_file += '\n\t\t<ns2:Ask Restitution="%(ask_restitution)s" Payment="%(ask_payment)s"/>' % (file_data)
data_of_file += '\n\t\t<ns2:Comment>%(comments)s</ns2:Comment>' % (file_data)
data_of_file += '\n\t</ns2:VATDeclaration> \n</ns2:VATConsignment>'
model_data_ids = mod_obj.search(cr, uid,[('model','=','ir.ui.view'),('name','=','view_vat_save')], context=context)
resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
context = dict(context or {})
context['file_save'] = data_of_file
return {
'name': _('Save XML For Vat declaration'),
'context': context,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'l1on_be.vat.declaration',
'views': [(resource_id,'form')],
'view_id': 'view_vat_save',
'type': 'ir.actions.act_window',
'target': 'new',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Grumpy-Mike/Mikes-Pi-Bakery | LEGO_Boost/Part 1/distance_sensor.py | 1 | 1577 | from pylgbst.movehub import MoveHub, ColorDistanceSensor
import time
def callback(clr, distance):
if clr <= 10:
print("Colour number",clr,LED_COLORS[clr], " / Distance",distance)
else:
print("Color: %s / Distance: %s" % (clr, distance))
hub = MoveHub()
LED_COLORS = ['BLACK', '', '', 'BLUE', '', 'GREEN', '', 'YELLOW', '', 'RED', 'WHITE']
hub.color_distance_sensor.subscribe(callback, mode=ColorDistanceSensor.COLOR_DISTANCE_FLOAT)
time.sleep(60) # play with sensor while it waits
hub.color_distance_sensor.unsubscribe(callback)
'''
Subscription mode constants in class `ColorDistanceSensor` are:
- `COLOR_DISTANCE_FLOAT` - default mode, use `callback(color, distance)` where `distance` is float value in inches
- `COLOR_ONLY` - use `callback(color)`
- `DISTANCE_INCHES` - use `callback(color)` measures distance in integer inches count
- `COUNT_2INCH` - use `callback(count)` - it counts crossing distance ~2 inches in front of sensor
- `DISTANCE_HOW_CLOSE` - use `callback(value)` - value of 0 to 255 for 30 inches, larger with closer distance
- `DISTANCE_SUBINCH_HOW_CLOSE` - use `callback(value)` - value of 0 to 255 for 1 inch, larger with closer distance
- `LUMINOSITY` - use `callback(luminosity)` where `luminosity` is float value from 0 to 1
- `OFF1` and `OFF2` - seems to turn sensor LED and notifications off
- `STREAM_3_VALUES` - use `callback(val1, val2, val3)`, sends some values correlating to distance, not well understood at the moment
Tip: laser pointer pointing to sensor makes it to trigger distance sensor
'''
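# Illustrative variation (not part of the original script): the same sensor
# could instead be subscribed in one of the other modes listed above, e.g.
# counting ~2 inch crossings; the callback below is an assumption.
# def count_callback(count):
#     print("Crossings counted:", count)
# hub.color_distance_sensor.subscribe(count_callback,
#                                     mode=ColorDistanceSensor.COUNT_2INCH)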
| gpl-2.0 |
r0h4n/node-agent | tendrl/node_agent/provisioner/gluster/plugins/gdeploy.py | 3 | 6240 | import importlib
from tendrl.commons.utils import log_utils as logger
from tendrl.commons.utils.ssh import generate_key
from tendrl.node_agent.provisioner.gluster.provisioner_base import\
ProvisionerBasePlugin
try:
from python_gdeploy.actions import configure_gluster_firewall # noqa
from python_gdeploy.actions import configure_gluster_service
from python_gdeploy.actions import create_cluster
from python_gdeploy.actions import install_gluster
from python_gdeploy.actions import remove_host
except ImportError:
logger.log(
"debug",
NS.publisher_id,
{"message": "python-gdeploy is not installed in this node"},
integration_id=NS.tendrl_context.integration_id
)
class GdeployPlugin(ProvisionerBasePlugin):
def _reload_modules(self):
globals()['install_gluster'] = importlib.import_module(
'python_gdeploy.actions.install_gluster')
globals()['configure_gluster_service'] = importlib.import_module(
'python_gdeploy.actions.configure_gluster_service')
globals()['configure_gluster_firewall'] = importlib.import_module(
'python_gdeploy.actions.configure_gluster_firewall')
globals()['create_cluster'] = importlib.import_module(
'python_gdeploy.actions.create_cluster')
globals()['remove_host'] = importlib.import_module(
'python_gdeploy.actions.remove_host')
def setup_gluster_node(self, hosts, packages=None, repo=None):
self._reload_modules()
out, err, rc = install_gluster.install_gluster_packages(
hosts,
packages,
repo
)
if rc == 0:
logger.log(
"info",
NS.publisher_id,
{"message": "gluster packages installed successfully"},
integration_id=NS.tendrl_context.integration_id
)
else:
logger.log(
"error",
NS.publisher_id,
{"message": "Error while installing glusterfs packages"
". Details: %s" % str(out)},
integration_id=NS.tendrl_context.integration_id
)
return False
out, err, rc = configure_gluster_service.configure_gluster_service(
hosts
)
if rc == 0:
logger.log(
"debug",
NS.publisher_id,
{"message": "glusterd service started successfully"},
integration_id=NS.tendrl_context.integration_id
)
else:
logger.log(
"debug",
NS.publisher_id,
{"message": "Error while starting glusterd service"
". Details: %s" % str(out)},
integration_id=NS.tendrl_context.integration_id
)
return False
# TODO(team) fix firewall configuration, after we have a clear idea of
# firewall settings needed for tendrl. Till then this section will be
# commented
# out, err, rc = configure_gluster_firewall.configure_gluster_firewall(
# hosts
# )
# if rc == 0:
# logger.log(
# "info",
# NS.publisher_id,
# {"message": "gluster firewall configured successfully"},
# integration_id=NS.tendrl_context.integration_id
# )
# else:
# logger.log(
# "error",
# NS.publisher_id,
# {"message": "Error while configuring gluster firewall"
# ". Details: %s" % str(out)},
# integration_id=NS.tendrl_context.integration_id
# )
# return False
return True
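    # Illustrative call (host names and package list are assumptions):
    #   plugin = GdeployPlugin()
    #   plugin.setup_gluster_node(['node1.example.com', 'node2.example.com'],
    #                             packages=['glusterfs-server'])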
def create_gluster_cluster(self, hosts):
self._reload_modules()
out, err, rc = create_cluster.create_cluster(
hosts
)
if rc == 0:
logger.log(
"info",
NS.publisher_id,
{"message": "gluster cluster created successfully"},
integration_id=NS.tendrl_context.integration_id
)
else:
logger.log(
"debug",
NS.publisher_id,
{"message": "Error while creating gluster cluster"
". Details: %s" % str(out)},
integration_id=NS.tendrl_context.integration_id
)
return False
return True
def expand_gluster_cluster(self, host):
self._reload_modules()
current_host = NS.node_context.fqdn
out, err, rc = create_cluster.create_cluster(
[current_host, host]
)
if rc == 0:
logger.log(
"info",
NS.publisher_id,
{"message": "gluster cluster expandeded successfully"},
integration_id=NS.tendrl_context.integration_id
)
else:
logger.log(
"debug",
NS.publisher_id,
{"message": "Error while expanding gluster cluster"
". Details: %s" % str(out)},
integration_id=NS.tendrl_context.integration_id
)
return False
return True
def shrink_gluster_cluster(self, host):
self._reload_modules()
current_host = NS.node_context.fqdn
out, err, rc = remove_host.remove_host(
[current_host, host]
)
if rc == 0:
logger.log(
"info",
NS.publisher_id,
{"message": "gluster cluster shrinked successfully"},
integration_id=NS.tendrl_context.integration_id
)
else:
logger.log(
"debug",
NS.publisher_id,
{"message": "Error while shrinking gluster cluster"
". Details: %s" % str(out)},
integration_id=NS.tendrl_context.integration_id
)
return False
return True
def setup(self):
result, err = generate_key.GenerateKey().run()
return result, err
| lgpl-2.1 |
ATIX-AG/ansible | test/units/plugins/action/test_synchronize.py | 32 | 8106 | '''
(Epdb) pprint(DeepDiff(self.final_task_vars, out_task_vars), indent=2)
{ 'dic_item_added': set([u"root['ansible_python_interpreter']"]),
'dic_item_removed': set([ u"root['hostvars']['127.0.0.1']",
u"root['hostvars']['::1']",
u"root['hostvars']['localhost']"]),
'iterable_item_added': { u"root['hostvars']['el6host']['groups']['all'][1]": u'::1',
u"root['hostvars']['el6host']['groups']['ungrouped'][1]": u'::1',
u"root['vars']['hostvars']['el6host']['groups']['all'][1]": u'::1',
u"root['vars']['hostvars']['el6host']['groups']['ungrouped'][1]": u'::1'}}
'''
import json
import os
import sys
import unittest
import yaml
from pprint import pprint
import ansible.plugins
from ansible.compat.tests.mock import patch, MagicMock
from ansible.plugins.action.synchronize import ActionModule
# Getting the incoming and outgoing task vars from the plugin's run method
'''
import copy
safe_vars = {}
for k,v in task_vars.items():
if k not in ['vars', 'hostvars']:
safe_vars[k] = copy.deepcopy(v)
else:
sdata = str(v)
newv = eval(sdata)
safe_vars[k] = newv
import json
with open('task_vars.json', 'wb') as f:
f.write(json.dumps(safe_vars, indent=2))
'''
class TaskMock(object):
args = {'src': u'/tmp/deleteme',
'dest': '/tmp/deleteme',
'rsync_path': 'rsync'}
async_val = None
become = None
become_user = None
become_method = None
class StdinMock(object):
shell = None
class ConnectionMock(object):
ismock = True
_play_context = None
# transport = 'ssh'
transport = None
_new_stdin = StdinMock()
# my shell
_shell = MagicMock()
_shell.mkdtemp.return_value = 'mkdir command'
_shell.join_path.side_effect = os.path.join
class PlayContextMock(object):
shell = None
private_key_file = None
become = False
become_user = 'root'
become_method = None
check_mode = False
no_log = None
diff = None
remote_addr = None
remote_user = None
password = None
class ModuleLoaderMock(object):
def find_plugin(self, module_name, mod_type):
pass
class SharedLoaderMock(object):
module_loader = ModuleLoaderMock()
class SynchronizeTester(object):
''' A wrapper for mocking out synchronize environments '''
task = TaskMock()
connection = ConnectionMock()
_play_context = PlayContextMock()
loader = None
templar = None
shared_loader_obj = SharedLoaderMock()
final_task_vars = None
execute_called = False
def _execute_module(self, module_name, module_args=None, task_vars=None):
self.execute_called = True
self.final_module_args = module_args
self.final_task_vars = task_vars
return {}
def runtest(self, fixturepath='fixtures/synchronize/basic'):
metapath = os.path.join(fixturepath, 'meta.yaml')
with open(metapath, 'rb') as f:
fdata = f.read()
test_meta = yaml.load(fdata)
# load initial play context vars
if '_play_context' in test_meta:
if test_meta['_play_context']:
self.task.args = {}
for (k, v) in test_meta['_play_context'].items():
if v == 'None':
v = None
setattr(self._play_context, k, v)
# load initial task context vars
if '_task' in test_meta:
if test_meta['_task']:
self.task.args = {}
for (k, v) in test_meta['_task'].items():
# import epdb; epdb.st()
if v == 'None':
v = None
setattr(self.task, k, v)
# load initial task vars
if 'task_args' in test_meta:
if test_meta['task_args']:
self.task.args = {}
for (k, v) in test_meta['task_args'].items():
self.task.args[k] = v
# load initial task vars
invarspath = os.path.join(fixturepath, test_meta.get('fixtures', {}).get('taskvars_in', 'taskvars_in.json'))
with open(invarspath, 'rb') as f:
fdata = f.read()
fdata = fdata.decode("utf-8")
in_task_vars = json.loads(fdata)
# load expected final task vars
outvarspath = os.path.join(fixturepath, test_meta.get('fixtures', {}).get('taskvars_out', 'taskvars_out.json'))
with open(outvarspath, 'rb') as f:
fdata = f.read()
fdata = fdata.decode("utf-8")
out_task_vars = json.loads(fdata)
# fixup the connection
for (k, v) in test_meta['connection'].items():
setattr(self.connection, k, v)
# fixup the hostvars
if test_meta['hostvars']:
for (k, v) in test_meta['hostvars'].items():
in_task_vars['hostvars'][k] = v
# initialize and run the module
SAM = ActionModule(self.task, self.connection, self._play_context,
self.loader, self.templar, self.shared_loader_obj)
SAM._execute_module = self._execute_module
result = SAM.run(task_vars=in_task_vars)
# run assertions
for check in test_meta['asserts']:
value = eval(check)
# if not value:
# print(check, value)
# import epdb; epdb.st()
assert value, check
class FakePluginLoader(object):
mocked = True
@staticmethod
def get(transport, play_context, new_stdin):
conn = ConnectionMock()
conn.transport = transport
conn._play_context = play_context
conn._new_stdin = new_stdin
return conn
class TestSynchronizeAction(unittest.TestCase):
fixturedir = os.path.dirname(__file__)
fixturedir = os.path.join(fixturedir, 'fixtures', 'synchronize')
# print(basedir)
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_basic(self):
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic'))
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_basic_become(self):
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic_become'))
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_basic_become_cli(self):
# --become on the cli sets _play_context.become
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic_become_cli'))
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_basic_vagrant(self):
# simple vagrant example
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic_vagrant'))
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_basic_vagrant_sudo(self):
# vagrant plus sudo
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic_vagrant_sudo'))
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_basic_vagrant_become_cli(self):
# vagrant plus sudo
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'basic_vagrant_become_cli'))
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_delegate_remote(self):
# delegate to other remote host
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'delegate_remote'))
@patch('ansible.plugins.action.synchronize.connection_loader', FakePluginLoader)
def test_delegate_remote_su(self):
# delegate to other remote host with su enabled
x = SynchronizeTester()
x.runtest(fixturepath=os.path.join(self.fixturedir, 'delegate_remote_su'))
| gpl-3.0 |
pemiu01/ionicons | builder/generate.py | 357 | 9438 | from subprocess import call
import os
import json
BUILDER_PATH = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.join(BUILDER_PATH, '..')
FONTS_FOLDER_PATH = os.path.join(ROOT_PATH, 'fonts')
CSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'css')
SCSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'scss')
LESS_FOLDER_PATH = os.path.join(ROOT_PATH, 'less')
def main():
generate_font_files()
data = get_build_data()
rename_svg_glyph_names(data)
generate_scss(data)
generate_less(data)
generate_cheatsheet(data)
generate_component_json(data)
generate_composer_json(data)
generate_bower_json(data)
def generate_font_files():
print "Generate Fonts"
cmd = "fontforge -script %s/scripts/generate_font.py" % (BUILDER_PATH)
call(cmd, shell=True)
def rename_svg_glyph_names(data):
# hacky and slow (but safe) way to rename glyph-name attributes
svg_path = os.path.join(FONTS_FOLDER_PATH, 'ionicons.svg')
svg_file = open(svg_path, 'r+')
svg_text = svg_file.read()
svg_file.seek(0)
for ionicon in data['icons']:
# uniF2CA
org_name = 'uni%s' % (ionicon['code'].replace('0x', '').upper())
ion_name = 'ion-%s' % (ionicon['name'])
svg_text = svg_text.replace(org_name, ion_name)
svg_file.write(svg_text)
svg_file.close()
def generate_less(data):
print "Generate LESS"
font_name = data['name']
font_version = data['version']
css_prefix = data['prefix']
variables_file_path = os.path.join(LESS_FOLDER_PATH, '_ionicons-variables.less')
icons_file_path = os.path.join(LESS_FOLDER_PATH, '_ionicons-icons.less')
d = []
d.append('/*!');
d.append('Ionicons, v%s' % (font_version) );
d.append('Created by Ben Sperry for the Ionic Framework, http://ionicons.com/');
d.append('https://twitter.com/benjsperry https://twitter.com/ionicframework');
d.append('MIT License: https://github.com/driftyco/ionicons');
d.append('*/');
d.append('// Ionicons Variables')
d.append('// --------------------------\n')
d.append('@ionicons-font-path: "../fonts";')
d.append('@ionicons-font-family: "%s";' % (font_name) )
d.append('@ionicons-version: "%s";' % (font_version) )
d.append('@ionicons-prefix: %s;' % (css_prefix) )
d.append('')
for ionicon in data['icons']:
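        # e.g. code "0xf101" becomes the CSS escape "\f101"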
chr_code = ionicon['code'].replace('0x', '\\')
d.append('@ionicon-var-%s: "%s";' % (ionicon['name'], chr_code) )
f = open(variables_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
d = []
d.append('// Ionicons Icons')
d.append('// --------------------------\n')
group = [ '.%s' % (data['name'].lower()) ]
for ionicon in data['icons']:
group.append('.@{ionicons-prefix}%s:before' % (ionicon['name']) )
d.append( ',\n'.join(group) )
d.append('{')
d.append(' &:extend(.ion);')
d.append('}')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('.@{ionicons-prefix}%s:before { content: @ionicon-var-%s; }' % (ionicon['name'], ionicon['name']) )
f = open(icons_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
def generate_scss(data):
print "Generate SCSS"
font_name = data['name']
font_version = data['version']
css_prefix = data['prefix']
variables_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-variables.scss')
icons_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-icons.scss')
d = []
d.append('// Ionicons Variables')
d.append('// --------------------------\n')
d.append('$ionicons-font-path: "../fonts" !default;')
d.append('$ionicons-font-family: "%s" !default;' % (font_name) )
d.append('$ionicons-version: "%s" !default;' % (font_version) )
d.append('$ionicons-prefix: %s !default;' % (css_prefix) )
d.append('')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('$ionicon-var-%s: "%s";' % (ionicon['name'], chr_code) )
f = open(variables_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
d = []
d.append('// Ionicons Icons')
d.append('// --------------------------\n')
group = [ '.%s' % (data['name'].lower()) ]
for ionicon in data['icons']:
group.append('.#{$ionicons-prefix}%s:before' % (ionicon['name']) )
d.append( ',\n'.join(group) )
d.append('{')
d.append(' @extend .ion;')
d.append('}')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('.#{$ionicons-prefix}%s:before { content: $ionicon-var-%s; }' % (ionicon['name'], ionicon['name']) )
f = open(icons_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
generate_css_from_scss(data)
def generate_css_from_scss(data):
print "Generate CSS From SCSS"
scss_file_path = os.path.join(SCSS_FOLDER_PATH, 'ionicons.scss')
css_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.css')
css_min_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.min.css')
cmd = "sass %s %s --style compact" % (scss_file_path, css_file_path)
call(cmd, shell=True)
print "Generate Minified CSS From SCSS"
cmd = "sass %s %s --style compressed" % (scss_file_path, css_min_file_path)
call(cmd, shell=True)
def generate_cheatsheet(data):
print "Generate Cheatsheet"
cheatsheet_file_path = os.path.join(ROOT_PATH, 'cheatsheet.html')
template_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'template.html')
icon_row_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'icon-row.html')
f = open(template_path, 'r')
template_html = f.read()
f.close()
f = open(icon_row_path, 'r')
icon_row_template = f.read()
f.close()
content = []
for ionicon in data['icons']:
css_code = ionicon['code'].replace('0x', '\\')
        escaped_html_code = ionicon['code'].replace('0x', '&amp;#x') + ';'
html_code = ionicon['code'].replace('0x', '&#x') + ';'
item_row = icon_row_template
item_row = item_row.replace('{{name}}', ionicon['name'])
item_row = item_row.replace('{{prefix}}', data['prefix'])
item_row = item_row.replace('{{css_code}}', css_code)
item_row = item_row.replace('{{escaped_html_code}}', escaped_html_code)
item_row = item_row.replace('{{html_code}}', html_code)
content.append(item_row)
template_html = template_html.replace("{{font_name}}", data["name"])
template_html = template_html.replace("{{font_version}}", data["version"])
template_html = template_html.replace("{{icon_count}}", str(len(data["icons"])) )
template_html = template_html.replace("{{content}}", '\n'.join(content) )
f = open(cheatsheet_file_path, 'w')
f.write(template_html)
f.close()
def generate_component_json(data):
print "Generate component.json"
d = {
"name": data['name'],
"repo": "driftyco/ionicons",
"description": "The premium icon font for Ionic Framework.",
"version": data['version'],
"keywords": [],
"dependencies": {},
"development": {},
"license": "MIT",
"styles": [
"css/%s.css" % (data['name'].lower())
],
"fonts": [
"fonts/%s.eot" % (data['name'].lower()),
"fonts/%s.svg" % (data['name'].lower()),
"fonts/%s.ttf" % (data['name'].lower()),
"fonts/%s.woff" % (data['name'].lower())
]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
component_file_path = os.path.join(ROOT_PATH, 'component.json')
f = open(component_file_path, 'w')
f.write(txt)
f.close()
def generate_composer_json(data):
print "Generate composer.json"
d = {
"name": "driftyco/ionicons",
"description": "The premium icon font for Ionic Framework.",
"keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
"homepage": "http://ionicons.com/",
"authors": [
{
"name": "Ben Sperry",
"email": "[email protected]",
"role": "Designer",
"homepage": "https://twitter.com/benjsperry"
},
{
"name": "Adam Bradley",
"email": "[email protected]",
"role": "Developer",
"homepage": "https://twitter.com/adamdbradley"
},
{
"name": "Max Lynch",
"email": "[email protected]",
"role": "Developer",
"homepage": "https://twitter.com/maxlynch"
}
],
"extra": {},
"license": [ "MIT" ]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
composer_file_path = os.path.join(ROOT_PATH, 'composer.json')
f = open(composer_file_path, 'w')
f.write(txt)
f.close()
def generate_bower_json(data):
print "Generate bower.json"
d = {
"name": data['name'],
"version": data['version'],
"homepage": "https://github.com/driftyco/ionicons",
"authors": [
"Ben Sperry <[email protected]>",
"Adam Bradley <[email protected]>",
"Max Lynch <[email protected]>"
],
"description": "Ionicons - free and beautiful icons from the creators of Ionic Framework",
"main": [
"css/%s.css" % (data['name'].lower()),
"fonts/*"
],
"keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
"license": "MIT",
"ignore": [
"**/.*",
"builder",
"node_modules",
"bower_components",
"test",
"tests"
]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
bower_file_path = os.path.join(ROOT_PATH, 'bower.json')
f = open(bower_file_path, 'w')
f.write(txt)
f.close()
def get_build_data():
build_data_path = os.path.join(BUILDER_PATH, 'build_data.json')
f = open(build_data_path, 'r')
data = json.loads(f.read())
f.close()
return data
if __name__ == "__main__":
main()
| mit |
WSDC-NITWarangal/django | django/conf/locale/fi/formats.py | 504 | 1390 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. E Y'
TIME_FORMAT = 'G.i'
DATETIME_FORMAT = r'j. E Y \k\e\l\l\o G.i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.n.Y'
SHORT_DATETIME_FORMAT = 'j.n.Y G.i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', # '20.3.2014'
'%d.%m.%y', # '20.3.14'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H.%M.%S', # '20.3.2014 14.30.59'
'%d.%m.%Y %H.%M.%S.%f', # '20.3.2014 14.30.59.000200'
'%d.%m.%Y %H.%M', # '20.3.2014 14.30'
'%d.%m.%Y', # '20.3.2014'
'%d.%m.%y %H.%M.%S', # '20.3.14 14.30.59'
'%d.%m.%y %H.%M.%S.%f', # '20.3.14 14.30.59.000200'
'%d.%m.%y %H.%M', # '20.3.14 14.30'
'%d.%m.%y', # '20.3.14'
]
TIME_INPUT_FORMATS = [
'%H.%M.%S', # '14.30.59'
'%H.%M.%S.%f', # '14.30.59.000200'
'%H.%M', # '14.30'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # Non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
alexanderturner/ansible | lib/ansible/modules/network/vyos/vyos_command.py | 5 | 7152 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'version': '1.0',
}
DOCUMENTATION = """
---
module: vyos_command
version_added: "2.2"
author: "Nathaniel Case (@qalthos)"
short_description: Run one or more commands on VyOS devices
description:
- The command module allows running one or more commands on remote
devices running VyOS. This module can also be introspected
to validate key parameters before returning successfully. If the
conditional statements are not met in the wait period, the task
fails.
- Certain C(show) commands in VyOS produce many lines of output and
use a custom pager that can cause this module to hang. If the
value of the environment variable C(ANSIBLE_VYOS_TERMINAL_LENGTH)
is not set, the default number of 10000 is used.
options:
commands:
description:
- The ordered set of commands to execute on the remote device
running VyOS. The output from the command execution is
returned to the playbook. If the I(wait_for) argument is
provided, the module is not returned until the condition is
satisfied or the number of retries has been exceeded.
required: true
wait_for:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
before moving forward. If the conditional is not true
by the configured I(retries), the task fails. See examples.
required: false
default: null
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
choices: ['any', 'all']
retries:
description:
- Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the I(wait_for)
conditionals.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between I(retries)
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
notes:
- Running C(show system boot-messages all) will cause the module to hang since
VyOS is using a custom pager setting to display the output of that command.
"""
EXAMPLES = """
tasks:
- name: show configuration on ethernet devices eth0 and eth1
vyos_command:
commands:
- show interfaces ethernet {{ item }}
with_items:
- eth0
- eth1
- name: run multiple commands and check if version output contains specific version string
vyos_command:
commands:
- show version
- show hardware cpu
wait_for:
- "result[0] contains 'VyOS 1.1.7'"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcli import Conditional
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.six import string_types
from ansible.module_utils.vyos import run_commands
from ansible.module_utils.vyos import vyos_argument_spec, check_args
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
response=dict(),
), module)
commands = command(module.params['commands'])
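    # Only read-only 'show' commands are safe in check mode; warn about anything else.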
for index, cmd in enumerate(commands):
if module.check_mode and not cmd['command'].startswith('show'):
warnings.append('only show commands are supported when using '
'check mode, not executing `%s`' % cmd['command'])
commands[index] = module.jsonify(cmd)
return commands
def main():
spec = dict(
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
spec.update(vyos_argument_spec)
module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
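    # Re-run the commands until the wait_for conditionals are satisfied
    # (all of them, or any single one when match == 'any'), up to `retries` attempts.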
for _ in range(retries):
responses = run_commands(module, commands)
for item in conditionals:
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
        module.fail_json(msg=msg, failed_conditions=failed_conditions)
result = {
'changed': False,
'stdout': responses,
'warnings': warnings,
'stdout_lines': list(to_lines(responses)),
}
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
1zha0/gwibber-with-fanfou-protocol | gwibber/microblog/util/const.py | 2 | 7814 | import xdg.BaseDirectory
from os.path import join, isdir, realpath
from os import mkdir, environ
CACHE_DIR = realpath(join(xdg.BaseDirectory.xdg_cache_home, "gwibber"))
if not isdir(CACHE_DIR):
mkdir(CACHE_DIR)
from os import environ
if environ.has_key("FB_APP_KEY"):
FB_APP_KEY = environ["FB_APP_KEY"]
else:
FB_APP_KEY = "71b85c6d8cb5bbb9f1a3f8bbdcdd4b05"
TWITTER_OAUTH_KEY = "VDOuA5qCJ1XhjaSa4pl76g"
TWITTER_OAUTH_SECRET = "BqHlB8sMz5FhZmmFimwgiIdB0RiBr72Y0bio49IVJM"
# Gwibber
MAX_MESSAGE_LENGTH = 140
MAX_MESSAGE_COUNT = 20000
cache_dir = realpath(join(xdg.BaseDirectory.xdg_cache_home, "gwibber"))
config_dir = realpath(join(xdg.BaseDirectory.xdg_config_home, "gwibber"))
SQLITE_DB_FILENAME = "%s/gwibber.sqlite" % config_dir
if not isdir(cache_dir): mkdir(cache_dir)
if not isdir(config_dir): mkdir(config_dir)
GWIBBER_TEST_DUMP = None
if environ.has_key("GWIBBER_TEST_DUMP"):
GWIBBER_TEST_DUMP = environ["GWIBBER_TEST_DUMP"]
DEFAULT_SETTINGS = {
"interval": 15,
"view": "SingleStreamUi",
"streams": '[{"stream": "messages", "account": null}]',
"show_notifications": True,
"notify_mentions_only": True,
"presence_check": True,
"show_fullname": True,
"shorten_urls": True,
"urlshorter": "is.gd",
"reply_append_colon": True,
"retweet_style": "recycle",
"global_retweet": False,
"theme": "default",
"window_size": (500, 580),
"window_position": (0, 24),
"window_splitter": 450,
"sidebar_splitter": 40,
"minimize_to_tray": False,
"hide_taskbar_entry": False,
"show_tray_icon": True,
}
RETWEET_FORMATS = {
"via": "{text} (via @{nick})",
"RT": "RT @{nick}: {text}",
"RD": "RD @{nick}: {text}",
"/via": "{text} /via @{nick}",
"/by": "{text} /by @{nick}",
"recycle": u"\u267a @{nick}: {text}",
"service": "{R} @{nick}: {text}",
}
VERSION_NUMBER = "3.0.0"
GCONF_CLIENT_DIR = "/apps/gwibber/client/"
BUG_URL = "https://bugs.launchpad.net/gwibber/+filebug"
QUESTIONS_URL = "https://answers.launchpad.net/gwibber"
TRANSLATE_URL = "https://translations.launchpad.net/gwibber"
# Setup some Network Manager stuff to query for online state
NM_DBUS_SERVICE = "org.freedesktop.NetworkManager"
NM_DBUS_OBJECT_PATH = "/org/freedesktop/NetworkManager"
NM_DBUS_INTERFACE = "org.freedesktop.NetworkManager"
NM_STATE_UNKNOWN = 0
NM_STATE_ASLEEP = 1
NM_STATE_CONNECTING = 2
NM_STATE_CONNECTED = 3
NM_STATE_DISCONNECTED = 4
GWIBBER_OPERATIONS = """
{
"delete": {
"account_tree": false,
"dynamic": false,
"enabled": null,
"first_only": false,
"function": null,
"return_value": false,
"search": false,
"stream": null,
"transient": false
},
"favorites": {
"account_tree": true,
"dynamic": true,
"enabled": "receive",
"first_only": false,
"function": null,
"return_value": true,
"search": false,
"stream": "favorites",
"transient": false
},
"group": {
"account_tree": false,
"dynamic": false,
"enabled": "receive",
"first_only": false,
"function": null,
"return_value": true,
"search": false,
"stream": "group",
"transient": true
},
"like": {
"account_tree": false,
"dynamic": false,
"enabled": null,
"first_only": false,
"function": null,
"return_value": false,
"search": false,
"stream": null,
"transient": false
},
"lists": {
"account_tree": true,
"dynamic": false,
"enabled": "receive",
"first_only": false,
"function": null,
"return_value": true,
"search": false,
"stream": "lists",
"transient": true,
"interval": 20
},
"list": {
"account_tree": true,
"dynamic": false,
"enabled": "receive",
"first_only": false,
"function": null,
"return_value": true,
"search": false,
"stream": "list",
"transient": true
},
"private": {
"account_tree": true,
"dynamic": false,
"enabled": "receive",
"first_only": false,
"function": null,
"return_value": true,
"search": false,
"stream": "private",
"transient": false
},
"public": {
"account_tree": true,
"dynamic": true,
"enabled": "receive",
"first_only": false,
"function": null,
"return_value": true,
"search": false,
"stream": "public",
"transient": false
},
"receive": {
"account_tree": true,
"dynamic": false,
"enabled": "receive",
"first_only": false,
"function": null,
"return_value": true,
"search": false,
"stream": "messages",
"transient": false
},
"images": {
"account_tree": true,
"dynamic": false,
"enabled": "receive",
"first_only": false,
"function": null,
"return_value": true,
"search": false,
"stream": "images",
"transient": false
},
"links": {
"account_tree": true,
"dynamic": false,
"enabled": "receive",
"first_only": false,
"function": null,
"return_value": true,
"search": false,
"stream": "links",
"transient": false
},
"videos": {
"account_tree": true,
"dynamic": false,
"enabled": "receive",
"first_only": false,
"function": null,
"return_value": true,
"search": false,
"stream": "videos",
"transient": false
},
"reply": {
"account_tree": false,
"dynamic": false,
"enabled": null,
"first_only": true,
"function": "send",
"return_value": false,
"search": false,
"stream": null,
"transient": false
},
"responses": {
"account_tree": true,
"dynamic": false,
"enabled": "receive",
"first_only": false,
"function": null,
"return_value": true,
"search": false,
"stream": "replies",
"transient": false
},
"retweet": {
"account_tree": false,
"dynamic": false,
"enabled": null,
"first_only": false,
"function": null,
"return_value": false,
"search": false,
"stream": null,
"transient": false
},
"search": {
"account_tree": false,
"dynamic": false,
"enabled": "search",
"first_only": false,
"function": null,
"return_value": true,
"search": true,
"stream": "search",
"transient": true
},
"search_url": {
"account_tree": false,
"dynamic": false,
"enabled": "search",
"first_only": false,
"function": null,
"return_value": true,
"search": true,
"stream": "search",
"transient": true
},
"send": {
"account_tree": false,
"dynamic": false,
"enabled": "send",
"first_only": false,
"function": null,
"return_value": true,
"search": false,
"stream": "messages",
"transient": false
},
"send_thread": {
"account_tree": false,
"dynamic": false,
"enabled": "send",
"first_only": false,
"function": null,
"return_value": false,
"search": false,
"stream": null,
"transient": false
},
"send_private": {
"account_tree": false,
"dynamic": false,
"enabled": "send",
"first_only": false,
"function": null,
"return_value": false,
"search": false,
"stream": null,
"transient": false
},
"tag": {
"account_tree": false,
"dynamic": false,
"enabled": null,
"first_only": false,
"function": null,
"return_value": true,
"search": false,
"stream": null,
"transient": false
},
"thread": {
"account_tree": false,
"dynamic": false,
"enabled": "receive",
"first_only": false,
"function": null,
"return_value": true,
"search": false,
"stream": "thread",
"transient": true
},
"user_messages": {
"account_tree": false,
"dynamic": false,
"enabled": "receive",
"first_only": false,
"function": null,
"return_value": true,
"search": false,
"stream": "user",
"transient": true
}
}
"""
| gpl-2.0 |
unnamed-idea/ardupilot | Tools/autotest/pysim/fg_display.py | 229 | 1919 | #!/usr/bin/env python
import socket, struct, time, math, errno, sys
from pymavlink import fgFDM
class udp_socket(object):
'''a UDP socket'''
def __init__(self, device, blocking=True, input=True):
a = device.split(':')
if len(a) != 2:
print("UDP ports must be specified as host:port")
sys.exit(1)
self.port = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if input:
self.port.bind((a[0], int(a[1])))
self.destination_addr = None
else:
self.destination_addr = (a[0], int(a[1]))
if not blocking:
self.port.setblocking(0)
self.last_address = None
def recv(self,n=1000):
try:
data, self.last_address = self.port.recvfrom(n)
except socket.error as e:
if e.errno in [ errno.EAGAIN, errno.EWOULDBLOCK ]:
return ""
raise
return data
def write(self, buf):
try:
if self.destination_addr:
self.port.sendto(buf, self.destination_addr)
else:
                self.port.sendto(buf, self.last_address)
except socket.error:
pass
def ft2m(x):
return x * 0.3048
def m2ft(x):
return x / 0.3048
def kt2mps(x):
return x * 0.514444444
def mps2kt(x):
return x / 0.514444444
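# Relay FlightGear FDM packets: listen on 127.0.0.1:5123, re-send them to
# 127.0.0.1:5124, and print the packet rate and current position once per second.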
udp = udp_socket("127.0.0.1:5123")
fgout = udp_socket("127.0.0.1:5124", input=False)
tlast = time.time()
count = 0
fg = fgFDM.fgFDM()
while True:
buf = udp.recv(1000)
fg.parse(buf)
fgout.write(fg.pack())
count += 1
if time.time() - tlast > 1.0:
print("%u FPS len=%u" % (count, len(buf)))
count = 0
tlast = time.time()
print(fg.get('latitude', units='degrees'),
fg.get('longitude', units='degrees'),
fg.get('altitude', units='meters'),
fg.get('vcas', units='mps'))
| gpl-3.0 |
popazerty/test-gui | lib/python/Components/GUIComponent.py | 57 | 2512 | import skin
from enigma import ePoint, eSize
class GUIComponent(object):
""" GUI component """
def __init__(self):
self.instance = None
self.onVisibilityChange = [ ]
self.__visible = 0
self.visible = 1
self.skinAttributes = None
self.deprecationInfo = None
def execBegin(self):
pass
def execEnd(self):
pass
def onShow(self):
pass
def onHide(self):
pass
def destroy(self):
self.__dict__.clear()
# this works only with normal widgets - if you don't have self.instance, override this.
def applySkin(self, desktop, parent):
if not self.visible:
self.instance.hide()
if self.skinAttributes is None:
return False
skin.applyAllAttributes(self.instance, desktop, self.skinAttributes, parent.scale)
return True
def move(self, x, y = None):
# we assume, that x is already an ePoint
if y is None:
self.instance.move(x)
else:
self.instance.move(ePoint(int(x), int(y)))
def resize(self, x, y = None):
self.width = x
self.height = y
if y is None:
self.instance.resize(x)
else:
self.instance.resize(eSize(int(x), int(y)))
def setZPosition(self, z):
self.instance.setZPosition(z)
def show(self):
old = self.__visible
self.__visible = 1
if self.instance is not None:
self.instance.show()
if old != self.__visible:
for fnc in self.onVisibilityChange:
fnc(True)
def hide(self):
old = self.__visible
self.__visible = 0
if self.instance is not None:
self.instance.hide()
if old != self.__visible:
for fnc in self.onVisibilityChange:
fnc(False)
def getVisible(self):
return self.__visible
def setVisible(self, visible):
if visible:
self.show()
else:
self.hide()
visible = property(getVisible, setVisible)
def setPosition(self, x, y):
self.instance.move(ePoint(int(x), int(y)))
def getPosition(self):
p = self.instance.position()
return (p.x(), p.y())
def getWidth(self):
return self.width
def getHeight(self):
return self.height
position = property(getPosition, setPosition)
# default implementation for only one widget per component
# feel free to override!
def GUIcreate(self, parent):
self.instance = self.createWidget(parent)
self.postWidgetCreate(self.instance)
def GUIdelete(self):
self.preWidgetRemove(self.instance)
self.instance = None
# default for argumentless widget constructor
def createWidget(self, parent):
return self.GUI_WIDGET(parent)
def postWidgetCreate(self, instance):
pass
def preWidgetRemove(self, instance):
pass
| gpl-2.0 |
sanguinariojoe/FreeCAD | src/3rdParty/libkdtree/python-bindings/py-kdtree_test.py | 24 | 13015 | #
# $Id: py-kdtree_test.py 2268 2008-08-20 10:08:58Z richert $
#
import sys
import unittest
from kdtree import KDTree_2Int, KDTree_4Int, KDTree_3Float, KDTree_4Float, KDTree_6Float
try:
long
except NameError:
long = int
class KDTree_2IntTestCase(unittest.TestCase):
def test_empty(self):
nn = KDTree_2Int()
self.assertEqual(0, nn.size())
actual = nn.find_nearest((2,3))
self.assertTrue(None==actual, "%s != %s"%(str(None), str(actual)))
def test_get_all(self):
nn = KDTree_2Int()
o1 = object()
nn.add(((1,1), id(o1)))
o2 = object()
nn.add(((10,10), id(o2)))
o3 = object()
nn.add(((11,11), id(o3)))
self.assertEqual([((1,1), id(o1)), ((10,10), id(o2)), ((11,11), id(o3))], nn.get_all())
self.assertEqual(3, len(nn))
nn.remove(((10,10), id(o2)))
self.assertEqual(2, len(nn))
self.assertEqual([((1,1), id(o1)), ((11,11), id(o3))], nn.get_all())
def test_nearest(self):
nn = KDTree_2Int()
nn_id = {}
o1 = object()
nn.add(((1,1), id(o1)))
nn_id[id(o1)] = o1
o2 = object()
nn.add(((10,10), id(o2)))
nn_id[id(o2)] = o2
expected = o1
actual = nn.find_nearest((2,2))[1]
self.assertTrue(expected==nn_id[actual], "%s != %s"%(str(expected), str(nn_id[actual])))
expected = o2
actual = nn.find_nearest((6, 6))[1]
self.assertTrue(expected==nn_id[actual], "%s != %s"%(str(expected), str(nn_id[actual])))
def test_find_within_range(self):
nn = KDTree_6Float()
nn_id = {}
o1 = object()
nn.add(((1,1,0,0,0,0), id(o1)))
nn_id[id(o1)] = o1
o2 = object()
nn.add(((10,10,0,0,0,0), id(o2)))
nn_id[id(o2)] = o2
o3 = object()
nn.add(((4.1, 4.1,0,0,0,0), id(o3)))
nn_id[id(o3)] = o3
expected = set([long(id(o1)), long(id(o3))])
actual = set([ident
for _coord, ident
in nn.find_within_range((2.1,2.1,0,0,0,0), 3.9)])
self.assertTrue(expected==actual, "%s != %s"%(str(expected), str(actual)))
def test_remove(self):
class C:
def __init__(self, i):
self.i = i
self.next = None
nn = KDTree_2Int()
k1, o1 = (1,1), C(7)
self.assertFalse(nn.remove((k1, id(o1))), "This cannot be removed!")
nn.add((k1, id(o1)))
k2, o2 = (1,1), C(7)
nn.add((k2, id(o2)))
self.assertEqual(2, nn.size())
self.assertTrue(nn.remove((k2, id(o2))))
self.assertEqual(1, nn.size())
self.assertFalse(nn.remove((k2, id(o2))))
self.assertEqual(1, nn.size())
nearest = nn.find_nearest(k1)
self.assertTrue(nearest[1] == id(o1), "%s != %s"%(nearest[1], o1))
#self.assertTrue(nearest[1] is o1, "%s,%s is not %s"%(str(nearest[0]), str(nearest[1]), str((k1,id(o1)))))
def test_count_within_range(self):
nn = KDTree_2Int()
for p in [(0,0), (1,0), (0,1), (1,1)]:
nn.add((p, id(p)))
res = nn.count_within_range((0,0), 1.0)
self.assertEqual(3, res, "Counted %i points instead of %i"%(res, 3))
res = nn.count_within_range((0,0), 1.9)
self.assertEqual(4, res, "Counted %i points instead of %i"%(res, 4))
class KDTree_4IntTestCase(unittest.TestCase):
def test_empty(self):
nn = KDTree_4Int()
self.assertEqual(0, nn.size())
actual = nn.find_nearest((0,0,2,3))
self.assertTrue(None==actual, "%s != %s"%(str(None), str(actual)))
def test_get_all(self):
nn = KDTree_4Int()
o1 = object()
nn.add(((0,0,1,1), id(o1)))
o2 = object()
nn.add(((0,0,10,10), id(o2)))
o3 = object()
nn.add(((0,0,11,11), id(o3)))
self.assertEqual([((0,0,1,1), id(o1)), ((0,0,10,10), id(o2)), ((0,0,11,11), id(o3))], nn.get_all())
self.assertEqual(3, len(nn))
nn.remove(((0,0,10,10), id(o2)))
self.assertEqual(2, len(nn))
self.assertEqual([((0,0,1,1), id(o1)), ((0,0,11,11), id(o3))], nn.get_all())
def test_nearest(self):
nn = KDTree_4Int()
nn_id = {}
o1 = object()
nn.add(((0,0,1,1), id(o1)))
nn_id[id(o1)] = o1
o2 = object()
nn.add(((0,0,10,10), id(o2)))
nn_id[id(o2)] = o2
expected = o1
actual = nn.find_nearest((0,0,2,2))[1]
self.assertTrue(expected==nn_id[actual], "%s != %s"%(str(expected), str(nn_id[actual])))
expected = o2
actual = nn.find_nearest((0,0,6,6))[1]
self.assertTrue(expected==nn_id[actual], "%s != %s"%(str(expected), str(nn_id[actual])))
def test_remove(self):
class C:
def __init__(self, i):
self.i = i
self.next = None
nn = KDTree_4Int()
k1, o1 = (0,0,1,1), C(7)
self.assertFalse(nn.remove((k1, id(o1))), "This cannot be removed!")
nn.add((k1, id(o1)))
k2, o2 = (0,0,1,1), C(7)
nn.add((k2, id(o2)))
self.assertEqual(2, nn.size())
self.assertTrue(nn.remove((k2, id(o2))))
self.assertEqual(1, nn.size())
self.assertFalse(nn.remove((k2, id(o2))))
self.assertEqual(1, nn.size())
nearest = nn.find_nearest(k1)
self.assertTrue(nearest[1] == id(o1), "%s != %s"%(nearest[1], o1))
#self.assertTrue(nearest[1] is o1, "%s,%s is not %s"%(str(nearest[0]), str(nearest[1]), str((k1,id(o1)))))
class KDTree_4FloatTestCase(unittest.TestCase):
def test_empty(self):
nn = KDTree_4Float()
self.assertEqual(0, nn.size())
actual = nn.find_nearest((0,0,2,3))
self.assertTrue(None==actual, "%s != %s"%(str(None), str(actual)))
def test_get_all(self):
nn = KDTree_4Int()
o1 = object()
nn.add(((0,0,1,1), id(o1)))
o2 = object()
nn.add(((0,0,10,10), id(o2)))
o3 = object()
nn.add(((0,0,11,11), id(o3)))
self.assertEqual([((0,0,1,1), id(o1)), ((0,0,10,10), id(o2)), ((0,0,11,11), id(o3))], nn.get_all())
self.assertEqual(3, len(nn))
nn.remove(((0,0,10,10), id(o2)))
self.assertEqual(2, len(nn))
self.assertEqual([((0,0,1,1), id(o1)), ((0,0,11,11), id(o3))], nn.get_all())
def test_nearest(self):
nn = KDTree_4Int()
nn_id = {}
o1 = object()
nn.add(((0,0,1,1), id(o1)))
nn_id[id(o1)] = o1
o2 = object()
nn.add(((0,0,10,10), id(o2)))
nn_id[id(o2)] = o2
expected = o1
actual = nn.find_nearest((0,0,2,2))[1]
self.assertTrue(expected==nn_id[actual], "%s != %s"%(str(expected), str(nn_id[actual])))
expected = o2
actual = nn.find_nearest((0,0,6,6))[1]
self.assertTrue(expected==nn_id[actual], "%s != %s"%(str(expected), str(nn_id[actual])))
def test_remove(self):
class C:
def __init__(self, i):
self.i = i
self.next = None
nn = KDTree_4Int()
k1, o1 = (0,0,1,1), C(7)
self.assertFalse(nn.remove((k1, id(o1))), "This cannot be removed!")
nn.add((k1, id(o1)))
k2, o2 = (0,0,1,1), C(7)
nn.add((k2, id(o2)))
self.assertEqual(2, nn.size())
self.assertTrue(nn.remove((k2, id(o2))))
self.assertEqual(1, nn.size())
self.assertFalse(nn.remove((k2, id(o2))))
self.assertEqual(1, nn.size())
nearest = nn.find_nearest(k1)
self.assertTrue(nearest[1] == id(o1), "%s != %s"%(nearest[1], o1))
#self.assertTrue(nearest[1] is o1, "%s,%s is not %s"%(str(nearest[0]), str(nearest[1]), str((k1,id(o1)))))
class KDTree_3FloatTestCase(unittest.TestCase):
def test_empty(self):
nn = KDTree_3Float()
self.assertEqual(0, nn.size())
actual = nn.find_nearest((2,3,0))
self.assertTrue(None==actual, "%s != %s"%(str(None), str(actual)))
def test_get_all(self):
nn = KDTree_3Float()
o1 = object()
nn.add(((1,1,0), id(o1)))
o2 = object()
nn.add(((10,10,0), id(o2)))
o3 = object()
nn.add(((11,11,0), id(o3)))
self.assertEqual([((1,1,0), id(o1)), ((10,10,0), id(o2)), ((11,11,0), id(o3))], nn.get_all())
self.assertEqual(3, len(nn))
nn.remove(((10,10,0), id(o2)))
self.assertEqual(2, len(nn))
self.assertEqual([((1,1,0), id(o1)), ((11,11,0), id(o3))], nn.get_all())
def test_nearest(self):
nn = KDTree_3Float()
nn_id = {}
o1 = object()
nn.add(((1,1,0), id(o1)))
nn_id[id(o1)] = o1
o2 = object()
nn.add(((10,10,0), id(o2)))
nn_id[id(o2)] = o2
o3 = object()
nn.add(((4.1, 4.1,0), id(o3)))
nn_id[id(o3)] = o3
expected = o3
actual = nn.find_nearest((2.9,2.9,0))[1]
self.assertTrue(expected==nn_id[actual], "%s != %s"%(str(expected), str(nn_id[actual])))
expected = o3
actual = nn.find_nearest((6, 6,0))[1]
self.assertTrue(expected==nn_id[actual], "%s != %s"%(str(expected), str(nn_id[actual])))
def test_remove(self):
class C:
def __init__(self, i):
self.i = i
self.next = None
nn = KDTree_3Float()
k1, o1 = (1.1,1.1,0), C(7)
self.assertFalse(nn.remove((k1, id(o1))), "This cannot be removed!")
nn.add((k1, id(o1)))
k2, o2 = (1.1,1.1,0), C(7)
nn.add((k2, id(o2)))
self.assertEqual(2, nn.size())
self.assertTrue(nn.remove((k2, id(o2))))
self.assertEqual(1, nn.size())
self.assertFalse(nn.remove((k2, id(o2))))
self.assertEqual(1, nn.size())
nearest = nn.find_nearest(k1)
self.assertTrue(nearest[1] == id(o1), "%s != %s"%(nearest[1], o1))
#self.assertTrue(nearest[1] is o1, "%s,%s is not %s"%(str(nearest[0]), str(nearest[1]), str((k1,id(o1)))))
class KDTree_6FloatTestCase(unittest.TestCase):
def test_empty(self):
nn = KDTree_6Float()
self.assertEqual(0, nn.size())
actual = nn.find_nearest((2,3,0,0,0,0))
self.assertTrue(None==actual, "%s != %s"%(str(None), str(actual)))
def test_get_all(self):
nn = KDTree_6Float()
o1 = object()
nn.add(((1,1,0,0,0,0), id(o1)))
o2 = object()
nn.add(((10,10,0,0,0,0), id(o2)))
o3 = object()
nn.add(((11,11,0,0,0,0), id(o3)))
self.assertEqual([((1,1,0,0,0,0), id(o1)), ((10,10,0,0,0,0), id(o2)), ((11,11,0,0,0,0 ), id(o3))], nn.get_all())
self.assertEqual(3, len(nn))
nn.remove(((10,10,0,0,0,0), id(o2)))
self.assertEqual(2, len(nn))
self.assertEqual([((1,1,0,0,0,0), id(o1)), ((11,11,0,0,0,0), id(o3))], nn.get_all())
def test_nearest(self):
nn = KDTree_6Float()
nn_id = {}
o1 = object()
nn.add(((1,1,0,0,0,0), id(o1)))
nn_id[id(o1)] = o1
o2 = object()
nn.add(((10,10,0,0,0,0), id(o2)))
nn_id[id(o2)] = o2
o3 = object()
nn.add(((4.1, 4.1,0,0,0,0), id(o3)))
nn_id[id(o3)] = o3
expected = o3
actual = nn.find_nearest((2.9,2.9,0,0,0,0))[1]
self.assertTrue(expected==nn_id[actual], "%s != %s"%(str(expected), str(nn_id[actual])))
expected = o3
actual = nn.find_nearest((6, 6,0,0,0,0))[1]
self.assertTrue(expected==nn_id[actual], "%s != %s"%(str(expected), str(nn_id[actual])))
def test_remove(self):
class C:
def __init__(self, i):
self.i = i
self.next = None
nn = KDTree_6Float()
k1, o1 = (1.1,1.1,0,0,0,0), C(7)
self.assertFalse(nn.remove((k1, id(o1))), "This cannot be removed!")
nn.add((k1, id(o1)))
k2, o2 = (1.1,1.1,0,0,0,0), C(7)
nn.add((k2, id(o2)))
self.assertEqual(2, nn.size())
self.assertTrue(nn.remove((k2, id(o2))))
self.assertEqual(1, nn.size())
self.assertFalse(nn.remove((k2, id(o2))))
self.assertEqual(1, nn.size())
nearest = nn.find_nearest(k1)
self.assertTrue(nearest[1] == id(o1), "%s != %s"%(nearest[1], o1))
#self.assertTrue(nearest[1] is o1, "%s,%s is not %s"%(str(nearest[0]), str(nearest[1]), str((k1,id(o1)))))
def suite():
return unittest.defaultTestLoader.loadTestsFromModule(sys.modules.get(__name__))
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |
yostudios/Spritemapper | spritecss/main.py | 1 | 4772 | import sys
import logging
import optparse
from os import path, access, R_OK
from itertools import ifilter
from contextlib import contextmanager
from spritecss.css import CSSParser, print_css
from spritecss.config import CSSConfig
from spritecss.finder import find_sprite_refs
from spritecss.mapper import SpriteMapCollector, mapper_from_conf
from spritecss.packing import PackedBoxes, print_packed_size
from spritecss.packing.sprites import open_sprites
from spritecss.stitch import stitch
from spritecss.replacer import SpriteReplacer
logger = logging.getLogger(__name__)
# TODO CSSFile should probably fit into the bigger picture
class CSSFile(object):
def __init__(self, fname, conf=None):
self.fname = fname
self.conf = conf
@contextmanager
def open_parser(self):
with open(self.fname, "rb") as fp:
yield CSSParser.read_file(fp)
@classmethod
def open_file(cls, fname, conf=None):
with cls(fname).open_parser() as p:
return cls(fname, conf=CSSConfig(p, base=conf, fname=fname))
@property
def mapper(self):
return mapper_from_conf(self.conf)
@property
def output_fname(self):
return self.conf.get_css_out(self.fname)
def map_sprites(self):
with self.open_parser() as p:
srefs = find_sprite_refs(p, conf=self.conf, source=self.fname)
def test_sref(sref):
if not access(str(sref), R_OK):
logger.error("%s: not readable", sref); return False
else:
logger.debug("%s passed", sref); return True
return self.mapper.map_reduced(ifilter(test_sref, srefs))
class InMemoryCSSFile(CSSFile):
def __init__(self, *a, **k):
sup = super(InMemoryCSSFile, self)
sup.__init__(*a, **k)
with sup.open_parser() as p:
self._evs = list(p)
@contextmanager
def open_parser(self):
yield self._evs
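# Pipeline: collect sprite references from every CSS file, pack each sprite map
# with simulated annealing, stitch the packed images to disk, then rewrite the
# CSS to point at the generated spritemaps.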
def spritemap(css_fs, conf=None, out=sys.stderr):
w_ln = lambda t: out.write(t + "\n")
#: sum of all spritemaps used from any css files
smaps = SpriteMapCollector(conf=conf)
for css in css_fs:
w_ln("mapping sprites in source %s" % (css.fname,))
for sm in smaps.collect(css.map_sprites()):
w_ln(" - %s" % (sm.fname,))
# Weed out single-image spritemaps (these make no sense.)
smaps = [sm for sm in smaps if len(sm) > 1]
sm_plcs = []
for smap in smaps:
with open_sprites(smap, pad=conf.padding) as sprites:
w_ln("packing sprites in mapping %s" % (smap.fname,))
logger.debug("annealing %s in steps of %d",
smap.fname, conf.anneal_steps)
packed = PackedBoxes(sprites, anneal_steps=conf.anneal_steps)
print_packed_size(packed)
sm_plcs.append((smap, packed.placements))
w_ln("writing spritemap image at %s" % (smap.fname,))
im = stitch(packed)
with open(smap.fname, "wb") as fp:
im.save(fp)
replacer = SpriteReplacer(sm_plcs)
for css in css_fs:
w_ln("writing new css at %s" % (css.output_fname,))
with open(css.output_fname, "wb") as fp:
print_css(replacer(css), out=fp)
op = optparse.OptionParser()
op.set_usage("%prog [opts] <css file(s) ...>")
op.add_option("-c", "--conf", metavar="INI",
help="read base configuration from INI")
op.add_option("--padding", type=int, metavar="N",
help="keep N pixels of padding between sprites")
op.add_option("-v", "--verbose", action="store_true",
help="use debug logging level")
#op.add_option("--in-memory", action="store_true",
# help="keep CSS parsing results in memory")
#op.add_option("--anneal", type=int, metavar="N", default=9200,
# help="simulated anneal steps (default: 9200)")
op.set_default("in_memory", False)
op.set_default("anneal", None)
def main():
(opts, args) = op.parse_args()
logging.basicConfig(level=logging.DEBUG if opts.verbose else logging.INFO)
if not args:
op.error("you must provide at least one css file")
if opts.in_memory:
css_cls = InMemoryCSSFile
else:
css_cls = CSSFile
base = {}
if opts.conf:
from ConfigParser import ConfigParser
cp = ConfigParser()
with open(opts.conf) as fp:
cp.readfp(fp)
base.update(cp.items("spritemapper"))
if opts.anneal:
base["anneal_steps"] = opts.anneal
if opts.padding:
base["padding"] = (opts.padding, opts.padding)
conf = CSSConfig(base=base)
spritemap([css_cls.open_file(fn, conf=conf) for fn in args], conf=conf)
if __name__ == "__main__":
main()
| mit |
khink/xhtml2pdf | demo/wsgi/pisawsgidemo.py | 98 | 2215 | #!/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "$Revision: 103 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2007-10-31 17:08:54 +0100 (Mi, 31 Okt 2007) $"
__svnid__ = "$Id: pisa.py 103 2007-10-31 16:08:54Z holtwick $"
from wsgiref.simple_server import make_server
import logging
from xhtml2pdf import wsgi
def SimpleApp(environ, start_response):
# That's the magic!
#
# Set the environment variable "pisa.topdf" to the filename
# you would like to have for the resulting PDF
environ["pisa.topdf"] = "index.pdf"
# Simple Hello World example
start_response(
'200 OK', [
('content-type', "text/html"),
])
return ["Hello <strong>World</strong>"]
if __name__ == '__main__':
HOST = ''
PORT = 8080
logging.basicConfig(level=logging.DEBUG)
app = SimpleApp
# Add PISA WSGI Middleware
app = wsgi.PisaMiddleware(app)
httpd = make_server(HOST, PORT, app)
print "Serving HTTP on port %d..." % PORT
httpd.serve_forever()
| apache-2.0 |
cgwalters/librepo | examples/python/verify_local_yum_metadata.py | 2 | 1145 | #!/usr/bin/python
"""
Example: Verify checksum of local yum metadata
Use case:
We have some incomplete yum metadata localy.
They are incomplete because they doesn't
contain all files specified in repomd.xml.
They contains only primary.xml and filelists.xml.
We want to check checksum of this metadata.
"""
import sys
import librepo
METADATA_PATH = "downloaded_metadata"
if __name__ == "__main__":
h = librepo.Handle()
r = librepo.Result()
# Yum metadata
h.setopt(librepo.LRO_REPOTYPE, librepo.LR_YUMREPO)
# Path to the metadata
h.setopt(librepo.LRO_URLS, [METADATA_PATH])
# Do not duplicate (copy) the metadata
h.setopt(librepo.LRO_LOCAL, True)
# Check checksum of metadata
h.setopt(librepo.LRO_CHECKSUM, True)
# Ignore missing metadata files
h.setopt(librepo.LRO_IGNOREMISSING, True)
try:
h.perform(r)
except librepo.LibrepoException as e:
rc, msg, general_msg = e
if rc == librepo.LRE_BADCHECKSUM:
print "Corrupted metadata! (%s)" % msg
else:
print "Other error: %s" % msg
sys.exit(1)
print "Metadata are fine!"
| lgpl-2.1 |
isabellaleehs/isabellaleehs.github.io | projects/webapp1/flask/lib/python2.7/site-packages/wheel/egg2wheel.py | 471 | 2633 | #!/usr/bin/env python
import os.path
import re
import sys
import tempfile
import zipfile
import wheel.bdist_wheel
import shutil
import distutils.dist
from distutils.archive_util import make_archive
from argparse import ArgumentParser
from glob import iglob
egg_info_re = re.compile(r'''(?P<name>.+?)-(?P<ver>.+?)
(-(?P<pyver>.+?))?(-(?P<arch>.+?))?.egg''', re.VERBOSE)
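# Matches e.g. "foo-1.0-py2.7-linux_x86_64.egg" (hypothetical name) into
# name/ver plus optional pyver/arch groups.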
def egg2wheel(egg_path, dest_dir):
egg_info = egg_info_re.match(os.path.basename(egg_path)).groupdict()
dir = tempfile.mkdtemp(suffix="_e2w")
if os.path.isfile(egg_path):
# assume we have a bdist_egg otherwise
egg = zipfile.ZipFile(egg_path)
egg.extractall(dir)
else:
# support buildout-style installed eggs directories
for pth in os.listdir(egg_path):
src = os.path.join(egg_path, pth)
if os.path.isfile(src):
shutil.copy2(src, dir)
else:
shutil.copytree(src, os.path.join(dir, pth))
dist_info = "%s-%s" % (egg_info['name'], egg_info['ver'])
abi = 'none'
pyver = egg_info['pyver'].replace('.', '')
arch = (egg_info['arch'] or 'any').replace('.', '_').replace('-', '_')
if arch != 'any':
# assume all binary eggs are for CPython
pyver = 'cp' + pyver[2:]
wheel_name = '-'.join((
dist_info,
pyver,
abi,
arch
))
bw = wheel.bdist_wheel.bdist_wheel(distutils.dist.Distribution())
bw.root_is_purelib = egg_info['arch'] is None
dist_info_dir = os.path.join(dir, '%s.dist-info' % dist_info)
bw.egg2dist(os.path.join(dir, 'EGG-INFO'),
dist_info_dir)
bw.write_wheelfile(dist_info_dir, generator='egg2wheel')
bw.write_record(dir, dist_info_dir)
filename = make_archive(os.path.join(dest_dir, wheel_name), 'zip', root_dir=dir)
os.rename(filename, filename[:-3] + 'whl')
shutil.rmtree(dir)
def main():
parser = ArgumentParser()
parser.add_argument('eggs', nargs='*', help="Eggs to convert")
parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
help="Directory to store wheels (default %(default)s)")
parser.add_argument('--verbose', '-v', action='store_true')
args = parser.parse_args()
for pat in args.eggs:
for egg in iglob(pat):
if args.verbose:
sys.stdout.write("{0}... ".format(egg))
egg2wheel(egg, args.dest_dir)
if args.verbose:
sys.stdout.write("OK\n")
if __name__ == "__main__":
main()
| mit |
nicolasiscoding/IEEEOpenCV2016 | ROS/src/object_detection/src/GenericCascade.py | 2 | 2724 | #!/usr/bin/env python
import cv2
# import rospy
# import roslib
import numpy as np
import os.path
# roslib.load_manifest('object_detection')
# from sensor_msgs.msg import Image
# from cv_bridge import CvBridge, CvBridgeError
'''
#############################################################
# IEEE Hardware Team 2016 #
# Created by: Nicolas Fry #
# Email: [email protected] #
# #
# Created for: The use of detecting images with a #
# Cascade Classifier given a trained file #
# Created Date: September 27th, 2015 #
# #
# Modified by: Ausin Seber #
# Modified Date: Oct. 06th, 2015 #
# Reason Mod.: Refactoring code to AGILE style #
#############################################################
'''
def GenericCascade(image, classifierPath, erosionFactor = 0.05):
if os.path.isfile(classifierPath) is not True:
print '\n\n***********!!!No training file present\n\n'
return;
loadedCascadeClassifier = cv2.CascadeClassifier(classifierPath)
greyimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
greyimage = cv2.equalizeHist(greyimage)
#Look at greyscale image and find occurances of object create a locatedcount variable for debug purposes
located = loadedCascadeClassifier.detectMultiScale(greyimage,scaleFactor=5.5,minNeighbors=48,minSize=(38,38), flags = cv2.CASCADE_SCALE_IMAGE)
# Now we go through each rectangle in located
# and add it to a list of images
# We also add a bit of extra space to all
# images based on erosionFactor
images = []
for (x, y, w, h) in located:
erosionW = int(round(w - w*(1 - erosionFactor)))
erosionH = int(round(h - h*(1 - erosionFactor)))
rectX = x - erosionW
rectY = y - erosionH
rectW = x + w + erosionW
rectH = y + h + erosionH
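        # NumPy images are indexed [row, col] (i.e. [y, x]), so slice rows first.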
        croppedImage = image[rectY:rectH, rectX:rectW]
images.append(croppedImage)
return images
def main():
cascade_path = '/home/aseber/Desktop/ieeeData/IEEEOpenCV2016/ROS/src/object_detection/src/QR/QRCodeCascadeRC1/cascade.xml'
camera = cv2.VideoCapture(-1)
while(True):
_, image = camera.read()
images = GenericCascade(image, cascade_path)
print len(images)
cv2.waitKey(50)
cv2.imshow("Image window", image)
#Destroy open windows to prevent memory leaks
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| mit |
mosdef-hub/mbuild | mbuild/formats/par_writer.py | 2 | 6707 | """CHARMM Par format."""
import warnings
__all__ = ["write_par"]
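# Minimal usage sketch (assumes `struct` is an already-parametrized parmed Structure):
#   write_par(struct, "system.par")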
def write_par(structure, filename):
"""Write CHARMM Par file given a parametrized structure.
Notes
-----
Follows format according to
https://www.ks.uiuc.edu/Training/Tutorials/namd/namd-tutorial-unix-html/
node25.html
Furthermore, ParmEd should support writing CHARMM par, rtf, str files
by converting the parmed.Structure into parmed.CharmmParameterSet
Parmed stores rmin/2 in "rmin"
"""
# ATOMS
with open(filename, "w") as f:
f.write("ATOMS\n")
unique_atoms = set()
for atom in structure.atoms:
unique_atoms.add((atom.atom_type.name, atom.atom_type.mass))
for atom in unique_atoms:
f.write("MASS -1 {:8s} {:8.4f}\n".format(atom[0], atom[1]))
f.write("\nBONDS\n")
unique_bonds = set()
for bond in structure.bonds:
unique_bonds.add(
(
bond.atom1.atom_type.name,
bond.atom2.atom_type.name,
bond.type,
)
)
for bond in unique_bonds:
f.write(
"{:8s} {:8s} {:.5f} {:.5f}\n".format(
bond[0], bond[1], bond[2].k, bond[2].req
)
)
f.write("\nANGLES\n")
unique_angles = set()
unique_ubs = set()
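        # Angles whose outer atoms also form a Urey-Bradley term are written with
        # the extra UB k/req columns; the remaining angles are written afterwards.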
for angle in structure.angles:
associated_ub = False
for ub in structure.urey_bradleys:
if ((angle.atom1, angle.atom3) == (ub.atom1, ub.atom2)) or (
angle.atom3,
angle.atom1,
) == (ub.atom1, ub.atom2):
unique_ubs.add(
(
angle.atom1.atom_type.name,
angle.atom2.atom_type.name,
angle.atom3.atom_type.name,
angle.type,
ub.type,
)
)
associated_ub = True
if not associated_ub:
unique_angles.add(
(
angle.atom1.atom_type.name,
angle.atom2.atom_type.name,
angle.atom3.atom_type.name,
angle.type,
)
)
for ub in unique_ubs:
f.write(
"{:8s} {:8s} {:8s} {:.5f} {:.5f} {:.5f} {:.5f}\n".format(
ub[0],
ub[1],
ub[2],
ub[3].k,
ub[3].theteq,
ub[4].k,
ub[4].req,
)
)
for angle in unique_angles:
f.write(
"{:8s} {:8s} {:8s} {:.5f} {:.5f}\n".format(
angle[0], angle[1], angle[2], angle[3].k, angle[3].theteq
)
)
# These dihedrals need to be PeriodicTorsion Style (Charmm style)
if len(structure.rb_torsions) > 0:
warnings.warn("RB Torsions detected, but unsupported in par writer")
f.write("\nDIHEDRALS\n")
unique_dihedrals = set()
scnb = set()
for dihedral in structure.dihedrals:
if not dihedral.improper:
unique_dihedrals.add(
(
dihedral.atom1.atom_type.name,
dihedral.atom2.atom_type.name,
dihedral.atom3.atom_type.name,
dihedral.atom4.atom_type.name,
dihedral.type,
)
)
scnb.add(dihedral.type.scnb)
else:
msg = (
"AMBER-style improper detected between "
+ "{} {} {} {}".format(
dihedral.atom1,
dihedral.atom2,
dihedral.atom3,
dihedral.atom4,
)
+ ", but unsupported in par writer"
)
warnings.warn(msg)
for dihedral in unique_dihedrals:
f.write(
"{:8s} {:8s} {:8s} {:8s} {:.5f} {:5d} {:.5f}\n".format(
dihedral[0],
dihedral[1],
dihedral[2],
dihedral[3],
dihedral[4].phi_k,
dihedral[4].per,
dihedral[4].phase,
)
)
f.write("\nIMPROPER\n")
unique_impropers = set()
for improper in structure.impropers:
unique_impropers.add(
(
improper.atom1.atom_type.name,
improper.atom2.atom_type.name,
improper.atom3.atom_type.name,
improper.atom4.atom_type.name,
improper.type,
)
)
for improper in unique_impropers:
f.write(
"{:8s} {:8s} {:8s} {:8s} {:.5f} {:5d} {:.5f}\n".format(
improper[2],
improper[0],
improper[1],
improper[3],
improper[4].psi_k,
0,
improper[4].psi_eq,
)
)
sc_nb = [a for a in scnb]
if len(sc_nb) > 1:
warnings.warn(
"Multiple 1-4 LJ scalings were detected, "
"defaulting to first LJ scaling detected, {}".format(sc_nb[0])
)
sc_nb = sc_nb[0]
elif len(sc_nb) == 1:
sc_nb = sc_nb[0]
elif len(sc_nb) == 0:
warnings.warn("No 1-4 LJ scaling was detected, defaulting 1")
sc_nb = 1.0
f.write("\nNONBONDED\n")
unique_atypes = set()
for atom in structure.atoms:
unique_atypes.add(atom.atom_type)
for atype in unique_atypes:
# atype, 0.0, epsilon, rmin/2, 0.0, epsilon(1-4), rmin/2 (1-4)
f.write(
"{:8s} {:8.3f} {:8.3f} {:8.3f} {:8.3f} {:8.3f} {:8.3f}\n".format(
atype.name,
0.0,
-1 * atype.epsilon,
atype.rmin,
0.0,
-1 * sc_nb * atype.epsilon,
atype.rmin,
)
)
if structure.has_NBFIX():
warnings.warn("NBFixes detected but unsupported in par writer")
f.write("\nEND")
| mit |
tpltnt/SimpleCV | SimpleCV/Features/EdgeHistogramFeatureExtractor.py | 13 | 1898 | from SimpleCV.base import *
from SimpleCV.ImageClass import Image
from SimpleCV.Features.FeatureExtractorBase import *
class EdgeHistogramFeatureExtractor(FeatureExtractorBase):
"""
Create a 1D edge length histogram and 1D edge angle histogram.
This method takes in an image, applies an edge detector, and calculates
the length and direction of lines in the image.
bins = the number of bins
"""
mNBins = 10
def __init__(self, bins=10):
self.mNBins = bins
def extract(self, img):
"""
        Extract the line orientation and length histograms.
"""
#I am not sure this is the best normalization constant.
retVal = []
p = max(img.width,img.height)/2
minLine = 0.01*p
gap = 0.1*p
fs = img.findLines(threshold=10,minlinelength=minLine,maxlinegap=gap)
ls = fs.length()/p #normalize to image length
angs = fs.angle()
lhist = np.histogram(ls,self.mNBins,normed=True,range=(0,1))
ahist = np.histogram(angs,self.mNBins,normed=True,range=(-180,180))
retVal.extend(lhist[0].tolist())
retVal.extend(ahist[0].tolist())
return retVal
def getFieldNames(self):
"""
Return the names of all of the length and angle fields.
"""
retVal = []
for i in range(self.mNBins):
name = "Length"+str(i)
retVal.append(name)
for i in range(self.mNBins):
name = "Angle"+str(i)
retVal.append(name)
return retVal
"""
This method gives the names of each field in the feature vector in the
order in which they are returned. For example, 'xpos' or 'width'
"""
def getNumFields(self):
"""
This method returns the total number of fields in the feature vector.
"""
return self.mNBins*2
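# Illustrative usage sketch (assumes a local image file "lenna.png" and a
# working SimpleCV install):
#
#     from SimpleCV import Image
#     extractor = EdgeHistogramFeatureExtractor(bins=10)
#     vector = extractor.extract(Image("lenna.png"))
#     assert len(vector) == extractor.getNumFields()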
| bsd-3-clause |
tfmorris/dedupe | dedupe/variables/exists.py | 2 | 1117 | from .base import DerivedType
from categorical import CategoricalComparator
from .categorical_type import CategoricalType
class ExistsType(CategoricalType) :
type = "Exists"
_predicate_functions = []
def __init__(self, definition) :
        super(CategoricalType, self).__init__(definition)
self.cat_comparator = CategoricalComparator([0,1])
self.higher_vars = []
for higher_var in self.cat_comparator.dummy_names :
dummy_var = DerivedType({'name' : higher_var,
'type' : 'Dummy',
'has missing' : self.has_missing})
self.higher_vars.append(dummy_var)
def comparator(self, field_1, field_2) :
if field_1 and field_2 :
return self.cat_comparator(1, 1)
elif field_1 or field_2 :
return self.cat_comparator(0, 1)
else :
return self.cat_comparator(0, 0)
# This flag tells fieldDistances in dedupe.core to pass
# missing values (None) into the comparator
comparator.missing = True
| mit |
raulshepherd/ardupilot-solo | mk/PX4/Tools/genmsg/scripts/genmsg_check_deps.py | 216 | 2999 | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2014, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import os
import sys
from genmsg import EXT_MSG, EXT_SRV, MsgContext
from genmsg.gentools import compute_full_type_name
from genmsg.msg_loader import load_msg_from_file, load_srv_from_file
from genmsg.msgs import bare_msg_type, is_builtin, resolve_type
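# Expected command line: <package name> <path to .msg/.srv file>
# [colon-separated list of already-resolved dependency type names]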
pkg_name = sys.argv[1]
msg_file = sys.argv[2]
deps = sys.argv[3].split(':') if len(sys.argv) > 3 else []
msg_context = MsgContext.create_default()
full_type_name = compute_full_type_name(pkg_name, os.path.basename(msg_file))
if msg_file.endswith(EXT_MSG):
spec = load_msg_from_file(msg_context, msg_file, full_type_name)
unresolved_types = spec.types
elif msg_file.endswith(EXT_SRV):
spec = load_srv_from_file(msg_context, msg_file, full_type_name)
unresolved_types = spec.request.types + spec.response.types
else:
print("Processing file: '%s' - unknown file extension" % msg_file, file=sys.stderr)
sys.exit(1)
package_context = spec.package
for unresolved_type in unresolved_types:
bare_type = bare_msg_type(unresolved_type)
resolved_type = resolve_type(bare_type, package_context)
if not is_builtin(resolved_type) and resolved_type not in deps:
print("The dependencies of the message/service '%s' have changed. Please rerun cmake." % spec.full_name, file=sys.stderr)
sys.exit(1)
| gpl-3.0 |
ycaihua/kbengine | kbe/res/scripts/common/Lib/test/test_readline.py | 60 | 2234 | """
Very minimal unittests for parts of the readline module.
"""
import os
import unittest
from test.support import run_unittest, import_module
from test.script_helper import assert_python_ok
# Skip tests if there is no readline module
readline = import_module('readline')
class TestHistoryManipulation (unittest.TestCase):
"""
These tests were added to check that the libedit emulation on OSX and the
"real" readline have the same interface for history manipulation. That's
why the tests cover only a small subset of the interface.
"""
@unittest.skipIf(not hasattr(readline, 'clear_history'),
"The history update test cannot be run because the "
"clear_history method is not available.")
def testHistoryUpdates(self):
readline.clear_history()
readline.add_history("first line")
readline.add_history("second line")
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "first line")
self.assertEqual(readline.get_history_item(2), "second line")
readline.replace_history_item(0, "replaced line")
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "replaced line")
self.assertEqual(readline.get_history_item(2), "second line")
self.assertEqual(readline.get_current_history_length(), 2)
readline.remove_history_item(0)
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "second line")
self.assertEqual(readline.get_current_history_length(), 1)
class TestReadline(unittest.TestCase):
def test_init(self):
# Issue #19884: Ensure that the ANSI sequence "\033[1034h" is not
# written into stdout when the readline module is imported and stdout
# is redirected to a pipe.
rc, stdout, stderr = assert_python_ok('-c', 'import readline',
TERM='xterm-256color')
self.assertEqual(stdout, b'')
def test_main():
run_unittest(TestHistoryManipulation, TestReadline)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
colinligertwood/odoo | addons/sale/edi/sale_order.py | 403 | 10861 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.addons.edi import EDIMixin
from openerp.tools.translate import _
from werkzeug import url_encode
SALE_ORDER_LINE_EDI_STRUCT = {
'sequence': True,
'name': True,
#custom: 'date_planned'
'product_id': True,
'product_uom': True,
'price_unit': True,
#custom: 'product_qty'
'discount': True,
# fields used for web preview only - discarded on import
'price_subtotal': True,
}
SALE_ORDER_EDI_STRUCT = {
'name': True,
'origin': True,
'company_id': True, # -> to be changed into partner
#custom: 'partner_ref'
'date_order': True,
'partner_id': True,
#custom: 'partner_address'
#custom: 'notes'
'order_line': SALE_ORDER_LINE_EDI_STRUCT,
# fields used for web preview only - discarded on import
'amount_total': True,
'amount_untaxed': True,
'amount_tax': True,
'payment_term': True,
'order_policy': True,
'user_id': True,
'state': True,
}
class sale_order(osv.osv, EDIMixin):
_inherit = 'sale.order'
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Exports a Sale order"""
edi_struct = dict(edi_struct or SALE_ORDER_EDI_STRUCT)
res_company = self.pool.get('res.company')
res_partner_obj = self.pool.get('res.partner')
edi_doc_list = []
for order in records:
# generate the main report
self._edi_generate_report_attachment(cr, uid, order, context=context)
# Get EDI doc based on struct. The result will also contain all metadata fields and attachments.
edi_doc = super(sale_order,self).edi_export(cr, uid, [order], edi_struct, context)[0]
edi_doc.update({
# force trans-typing to purchase.order upon import
'__import_model': 'purchase.order',
'__import_module': 'purchase',
'company_address': res_company.edi_export_address(cr, uid, order.company_id, context=context),
'partner_address': res_partner_obj.edi_export(cr, uid, [order.partner_id], context=context)[0],
'currency': self.pool.get('res.currency').edi_export(cr, uid, [order.pricelist_id.currency_id],
context=context)[0],
'partner_ref': order.client_order_ref or False,
'notes': order.note or False,
})
edi_doc_list.append(edi_doc)
return edi_doc_list
def _edi_import_company(self, cr, uid, edi_document, context=None):
# TODO: for multi-company setups, we currently import the document in the
# user's current company, but we should perhaps foresee a way to select
# the desired company among the user's allowed companies
self._edi_requires_attributes(('company_id','company_address'), edi_document)
res_partner = self.pool.get('res.partner')
xid, company_name = edi_document.pop('company_id')
# Retrofit address info into a unified partner info (changed in v7 - used to keep them separate)
company_address_edi = edi_document.pop('company_address')
company_address_edi['name'] = company_name
company_address_edi['is_company'] = True
company_address_edi['__import_model'] = 'res.partner'
company_address_edi['__id'] = xid # override address ID, as of v7 they should be the same anyway
if company_address_edi.get('logo'):
company_address_edi['image'] = company_address_edi.pop('logo')
company_address_edi['customer'] = True
partner_id = res_partner.edi_import(cr, uid, company_address_edi, context=context)
# modify edi_document to refer to new partner
partner = res_partner.browse(cr, uid, partner_id, context=context)
partner_edi_m2o = self.edi_m2o(cr, uid, partner, context=context)
edi_document['partner_id'] = partner_edi_m2o
edi_document['partner_invoice_id'] = partner_edi_m2o
edi_document['partner_shipping_id'] = partner_edi_m2o
edi_document.pop('partner_address', None) # ignored, that's supposed to be our own address!
return partner_id
def _edi_get_pricelist(self, cr, uid, partner_id, currency, context=None):
# TODO: refactor into common place for purchase/sale, e.g. into product module
partner_model = self.pool.get('res.partner')
partner = partner_model.browse(cr, uid, partner_id, context=context)
pricelist = partner.property_product_pricelist
if not pricelist:
pricelist = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'list0', context=context)
if not pricelist.currency_id == currency:
# look for a pricelist with the right type and currency, or make a new one
pricelist_type = 'sale'
product_pricelist = self.pool.get('product.pricelist')
match_pricelist_ids = product_pricelist.search(cr, uid,[('type','=',pricelist_type),
('currency_id','=',currency.id)])
if match_pricelist_ids:
pricelist_id = match_pricelist_ids[0]
else:
pricelist_name = _('EDI Pricelist (%s)') % (currency.name,)
pricelist_id = product_pricelist.create(cr, uid, {'name': pricelist_name,
'type': pricelist_type,
'currency_id': currency.id,
})
self.pool.get('product.pricelist.version').create(cr, uid, {'name': pricelist_name,
'pricelist_id': pricelist_id})
pricelist = product_pricelist.browse(cr, uid, pricelist_id)
return self.edi_m2o(cr, uid, pricelist, context=context)
def edi_import(self, cr, uid, edi_document, context=None):
self._edi_requires_attributes(('company_id','company_address','order_line','date_order','currency'), edi_document)
#import company as a new partner
partner_id = self._edi_import_company(cr, uid, edi_document, context=context)
# currency for rounding the discount calculations and for the pricelist
res_currency = self.pool.get('res.currency')
currency_info = edi_document.pop('currency')
currency_id = res_currency.edi_import(cr, uid, currency_info, context=context)
order_currency = res_currency.browse(cr, uid, currency_id)
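        # Keep the sender's order name as our client reference; use their
        # partner_ref (when provided) as the name of the imported order.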
partner_ref = edi_document.pop('partner_ref', False)
edi_document['client_order_ref'] = edi_document['name']
edi_document['name'] = partner_ref or edi_document['name']
edi_document['note'] = edi_document.pop('notes', False)
edi_document['pricelist_id'] = self._edi_get_pricelist(cr, uid, partner_id, order_currency, context=context)
# discard web preview fields, if present
edi_document.pop('amount_total', None)
edi_document.pop('amount_tax', None)
edi_document.pop('amount_untaxed', None)
order_lines = edi_document['order_line']
for order_line in order_lines:
self._edi_requires_attributes(('product_id', 'product_uom', 'product_qty', 'price_unit'), order_line)
order_line['product_uom_qty'] = order_line['product_qty']
del order_line['product_qty']
# discard web preview fields, if present
order_line.pop('price_subtotal', None)
return super(sale_order,self).edi_import(cr, uid, edi_document, context=context)
def _edi_paypal_url(self, cr, uid, ids, field, arg, context=None):
res = dict.fromkeys(ids, False)
for order in self.browse(cr, uid, ids, context=context):
if order.order_policy in ('prepaid', 'manual') and \
order.company_id.paypal_account and order.state != 'draft':
params = {
"cmd": "_xclick",
"business": order.company_id.paypal_account,
"item_name": order.company_id.name + " Order " + order.name,
"invoice": order.name,
"amount": order.amount_total,
"currency_code": order.pricelist_id.currency_id.name,
"button_subtype": "services",
"no_note": "1",
"bn": "OpenERP_Order_PayNow_" + order.pricelist_id.currency_id.name,
}
res[order.id] = "https://www.paypal.com/cgi-bin/webscr?" + url_encode(params)
return res
_columns = {
'paypal_url': fields.function(_edi_paypal_url, type='char', string='Paypal Url'),
}
class sale_order_line(osv.osv, EDIMixin):
_inherit='sale.order.line'
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Overridden to provide sale order line fields with the expected names
(sale and purchase orders have different column names)"""
edi_struct = dict(edi_struct or SALE_ORDER_LINE_EDI_STRUCT)
edi_doc_list = []
for line in records:
edi_doc = super(sale_order_line,self).edi_export(cr, uid, [line], edi_struct, context)[0]
edi_doc['__import_model'] = 'purchase.order.line'
edi_doc['product_qty'] = line.product_uom_qty
if line.product_uos:
edi_doc.update(product_uom=line.product_uos,
product_qty=line.product_uos_qty)
edi_doc_list.append(edi_doc)
return edi_doc_list
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
scs/uclinux | user/python/python-2.4.4/Lib/idlelib/PyParse.py | 11 | 18904 | import re
import sys
# Reason last stmt is continued (or C_NONE if it's not).
C_NONE, C_BACKSLASH, C_STRING, C_BRACKET = range(4)
if 0: # for throwaway debugging output
def dump(*stuff):
sys.__stdout__.write(" ".join(map(str, stuff)) + "\n")
# Find what looks like the start of a popular stmt.
_synchre = re.compile(r"""
^
[ \t]*
(?: if
| for
| while
| else
| def
| return
| assert
| break
| class
| continue
| elif
| try
| except
| raise
| import
| yield
)
\b
""", re.VERBOSE | re.MULTILINE).search
# Match blank line or non-indenting comment line.
_junkre = re.compile(r"""
[ \t]*
(?: \# \S .* )?
\n
""", re.VERBOSE).match
# Match any flavor of string; the terminating quote is optional
# so that we're robust in the face of incomplete program text.
_match_stringre = re.compile(r"""
\""" [^"\\]* (?:
(?: \\. | "(?!"") )
[^"\\]*
)*
(?: \""" )?
| " [^"\\\n]* (?: \\. [^"\\\n]* )* "?
| ''' [^'\\]* (?:
(?: \\. | '(?!'') )
[^'\\]*
)*
(?: ''' )?
| ' [^'\\\n]* (?: \\. [^'\\\n]* )* '?
""", re.VERBOSE | re.DOTALL).match
# Match a line that starts with something interesting;
# used to find the first item of a bracket structure.
_itemre = re.compile(r"""
[ \t]*
[^\s#\\] # if we match, m.end()-1 is the interesting char
""", re.VERBOSE).match
# Match start of stmts that should be followed by a dedent.
_closere = re.compile(r"""
\s*
(?: return
| break
| continue
| raise
| pass
)
\b
""", re.VERBOSE).match
# Chew up non-special chars as quickly as possible. If match is
# successful, m.end() less 1 is the index of the last boring char
# matched. If match is unsuccessful, the string starts with an
# interesting char.
_chew_ordinaryre = re.compile(r"""
[^[\](){}#'"\\]+
""", re.VERBOSE).match
# Build translation table to map uninteresting chars to "x", open
# brackets to "(", and close brackets to ")".
_tran = ['x'] * 256
for ch in "({[":
_tran[ord(ch)] = '('
for ch in ")}]":
_tran[ord(ch)] = ')'
for ch in "\"'\\\n#":
_tran[ord(ch)] = ch
_tran = ''.join(_tran)
del ch
try:
UnicodeType = type(unicode(""))
except NameError:
UnicodeType = None
class Parser:
def __init__(self, indentwidth, tabwidth):
self.indentwidth = indentwidth
self.tabwidth = tabwidth
def set_str(self, str):
assert len(str) == 0 or str[-1] == '\n'
if type(str) is UnicodeType:
# The parse functions have no idea what to do with Unicode, so
# replace all Unicode characters with "x". This is "safe"
# so long as the only characters germane to parsing the structure
# of Python are 7-bit ASCII. It's *necessary* because Unicode
# strings don't have a .translate() method that supports
# deletechars.
uniphooey = str
str = []
push = str.append
for raw in map(ord, uniphooey):
push(raw < 127 and chr(raw) or "x")
str = "".join(str)
self.str = str
self.study_level = 0
# Return index of a good place to begin parsing, as close to the
# end of the string as possible. This will be the start of some
# popular stmt like "if" or "def". Return None if none found:
# the caller should pass more prior context then, if possible, or
# if not (the entire program text up until the point of interest
# has already been tried) pass 0 to set_lo.
#
# This will be reliable iff given a reliable is_char_in_string
# function, meaning that when it says "no", it's absolutely
# guaranteed that the char is not in a string.
#
# Ack, hack: in the shell window this kills us, because there's
# no way to tell the differences between output, >>> etc and
# user input. Indeed, IDLE's first output line makes the rest
# look like it's in an unclosed paren!:
# Python 1.5.2 (#0, Apr 13 1999, ...
def find_good_parse_start(self, use_ps1, is_char_in_string=None,
_synchre=_synchre):
str, pos = self.str, None
if use_ps1:
# shell window
ps1 = '\n' + sys.ps1
i = str.rfind(ps1)
if i >= 0:
pos = i + len(ps1)
# make it look like there's a newline instead
# of ps1 at the start -- hacking here once avoids
# repeated hackery later
self.str = str[:pos-1] + '\n' + str[pos:]
return pos
# File window -- real work.
if not is_char_in_string:
# no clue -- make the caller pass everything
return None
# Peek back from the end for a good place to start,
# but don't try too often; pos will be left None, or
# bumped to a legitimate synch point.
limit = len(str)
for tries in range(5):
i = str.rfind(":\n", 0, limit)
if i < 0:
break
i = str.rfind('\n', 0, i) + 1 # start of colon line
m = _synchre(str, i, limit)
if m and not is_char_in_string(m.start()):
pos = m.start()
break
limit = i
if pos is None:
# Nothing looks like a block-opener, or stuff does
# but is_char_in_string keeps returning true; most likely
# we're in or near a giant string, the colorizer hasn't
# caught up enough to be helpful, or there simply *aren't*
# any interesting stmts. In any of these cases we're
# going to have to parse the whole thing to be sure, so
# give it one last try from the start, but stop wasting
# time here regardless of the outcome.
m = _synchre(str)
if m and not is_char_in_string(m.start()):
pos = m.start()
return pos
# Peeking back worked; look forward until _synchre no longer
# matches.
i = pos + 1
while 1:
m = _synchre(str, i)
if m:
s, i = m.span()
if not is_char_in_string(s):
pos = s
else:
break
return pos
# Throw away the start of the string. Intended to be called with
# find_good_parse_start's result.
def set_lo(self, lo):
assert lo == 0 or self.str[lo-1] == '\n'
if lo > 0:
self.str = self.str[lo:]
# As quickly as humanly possible <wink>, find the line numbers (0-
# based) of the non-continuation lines.
# Creates self.{goodlines, continuation}.
def _study1(self):
if self.study_level >= 1:
return
self.study_level = 1
# Map all uninteresting characters to "x", all open brackets
# to "(", all close brackets to ")", then collapse runs of
# uninteresting characters. This can cut the number of chars
# by a factor of 10-40, and so greatly speed the following loop.
str = self.str
str = str.translate(_tran)
str = str.replace('xxxxxxxx', 'x')
str = str.replace('xxxx', 'x')
str = str.replace('xx', 'x')
str = str.replace('xx', 'x')
str = str.replace('\nx', '\n')
# note that replacing x\n with \n would be incorrect, because
# x may be preceded by a backslash
# March over the squashed version of the program, accumulating
# the line numbers of non-continued stmts, and determining
# whether & why the last stmt is a continuation.
continuation = C_NONE
level = lno = 0 # level is nesting level; lno is line number
self.goodlines = goodlines = [0]
push_good = goodlines.append
i, n = 0, len(str)
while i < n:
ch = str[i]
i = i+1
# cases are checked in decreasing order of frequency
if ch == 'x':
continue
if ch == '\n':
lno = lno + 1
if level == 0:
push_good(lno)
# else we're in an unclosed bracket structure
continue
if ch == '(':
level = level + 1
continue
if ch == ')':
if level:
level = level - 1
# else the program is invalid, but we can't complain
continue
if ch == '"' or ch == "'":
# consume the string
quote = ch
if str[i-1:i+2] == quote * 3:
quote = quote * 3
w = len(quote) - 1
i = i+w
while i < n:
ch = str[i]
i = i+1
if ch == 'x':
continue
if str[i-1:i+w] == quote:
i = i+w
break
if ch == '\n':
lno = lno + 1
if w == 0:
# unterminated single-quoted string
if level == 0:
push_good(lno)
break
continue
if ch == '\\':
assert i < n
if str[i] == '\n':
lno = lno + 1
i = i+1
continue
# else comment char or paren inside string
else:
# didn't break out of the loop, so we're still
# inside a string
continuation = C_STRING
continue # with outer loop
if ch == '#':
# consume the comment
i = str.find('\n', i)
assert i >= 0
continue
assert ch == '\\'
assert i < n
if str[i] == '\n':
lno = lno + 1
if i+1 == n:
continuation = C_BACKSLASH
i = i+1
# The last stmt may be continued for all 3 reasons.
# String continuation takes precedence over bracket
# continuation, which beats backslash continuation.
if continuation != C_STRING and level > 0:
continuation = C_BRACKET
self.continuation = continuation
# Push the final line number as a sentinel value, regardless of
# whether it's continued.
assert (continuation == C_NONE) == (goodlines[-1] == lno)
if goodlines[-1] != lno:
push_good(lno)
def get_continuation_type(self):
self._study1()
return self.continuation
# study1 was sufficient to determine the continuation status,
# but doing more requires looking at every character. study2
# does this for the last interesting statement in the block.
# Creates:
# self.stmt_start, stmt_end
# slice indices of last interesting stmt
# self.lastch
# last non-whitespace character before optional trailing
# comment
# self.lastopenbracketpos
# if continuation is C_BRACKET, index of last open bracket
def _study2(self):
if self.study_level >= 2:
return
self._study1()
self.study_level = 2
# Set p and q to slice indices of last interesting stmt.
str, goodlines = self.str, self.goodlines
i = len(goodlines) - 1
p = len(str) # index of newest line
while i:
assert p
# p is the index of the stmt at line number goodlines[i].
# Move p back to the stmt at line number goodlines[i-1].
q = p
for nothing in range(goodlines[i-1], goodlines[i]):
# tricky: sets p to 0 if no preceding newline
p = str.rfind('\n', 0, p-1) + 1
# The stmt str[p:q] isn't a continuation, but may be blank
# or a non-indenting comment line.
if _junkre(str, p):
i = i-1
else:
break
if i == 0:
# nothing but junk!
assert p == 0
q = p
self.stmt_start, self.stmt_end = p, q
# Analyze this stmt, to find the last open bracket (if any)
# and last interesting character (if any).
lastch = ""
stack = [] # stack of open bracket indices
push_stack = stack.append
while p < q:
# suck up all except ()[]{}'"#\\
m = _chew_ordinaryre(str, p, q)
if m:
# we skipped at least one boring char
newp = m.end()
# back up over totally boring whitespace
i = newp - 1 # index of last boring char
while i >= p and str[i] in " \t\n":
i = i-1
if i >= p:
lastch = str[i]
p = newp
if p >= q:
break
ch = str[p]
if ch in "([{":
push_stack(p)
lastch = ch
p = p+1
continue
if ch in ")]}":
if stack:
del stack[-1]
lastch = ch
p = p+1
continue
if ch == '"' or ch == "'":
# consume string
# Note that study1 did this with a Python loop, but
# we use a regexp here; the reason is speed in both
# cases; the string may be huge, but study1 pre-squashed
# strings to a couple of characters per line. study1
# also needed to keep track of newlines, and we don't
# have to.
lastch = ch
p = _match_stringre(str, p, q).end()
continue
if ch == '#':
# consume comment and trailing newline
p = str.find('\n', p, q) + 1
assert p > 0
continue
assert ch == '\\'
p = p+1 # beyond backslash
assert p < q
if str[p] != '\n':
# the program is invalid, but can't complain
lastch = ch + str[p]
p = p+1 # beyond escaped char
# end while p < q:
self.lastch = lastch
if stack:
self.lastopenbracketpos = stack[-1]
# Assuming continuation is C_BRACKET, return the number
# of spaces the next line should be indented.
def compute_bracket_indent(self):
self._study2()
assert self.continuation == C_BRACKET
j = self.lastopenbracketpos
str = self.str
n = len(str)
origi = i = str.rfind('\n', 0, j) + 1
j = j+1 # one beyond open bracket
# find first list item; set i to start of its line
while j < n:
m = _itemre(str, j)
if m:
j = m.end() - 1 # index of first interesting char
extra = 0
break
else:
# this line is junk; advance to next line
i = j = str.find('\n', j) + 1
else:
# nothing interesting follows the bracket;
# reproduce the bracket line's indentation + a level
j = i = origi
while str[j] in " \t":
j = j+1
extra = self.indentwidth
return len(str[i:j].expandtabs(self.tabwidth)) + extra
# Return number of physical lines in last stmt (whether or not
# it's an interesting stmt! this is intended to be called when
# continuation is C_BACKSLASH).
def get_num_lines_in_stmt(self):
self._study1()
goodlines = self.goodlines
return goodlines[-1] - goodlines[-2]
# Assuming continuation is C_BACKSLASH, return the number of spaces
# the next line should be indented. Also assuming the new line is
# the first one following the initial line of the stmt.
def compute_backslash_indent(self):
self._study2()
assert self.continuation == C_BACKSLASH
str = self.str
i = self.stmt_start
while str[i] in " \t":
i = i+1
startpos = i
# See whether the initial line starts an assignment stmt; i.e.,
# look for an = operator
endpos = str.find('\n', startpos) + 1
found = level = 0
while i < endpos:
ch = str[i]
if ch in "([{":
level = level + 1
i = i+1
elif ch in ")]}":
if level:
level = level - 1
i = i+1
elif ch == '"' or ch == "'":
i = _match_stringre(str, i, endpos).end()
elif ch == '#':
break
elif level == 0 and ch == '=' and \
(i == 0 or str[i-1] not in "=<>!") and \
str[i+1] != '=':
found = 1
break
else:
i = i+1
if found:
# found a legit =, but it may be the last interesting
# thing on the line
i = i+1 # move beyond the =
found = re.match(r"\s*\\", str[i:endpos]) is None
if not found:
# oh well ... settle for moving beyond the first chunk
# of non-whitespace chars
i = startpos
while str[i] not in " \t\n":
i = i+1
return len(str[self.stmt_start:i].expandtabs(\
self.tabwidth)) + 1
# Return the leading whitespace on the initial line of the last
# interesting stmt.
def get_base_indent_string(self):
self._study2()
i, n = self.stmt_start, self.stmt_end
j = i
str = self.str
while j < n and str[j] in " \t":
j = j + 1
return str[i:j]
# Did the last interesting stmt open a block?
def is_block_opener(self):
self._study2()
return self.lastch == ':'
# Did the last interesting stmt close a block?
def is_block_closer(self):
self._study2()
return _closere(self.str, self.stmt_start) is not None
# index of last open bracket ({[, or None if none
lastopenbracketpos = None
def get_last_open_bracket_pos(self):
self._study2()
return self.lastopenbracketpos
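# Illustrative usage sketch (assumes program text that ends with a newline):
#
#     p = Parser(indentwidth=4, tabwidth=8)
#     p.set_str("if x:\n")
#     p.get_continuation_type()   # C_NONE
#     p.is_block_opener()         # True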
| gpl-2.0 |
GladeRom/android_external_chromium_org | tools/telemetry/telemetry/core/forwarders/do_nothing_forwarder_unittest.py | 37 | 2557 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import forwarders
from telemetry.core import util
from telemetry.core.forwarders import do_nothing_forwarder
class TestDoNothingForwarder(do_nothing_forwarder.DoNothingForwarder):
"""Override _WaitForConnect to avoid actual socket connection."""
def __init__(self, port_pairs):
self.connected_addresses = []
super(TestDoNothingForwarder, self).__init__(port_pairs)
def _WaitForConnectionEstablished(self, address, timeout):
self.connected_addresses.append(address)
class TestErrorDoNothingForwarder(do_nothing_forwarder.DoNothingForwarder):
"""Simulate a connection error."""
def _WaitForConnectionEstablished(self, address, timeout):
raise util.TimeoutException
class CheckPortPairsTest(unittest.TestCase):
def testChecksOnlyHttpHttps(self):
port_pairs = forwarders.PortPairs(
http=forwarders.PortPair(80, 80),
https=forwarders.PortPair(443, 443),
dns=forwarders.PortPair(53, 53))
f = TestDoNothingForwarder(port_pairs)
expected_connected_addresses = [
('127.0.0.1', 80),
('127.0.0.1', 443),
# Port 53 is skipped because it is UDP and does not support connections.
]
self.assertEqual(expected_connected_addresses, f.connected_addresses)
def testNoDnsStillChecksHttpHttps(self):
port_pairs = forwarders.PortPairs(
http=forwarders.PortPair(5566, 5566),
https=forwarders.PortPair(7788, 7788),
dns=None)
f = TestDoNothingForwarder(port_pairs)
expected_connected_addresses = [
('127.0.0.1', 5566),
('127.0.0.1', 7788),
]
self.assertEqual(expected_connected_addresses, f.connected_addresses)
def testPortMismatchRaisesPortsMismatchError(self):
# The do_nothing_forward cannot forward from one port to another.
port_pairs = forwarders.PortPairs(
http=forwarders.PortPair(80, 80),
https=forwarders.PortPair(8443, 443),
dns=None)
with self.assertRaises(do_nothing_forwarder.PortsMismatchError):
TestDoNothingForwarder(port_pairs)
def testConnectionTimeoutRaisesConnectionError(self):
port_pairs = forwarders.PortPairs(
http=forwarders.PortPair(80, 80),
https=forwarders.PortPair(8443, 443),
dns=None)
with self.assertRaises(do_nothing_forwarder.ConnectionError):
TestErrorDoNothingForwarder(port_pairs)
| bsd-3-clause |
aam-at/tensorflow | tensorflow/python/keras/layers/recurrent_v2_test.py | 8 | 5232 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for recurrent v2 layers functionality other than GRU, LSTM.
See also: lstm_v2_test.py, gru_v2_test.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import embeddings
from tensorflow.python.keras.layers import recurrent_v2 as rnn_v2
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class RNNV2Test(keras_parameterized.TestCase):
@parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU])
def test_device_placement(self, layer):
if not test.is_gpu_available():
self.skipTest('Need GPU for testing.')
vocab_size = 20
embedding_dim = 10
batch_size = 8
timestep = 12
units = 5
x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
y = np.random.randint(0, vocab_size, size=(batch_size, timestep))
    # Test that when a GPU is available but not used, the graph is still
    # properly created with CPU ops.
with testing_utils.device(should_use_gpu=False):
model = keras.Sequential([
keras.layers.Embedding(vocab_size, embedding_dim,
batch_input_shape=[batch_size, timestep]),
layer(units, return_sequences=True, stateful=True),
keras.layers.Dense(vocab_size)
])
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, epochs=1, shuffle=False)
@parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU])
def test_reset_dropout_mask_between_batch(self, layer):
# See https://github.com/tensorflow/tensorflow/issues/29187 for more details
batch_size = 8
timestep = 12
embedding_dim = 10
units = 5
layer = layer(units, dropout=0.5, recurrent_dropout=0.5)
inputs = np.random.random((batch_size, timestep, embedding_dim)).astype(
np.float32)
previous_dropout, previous_recurrent_dropout = None, None
for _ in range(5):
layer(inputs, training=True)
dropout = layer.cell.get_dropout_mask_for_cell(inputs, training=True)
recurrent_dropout = layer.cell.get_recurrent_dropout_mask_for_cell(
inputs, training=True)
if previous_dropout is not None:
self.assertNotAllClose(self.evaluate(previous_dropout),
self.evaluate(dropout))
previous_dropout = dropout
if previous_recurrent_dropout is not None:
self.assertNotAllClose(self.evaluate(previous_recurrent_dropout),
self.evaluate(recurrent_dropout))
previous_recurrent_dropout = recurrent_dropout
@parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU])
def test_recurrent_dropout_with_stateful_RNN(self, layer):
# See https://github.com/tensorflow/tensorflow/issues/27829 for details.
# The issue was caused by using inplace mul for a variable, which was a
# warning for RefVariable, but an error for ResourceVariable in 2.0
keras.models.Sequential([
layer(128, stateful=True, return_sequences=True, dropout=0.2,
batch_input_shape=[32, None, 5], recurrent_dropout=0.2)
])
def test_recurrent_dropout_saved_model(self):
if not context.executing_eagerly():
self.skipTest('v2-only test')
inputs = keras.Input(shape=(784, 3), name='digits')
x = keras.layers.GRU(64, activation='relu', name='GRU', dropout=0.1)(inputs)
x = keras.layers.Dense(64, activation='relu', name='dense')(x)
outputs = keras.layers.Dense(
10, activation='softmax', name='predictions')(
x)
model = keras.Model(inputs=inputs, outputs=outputs, name='3_layer')
model.save(os.path.join(self.get_temp_dir(), 'model'), save_format='tf')
@parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU])
def test_ragged(self, layer):
vocab_size = 100
inputs = ragged_factory_ops.constant(
np.random.RandomState(0).randint(0, vocab_size, [128, 25]))
embedder = embeddings.Embedding(input_dim=vocab_size, output_dim=16)
embedded_inputs = embedder(inputs)
lstm = layer(32)
lstm(embedded_inputs)
if __name__ == '__main__':
test.main()
| apache-2.0 |
candy7393/VTK | Examples/Modelling/Python/reconstructSurface.py | 23 | 2614 | #!/usr/bin/env python
# This example shows how to construct a surface from a point cloud.
# First we generate a volume using the
# vtkSurfaceReconstructionFilter. The volume values are a distance
# field. Once this is generated, the volume is contoured at a
# distance value of 0.0.
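# Pipeline sketch: point reader (vtkProgrammableSource) ->
# vtkSurfaceReconstructionFilter -> vtkContourFilter at value 0.0 ->
# vtkReverseSense -> mapper/actor -> renderer.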
import os
import string
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Read some points. Use a programmable filter to read them.
pointSource = vtk.vtkProgrammableSource()
def readPoints():
output = pointSource.GetPolyDataOutput()
points = vtk.vtkPoints()
output.SetPoints(points)
file = open(os.path.normpath(os.path.join(VTK_DATA_ROOT, "Data/cactus.3337.pts")))
line = file.readline()
while line:
data = string.split(line)
if data and data[0] == 'p':
x, y, z = float(data[1]), float(data[2]), float(data[3])
points.InsertNextPoint(x, y, z)
line = file.readline()
pointSource.SetExecuteMethod(readPoints)
# Construct the surface and create isosurface.
surf = vtk.vtkSurfaceReconstructionFilter()
surf.SetInputConnection(pointSource.GetOutputPort())
cf = vtk.vtkContourFilter()
cf.SetInputConnection(surf.GetOutputPort())
cf.SetValue(0, 0.0)
# Sometimes the contouring algorithm can create a volume whose gradient
# vector and ordering of polygon (using the right hand rule) are
# inconsistent. vtkReverseSense cures this problem.
reverse = vtk.vtkReverseSense()
reverse.SetInputConnection(cf.GetOutputPort())
reverse.ReverseCellsOn()
reverse.ReverseNormalsOn()
map = vtk.vtkPolyDataMapper()
map.SetInputConnection(reverse.GetOutputPort())
map.ScalarVisibilityOff()
surfaceActor = vtk.vtkActor()
surfaceActor.SetMapper(map)
surfaceActor.GetProperty().SetDiffuseColor(1.0000, 0.3882, 0.2784)
surfaceActor.GetProperty().SetSpecularColor(1, 1, 1)
surfaceActor.GetProperty().SetSpecular(.4)
surfaceActor.GetProperty().SetSpecularPower(50)
# Create the RenderWindow, Renderer and both Actors
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(surfaceActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(400, 400)
ren.GetActiveCamera().SetFocalPoint(0, 0, 0)
ren.GetActiveCamera().SetPosition(1, 0, 0)
ren.GetActiveCamera().SetViewUp(0, 0, 1)
ren.ResetCamera()
ren.GetActiveCamera().Azimuth(20)
ren.GetActiveCamera().Elevation(30)
ren.GetActiveCamera().Dolly(1.2)
ren.ResetCameraClippingRange()
iren.Initialize()
renWin.Render()
iren.Start()
| bsd-3-clause |
ms-iot/python | cpython/Lib/test/buffer_tests.py | 91 | 11374 | # Tests that work for both bytes and buffer objects.
# See PEP 3137.
import struct
import sys
class MixinBytesBufferCommonTests(object):
"""Tests that work for both bytes and buffer objects.
See PEP 3137.
"""
def marshal(self, x):
"""Convert x into the appropriate type for these tests."""
raise RuntimeError('test class must provide a marshal method')
def test_islower(self):
self.assertFalse(self.marshal(b'').islower())
self.assertTrue(self.marshal(b'a').islower())
self.assertFalse(self.marshal(b'A').islower())
self.assertFalse(self.marshal(b'\n').islower())
self.assertTrue(self.marshal(b'abc').islower())
self.assertFalse(self.marshal(b'aBc').islower())
self.assertTrue(self.marshal(b'abc\n').islower())
self.assertRaises(TypeError, self.marshal(b'abc').islower, 42)
def test_isupper(self):
self.assertFalse(self.marshal(b'').isupper())
self.assertFalse(self.marshal(b'a').isupper())
self.assertTrue(self.marshal(b'A').isupper())
self.assertFalse(self.marshal(b'\n').isupper())
self.assertTrue(self.marshal(b'ABC').isupper())
self.assertFalse(self.marshal(b'AbC').isupper())
self.assertTrue(self.marshal(b'ABC\n').isupper())
self.assertRaises(TypeError, self.marshal(b'abc').isupper, 42)
def test_istitle(self):
self.assertFalse(self.marshal(b'').istitle())
self.assertFalse(self.marshal(b'a').istitle())
self.assertTrue(self.marshal(b'A').istitle())
self.assertFalse(self.marshal(b'\n').istitle())
self.assertTrue(self.marshal(b'A Titlecased Line').istitle())
self.assertTrue(self.marshal(b'A\nTitlecased Line').istitle())
self.assertTrue(self.marshal(b'A Titlecased, Line').istitle())
self.assertFalse(self.marshal(b'Not a capitalized String').istitle())
self.assertFalse(self.marshal(b'Not\ta Titlecase String').istitle())
self.assertFalse(self.marshal(b'Not--a Titlecase String').istitle())
self.assertFalse(self.marshal(b'NOT').istitle())
self.assertRaises(TypeError, self.marshal(b'abc').istitle, 42)
def test_isspace(self):
self.assertFalse(self.marshal(b'').isspace())
self.assertFalse(self.marshal(b'a').isspace())
self.assertTrue(self.marshal(b' ').isspace())
self.assertTrue(self.marshal(b'\t').isspace())
self.assertTrue(self.marshal(b'\r').isspace())
self.assertTrue(self.marshal(b'\n').isspace())
self.assertTrue(self.marshal(b' \t\r\n').isspace())
self.assertFalse(self.marshal(b' \t\r\na').isspace())
self.assertRaises(TypeError, self.marshal(b'abc').isspace, 42)
def test_isalpha(self):
self.assertFalse(self.marshal(b'').isalpha())
self.assertTrue(self.marshal(b'a').isalpha())
self.assertTrue(self.marshal(b'A').isalpha())
self.assertFalse(self.marshal(b'\n').isalpha())
self.assertTrue(self.marshal(b'abc').isalpha())
self.assertFalse(self.marshal(b'aBc123').isalpha())
self.assertFalse(self.marshal(b'abc\n').isalpha())
self.assertRaises(TypeError, self.marshal(b'abc').isalpha, 42)
def test_isalnum(self):
self.assertFalse(self.marshal(b'').isalnum())
self.assertTrue(self.marshal(b'a').isalnum())
self.assertTrue(self.marshal(b'A').isalnum())
self.assertFalse(self.marshal(b'\n').isalnum())
self.assertTrue(self.marshal(b'123abc456').isalnum())
self.assertTrue(self.marshal(b'a1b3c').isalnum())
self.assertFalse(self.marshal(b'aBc000 ').isalnum())
self.assertFalse(self.marshal(b'abc\n').isalnum())
self.assertRaises(TypeError, self.marshal(b'abc').isalnum, 42)
def test_isdigit(self):
self.assertFalse(self.marshal(b'').isdigit())
self.assertFalse(self.marshal(b'a').isdigit())
self.assertTrue(self.marshal(b'0').isdigit())
self.assertTrue(self.marshal(b'0123456789').isdigit())
self.assertFalse(self.marshal(b'0123456789a').isdigit())
self.assertRaises(TypeError, self.marshal(b'abc').isdigit, 42)
def test_lower(self):
self.assertEqual(b'hello', self.marshal(b'HeLLo').lower())
self.assertEqual(b'hello', self.marshal(b'hello').lower())
self.assertRaises(TypeError, self.marshal(b'hello').lower, 42)
def test_upper(self):
self.assertEqual(b'HELLO', self.marshal(b'HeLLo').upper())
self.assertEqual(b'HELLO', self.marshal(b'HELLO').upper())
self.assertRaises(TypeError, self.marshal(b'hello').upper, 42)
def test_capitalize(self):
self.assertEqual(b' hello ', self.marshal(b' hello ').capitalize())
self.assertEqual(b'Hello ', self.marshal(b'Hello ').capitalize())
self.assertEqual(b'Hello ', self.marshal(b'hello ').capitalize())
self.assertEqual(b'Aaaa', self.marshal(b'aaaa').capitalize())
self.assertEqual(b'Aaaa', self.marshal(b'AaAa').capitalize())
self.assertRaises(TypeError, self.marshal(b'hello').capitalize, 42)
def test_ljust(self):
        self.assertEqual(b'abc       ', self.marshal(b'abc').ljust(10))
        self.assertEqual(b'abc   ', self.marshal(b'abc').ljust(6))
self.assertEqual(b'abc', self.marshal(b'abc').ljust(3))
self.assertEqual(b'abc', self.marshal(b'abc').ljust(2))
self.assertEqual(b'abc*******', self.marshal(b'abc').ljust(10, b'*'))
self.assertRaises(TypeError, self.marshal(b'abc').ljust)
def test_rjust(self):
        self.assertEqual(b'       abc', self.marshal(b'abc').rjust(10))
        self.assertEqual(b'   abc', self.marshal(b'abc').rjust(6))
self.assertEqual(b'abc', self.marshal(b'abc').rjust(3))
self.assertEqual(b'abc', self.marshal(b'abc').rjust(2))
self.assertEqual(b'*******abc', self.marshal(b'abc').rjust(10, b'*'))
self.assertRaises(TypeError, self.marshal(b'abc').rjust)
def test_center(self):
        self.assertEqual(b'   abc    ', self.marshal(b'abc').center(10))
        self.assertEqual(b' abc  ', self.marshal(b'abc').center(6))
self.assertEqual(b'abc', self.marshal(b'abc').center(3))
self.assertEqual(b'abc', self.marshal(b'abc').center(2))
self.assertEqual(b'***abc****', self.marshal(b'abc').center(10, b'*'))
self.assertRaises(TypeError, self.marshal(b'abc').center)
def test_swapcase(self):
self.assertEqual(b'hEllO CoMPuTErS',
self.marshal(b'HeLLo cOmpUteRs').swapcase())
self.assertRaises(TypeError, self.marshal(b'hello').swapcase, 42)
def test_zfill(self):
self.assertEqual(b'123', self.marshal(b'123').zfill(2))
self.assertEqual(b'123', self.marshal(b'123').zfill(3))
self.assertEqual(b'0123', self.marshal(b'123').zfill(4))
self.assertEqual(b'+123', self.marshal(b'+123').zfill(3))
self.assertEqual(b'+123', self.marshal(b'+123').zfill(4))
self.assertEqual(b'+0123', self.marshal(b'+123').zfill(5))
self.assertEqual(b'-123', self.marshal(b'-123').zfill(3))
self.assertEqual(b'-123', self.marshal(b'-123').zfill(4))
self.assertEqual(b'-0123', self.marshal(b'-123').zfill(5))
self.assertEqual(b'000', self.marshal(b'').zfill(3))
self.assertEqual(b'34', self.marshal(b'34').zfill(1))
self.assertEqual(b'0034', self.marshal(b'34').zfill(4))
self.assertRaises(TypeError, self.marshal(b'123').zfill)
def test_expandtabs(self):
        self.assertEqual(b'abc\rab      def\ng       hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs())
        self.assertEqual(b'abc\rab      def\ng       hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(8))
        self.assertEqual(b'abc\rab  def\ng   hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(4))
        self.assertEqual(b'abc\r\nab      def\ng       hi',
                         self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs())
        self.assertEqual(b'abc\r\nab      def\ng       hi',
                         self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs(8))
        self.assertEqual(b'abc\r\nab  def\ng   hi',
                         self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs(4))
self.assertEqual(b'abc\r\nab\r\ndef\ng\r\nhi',
self.marshal(b'abc\r\nab\r\ndef\ng\r\nhi').expandtabs(4))
# check keyword args
        self.assertEqual(b'abc\rab      def\ng       hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(tabsize=8))
        self.assertEqual(b'abc\rab  def\ng   hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(tabsize=4))
        self.assertEqual(b'  a\n b', self.marshal(b' \ta\n\tb').expandtabs(1))
self.assertRaises(TypeError, self.marshal(b'hello').expandtabs, 42, 42)
# This test is only valid when sizeof(int) == sizeof(void*) == 4.
if sys.maxsize < (1 << 32) and struct.calcsize('P') == 4:
self.assertRaises(OverflowError,
self.marshal(b'\ta\n\tb').expandtabs, sys.maxsize)
def test_title(self):
self.assertEqual(b' Hello ', self.marshal(b' hello ').title())
self.assertEqual(b'Hello ', self.marshal(b'hello ').title())
self.assertEqual(b'Hello ', self.marshal(b'Hello ').title())
self.assertEqual(b'Format This As Title String',
self.marshal(b'fOrMaT thIs aS titLe String').title())
self.assertEqual(b'Format,This-As*Title;String',
self.marshal(b'fOrMaT,thIs-aS*titLe;String').title())
self.assertEqual(b'Getint', self.marshal(b'getInt').title())
self.assertRaises(TypeError, self.marshal(b'hello').title, 42)
def test_splitlines(self):
self.assertEqual([b'abc', b'def', b'', b'ghi'],
self.marshal(b'abc\ndef\n\rghi').splitlines())
self.assertEqual([b'abc', b'def', b'', b'ghi'],
self.marshal(b'abc\ndef\n\r\nghi').splitlines())
self.assertEqual([b'abc', b'def', b'ghi'],
self.marshal(b'abc\ndef\r\nghi').splitlines())
self.assertEqual([b'abc', b'def', b'ghi'],
self.marshal(b'abc\ndef\r\nghi\n').splitlines())
self.assertEqual([b'abc', b'def', b'ghi', b''],
self.marshal(b'abc\ndef\r\nghi\n\r').splitlines())
self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines())
self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(False))
self.assertEqual([b'\n', b'abc\n', b'def\r\n', b'ghi\n', b'\r'],
self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(True))
self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(keepends=False))
self.assertEqual([b'\n', b'abc\n', b'def\r\n', b'ghi\n', b'\r'],
self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(keepends=True))
self.assertRaises(TypeError, self.marshal(b'abc').splitlines, 42, 42)
| bsd-3-clause |
cloudbau/cinder | cinder/tests/api/contrib/test_hosts.py | 3 | 8033 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import webob.exc
from cinder.api.contrib import hosts as os_hosts
from cinder import context
from cinder import db
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import test
LOG = logging.getLogger(__name__)
created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099)
curr_time = timeutils.utcnow()
SERVICE_LIST = [
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'}]
LIST_RESPONSE = [{'service-status': 'available', 'service': 'cinder-volume',
'zone': 'cinder', 'service-state': 'enabled',
'host_name': 'test.host.1', 'last-update': curr_time},
{'service-status': 'available', 'service': 'cinder-volume',
'zone': 'cinder', 'service-state': 'enabled',
'host_name': 'test.host.1', 'last-update': curr_time},
{'service-status': 'available', 'service': 'cinder-volume',
'zone': 'cinder', 'service-state': 'enabled',
'host_name': 'test.host.1', 'last-update': curr_time},
{'service-status': 'available', 'service': 'cinder-volume',
'zone': 'cinder', 'service-state': 'enabled',
'host_name': 'test.host.1', 'last-update': curr_time}]
def stub_service_get_all(self, req):
return SERVICE_LIST
class FakeRequest(object):
environ = {'cinder.context': context.get_admin_context()}
GET = {}
class FakeRequestWithcinderZone(object):
environ = {'cinder.context': context.get_admin_context()}
GET = {'zone': 'cinder'}
class HostTestCase(test.TestCase):
"""Test Case for hosts."""
def setUp(self):
super(HostTestCase, self).setUp()
self.controller = os_hosts.HostController()
self.req = FakeRequest()
self.stubs.Set(db, 'service_get_all',
stub_service_get_all)
def _test_host_update(self, host, key, val, expected_value):
body = {key: val}
result = self.controller.update(self.req, host, body=body)
self.assertEqual(result[key], expected_value)
def test_list_hosts(self):
"""Verify that the volume hosts are returned."""
hosts = os_hosts._list_hosts(self.req)
self.assertEqual(hosts, LIST_RESPONSE)
cinder_hosts = os_hosts._list_hosts(self.req, 'cinder-volume')
expected = [host for host in LIST_RESPONSE
if host['service'] == 'cinder-volume']
self.assertEqual(cinder_hosts, expected)
def test_list_hosts_with_zone(self):
req = FakeRequestWithcinderZone()
hosts = os_hosts._list_hosts(req)
self.assertEqual(hosts, LIST_RESPONSE)
def test_bad_status_value(self):
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body={'status': 'bad'})
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update,
self.req,
'test.host.1',
body={'status': 'disablabc'})
def test_bad_update_key(self):
bad_body = {'crazy': 'bad'}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body=bad_body)
    def test_bad_update_key_and_correct_update_key(self):
bad_body = {'status': 'disable', 'crazy': 'bad'}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body=bad_body)
    def test_good_update_keys(self):
body = {'status': 'disable'}
self.assertRaises(NotImplementedError, self.controller.update,
self.req, 'test.host.1', body=body)
def test_bad_host(self):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
self.req,
'bogus_host_name',
body={'disabled': 0})
def test_show_forbidden(self):
self.req.environ['cinder.context'].is_admin = False
dest = 'dummydest'
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.show,
self.req, dest)
self.req.environ['cinder.context'].is_admin = True
def test_show_host_not_exist(self):
"""A host given as an argument does not exists."""
self.req.environ['cinder.context'].is_admin = True
dest = 'dummydest'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
self.req, dest)
class HostSerializerTest(test.TestCase):
def setUp(self):
super(HostSerializerTest, self).setUp()
self.deserializer = os_hosts.HostDeserializer()
def test_index_serializer(self):
serializer = os_hosts.HostIndexTemplate()
text = serializer.serialize({"hosts": LIST_RESPONSE})
tree = etree.fromstring(text)
self.assertEqual('hosts', tree.tag)
self.assertEqual(len(LIST_RESPONSE), len(tree))
for i in range(len(LIST_RESPONSE)):
self.assertEqual('host', tree[i].tag)
self.assertEqual(LIST_RESPONSE[i]['service-status'],
tree[i].get('service-status'))
self.assertEqual(LIST_RESPONSE[i]['service'],
tree[i].get('service'))
self.assertEqual(LIST_RESPONSE[i]['zone'],
tree[i].get('zone'))
self.assertEqual(LIST_RESPONSE[i]['service-state'],
tree[i].get('service-state'))
self.assertEqual(LIST_RESPONSE[i]['host_name'],
tree[i].get('host_name'))
self.assertEqual(str(LIST_RESPONSE[i]['last-update']),
tree[i].get('last-update'))
def test_update_serializer_with_status(self):
exemplar = dict(host='test.host.1', status='enabled')
serializer = os_hosts.HostUpdateTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('host', tree.tag)
for key, value in exemplar.items():
self.assertEqual(value, tree.get(key))
def test_update_deserializer(self):
exemplar = dict(status='enabled', foo='bar')
intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<updates><status>enabled</status><foo>bar</foo></updates>')
result = self.deserializer.deserialize(intext)
self.assertEqual(dict(body=exemplar), result)
| apache-2.0 |
zycdragonball/tensorflow | tensorflow/python/tools/strip_unused_lib.py | 104 | 4859 | # pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to remove unneeded nodes from a GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from google.protobuf import text_format
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
def strip_unused(input_graph_def, input_node_names, output_node_names,
placeholder_type_enum):
"""Removes unused nodes from a GraphDef.
Args:
input_graph_def: A graph with nodes we want to prune.
input_node_names: A list of the nodes we use as inputs.
output_node_names: A list of the output nodes.
placeholder_type_enum: The AttrValue enum for the placeholder data type, or
a list that specifies one value per input node name.
Returns:
A `GraphDef` with all unnecessary ops removed.
Raises:
ValueError: If any element in `input_node_names` refers to a tensor instead
of an operation.
KeyError: If any element in `input_node_names` is not found in the graph.
"""
for name in input_node_names:
if ":" in name:
raise ValueError("Name '%s' appears to refer to a Tensor, "
"not a Operation." % name)
# Here we replace the nodes we're going to override as inputs with
# placeholders so that any unused nodes that are inputs to them are
# automatically stripped out by extract_sub_graph().
not_found = {name for name in input_node_names}
inputs_replaced_graph_def = graph_pb2.GraphDef()
for node in input_graph_def.node:
if node.name in input_node_names:
not_found.remove(node.name)
placeholder_node = node_def_pb2.NodeDef()
placeholder_node.op = "Placeholder"
placeholder_node.name = node.name
if isinstance(placeholder_type_enum, list):
input_node_index = input_node_names.index(node.name)
placeholder_node.attr["dtype"].CopyFrom(
attr_value_pb2.AttrValue(type=placeholder_type_enum[
input_node_index]))
else:
placeholder_node.attr["dtype"].CopyFrom(
attr_value_pb2.AttrValue(type=placeholder_type_enum))
if "_output_shapes" in node.attr:
placeholder_node.attr["_output_shapes"].CopyFrom(node.attr[
"_output_shapes"])
inputs_replaced_graph_def.node.extend([placeholder_node])
else:
inputs_replaced_graph_def.node.extend([copy.deepcopy(node)])
if not_found:
raise KeyError("The following input nodes were not found: %s\n" % not_found)
output_graph_def = graph_util.extract_sub_graph(inputs_replaced_graph_def,
output_node_names)
return output_graph_def
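# Illustrative usage sketch (not part of the original module). The node names
# "input" and "softmax" and the DT_FLOAT placeholder type are assumptions made
# only for demonstration; substitute the names used by your own graph.
def _example_strip_unused(graph_def):
  """Prunes a hypothetical graph between an "input" and a "softmax" node."""
  from tensorflow.core.framework import types_pb2
  return strip_unused(graph_def,
                      input_node_names=["input"],
                      output_node_names=["softmax"],
                      placeholder_type_enum=types_pb2.DT_FLOAT)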
def strip_unused_from_files(input_graph, input_binary, output_graph,
output_binary, input_node_names, output_node_names,
placeholder_type_enum):
"""Removes unused nodes from a graph file."""
if not gfile.Exists(input_graph):
print("Input graph file '" + input_graph + "' does not exist!")
return -1
if not output_node_names:
print("You need to supply the name of a node to --output_node_names.")
return -1
input_graph_def = graph_pb2.GraphDef()
mode = "rb" if input_binary else "r"
with gfile.FastGFile(input_graph, mode) as f:
if input_binary:
input_graph_def.ParseFromString(f.read())
else:
text_format.Merge(f.read(), input_graph_def)
output_graph_def = strip_unused(input_graph_def,
input_node_names.split(","),
output_node_names.split(","),
placeholder_type_enum)
if output_binary:
with gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
else:
with gfile.GFile(output_graph, "w") as f:
f.write(text_format.MessageToString(output_graph_def))
print("%d ops in the final graph." % len(output_graph_def.node))
| apache-2.0 |
dlazz/ansible | lib/ansible/modules/network/fortios/fortios_firewall_vip46.py | 7 | 15899 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The library uses Python logging; you can capture its output if the
# following is set in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_vip46
short_description: Configure IPv4 to IPv6 virtual IPs in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall feature and vip46 category.
      Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
            - FortiOS or FortiGate IP address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
firewall_vip46:
description:
- Configure IPv4 to IPv6 virtual IPs.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
arp-reply:
description:
- Enable ARP reply.
choices:
- disable
- enable
color:
description:
- Color of icon on the GUI.
comment:
description:
- Comment.
extip:
description:
- Start-external-IP [-end-external-IP].
extport:
description:
- External service port.
id:
description:
- Custom defined id.
ldb-method:
description:
- Load balance method.
choices:
- static
- round-robin
- weighted
- least-session
- least-rtt
- first-alive
mappedip:
description:
- Start-mapped-IP [-end mapped-IP].
mappedport:
description:
- Mapped service port.
monitor:
description:
- Health monitors.
suboptions:
name:
description:
- Health monitor name. Source firewall.ldb-monitor.name.
required: true
name:
description:
- VIP46 name.
required: true
portforward:
description:
- Enable port forwarding.
choices:
- disable
- enable
protocol:
description:
- Mapped port protocol.
choices:
- tcp
- udp
realservers:
description:
- Real servers.
suboptions:
client-ip:
description:
- Restrict server to a client IP in this range.
healthcheck:
description:
- Per server health check.
choices:
- disable
- enable
- vip
holddown-interval:
description:
- Hold down interval.
id:
description:
- Real server ID.
required: true
ip:
description:
- Mapped server IPv6.
max-connections:
description:
- Maximum number of connections allowed to server.
monitor:
description:
- Health monitors. Source firewall.ldb-monitor.name.
port:
description:
- Mapped server port.
status:
description:
- Server administrative status.
choices:
- active
- standby
- disable
weight:
description:
- weight
server-type:
description:
- Server type.
choices:
- http
- tcp
- udp
- ip
src-filter:
description:
- Source IP filter (x.x.x.x/x).
suboptions:
range:
description:
- Src-filter range.
required: true
type:
description:
- "VIP type: static NAT or server load balance."
choices:
- static-nat
- server-load-balance
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPv4 to IPv6 virtual IPs.
fortios_firewall_vip46:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
firewall_vip46:
state: "present"
arp-reply: "disable"
color: "4"
comment: "Comment."
extip: "<your_own_value>"
extport: "<your_own_value>"
id: "8"
ldb-method: "static"
mappedip: "<your_own_value>"
mappedport: "<your_own_value>"
monitor:
-
name: "default_name_13 (source firewall.ldb-monitor.name)"
name: "default_name_14"
portforward: "disable"
protocol: "tcp"
realservers:
-
client-ip: "<your_own_value>"
healthcheck: "disable"
holddown-interval: "20"
id: "21"
ip: "<your_own_value>"
max-connections: "23"
monitor: "<your_own_value> (source firewall.ldb-monitor.name)"
port: "25"
status: "active"
weight: "27"
server-type: "http"
src-filter:
-
range: "<your_own_value>"
type: "static-nat"
uuid: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_firewall_vip46_data(json):
option_list = ['arp-reply', 'color', 'comment',
'extip', 'extport', 'id',
'ldb-method', 'mappedip', 'mappedport',
'monitor', 'name', 'portforward',
'protocol', 'realservers', 'server-type',
'src-filter', 'type', 'uuid']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def firewall_vip46(data, fos):
vdom = data['vdom']
firewall_vip46_data = data['firewall_vip46']
filtered_data = filter_firewall_vip46_data(firewall_vip46_data)
if firewall_vip46_data['state'] == "present":
return fos.set('firewall',
'vip46',
data=filtered_data,
vdom=vdom)
elif firewall_vip46_data['state'] == "absent":
return fos.delete('firewall',
'vip46',
mkey=filtered_data['name'],
vdom=vdom)
def fortios_firewall(data, fos):
login(data)
methodlist = ['firewall_vip46']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": "False"},
"firewall_vip46": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"arp-reply": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"color": {"required": False, "type": "int"},
"comment": {"required": False, "type": "str"},
"extip": {"required": False, "type": "str"},
"extport": {"required": False, "type": "str"},
"id": {"required": False, "type": "int"},
"ldb-method": {"required": False, "type": "str",
"choices": ["static", "round-robin", "weighted",
"least-session", "least-rtt", "first-alive"]},
"mappedip": {"required": False, "type": "str"},
"mappedport": {"required": False, "type": "str"},
"monitor": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"name": {"required": True, "type": "str"},
"portforward": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"protocol": {"required": False, "type": "str",
"choices": ["tcp", "udp"]},
"realservers": {"required": False, "type": "list",
"options": {
"client-ip": {"required": False, "type": "str"},
"healthcheck": {"required": False, "type": "str",
"choices": ["disable", "enable", "vip"]},
"holddown-interval": {"required": False, "type": "int"},
"id": {"required": True, "type": "int"},
"ip": {"required": False, "type": "str"},
"max-connections": {"required": False, "type": "int"},
"monitor": {"required": False, "type": "str"},
"port": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["active", "standby", "disable"]},
"weight": {"required": False, "type": "int"}
}},
"server-type": {"required": False, "type": "str",
"choices": ["http", "tcp", "udp",
"ip"]},
"src-filter": {"required": False, "type": "list",
"options": {
"range": {"required": True, "type": "str"}
}},
"type": {"required": False, "type": "str",
"choices": ["static-nat", "server-load-balance"]},
"uuid": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_firewall(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
kodi-addons/plugin.video.espn_3 | resources/lib/adobe_activate_api.py | 1 | 11244 | import urlparse
import urllib
import uuid
import hashlib
import hmac
import base64
import urllib2
import time
import json
import gzip
import os
import cookielib
from StringIO import StringIO
import requests
import xbmc
from globals import ADDON_PATH_PROFILE
SETTINGS_FILE = 'adobe.json'
UA_ATV = 'AppleCoreMedia/1.0.0.13Y234 (Apple TV; U; CPU OS 9_2 like Mac OS X; en_us)'
TAG = 'ESPN3-adobe-api: '
adobe_session = requests.Session()
adobe_session.headers.update({
'Accept': 'application/json',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-us',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'User-Agent': UA_ATV
})
class AuthorizationException(Exception):
pass
def reset_settings():
save_settings(dict())
def save_settings(settings):
settings_file = os.path.join(ADDON_PATH_PROFILE, SETTINGS_FILE)
with open(settings_file, 'w') as fp:
json.dump(settings, fp, sort_keys=False, indent=4)
def load_settings():
settings_file = os.path.join(ADDON_PATH_PROFILE, SETTINGS_FILE)
if not os.path.isfile(settings_file):
save_settings(dict())
with open(settings_file, 'r') as fp:
return json.load(fp)
def get_device_id():
settings = load_settings()
if 'device_id' not in settings:
settings['device_id'] = str(uuid.uuid1())
save_settings(settings)
return settings['device_id']
def is_expired(expiration):
return (time.time() * 1000) >= int(expiration)
def get_url_response(url, message, body=None, method=None):
# xbmc.log(TAG + 'url %s message %s' % (url, message), xbmc.LOGDEBUG)
headers = {'Authorization': message}
if method == 'DELETE':
resp = requests.delete(url, headers=headers)
elif method == 'POST':
resp = adobe_session.post(url, headers=headers)
else:
resp = adobe_session.get(url, headers=headers)
# xbmc.log(TAG + 'resp %s ' % (resp.text), xbmc.LOGDEBUG)
return resp.json()
def generate_message(method, path):
nonce = str(uuid.uuid4())
today = str(int(time.time() * 1000))
key = 'gB8HYdEPyezeYbR1'
message = method + ' requestor_id=ESPN, nonce=' + nonce + ', signature_method=HMAC-SHA1, request_time=' + today + ', request_uri=' + path
signature = hmac.new(key, message, hashlib.sha1)
signature = base64.b64encode(signature.digest())
message = message + ', public_key=yKpsHYd8TOITdTMJHmkJOVmgbb2DykNK, signature=' + signature
return message
def is_reg_code_valid():
settings = load_settings()
if 'generateRegCode' not in settings:
xbmc.log(TAG + 'Unable to find reg code', xbmc.LOGDEBUG)
return False
# Check code isn't expired
expiration = settings['generateRegCode']['expires']
if is_expired(expiration):
xbmc.log(TAG + 'Reg code is expired at %s' % expiration, xbmc.LOGDEBUG)
return False
return True
# Gets called when the user wants to authorize this device; it returns a registration code to enter
# on the activation website page.
# Sample : '{"id":"","code":"","requestor":"ESPN","generated":1463616806831,
# "expires":1463618606831,"info":{"deviceId":"","deviceType":"appletv","deviceUser":null,
# "appId":null,"appVersion":null,"registrationURL":null}}'
# (generateRegCode)
def get_regcode():
params = urllib.urlencode(
{'deviceId': get_device_id(),
'deviceType': 'appletv',
'ttl': '1800'})
path = '/regcode'
url = urlparse.urlunsplit(['https', 'api.auth.adobe.com',
'reggie/v1/ESPN' + path,
params, ''])
message = generate_message('POST', path)
resp = get_url_response(url, message, dict(), 'POST')
settings = load_settings()
settings['generateRegCode'] = resp
save_settings(settings)
return resp['code']
# Authenticates the user after they have been authenticated on the activation website (authenticateRegCode)
# Sample: '{"mvpd":"","requestor":"ESPN","userId":"","expires":"1466208969000"}'
def authenticate(regcode):
params = urllib.urlencode({'requestor': 'ESPN'})
path = '/authenticate/' + regcode
url = urlparse.urlunsplit(['https', 'api.auth.adobe.com',
'api/v1' + path,
params, ''])
message = generate_message('GET', path)
resp = get_url_response(url, message)
settings = load_settings()
settings['authenticateRegCode'] = resp
save_settings(settings)
# Get authn token (re-auth device after it expires), getAuthnToken
def re_authenticate():
params = urllib.urlencode({'requestor': 'ESPN',
'deviceId': get_device_id()})
path = '/tokens/authn'
url = urlparse.urlunsplit(['https', 'api.auth.adobe.com',
'api/v1' + path,
params, ''])
message = generate_message('GET', path)
xbmc.log(TAG + 'Attempting to re-authenticate the device', xbmc.LOGDEBUG)
resp = get_url_response(url, message)
if 'status' in resp and resp['status'] == '410':
raise AuthorizationException()
settings = load_settings()
settings['authenticateRegCode'] = resp
if 'authorize' in settings:
del settings['authorize']
xbmc.log(TAG + 'Re-authenticated device', xbmc.LOGDEBUG)
save_settings(settings)
def get_resource(channel, event_name, event_guid, event_parental_rating):
return '<rss version="2.0" xmlns:media="http://search.yahoo.com/mrss/"><channel><title><![CDATA[' + channel + "]]></title><item><title><![CDATA[" + event_name + "]]></title><guid><![CDATA[" + event_guid + ']]></guid><media:rating scheme="urn:v-chip"><![CDATA[' + event_parental_rating + "]]></media:rating></item></channel></rss>"
# Sample '{"resource":"resource","mvpd":"","requestor":"ESPN","expires":"1463621239000"}'
def authorize(resource):
if is_authorized(resource):
xbmc.log(TAG + 'already authorized', xbmc.LOGDEBUG)
return
params = urllib.urlencode({'requestor': 'ESPN',
'deviceId': get_device_id(),
'resource': resource})
path = '/authorize'
url = urlparse.urlunsplit(['https', 'api.auth.adobe.com',
'api/v1' + path,
params, ''])
message = generate_message('GET', path)
resp = get_url_response(url, message)
settings = load_settings()
if 'authorize' not in settings:
settings['authorize'] = dict()
xbmc.log(TAG + 'resource %s' % resource, xbmc.LOGDEBUG)
if 'status' in resp and resp['status'] == 403:
raise AuthorizationException()
settings['authorize'][resource.decode('iso-8859-1').encode('utf-8')] = resp
save_settings(settings)
def deauthorize():
params = urllib.urlencode({'deviceId': get_device_id()})
path = '/logout'
url = urlparse.urlunsplit(['https', 'api.auth.adobe.com',
'api/v1' + path,
params, ''])
message = generate_message('DELETE', path)
try:
resp = get_url_response(url, message, body=None, method='DELETE')
except:
xbmc.log(TAG + 'De-authorize failed', xbmc.LOGDEBUG)
settings = load_settings()
if 'authorize' in settings:
del settings['authorize']
if 'authenticateRegCode' in settings:
del settings['authenticateRegCode']
save_settings(settings)
# getShortMediaToken
# Sample '{"mvpdId":"","expires":"1463618218000","serializedToken":"+++++++=","userId":"",
# "requestor":"ESPN","resource":" resource"}'
def get_short_media_token(resource):
if has_to_reauthenticate():
xbmc.log(TAG + 're-authenticating device', xbmc.LOGDEBUG)
re_authenticate()
params = urllib.urlencode({'requestor': 'ESPN',
'deviceId' : get_device_id(),
'resource' : resource})
path = '/mediatoken'
url = urlparse.urlunsplit(['https', 'api.auth.adobe.com',
'api/v1' + path,
params, ''])
message = generate_message('GET', path)
try:
authorize(resource)
resp = get_url_response(url, message)
if 'status' in resp and resp['status'] == 403:
raise AuthorizationException()
except urllib2.HTTPError as exception:
if exception.code == 401:
xbmc.log(TAG + 'Unauthorized exception, trying again', xbmc.LOGDEBUG)
re_authenticate()
authorize(resource)
resp = get_url_response(url, message)
else:
xbmc.log(TAG + 'Rethrowing exception %s' % exception, xbmc.LOGDEBUG)
raise exception
except AuthorizationException as exception:
xbmc.log(TAG + 'Authorization exception, trying again', xbmc.LOGDEBUG)
re_authenticate()
authorize(resource)
resp = get_url_response(url, message)
if 'status' in resp and resp['status'] == 403:
raise AuthorizationException()
xbmc.log(TAG + 'Resp %s' % resp, xbmc.LOGDEBUG)
settings = load_settings()
settings['getShortMediaToken'] = resp
save_settings(settings)
return resp['serializedToken']
def is_authenticated():
settings = load_settings()
return 'authenticateRegCode' in settings
def has_to_reauthenticate():
settings = load_settings()
if 'authenticateRegCode' in settings and 'expires' in settings['authenticateRegCode']:
return is_expired(settings['authenticateRegCode']['expires'])
return True
def is_authorized(resource):
settings = load_settings()
if 'authorize' in settings and resource.decode('iso-8859-1').encode('utf-8') in settings['authorize']:
return not is_expired(settings['authorize'][resource.decode('iso-8859-1').encode('utf-8')]['expires'])
def get_expires_time(key):
settings = load_settings()
expires = settings[key]['expires']
expires_time = time.localtime(int(expires) / 1000)
return time.strftime('%Y-%m-%d %H:%M', expires_time)
def get_authentication_expires():
return get_expires_time('authenticateRegCode')
def get_authorization_expires():
return get_expires_time('authorize')
def clean_up_authorization_tokens():
settings = load_settings()
keys_to_delete = list()
if 'authorize' in settings:
for key in settings['authorize']:
if 'expires' in settings['authorize'][key]:
if is_expired(settings['authorize'][key]['expires']):
keys_to_delete.append(key)
else:
keys_to_delete.append(key)
for key in keys_to_delete:
del settings['authorize'][key]
save_settings(settings)
def get_user_metadata():
params = urllib.urlencode({'requestor': 'ESPN',
'deviceId' : get_device_id()})
path = '/tokens/usermetadata'
url = urlparse.urlunsplit(['https', 'api.auth.adobe.com',
'api/v1' + path,
params, ''])
message = generate_message('GET', path)
resp = get_url_response(url, message)
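# Illustrative sketch (not part of the original add-on): the typical activation
# sequence these helpers support. The channel, event and rating values passed
# to get_resource below are placeholders chosen only for demonstration.
def _example_activation_flow():
    code = get_regcode()  # display this code to the user
    # ... the user enters the code on the provider's activation website ...
    authenticate(code)
    resource = get_resource('espn1', 'Example Event', 'example-guid', 'TV-G')
    authorize(resource)
    return get_short_media_token(resource)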
| gpl-3.0 |
SmartDeveloperHub/agora-fountain | agora/fountain/tests/__init__.py | 1 | 7700 | """
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
__author__ = 'Fernando Serena'
import unittest
import json
from urlparse import urlparse
import logging
from nose.tools import *
from agora.fountain.server import app
from agora.fountain.tests.util import AgoraGraph
def setup():
from agora.fountain.server.config import TestingConfig
app.config['TESTING'] = True
app.config.from_object(TestingConfig)
app.config['STORE'] = 'memory'
from agora.fountain.index.core import r
r.flushdb()
from agora.fountain import api
class FountainTest(unittest.TestCase):
@classmethod
def tearDownClass(cls):
from agora.fountain.index.core import r
r.flushdb()
test_client = app.test_client()
vocabs = json.loads(test_client.get('/vocabs').data)
for v_uri in vocabs.values():
rv = test_client.delete(v_uri)
eq_(rv.status_code, 200, "The resource couldn't be deleted")
@classmethod
def setUpClass(cls):
from agora.fountain.index.core import r
r.flushdb()
def setUp(self):
self.app = app.test_client()
self._graph = None
self.log = logging.getLogger('agora.fountain.test')
def get_vocabularies(self):
return json.loads(self.get('/vocabs'))
def get_vocabulary(self, uri):
path = urlparse(uri).path
return self.get(path)
def post_vocabulary(self, filename, exp_code=201):
with open('agora/fountain/tests/vocabs/{}.ttl'.format(filename)) as f:
vocab = f.read()
response = self.post('/vocabs', vocab, message='The vocabulary was not created properly', exp_code=exp_code)
if exp_code != 201:
return None
return response.headers.get('Location', None)
def delete_vocabulary(self, uri):
path = urlparse(uri).path
self.delete(path, 'The test vocabulary should exist previously')
def post_seed(self, ty, uri, exp_code=201):
response = self.post('/seeds', json.dumps({"uri": uri, "type": ty}),
content_type='application/json', exp_code=exp_code)
return response.headers.get('Location', None)
def delete_seed(self, uri):
path = urlparse(uri).path
self.delete(path, 'That seed should exist previously')
def get_type_seeds(self, ty):
return json.loads(self.get('/seeds/{}'.format(ty)))["seeds"]
@property
def seeds(self):
return json.loads(self.get('/seeds'))["seeds"]
def get_paths(self, node):
paths = json.loads(self.get('paths/{}'.format(node)))
return paths["paths"], paths["all-cycles"]
def get(self, path, exp_code=200, error_message=None):
rv = self.app.get(path)
if error_message is None:
error_message = 'There is a problem with the request'
eq_(rv.status_code, exp_code, error_message)
return rv.data
def post(self, path, data, content_type='text/turtle', exp_code=201, message=None):
rv = self.app.post(path, data=data, headers={'Content-Type': content_type})
if message is None:
message = 'The resource was not created properly'
eq_(rv.status_code, exp_code, message + ": %s" % rv.status_code)
return rv
def delete(self, path, error_message=None):
rv = self.app.delete(path)
if error_message is None:
error_message = "The resource couldn't be deleted"
eq_(rv.status_code, 200, error_message)
return rv.data
@property
def types(self):
return json.loads(self.get('/types'))['types']
@property
def properties(self):
return json.loads(self.get('/properties'))['properties']
def get_type(self, ty):
return json.loads(self.get('/types/{}'.format(ty)))
def get_property(self, ty):
return json.loads(self.get('/properties/{}'.format(ty)))
@property
def graph(self):
if self._graph is None:
_graph = AgoraGraph()
types = self.types
_graph.add_types_from(types)
for node in self.properties:
p_dict = self.get_property(node)
dom = p_dict.get('domain')
ran = p_dict.get('range')
edges = [(d, node) for d in dom]
if p_dict.get('type') == 'object':
edges.extend([(node, r) for r in ran])
_graph.add_edges_from(edges)
_graph.add_property(node, obj=p_dict.get('type') == 'object')
for node in types:
p_dict = self.get_type(node)
refs = p_dict.get('refs')
props = p_dict.get('properties')
edges = [(r, node) for r in refs]
edges.extend([(node, p) for p in props])
_graph.add_edges_from(edges)
if p_dict['super']:
_graph[node]['super'] = p_dict['super']
if p_dict['sub']:
_graph[node]['sub'] = p_dict['sub']
return _graph
def check_property(self, name, domain=None, range=None, inverse=None):
def check_edge(p_name, direction, func, expected):
actual = func(p_name)
if type(expected) is list:
eq_(len(actual), len(expected), '{} must have {} {} type'.format(p_name, len(expected), direction))
eq_(len(set.difference(set(actual), set(expected))), 0,
'Found wrong %s types: %s' % (direction, actual))
elif len(actual):
                assert False, 'No %s was expected!' % direction
check_edge(name, 'domain', self.graph.get_property_domain, domain)
check_edge(name, 'range', self.graph.get_property_range, range)
actual_inverse = self.graph.get_inverse_property(name)
eq_(actual_inverse, inverse, 'Expected {} as inverse, but found: {}'.format(inverse, actual_inverse))
def check_type(self, name, properties=None, refs=None):
def check_attribute(p_name, attribute, func, expected):
actual = func(p_name)
if type(expected) is list:
eq_(len(actual), len(expected), '{} must have {} {}'.format(p_name, len(expected), attribute))
eq_(len(set.difference(set(actual), set(expected))), 0,
'Found wrong %s: %s' % (attribute, actual))
elif len(actual):
                assert False, 'No %s was expected!' % attribute
check_attribute(name, 'properties', self.graph.get_type_properties, properties)
check_attribute(name, 'references', self.graph.get_type_refs, refs)
| apache-2.0 |
Accelerite/cinder | cinder/scheduler/flows/create_volume.py | 4 | 7696 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import excutils
import taskflow.engines
from taskflow.patterns import linear_flow
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder import rpc
from cinder import utils
from cinder.volume.flows import common
LOG = logging.getLogger(__name__)
ACTION = 'volume:create'
class ExtractSchedulerSpecTask(flow_utils.CinderTask):
"""Extracts a spec object from a partial and/or incomplete request spec.
Reversion strategy: N/A
"""
default_provides = set(['request_spec'])
def __init__(self, db_api, **kwargs):
super(ExtractSchedulerSpecTask, self).__init__(addons=[ACTION],
**kwargs)
self.db_api = db_api
def _populate_request_spec(self, context, volume_id, snapshot_id,
image_id):
# Create the full request spec using the volume_id.
#
# NOTE(harlowja): this will fetch the volume from the database, if
# the volume has been deleted before we got here then this should fail.
#
# In the future we might want to have a lock on the volume_id so that
        # the volume cannot be deleted while it's still being created?
if not volume_id:
msg = _("No volume_id provided to populate a request_spec from")
raise exception.InvalidInput(reason=msg)
volume_ref = self.db_api.volume_get(context, volume_id)
volume_type_id = volume_ref.get('volume_type_id')
vol_type = self.db_api.volume_type_get(context, volume_type_id)
return {
'volume_id': volume_id,
'snapshot_id': snapshot_id,
'image_id': image_id,
'volume_properties': {
'size': utils.as_int(volume_ref.get('size'), quiet=False),
'availability_zone': volume_ref.get('availability_zone'),
'volume_type_id': volume_type_id,
},
'volume_type': list(dict(vol_type).iteritems()),
}
def execute(self, context, request_spec, volume_id, snapshot_id,
image_id):
# For RPC version < 1.2 backward compatibility
if request_spec is None:
request_spec = self._populate_request_spec(context, volume_id,
snapshot_id, image_id)
return {
'request_spec': request_spec,
}
class ScheduleCreateVolumeTask(flow_utils.CinderTask):
"""Activates a scheduler driver and handles any subsequent failures.
Notification strategy: on failure the scheduler rpc notifier will be
activated and a notification will be emitted indicating what errored,
the reason, and the request (and misc. other data) that caused the error
to be triggered.
Reversion strategy: N/A
"""
FAILURE_TOPIC = "scheduler.create_volume"
def __init__(self, db_api, driver_api, **kwargs):
super(ScheduleCreateVolumeTask, self).__init__(addons=[ACTION],
**kwargs)
self.db_api = db_api
self.driver_api = driver_api
def _handle_failure(self, context, request_spec, cause):
try:
self._notify_failure(context, request_spec, cause)
finally:
LOG.error(_LE("Failed to run task %(name)s: %(cause)s") %
{'cause': cause, 'name': self.name})
def _notify_failure(self, context, request_spec, cause):
"""When scheduling fails send out an event that it failed."""
payload = {
'request_spec': request_spec,
'volume_properties': request_spec.get('volume_properties', {}),
'volume_id': request_spec['volume_id'],
'state': 'error',
'method': 'create_volume',
'reason': cause,
}
try:
rpc.get_notifier('scheduler').error(context, self.FAILURE_TOPIC,
payload)
except exception.CinderException:
LOG.exception(_LE("Failed notifying on %(topic)s "
"payload %(payload)s") %
{'topic': self.FAILURE_TOPIC, 'payload': payload})
def execute(self, context, request_spec, filter_properties):
try:
self.driver_api.schedule_create_volume(context, request_spec,
filter_properties)
except exception.NoValidHost as e:
# No host found happened, notify on the scheduler queue and log
# that this happened and set the volume to errored out and
            # *do not* reraise the error (since what's the point).
try:
self._handle_failure(context, request_spec, e)
finally:
common.error_out_volume(context, self.db_api,
request_spec['volume_id'], reason=e)
except Exception as e:
# Some other error happened, notify on the scheduler queue and log
# that this happened and set the volume to errored out and
# *do* reraise the error.
with excutils.save_and_reraise_exception():
try:
self._handle_failure(context, request_spec, e)
finally:
common.error_out_volume(context, self.db_api,
request_spec['volume_id'],
reason=e)
def get_flow(context, db_api, driver_api, request_spec=None,
filter_properties=None,
volume_id=None, snapshot_id=None, image_id=None):
"""Constructs and returns the scheduler entrypoint flow.
This flow will do the following:
1. Inject keys & values for dependent tasks.
2. Extracts a scheduler specification from the provided inputs.
    3. Attaches 2 tasks that are activated only on *failure* (one to update
       the db status and one to notify on the MQ of the failure that occurred).
4. Uses provided driver to then select and continue processing of
volume request.
"""
create_what = {
'context': context,
'raw_request_spec': request_spec,
'filter_properties': filter_properties,
'volume_id': volume_id,
'snapshot_id': snapshot_id,
'image_id': image_id,
}
flow_name = ACTION.replace(":", "_") + "_scheduler"
scheduler_flow = linear_flow.Flow(flow_name)
# This will extract and clean the spec from the starting values.
scheduler_flow.add(ExtractSchedulerSpecTask(
db_api,
rebind={'request_spec': 'raw_request_spec'}))
# This will activate the desired scheduler driver (and handle any
# driver related failures appropriately).
scheduler_flow.add(ScheduleCreateVolumeTask(db_api, driver_api))
# Now load (but do not run) the flow using the provided initial data.
return taskflow.engines.load(scheduler_flow, store=create_what)
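# Illustrative sketch (not part of the original module): a caller builds the
# flow with get_flow() and then runs the returned taskflow engine. The
# argument values are placeholders for demonstration only.
def _example_run_scheduler_flow(context, db_api, driver_api, request_spec,
                                filter_properties):
    flow_engine = get_flow(context, db_api, driver_api,
                           request_spec=request_spec,
                           filter_properties=filter_properties,
                           volume_id=request_spec['volume_id'])
    flow_engine.run()
    return flow_engine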
| apache-2.0 |
shubhamchopra/spark | python/pyspark/streaming/flume.py | 4 | 5985 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version >= "3":
from io import BytesIO
else:
from StringIO import StringIO
from py4j.protocol import Py4JJavaError
from pyspark.storagelevel import StorageLevel
from pyspark.serializers import PairDeserializer, NoOpSerializer, UTF8Deserializer, read_int
from pyspark.streaming import DStream
__all__ = ['FlumeUtils', 'utf8_decoder']
def utf8_decoder(s):
""" Decode the unicode as UTF-8 """
if s is None:
return None
return s.decode('utf-8')
class FlumeUtils(object):
@staticmethod
def createStream(ssc, hostname, port,
storageLevel=StorageLevel.MEMORY_AND_DISK_2,
enableDecompression=False,
bodyDecoder=utf8_decoder):
"""
Create an input stream that pulls events from Flume.
:param ssc: StreamingContext object
:param hostname: Hostname of the slave machine to which the flume data will be sent
:param port: Port of the slave machine to which the flume data will be sent
:param storageLevel: Storage level to use for storing the received objects
:param enableDecompression: Should netty server decompress input stream
:param bodyDecoder: A function used to decode body (default is utf8_decoder)
:return: A DStream object
.. note:: Deprecated in 2.3.0
"""
jlevel = ssc._sc._getJavaStorageLevel(storageLevel)
helper = FlumeUtils._get_helper(ssc._sc)
jstream = helper.createStream(ssc._jssc, hostname, port, jlevel, enableDecompression)
return FlumeUtils._toPythonDStream(ssc, jstream, bodyDecoder)
@staticmethod
def createPollingStream(ssc, addresses,
storageLevel=StorageLevel.MEMORY_AND_DISK_2,
maxBatchSize=1000,
parallelism=5,
bodyDecoder=utf8_decoder):
"""
Creates an input stream that is to be used with the Spark Sink deployed on a Flume agent.
This stream will poll the sink for data and will pull events as they are available.
:param ssc: StreamingContext object
:param addresses: List of (host, port)s on which the Spark Sink is running.
:param storageLevel: Storage level to use for storing the received objects
:param maxBatchSize: The maximum number of events to be pulled from the Spark sink
in a single RPC call
:param parallelism: Number of concurrent requests this stream should send to the sink.
Note that having a higher number of requests concurrently being pulled
will result in this stream using more threads
:param bodyDecoder: A function used to decode body (default is utf8_decoder)
:return: A DStream object
.. note:: Deprecated in 2.3.0
"""
jlevel = ssc._sc._getJavaStorageLevel(storageLevel)
hosts = []
ports = []
for (host, port) in addresses:
hosts.append(host)
ports.append(port)
helper = FlumeUtils._get_helper(ssc._sc)
jstream = helper.createPollingStream(
ssc._jssc, hosts, ports, jlevel, maxBatchSize, parallelism)
return FlumeUtils._toPythonDStream(ssc, jstream, bodyDecoder)
@staticmethod
def _toPythonDStream(ssc, jstream, bodyDecoder):
ser = PairDeserializer(NoOpSerializer(), NoOpSerializer())
stream = DStream(jstream, ssc, ser)
def func(event):
headersBytes = BytesIO(event[0]) if sys.version >= "3" else StringIO(event[0])
headers = {}
strSer = UTF8Deserializer()
for i in range(0, read_int(headersBytes)):
key = strSer.loads(headersBytes)
value = strSer.loads(headersBytes)
headers[key] = value
body = bodyDecoder(event[1])
return (headers, body)
return stream.map(func)
@staticmethod
def _get_helper(sc):
try:
return sc._jvm.org.apache.spark.streaming.flume.FlumeUtilsPythonHelper()
except TypeError as e:
if str(e) == "'JavaPackage' object is not callable":
FlumeUtils._printErrorMsg(sc)
raise
@staticmethod
def _printErrorMsg(sc):
print("""
________________________________________________________________________________________________
Spark Streaming's Flume libraries not found in class path. Try one of the following.
1. Include the Flume library and its dependencies with in the
spark-submit command as
$ bin/spark-submit --packages org.apache.spark:spark-streaming-flume:%s ...
2. Download the JAR of the artifact from Maven Central http://search.maven.org/,
Group Id = org.apache.spark, Artifact Id = spark-streaming-flume-assembly, Version = %s.
Then, include the jar in the spark-submit command as
$ bin/spark-submit --jars <spark-streaming-flume-assembly.jar> ...
________________________________________________________________________________________________
""" % (sc.version, sc.version))
| apache-2.0 |
wangyum/tensorflow | tensorflow/contrib/keras/python/keras/layers/merge.py | 2 | 18933 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=not-callable
# pylint: disable=redefined-builtin
"""Layers can merge several input tensors into a single output tensor.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.engine.topology import Layer
from tensorflow.python.framework import tensor_shape
class _Merge(Layer):
"""Generic merge layer for elementwise merge functions.
Used to implement `Sum`, `Average`, etc.
Arguments:
**kwargs: standard layer keyword arguments.
"""
def __init__(self, **kwargs):
super(_Merge, self).__init__(**kwargs)
self.supports_masking = True
def _merge_function(self, inputs):
raise NotImplementedError
def _compute_elemwise_op_output_shape(self, shape1, shape2):
"""Computes the shape of the resultant of an elementwise operation.
Arguments:
shape1: tuple or None. Shape of the first tensor
shape2: tuple or None. Shape of the second tensor
Returns:
expected output shape when an element-wise operation is
carried out on 2 tensors with shapes shape1 and shape2.
tuple or None.
Raises:
ValueError: if shape1 and shape2 are not compatible for
element-wise operations.
"""
if None in [shape1, shape2]:
return None
elif len(shape1) < len(shape2):
return self._compute_elemwise_op_output_shape(shape2, shape1)
elif not shape2:
return shape1
output_shape = list(shape1[:-len(shape2)])
for i, j in zip(shape1[-len(shape2):], shape2):
if i is None or j is None:
output_shape.append(None)
elif i == 1:
output_shape.append(j)
elif j == 1:
output_shape.append(i)
else:
if i != j:
raise ValueError('Operands could not be broadcast '
'together with shapes ' + str(shape1) + ' ' +
str(shape2))
output_shape.append(i)
return tuple(output_shape)
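  # Illustrative examples (not part of the original code) of the broadcasting
  # rule above, using shapes that already exclude the batch dimension:
  #   (4, 5) with (5,)   -> (4, 5)
  #   (1, 5) with (4, 1) -> (4, 5)
  #   (3, 5) with (4, 5) -> ValueError (3 and 4 cannot be broadcast)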
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape, list):
raise ValueError('A merge layer should be called ' 'on a list of inputs.')
if len(input_shape) < 2:
raise ValueError('A merge layer should be called '
'on a list of at least 2 inputs. '
'Got ' + str(len(input_shape)) + ' inputs.')
input_shape = [tensor_shape.TensorShape(s).as_list() for s in input_shape]
batch_sizes = [s[0] for s in input_shape if s is not None]
batch_sizes = set(batch_sizes)
batch_sizes -= set([None])
if len(batch_sizes) > 1:
raise ValueError('Can not merge tensors with different '
'batch sizes. Got tensors with shapes : ' +
str(input_shape))
if input_shape[0] is None:
output_shape = None
else:
output_shape = input_shape[0][1:]
for i in range(1, len(input_shape)):
if input_shape[i] is None:
shape = None
else:
shape = input_shape[i][1:]
output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
# If the inputs have different ranks, we have to reshape them
# to make them broadcastable.
if None not in input_shape and len(set(map(len, input_shape))) == 1:
self._reshape_required = False
else:
self._reshape_required = True
def call(self, inputs):
if self._reshape_required:
reshaped_inputs = []
input_ndims = list(map(K.ndim, inputs))
if None not in input_ndims:
# If ranks of all inputs are available,
# we simply expand each of them at axis=1
# until all of them have the same rank.
max_ndim = max(input_ndims)
for x in inputs:
x_ndim = K.ndim(x)
for _ in range(max_ndim - x_ndim):
x = K.expand_dims(x, 1)
reshaped_inputs.append(x)
return self._merge_function(reshaped_inputs)
else:
# Transpose all inputs so that batch size is the last dimension.
# (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
transposed = False
for x in inputs:
x_ndim = K.ndim(x)
if x_ndim is None:
x_shape = K.shape(x)
batch_size = x_shape[0]
new_shape = K.concatenate([x_shape[1:], K.expand_dims(batch_size)])
x_transposed = K.reshape(x,
K.stack([batch_size, K.prod(x_shape[1:])]))
x_transposed = K.permute_dimensions(x_transposed, (1, 0))
x_transposed = K.reshape(x_transposed, new_shape)
reshaped_inputs.append(x_transposed)
transposed = True
elif x_ndim > 1:
dims = list(range(1, x_ndim)) + [0]
reshaped_inputs.append(K.permute_dimensions(x, dims))
transposed = True
else:
# We don't transpose inputs if they are 1D vectors or scalars.
reshaped_inputs.append(x)
y = self._merge_function(reshaped_inputs)
y_ndim = K.ndim(y)
if transposed:
# If inputs have been transposed, we have to transpose the output too.
if y_ndim is None:
y_shape = K.shape(y)
y_ndim = K.shape(y_shape)[0]
batch_size = y_shape[y_ndim - 1]
new_shape = K.concatenate(
[K.expand_dims(batch_size), y_shape[:y_ndim - 1]])
y = K.reshape(y, (-1, batch_size))
y = K.permute_dimensions(y, (1, 0))
y = K.reshape(y, new_shape)
elif y_ndim > 1:
dims = [y_ndim - 1] + list(range(y_ndim - 1))
y = K.permute_dimensions(y, dims)
return y
else:
return self._merge_function(inputs)
def compute_output_shape(self, input_shape):
if input_shape[0] is None:
output_shape = None
else:
output_shape = input_shape[0][1:]
for i in range(1, len(input_shape)):
if input_shape[i] is None:
shape = None
else:
shape = input_shape[i][1:]
output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
batch_sizes = [s[0] for s in input_shape if s is not None]
batch_sizes = set(batch_sizes)
batch_sizes -= set([None])
if len(batch_sizes) == 1:
output_shape = (list(batch_sizes)[0],) + output_shape
else:
output_shape = (None,) + output_shape
return output_shape
def compute_mask(self, inputs, mask=None):
if mask is None:
return None
if not isinstance(mask, list):
raise ValueError('`mask` should be a list.')
if not isinstance(inputs, list):
raise ValueError('`inputs` should be a list.')
if len(mask) != len(inputs):
raise ValueError('The lists `inputs` and `mask` '
'should have the same length.')
if all([m is None for m in mask]):
return None
masks = [K.expand_dims(m, 0) for m in mask if m is not None]
return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)
class Add(_Merge):
"""Layer that adds a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output += inputs[i]
return output
class Multiply(_Merge):
"""Layer that multiplies (element-wise) a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output *= inputs[i]
return output
class Average(_Merge):
"""Layer that averages a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output += inputs[i]
return output / len(inputs)
class Maximum(_Merge):
"""Layer that computes the maximum (element-wise) a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = K.maximum(output, inputs[i])
return output
class Concatenate(_Merge):
"""Layer that concatenates a list of inputs.
It takes as input a list of tensors,
  all of the same shape except for the concatenation axis,
and returns a single tensor, the concatenation of all inputs.
Arguments:
axis: Axis along which to concatenate.
**kwargs: standard layer keyword arguments.
"""
def __init__(self, axis=-1, **kwargs):
super(Concatenate, self).__init__(**kwargs)
self.axis = axis
self.supports_masking = True
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape, list):
raise ValueError('`Concatenate` layer should be called '
'on a list of inputs')
if all([shape is None for shape in input_shape]):
return
reduced_inputs_shapes = [
tensor_shape.TensorShape(shape).as_list() for shape in input_shape
]
shape_set = set()
for i in range(len(reduced_inputs_shapes)):
del reduced_inputs_shapes[i][self.axis]
shape_set.add(tuple(reduced_inputs_shapes[i]))
if len(shape_set) > 1:
raise ValueError('`Concatenate` layer requires '
'inputs with matching shapes '
'except for the concat axis. '
'Got inputs shapes: %s' % (input_shape))
def call(self, inputs):
if not isinstance(inputs, list):
raise ValueError('A `Concatenate` layer should be called '
'on a list of inputs.')
return K.concatenate(inputs, axis=self.axis)
def _compute_output_shape(self, input_shape):
if not isinstance(input_shape, list):
raise ValueError('A `Concatenate` layer should be called '
'on a list of inputs.')
input_shapes = input_shape
output_shape = tensor_shape.TensorShape(input_shapes[0]).as_list()
for shape in input_shapes[1:]:
shape = tensor_shape.TensorShape(shape).as_list()
if output_shape[self.axis] is None or shape[self.axis] is None:
output_shape[self.axis] = None
break
output_shape[self.axis] += shape[self.axis]
return tensor_shape.TensorShape(output_shape)
def compute_mask(self, inputs, mask=None):
if mask is None:
return None
if not isinstance(mask, list):
raise ValueError('`mask` should be a list.')
if not isinstance(inputs, list):
raise ValueError('`inputs` should be a list.')
if len(mask) != len(inputs):
raise ValueError('The lists `inputs` and `mask` '
'should have the same length.')
if all([m is None for m in mask]):
return None
# Make a list of masks while making sure
# the dimensionality of each mask
# is the same as the corresponding input.
masks = []
for input_i, mask_i in zip(inputs, mask):
if mask_i is None:
# Input is unmasked. Append all 1s to masks,
# but cast it to bool first
masks.append(K.cast(K.ones_like(input_i), 'bool'))
elif K.ndim(mask_i) < K.ndim(input_i):
# Mask is smaller than the input, expand it
masks.append(K.expand_dims(mask_i))
else:
masks.append(mask_i)
concatenated = K.concatenate(masks, axis=self.axis)
return K.all(concatenated, axis=-1, keepdims=False)
def get_config(self):
config = {
'axis': self.axis,
}
base_config = super(Concatenate, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Dot(_Merge):
"""Layer that computes a dot product between samples in two tensors.
E.g. if applied to two tensors `a` and `b` of shape `(batch_size, n)`,
the output will be a tensor of shape `(batch_size, 1)`
where each entry `i` will be the dot product between
`a[i]` and `b[i]`.
Arguments:
axes: Integer or tuple of integers,
axis or axes along which to take the dot product.
normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
**kwargs: Standard layer keyword arguments.
"""
def __init__(self, axes, normalize=False, **kwargs):
super(Dot, self).__init__(**kwargs)
if not isinstance(axes, int):
if not isinstance(axes, (list, tuple)):
raise TypeError('Invalid type for `axes` - '
'should be a list or an int.')
if len(axes) != 2:
raise ValueError('Invalid format for `axes` - '
'should contain two elements.')
if not isinstance(axes[0], int) or not isinstance(axes[1], int):
raise ValueError('Invalid format for `axes` - '
'list elements should be "int".')
self.axes = axes
self.normalize = normalize
self.supports_masking = True
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `Dot` layer should be called '
'on a list of 2 inputs.')
shape1 = tensor_shape.TensorShape(input_shape[0]).as_list()
shape2 = tensor_shape.TensorShape(input_shape[1]).as_list()
if shape1 is None or shape2 is None:
return
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % len(shape1), self.axes % len(shape2)]
else:
axes = [self.axes] * 2
else:
axes = self.axes
if shape1[axes[0]] != shape2[axes[1]]:
raise ValueError('Dimension incompatibility '
'%s != %s. ' % (shape1[axes[0]], shape2[axes[1]]) +
'Layer shapes: %s, %s' % (shape1, shape2))
def call(self, inputs):
x1 = inputs[0]
x2 = inputs[1]
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % K.ndim(x1), self.axes % K.ndim(x2)]
else:
axes = [self.axes] * 2
else:
axes = []
for i in range(len(self.axes)):
if self.axes[i] < 0:
axes.append(self.axes[i] % K.ndim(inputs[i]))
else:
axes.append(self.axes[i])
if self.normalize:
x1 = K.l2_normalize(x1, axis=axes[0])
x2 = K.l2_normalize(x2, axis=axes[1])
output = K.batch_dot(x1, x2, axes)
return output
def _compute_output_shape(self, input_shape):
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `Dot` layer should be called '
'on a list of 2 inputs.')
shape1 = tensor_shape.TensorShape(input_shape[0]).as_list()
shape2 = tensor_shape.TensorShape(input_shape[1]).as_list()
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % len(shape1), self.axes % len(shape2)]
else:
axes = [self.axes] * 2
else:
axes = self.axes
shape1.pop(axes[0])
shape2.pop(axes[1])
shape2.pop(0)
output_shape = shape1 + shape2
if len(output_shape) == 1:
output_shape += [1]
return tensor_shape.TensorShape(output_shape)
def compute_mask(self, inputs, mask=None):
return None
def get_config(self):
config = {
'axes': self.axes,
'normalize': self.normalize,
}
base_config = super(Dot, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def add(inputs, **kwargs):
"""Functional interface to the `Add` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the sum of the inputs.
"""
return Add(**kwargs)(inputs)
def multiply(inputs, **kwargs):
"""Functional interface to the `Multiply` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the element-wise product of the inputs.
"""
return Multiply(**kwargs)(inputs)
def average(inputs, **kwargs):
"""Functional interface to the `Average` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the average of the inputs.
"""
return Average(**kwargs)(inputs)
def maximum(inputs, **kwargs):
"""Functional interface to the `Maximum` layer.
Arguments:
inputs: A list of input tensors (at least 2).
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the element-wise maximum of the inputs.
"""
return Maximum(**kwargs)(inputs)
def concatenate(inputs, axis=-1, **kwargs):
"""Functional interface to the `Concatenate` layer.
Arguments:
inputs: A list of input tensors (at least 2).
axis: Concatenation axis.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the concatenation of the inputs alongside axis `axis`.
"""
return Concatenate(axis=axis, **kwargs)(inputs)
def dot(inputs, axes, normalize=False, **kwargs):
"""Functional interface to the `Dot` layer.
Arguments:
inputs: A list of input tensors (at least 2).
axes: Integer or tuple of integers,
axis or axes along which to take the dot product.
normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the dot product of the samples from the inputs.
"""
return Dot(axes=axes, normalize=normalize, **kwargs)(inputs)
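# --- Usage sketch (not part of the original module) ---
# A minimal illustration of the functional merge helpers defined above. The
# `Input`/`Model` import paths are assumptions and differ between Keras and
# TensorFlow versions; the merge helpers themselves are the ones defined here.
#
#   a = Input(shape=(32,))
#   b = Input(shape=(32,))
#   summed = add([a, b])                          # element-wise sum
#   cosine = dot([a, b], axes=1, normalize=True)  # cosine proximity
#   merged = concatenate([a, b], axis=-1)         # shape (None, 64)
#   model = Model(inputs=[a, b], outputs=[summed, cosine, merged])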
| apache-2.0 |
steedos/odoo | addons/hr_holidays/hr_holidays.py | 159 | 33482 | # -*- coding: utf-8 -*-
##################################################################################
#
# Copyright (c) 2005-2006 Axelor SARL. (http://www.axelor.com)
# and 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# $Id: hr.py 4656 2006-11-24 09:58:42Z Cyp $
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import math
import time
from operator import attrgetter
from openerp.exceptions import Warning
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_holidays_status(osv.osv):
_name = "hr.holidays.status"
_description = "Leave Type"
def get_days(self, cr, uid, ids, employee_id, context=None):
result = dict((id, dict(max_leaves=0, leaves_taken=0, remaining_leaves=0,
virtual_remaining_leaves=0)) for id in ids)
holiday_ids = self.pool['hr.holidays'].search(cr, uid, [('employee_id', '=', employee_id),
('state', 'in', ['confirm', 'validate1', 'validate']),
('holiday_status_id', 'in', ids)
], context=context)
for holiday in self.pool['hr.holidays'].browse(cr, uid, holiday_ids, context=context):
status_dict = result[holiday.holiday_status_id.id]
if holiday.type == 'add':
status_dict['virtual_remaining_leaves'] += holiday.number_of_days_temp
if holiday.state == 'validate':
status_dict['max_leaves'] += holiday.number_of_days_temp
status_dict['remaining_leaves'] += holiday.number_of_days_temp
elif holiday.type == 'remove': # number of days is negative
status_dict['virtual_remaining_leaves'] -= holiday.number_of_days_temp
if holiday.state == 'validate':
status_dict['leaves_taken'] += holiday.number_of_days_temp
status_dict['remaining_leaves'] -= holiday.number_of_days_temp
return result
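# Example (hypothetical figures): for a leave type with one validated
# allocation of 10 days and one validated leave of 3 days, get_days()
# yields {'max_leaves': 10, 'leaves_taken': 3, 'remaining_leaves': 7,
# 'virtual_remaining_leaves': 7} for that status id.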
def _user_left_days(self, cr, uid, ids, name, args, context=None):
employee_id = False
if context and 'employee_id' in context:
employee_id = context['employee_id']
else:
employee_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
if employee_ids:
employee_id = employee_ids[0]
if employee_id:
res = self.get_days(cr, uid, ids, employee_id, context=context)
else:
res = dict((res_id, {'leaves_taken': 0, 'remaining_leaves': 0, 'max_leaves': 0}) for res_id in ids)
return res
_columns = {
'name': fields.char('Leave Type', size=64, required=True, translate=True),
'categ_id': fields.many2one('calendar.event.type', 'Meeting Type',
help='Once a leave is validated, Odoo will create a corresponding meeting of this type in the calendar.'),
'color_name': fields.selection([('red', 'Red'),('blue','Blue'), ('lightgreen', 'Light Green'), ('lightblue','Light Blue'), ('lightyellow', 'Light Yellow'), ('magenta', 'Magenta'),('lightcyan', 'Light Cyan'),('black', 'Black'),('lightpink', 'Light Pink'),('brown', 'Brown'),('violet', 'Violet'),('lightcoral', 'Light Coral'),('lightsalmon', 'Light Salmon'),('lavender', 'Lavender'),('wheat', 'Wheat'),('ivory', 'Ivory')],'Color in Report', required=True, help='This color will be used in the leaves summary located in Reporting\Leaves by Department.'),
'limit': fields.boolean('Allow to Override Limit', help='If you select this check box, the system allows the employees to take more leaves than the available ones for this type and will not take them into account for the "Remaining Legal Leaves" defined on the employee form.'),
'active': fields.boolean('Active', help="If the active field is set to false, it will allow you to hide the leave type without removing it."),
'max_leaves': fields.function(_user_left_days, string='Maximum Allowed', help='This value is given by the sum of all holidays requests with a positive value.', multi='user_left_days'),
'leaves_taken': fields.function(_user_left_days, string='Leaves Already Taken', help='This value is given by the sum of all holidays requests with a negative value.', multi='user_left_days'),
'remaining_leaves': fields.function(_user_left_days, string='Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken', multi='user_left_days'),
'virtual_remaining_leaves': fields.function(_user_left_days, string='Virtual Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken - Leaves Waiting Approval', multi='user_left_days'),
'double_validation': fields.boolean('Apply Double Validation', help="When selected, the Allocation/Leave Requests for this type require a second validation to be approved."),
}
_defaults = {
'color_name': 'red',
'active': True,
}
def name_get(self, cr, uid, ids, context=None):
if context is None:
context = {}
if not context.get('employee_id',False):
# leave counts are based on employee_id and would be inaccurate if not based on the correct employee
return super(hr_holidays_status, self).name_get(cr, uid, ids, context=context)
res = []
for record in self.browse(cr, uid, ids, context=context):
name = record.name
if not record.limit:
name = name + (' (%g/%g)' % (record.leaves_taken or 0.0, record.max_leaves or 0.0))
res.append((record.id, name))
return res
class hr_holidays(osv.osv):
_name = "hr.holidays"
_description = "Leave"
_order = "type desc, date_from asc"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_track = {
'state': {
'hr_holidays.mt_holidays_approved': lambda self, cr, uid, obj, ctx=None: obj.state == 'validate',
'hr_holidays.mt_holidays_refused': lambda self, cr, uid, obj, ctx=None: obj.state == 'refuse',
'hr_holidays.mt_holidays_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state == 'confirm',
},
}
def _employee_get(self, cr, uid, context=None):
emp_id = context.get('default_employee_id', False)
if emp_id:
return emp_id
ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
if ids:
return ids[0]
return False
def _compute_number_of_days(self, cr, uid, ids, name, args, context=None):
result = {}
for hol in self.browse(cr, uid, ids, context=context):
if hol.type=='remove':
result[hol.id] = -hol.number_of_days_temp
else:
result[hol.id] = hol.number_of_days_temp
return result
def _get_can_reset(self, cr, uid, ids, name, arg, context=None):
"""User can reset a leave request if it is its own leave request or if
he is an Hr Manager. """
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
group_hr_manager_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_hr_manager')[1]
if group_hr_manager_id in [g.id for g in user.groups_id]:
return dict.fromkeys(ids, True)
result = dict.fromkeys(ids, False)
for holiday in self.browse(cr, uid, ids, context=context):
if holiday.employee_id and holiday.employee_id.user_id and holiday.employee_id.user_id.id == uid:
result[holiday.id] = True
return result
def _check_date(self, cr, uid, ids, context=None):
for holiday in self.browse(cr, uid, ids, context=context):
domain = [
('date_from', '<=', holiday.date_to),
('date_to', '>=', holiday.date_from),
('employee_id', '=', holiday.employee_id.id),
('id', '!=', holiday.id),
('state', 'not in', ['cancel', 'refuse']),
]
nholidays = self.search_count(cr, uid, domain, context=context)
if nholidays:
return False
return True
_check_holidays = lambda self, cr, uid, ids, context=None: self.check_holidays(cr, uid, ids, context=context)
_columns = {
'name': fields.char('Description', size=64),
'state': fields.selection([('draft', 'To Submit'), ('cancel', 'Cancelled'),('confirm', 'To Approve'), ('refuse', 'Refused'), ('validate1', 'Second Approval'), ('validate', 'Approved')],
'Status', readonly=True, track_visibility='onchange', copy=False,
help='The status is set to \'To Submit\', when a holiday request is created.\
\nThe status is \'To Approve\', when holiday request is confirmed by user.\
\nThe status is \'Refused\', when holiday request is refused by manager.\
\nThe status is \'Approved\', when holiday request is approved by manager.'),
'user_id':fields.related('employee_id', 'user_id', type='many2one', relation='res.users', string='User', store=True),
'date_from': fields.datetime('Start Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, select=True, copy=False),
'date_to': fields.datetime('End Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False),
'holiday_status_id': fields.many2one("hr.holidays.status", "Leave Type", required=True,readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'employee_id': fields.many2one('hr.employee', "Employee", select=True, invisible=False, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'manager_id': fields.many2one('hr.employee', 'First Approval', invisible=False, readonly=True, copy=False,
help='This area is automatically filled by the user who validates the leave'),
'notes': fields.text('Reasons',readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'number_of_days_temp': fields.float('Allocation', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False),
'number_of_days': fields.function(_compute_number_of_days, string='Number of Days', store=True),
'meeting_id': fields.many2one('calendar.event', 'Meeting'),
'type': fields.selection([('remove','Leave Request'),('add','Allocation Request')], 'Request Type', required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help="Choose 'Leave Request' if someone wants to take an off-day. \nChoose 'Allocation Request' if you want to increase the number of leaves available for someone", select=True),
'parent_id': fields.many2one('hr.holidays', 'Parent'),
'linked_request_ids': fields.one2many('hr.holidays', 'parent_id', 'Linked Requests',),
'department_id':fields.related('employee_id', 'department_id', string='Department', type='many2one', relation='hr.department', readonly=True, store=True),
'category_id': fields.many2one('hr.employee.category', "Employee Tag", help='Category of Employee', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'holiday_type': fields.selection([('employee','By Employee'),('category','By Employee Tag')], 'Allocation Mode', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help='By Employee: Allocation/Request for individual Employee, By Employee Tag: Allocation/Request for group of employees in category', required=True),
'manager_id2': fields.many2one('hr.employee', 'Second Approval', readonly=True, copy=False,
help='This area is automatically filled by the user who validates the leave with second level (if the leave type needs a second validation)'),
'double_validation': fields.related('holiday_status_id', 'double_validation', type='boolean', relation='hr.holidays.status', string='Apply Double Validation'),
'can_reset': fields.function(
_get_can_reset,
type='boolean'),
}
_defaults = {
'employee_id': _employee_get,
'state': 'confirm',
'type': 'remove',
'user_id': lambda obj, cr, uid, context: uid,
'holiday_type': 'employee'
}
_constraints = [
(_check_date, 'You can not have 2 leaves that overlaps on same day!', ['date_from','date_to']),
(_check_holidays, 'The number of remaining leaves is not sufficient for this leave type', ['state','number_of_days_temp'])
]
_sql_constraints = [
('type_value', "CHECK( (holiday_type='employee' AND employee_id IS NOT NULL) or (holiday_type='category' AND category_id IS NOT NULL))",
"The employee or employee category of this request is missing. Please make sure that your user login is linked to an employee."),
('date_check2', "CHECK ( (type='add') OR (date_from <= date_to))", "The start date must be anterior to the end date."),
('date_check', "CHECK ( number_of_days_temp >= 0 )", "The number of days must be greater than 0."),
]
def _create_resource_leave(self, cr, uid, leaves, context=None):
'''This method will create an entry in the resource calendar leaves object when the holidays are validated.'''
obj_res_leave = self.pool.get('resource.calendar.leaves')
for leave in leaves:
vals = {
'name': leave.name,
'date_from': leave.date_from,
'holiday_id': leave.id,
'date_to': leave.date_to,
'resource_id': leave.employee_id.resource_id.id,
'calendar_id': leave.employee_id.resource_id.calendar_id.id
}
obj_res_leave.create(cr, uid, vals, context=context)
return True
def _remove_resource_leave(self, cr, uid, ids, context=None):
'''This method will remove the entries from the resource calendar leaves object when the holidays are cancelled/refused.'''
obj_res_leave = self.pool.get('resource.calendar.leaves')
leave_ids = obj_res_leave.search(cr, uid, [('holiday_id', 'in', ids)], context=context)
return obj_res_leave.unlink(cr, uid, leave_ids, context=context)
def onchange_type(self, cr, uid, ids, holiday_type, employee_id=False, context=None):
result = {}
if holiday_type == 'employee' and not employee_id:
ids_employee = self.pool.get('hr.employee').search(cr, uid, [('user_id','=', uid)])
if ids_employee:
result['value'] = {
'employee_id': ids_employee[0]
}
elif holiday_type != 'employee':
result['value'] = {
'employee_id': False
}
return result
def onchange_employee(self, cr, uid, ids, employee_id):
result = {'value': {'department_id': False}}
if employee_id:
employee = self.pool.get('hr.employee').browse(cr, uid, employee_id)
result['value'] = {'department_id': employee.department_id.id}
return result
# TODO: can be improved using resource calendar method
def _get_number_of_days(self, date_from, date_to):
"""Returns a float equals to the timedelta between two dates given as string."""
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
from_dt = datetime.datetime.strptime(date_from, DATETIME_FORMAT)
to_dt = datetime.datetime.strptime(date_to, DATETIME_FORMAT)
timedelta = to_dt - from_dt
diff_day = timedelta.days + float(timedelta.seconds) / 86400
return diff_day
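# Worked example (hypothetical dates): from '2014-01-01 09:00:00' to
# '2014-01-02 13:00:00' the timedelta is 1 day and 4 hours, so this returns
# 1 + 14400/86400, about 1.17; the onchange handlers below then take
# floor(1.17) + 1 = 2 as number_of_days_temp.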
def unlink(self, cr, uid, ids, context=None):
for rec in self.browse(cr, uid, ids, context=context):
if rec.state not in ['draft', 'cancel', 'confirm']:
raise osv.except_osv(_('Warning!'),_('You cannot delete a leave which is in %s state.')%(rec.state))
return super(hr_holidays, self).unlink(cr, uid, ids, context)
def onchange_date_from(self, cr, uid, ids, date_to, date_from):
"""
If no date is set for date_to, automatically set one 8 hours later than
the date_from.
Also update the number_of_days.
"""
# date_to has to be greater than date_from
if (date_from and date_to) and (date_from > date_to):
raise osv.except_osv(_('Warning!'),_('The start date must be anterior to the end date.'))
result = {'value': {}}
# No date_to set so far: automatically compute one 8 hours later
if date_from and not date_to:
date_to_with_delta = datetime.datetime.strptime(date_from, tools.DEFAULT_SERVER_DATETIME_FORMAT) + datetime.timedelta(hours=8)
result['value']['date_to'] = str(date_to_with_delta)
# Compute and update the number of days
if (date_to and date_from) and (date_from <= date_to):
diff_day = self._get_number_of_days(date_from, date_to)
result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1
else:
result['value']['number_of_days_temp'] = 0
return result
def onchange_date_to(self, cr, uid, ids, date_to, date_from):
"""
Update the number_of_days.
"""
# date_to has to be greater than date_from
if (date_from and date_to) and (date_from > date_to):
raise osv.except_osv(_('Warning!'),_('The start date must be anterior to the end date.'))
result = {'value': {}}
# Compute and update the number of days
if (date_to and date_from) and (date_from <= date_to):
diff_day = self._get_number_of_days(date_from, date_to)
result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1
else:
result['value']['number_of_days_temp'] = 0
return result
def create(self, cr, uid, values, context=None):
""" Override to avoid automatic logging of creation """
if context is None:
context = {}
context = dict(context, mail_create_nolog=True)
if values.get('state') and values['state'] not in ['draft', 'confirm', 'cancel'] and not self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'):
raise osv.except_osv(_('Warning!'), _('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % values.get('state'))
return super(hr_holidays, self).create(cr, uid, values, context=context)
def write(self, cr, uid, ids, vals, context=None):
if vals.get('state') and vals['state'] not in ['draft', 'confirm', 'cancel'] and not self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'):
raise osv.except_osv(_('Warning!'), _('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % vals.get('state'))
return super(hr_holidays, self).write(cr, uid, ids, vals, context=context)
def holidays_reset(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {
'state': 'draft',
'manager_id': False,
'manager_id2': False,
})
to_unlink = []
for record in self.browse(cr, uid, ids, context=context):
for record2 in record.linked_request_ids:
self.holidays_reset(cr, uid, [record2.id], context=context)
to_unlink.append(record2.id)
if to_unlink:
self.unlink(cr, uid, to_unlink, context=context)
return True
def holidays_first_validate(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
self.holidays_first_validate_notificate(cr, uid, ids, context=context)
return self.write(cr, uid, ids, {'state':'validate1', 'manager_id': manager})
def holidays_validate(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
self.write(cr, uid, ids, {'state':'validate'})
data_holiday = self.browse(cr, uid, ids)
for record in data_holiday:
if record.double_validation:
self.write(cr, uid, [record.id], {'manager_id2': manager})
else:
self.write(cr, uid, [record.id], {'manager_id': manager})
if record.holiday_type == 'employee' and record.type == 'remove':
meeting_obj = self.pool.get('calendar.event')
meeting_vals = {
'name': record.name or _('Leave Request'),
'categ_ids': record.holiday_status_id.categ_id and [(6,0,[record.holiday_status_id.categ_id.id])] or [],
'duration': record.number_of_days_temp * 8,
'description': record.notes,
'user_id': record.user_id.id,
'start': record.date_from,
'stop': record.date_to,
'allday': False,
'state': 'open', # to block that meeting date in the calendar
'class': 'confidential'
}
#Add the partner_id (if exist) as an attendee
if record.user_id and record.user_id.partner_id:
meeting_vals['partner_ids'] = [(4,record.user_id.partner_id.id)]
ctx_no_email = dict(context or {}, no_email=True)
meeting_id = meeting_obj.create(cr, uid, meeting_vals, context=ctx_no_email)
self._create_resource_leave(cr, uid, [record], context=context)
self.write(cr, uid, ids, {'meeting_id': meeting_id})
elif record.holiday_type == 'category':
emp_ids = obj_emp.search(cr, uid, [('category_ids', 'child_of', [record.category_id.id])])
leave_ids = []
for emp in obj_emp.browse(cr, uid, emp_ids):
vals = {
'name': record.name,
'type': record.type,
'holiday_type': 'employee',
'holiday_status_id': record.holiday_status_id.id,
'date_from': record.date_from,
'date_to': record.date_to,
'notes': record.notes,
'number_of_days_temp': record.number_of_days_temp,
'parent_id': record.id,
'employee_id': emp.id
}
leave_ids.append(self.create(cr, uid, vals, context=None))
for leave_id in leave_ids:
# TODO is it necessary to interleave the calls?
for sig in ('confirm', 'validate', 'second_validate'):
self.signal_workflow(cr, uid, [leave_id], sig)
return True
def holidays_confirm(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if record.employee_id and record.employee_id.parent_id and record.employee_id.parent_id.user_id:
self.message_subscribe_users(cr, uid, [record.id], user_ids=[record.employee_id.parent_id.user_id.id], context=context)
return self.write(cr, uid, ids, {'state': 'confirm'})
def holidays_refuse(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
for holiday in self.browse(cr, uid, ids, context=context):
if holiday.state == 'validate1':
self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id': manager})
else:
self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id2': manager})
self.holidays_cancel(cr, uid, ids, context=context)
return True
def holidays_cancel(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids):
# Delete the meeting
if record.meeting_id:
record.meeting_id.unlink()
# If this is a category request that created several holidays, refuse all the linked requests
self.signal_workflow(cr, uid, map(attrgetter('id'), record.linked_request_ids or []), 'refuse')
self._remove_resource_leave(cr, uid, ids, context=context)
return True
def check_holidays(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if record.holiday_type != 'employee' or record.type != 'remove' or not record.employee_id or record.holiday_status_id.limit:
continue
leave_days = self.pool.get('hr.holidays.status').get_days(cr, uid, [record.holiday_status_id.id], record.employee_id.id, context=context)[record.holiday_status_id.id]
if leave_days['remaining_leaves'] < 0 or leave_days['virtual_remaining_leaves'] < 0:
# Raising a warning gives more user-friendly feedback than the default constraint error
raise Warning(_('The number of remaining leaves is not sufficient for this leave type.\n'
'Please verify also the leaves waiting for validation.'))
return True
# -----------------------------
# OpenChatter and notifications
# -----------------------------
def _needaction_domain_get(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
# if this user is a hr.manager, he should do second validations
if self.pool.get('res.users').has_group(cr, uid, 'base.group_hr_manager'):
dom = ['|'] + dom + [('state', '=', 'validate1')]
return dom
def holidays_first_validate_notificate(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
self.message_post(cr, uid, [obj.id],
_("Request approved, waiting second validation."), context=context)
class resource_calendar_leaves(osv.osv):
_inherit = "resource.calendar.leaves"
_description = "Leave Detail"
_columns = {
'holiday_id': fields.many2one("hr.holidays", "Leave Request"),
}
class hr_employee(osv.osv):
_inherit="hr.employee"
def create(self, cr, uid, vals, context=None):
# don't pass the value of remaining leave if it's 0 at creation time, otherwise it will trigger the inverse
# function _set_remaining_days, which the system may not be configured for. Note that we don't have this problem on
# write because the clients only send the fields that have been modified.
if 'remaining_leaves' in vals and not vals['remaining_leaves']:
del(vals['remaining_leaves'])
return super(hr_employee, self).create(cr, uid, vals, context=context)
def _set_remaining_days(self, cr, uid, empl_id, name, value, arg, context=None):
employee = self.browse(cr, uid, empl_id, context=context)
diff = value - employee.remaining_leaves
type_obj = self.pool.get('hr.holidays.status')
holiday_obj = self.pool.get('hr.holidays')
# Find for holidays status
status_ids = type_obj.search(cr, uid, [('limit', '=', False)], context=context)
if len(status_ids) != 1 :
raise osv.except_osv(_('Warning!'),_("The feature behind the field 'Remaining Legal Leaves' can only be used when there is only one leave type with the option 'Allow to Override Limit' unchecked. (%s Found). Otherwise, the update is ambiguous as we cannot decide on which leave type the update has to be done. \nYou may prefer to use the classic menus 'Leave Requests' and 'Allocation Requests' located in 'Human Resources \ Leaves' to manage the leave days of the employees if the configuration does not allow to use this field.") % (len(status_ids)))
status_id = status_ids and status_ids[0] or False
if not status_id:
return False
if diff > 0:
leave_id = holiday_obj.create(cr, uid, {'name': _('Allocation for %s') % employee.name, 'employee_id': employee.id, 'holiday_status_id': status_id, 'type': 'add', 'holiday_type': 'employee', 'number_of_days_temp': diff}, context=context)
elif diff < 0:
raise osv.except_osv(_('Warning!'), _('You cannot reduce validated allocation requests'))
else:
return False
for sig in ('confirm', 'validate', 'second_validate'):
holiday_obj.signal_workflow(cr, uid, [leave_id], sig)
return True
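# Example (hypothetical figures): if an employee currently has 5.0 remaining
# legal leaves and the field is set to 8.0, diff == 3.0, so a 3-day allocation
# request is created and pushed through confirm/validate/second_validate;
# lowering the value instead raises the "cannot reduce" warning above.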
def _get_remaining_days(self, cr, uid, ids, name, args, context=None):
cr.execute("""SELECT
sum(h.number_of_days) as days,
h.employee_id
from
hr_holidays h
join hr_holidays_status s on (s.id=h.holiday_status_id)
where
h.state='validate' and
s.limit=False and
h.employee_id in %s
group by h.employee_id""", (tuple(ids),))
res = cr.dictfetchall()
remaining = {}
for r in res:
remaining[r['employee_id']] = r['days']
for employee_id in ids:
if not remaining.get(employee_id):
remaining[employee_id] = 0.0
return remaining
def _get_leave_status(self, cr, uid, ids, name, args, context=None):
holidays_obj = self.pool.get('hr.holidays')
holidays_id = holidays_obj.search(cr, uid,
[('employee_id', 'in', ids), ('date_from','<=',time.strftime('%Y-%m-%d %H:%M:%S')),
('date_to','>=',time.strftime('%Y-%m-%d 23:59:59')),('type','=','remove'),('state','not in',('cancel','refuse'))],
context=context)
result = {}
for id in ids:
result[id] = {
'current_leave_state': False,
'current_leave_id': False,
'leave_date_from':False,
'leave_date_to':False,
}
for holiday in self.pool.get('hr.holidays').browse(cr, uid, holidays_id, context=context):
result[holiday.employee_id.id]['leave_date_from'] = holiday.date_from
result[holiday.employee_id.id]['leave_date_to'] = holiday.date_to
result[holiday.employee_id.id]['current_leave_state'] = holiday.state
result[holiday.employee_id.id]['current_leave_id'] = holiday.holiday_status_id.id
return result
def _leaves_count(self, cr, uid, ids, field_name, arg, context=None):
Holidays = self.pool['hr.holidays']
return {
employee_id: Holidays.search_count(cr,uid, [('employee_id', '=', employee_id), ('type', '=', 'remove')], context=context)
for employee_id in ids
}
_columns = {
'remaining_leaves': fields.function(_get_remaining_days, string='Remaining Legal Leaves', fnct_inv=_set_remaining_days, type="float", help='Total number of legal leaves allocated to this employee, change this value to create allocation/leave request. Total based on all the leave types without overriding limit.'),
'current_leave_state': fields.function(_get_leave_status, multi="leave_status", string="Current Leave Status", type="selection",
selection=[('draft', 'New'), ('confirm', 'Waiting Approval'), ('refuse', 'Refused'),
('validate1', 'Waiting Second Approval'), ('validate', 'Approved'), ('cancel', 'Cancelled')]),
'current_leave_id': fields.function(_get_leave_status, multi="leave_status", string="Current Leave Type",type='many2one', relation='hr.holidays.status'),
'leave_date_from': fields.function(_get_leave_status, multi='leave_status', type='date', string='From Date'),
'leave_date_to': fields.function(_get_leave_status, multi='leave_status', type='date', string='To Date'),
'leaves_count': fields.function(_leaves_count, type='integer', string='Leaves'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pathawks/tweet-dump | tweet-dump.py | 1 | 7425 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''Load the latest update for a Twitter user and leave it in an XHTML fragment'''
__author__ = 'Pat Hawks <[email protected]>'
__version__ = '0.2'
import cgi
import codecs
import getopt
import sys
import twitter
import keyring
import webbrowser
TWEET_TEMPLATE = """
<blockquote class="twitter-tweet tw-align-center" width="500"><p>{tweet_text}</p>— {user_name} (@{screen_name}) <a href="https://twitter.com/{screen_name}/status/{id}" data-datetime="{date_datetime}">{date_string}</a></blockquote>
"""
HTML_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<!--
tweet-dump.py
https://github.com/pathawks/tweet-dump
Backup Date: {current_time}
-->
<title>{name} (@{screen_name})</title>
<base href="http://twitter.com/">
<link rel="icon" href="{url}" type="image/png"/>
<style>
body{text-align:center;color:#{color};padding:0;margin:0;font-family:"Helvetica Neue", Arial, sans-serif;font-size:14px;background:#{bg};background-attachment:fixed}
a{color:#{color}}
#container{margin:0 auto;width:500px;background:rgba(255,255,255,0.3);padding:8px 14px}
#profile,blockquote{background:#fff;border:#DDD 1px solid;border-radius:5px;width:480px;margin:0 0 10px;padding:0 0 20px 20px;text-align:left}
#profile{color:#fff;background:#ccc;background:rgba(0,0,0,0.6);width:490px;text-align:center;padding:5px}
#profile a{color:#fff}
.profile-picture{margin:20px auto 6px;border:#fff 4px solid;border-radius:4px;display:block;width:73px;height:73px;background:#fff}
.profile-picture .avatar{border-radius:3px}
.profile-card-inner h1{font-size:24px;text-shadow:rgba(0, 0, 0, 0.5) 0px 0.6px 0.6px}
.profile-card-inner h2{font-size:18px;font-weight:normal}
</style>
</head>
<body>
<div id="container">
<div id="profile">
<a href="{avatar}" class="profile-picture">
<img src="{avatar}" alt="{name}" class="avatar">
</a>
<div class="profile-card-inner">
<h1 class="fullname">{name}</h1>
<h2 class="username"><span class="screen-name">@{screen_name}</span></h2>
<p class="bio ">{bio}</p>
<p class="location-and-url">
<span class="location">{location}</span> ·
<span class="url">
<a href="{url}">{url}</a>
</span>
</p>
</div>
</div>
{timeline}
</div>
<script src="http://platform.twitter.com/widgets.js" charset="utf-8"></script>
</body>
</html>
"""
def print_banner():
print "tweet-dump %s ©2013 %s" % (__version__, __author__)
print """ .-.
(. .)__,')
/ V )
\ ( \/ .
`._`.__\\ o ,
<< `' .o..
"""
def Usage():
print_banner()
print 'Usage: %s [options] twitterid' % __file__
print
print '  This script fetches a user\'s latest Twitter update and stores'
print ' the result in a file as an XHTML fragment'
print
print ' Options:'
print ' -h, --help print this help'
print ' -o, --output the output file [default: stdout]'
print ' -n, --number the number of Tweets to retrieve [default: 1]'
def get_access_token(consumer_key, consumer_secret):
try:
from urlparse import parse_qsl
except:
from cgi import parse_qsl
import webbrowser
import oauth2 as oauth
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
oauth_client = oauth.Client(oauth_consumer)
print 'Requesting temp token from Twitter'
resp, content = oauth_client.request(REQUEST_TOKEN_URL, 'POST', body="oauth_callback=oob")
if resp['status'] != '200':
print 'Invalid response from Twitter requesting temp token: %s' % resp['status']
else:
request_token = dict(parse_qsl(content))
url = '%s?oauth_token=%s' % (AUTHORIZATION_URL, request_token['oauth_token'])
print ''
print 'I will try to start a browser to visit the following Twitter page'
print 'if a browser does not start, copy the URL into your browser'
print 'and retrieve the pincode to be used'
print 'in the next step of obtaining an Authentication Token:'
print ''
print url
print ''
webbrowser.open(url)
pincode = raw_input('Pincode? ')
token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
token.set_verifier(pincode)
print ''
print 'Generating and signing request for an access token'
print ''
oauth_client = oauth.Client(oauth_consumer, token)
resp, content = oauth_client.request(ACCESS_TOKEN_URL, method='POST', body='oauth_callback=oob&oauth_verifier=%s' % pincode)
access_token = dict(parse_qsl(content))
if resp['status'] != '200':
print 'The request for a Token did not succeed: %s' % resp['status']
print access_token
else:
print 'Your Twitter Access Token key: %s' % access_token['oauth_token']
print ' Access Token secret: %s' % access_token['oauth_token_secret']
print ''
keyring.set_password(__file__, 'oauth_consumer', consumer_key)
keyring.set_password(__file__, 'oauth_consumer_secret', consumer_secret)
keyring.set_password(__file__, 'oauth_token', access_token['oauth_token'])
keyring.set_password(__file__, 'oauth_token_secret', access_token['oauth_token_secret'])
def UserSignIn():
print_banner()
print 'Before you can use %s, you must sign in with Twitter' % __file__
print
print 'Setup a new Twitter Application at https://dev.twitter.com/apps/new'
print 'Then provide your applications details below'
print
webbrowser.open('https://dev.twitter.com/apps/new')
consumer_key = raw_input('Enter your consumer key: ')
consumer_secret = raw_input("Enter your consumer secret: ")
get_access_token(consumer_key, consumer_secret)
def FetchTwitter(user, output, number):
assert user
statuses = twitter.Api(
consumer_key=keyring.get_password(__file__, 'oauth_consumer'),
consumer_secret=keyring.get_password(__file__, 'oauth_consumer_secret'),
access_token_key=keyring.get_password(__file__, 'oauth_token'),
access_token_secret=keyring.get_password(__file__, 'oauth_token_secret')
).GetUserTimeline(screen_name=user, count=number)
for s in statuses:
xhtml = TWEET_TEMPLATE.format(
tweet_text = cgi.escape(s.text).encode('ascii', 'xmlcharrefreplace'),
user_name = s.user.name,
screen_name = s.user.screen_name,
id = s.id,
date_datetime = s.created_at_in_seconds,
date_string = s.relative_created_at
)
if output:
Save(xhtml, output)
else:
print xhtml
def Save(xhtml, output):
out = codecs.open(output, mode='w', encoding='ascii',
errors='xmlcharrefreplace')
out.write(xhtml)
out.close()
def main():
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], 'ho:n:', ['help', 'output=', 'number='])  # 'o' and 'n' take arguments
except getopt.GetoptError:
Usage()
sys.exit(2)
try:
user = args[0]
except:
Usage()
sys.exit(2)
output = None
number = 1
for o, a in opts:
if o in ("-h", "--help"):
Usage()
sys.exit(2)
if o in ("-o", "--output"):
output = a
if o in ("-n", "--number"):
number = a
FetchTwitter(user, output, number)
if __name__ == "__main__":
if keyring.get_password(__file__, 'oauth_consumer'):
main()
else:
UserSignIn()
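# Example invocation (hypothetical handle and output file):
#   ./tweet-dump.py --number 5 --output latest.html pathawks
# On the first run no OAuth tokens are stored in the keyring yet, so
# UserSignIn() walks through the Twitter application setup; subsequent runs
# reuse the stored tokens and go straight to main()/FetchTwitter().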
| apache-2.0 |
oranagra/redis | deps/jemalloc/scripts/gen_travis.py | 21 | 3183 | #!/usr/bin/env python
from itertools import combinations
travis_template = """\
language: generic
matrix:
include:
%s
before_script:
- autoconf
- ./configure ${COMPILER_FLAGS:+ \
CC="$CC $COMPILER_FLAGS" \
CXX="$CXX $COMPILER_FLAGS" } \
$CONFIGURE_FLAGS
- make -j3
- make -j3 tests
script:
- make check
"""
# The 'default' configuration is gcc, on linux, with no compiler or configure
# flags. We also test with clang, -m32, --enable-debug, --enable-prof,
# --disable-stats, and --with-malloc-conf=tcache:false. To avoid abusing
# travis though, we don't test all 2**7 = 128 possible combinations of these;
# instead, we only test combinations of up to 2 'unusual' settings, under the
# hope that bugs involving interactions of such settings are rare.
# Combinations of up to MAX_UNUSUAL_OPTIONS settings are tested at once; for seven 'unusual' settings that gives C(7, 0) + C(7, 1) + C(7, 2) = 29 configurations.
MAX_UNUSUAL_OPTIONS = 2
os_default = 'linux'
os_unusual = 'osx'
compilers_default = 'CC=gcc CXX=g++'
compilers_unusual = 'CC=clang CXX=clang++'
compiler_flag_unusuals = ['-m32']
configure_flag_unusuals = [
'--enable-debug',
'--enable-prof',
'--disable-stats',
]
malloc_conf_unusuals = [
'tcache:false',
'dss:primary',
'percpu_arena:percpu',
'background_thread:true',
]
all_unusuals = (
[os_unusual] + [compilers_unusual] + compiler_flag_unusuals
+ configure_flag_unusuals + malloc_conf_unusuals
)
unusual_combinations_to_test = []
for i in xrange(MAX_UNUSUAL_OPTIONS + 1):
unusual_combinations_to_test += combinations(all_unusuals, i)
include_rows = ""
for unusual_combination in unusual_combinations_to_test:
os = os_default
if os_unusual in unusual_combination:
os = os_unusual
compilers = compilers_default
if compilers_unusual in unusual_combination:
compilers = compilers_unusual
compiler_flags = [
x for x in unusual_combination if x in compiler_flag_unusuals]
configure_flags = [
x for x in unusual_combination if x in configure_flag_unusuals]
malloc_conf = [
x for x in unusual_combination if x in malloc_conf_unusuals]
# Filter out unsupported configurations on OS X.
if os == 'osx' and ('dss:primary' in malloc_conf or \
'percpu_arena:percpu' in malloc_conf or 'background_thread:true' \
in malloc_conf):
continue
if len(malloc_conf) > 0:
configure_flags.append('--with-malloc-conf=' + ",".join(malloc_conf))
# Filter out an unsupported configuration - heap profiling on OS X.
if os == 'osx' and '--enable-prof' in configure_flags:
continue
# We get some spurious errors when -Warray-bounds is enabled.
env_string = ('{} COMPILER_FLAGS="{}" CONFIGURE_FLAGS="{}" '
'EXTRA_CFLAGS="-Werror -Wno-array-bounds"').format(
compilers, " ".join(compiler_flags), " ".join(configure_flags))
include_rows += ' - os: %s\n' % os
include_rows += ' env: %s\n' % env_string
if '-m32' in unusual_combination and os == 'linux':
include_rows += ' addons:\n'
include_rows += ' apt:\n'
include_rows += ' packages:\n'
include_rows += ' - gcc-multilib\n'
print travis_template % include_rows
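# For reference, one generated matrix entry looks like (illustrative values):
#
#   - os: linux
#     env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
#
# (Presumably the printed output is redirected into .travis.yml; the script
# itself only writes to stdout.)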
| bsd-3-clause |
Gabriel439/pants | tests/python/pants_test/backend/codegen/targets/test_java_protobuf_library.py | 1 | 3446 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.codegen.targets.java_protobuf_library import JavaProtobufLibrary
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.base.build_file_aliases import BuildFileAliases
from pants_test.base_test import BaseTest
class JavaProtobufLibraryTest(BaseTest):
@property
def alias_groups(self):
return BuildFileAliases(targets={'java_protobuf_library': JavaProtobufLibrary,
'jar_library': JarLibrary},
objects={'jar': JarDependency})
def test_empty(self):
self.add_to_build_file('BUILD', dedent('''
java_protobuf_library(name='foo',
sources=[],
)'''))
target = self.target('//:foo')
self.assertIsInstance(target, JavaProtobufLibrary)
self.assertSequenceEqual([], target.imported_jars)
traversable_specs = [seq for seq in target.traversable_specs]
self.assertSequenceEqual([], traversable_specs)
def test_jar_library_imports(self):
self.add_to_build_file('BUILD', dedent('''
java_protobuf_library(name='foo',
sources=[],
imports=[':import_jars',]
)
jar_library(name='import_jars',
jars=[
jar(org='foo', name='bar', rev='123'),
],
)
'''))
target = self.target('//:foo')
self.assertIsInstance(target, JavaProtobufLibrary)
self.assertEquals(1, len(target.imported_jars))
import_jar_dep = target.imported_jars[0]
self.assertIsInstance(import_jar_dep, JarDependency)
def test_wrong_import_type1(self):
self.add_to_build_file('BUILD', dedent('''
java_protobuf_library(name='foo',
sources=[],
imports=[':not_jar']
)
java_protobuf_library(name='not_jar',
sources=[],
)
'''))
target = self.target('//:foo')
self.assertIsInstance(target, JavaProtobufLibrary)
with self.assertRaises(JarLibrary.WrongTargetTypeError):
target.imported_jars
def test_wrong_import_type2(self):
self.add_to_build_file('BUILD', dedent('''
java_protobuf_library(name='foo',
sources=[],
imports=[
jar(org='foo', name='bar', rev='123'),
],
)
'''))
target = self.target('//:foo')
self.assertIsInstance(target, JavaProtobufLibrary)
with self.assertRaises(JarLibrary.ExpectedAddressError):
target.imported_jars
def test_traversable_specs(self):
self.add_to_build_file('BUILD', dedent('''
java_protobuf_library(name='foo',
sources=[],
imports=[':import_jars',],
# Note: Should not be a part of traversable_specs
dependencies=[
':proto_dep',
]
)
jar_library(name='import_jars',
jars=[
jar(org='foo', name='bar', rev='123'),
],
)
java_protobuf_library(name='proto_dep',
sources=[],
)
'''))
target = self.target('//:foo')
self.assertIsInstance(target, JavaProtobufLibrary)
traversable_specs = [spec for spec in target.traversable_specs]
self.assertSequenceEqual([':import_jars'], traversable_specs)
| apache-2.0 |
flyingbanana1024102/transmission-line-simulator | src/models/model.py | 1 | 3474 | #
# Transmission Line Simulator
#
# Author(s): Jiacong Xu
# Created: May-28-2017
#
from util.constants import *
from collections import deque
from circuit import Circuit
import numpy as np
class AppState(object):
"""
Describes the valid states in this app.
"""
Editing = 'editing'
Simulating = 'simulating'
Paused = 'paused'
class Model(object):
"""
An instance that keeps data on the simulation, including circuit info,
wave generation, animations, and so on.
circuit: model representing the current state of electrical
components in the system.
waveSpeed: speed of wave, in m/s.
simSpeed: simulation speed, a multiplier for elapsed time.
appState: the application state.
elapsed: amount of elapsed time, in seconds.
maxAmplitude: the maximum amplitude reached within the current
simulation.
"""
def __init__(self):
"""
Initializes a brand new model for a fresh app start.
"""
self.graph = [np.array([]), np.array([])]
self.circuit = Circuit()
self.simSpeed = 1.0 / NS_IN_S
self.elapsed = 0
self._lastStep = 0
self.maxAmplitude = 10
self.appState = AppState.Editing
def simulate(self, dt):
"""
Simulate the system by step dt, in seconds.
"""
last = self.elapsed
self.elapsed += dt * self.simSpeed
# Determine how many steps must be made.
segs = int(STEPS_PER_NS * (self.elapsed - self._lastStep) * NS_IN_S)
for s in range(segs):
self._lastStep += 1.0 / STEPS_PER_NS / NS_IN_S
self._step()
# Recompute overall
self.graph = [np.array([]), np.array([])]
e = self.circuit.head.next
while e.next != None:
self.graph[0] = np.concatenate((self.graph[0], e.xs))
v = e.forward + e.backward
if len(v) > 0:
self.maxAmplitude = max(self.maxAmplitude, v.max(), v.min())
self.graph[1] = np.concatenate((self.graph[1], v))
e = e.next
# Update every oscilloscope
h = self.circuit.headOscilloscope
i = 1
while h != None:
while i < len(self.graph[0]) and self.graph[0][i] < h.position:
i += 1
h.record(self._lastStep, self.graph[1][i - 1])
h = h.next
def reset(self):
"""
Resets the simulation, but not the circuit.
"""
self.elapsed = 0
self._lastStep = 0
self.graph = [np.array([]), np.array([])]
self.circuit.reset()
self.maxAmplitude = 10
def _step(self):
"""
Simulates a discrete step for each part of the circuit.
"""
# We go through each discretized value in forward and backward
# currents, deciding whether it should move or not, and how it
# should move.
e = self.circuit.head
while e != None:
e.split()
e = e.next
# Now shift
e = self.circuit.head
while e.next != None:
e.rotateBackward()
e = e.next
while e != None:
e.rotateForward()
e = e.prev
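# Minimal usage sketch (assumed calling convention, not part of the original file):
#   model = Model()
#   model.appState = AppState.Simulating
#   model.simulate(1.0 / 60)   # advance by one 60 fps frame of wall-clock time
# With the default simSpeed of 1.0 / NS_IN_S, one second of wall-clock dt
# advances the simulated time by one nanosecond (assuming NS_IN_S == 1e9).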
| mit |
hydrius/schwarzschild-radius | old/SchwarzschildRadius1/buildings.py | 1 | 1719 | class buildings:
@staticmethod
def builds(building, quantity, player, planet, time, *kwargs):
success = 0
#planet = player['planets'][player['viewPlanetIndex']]
for key in buildings.buildings[building]["cost"].keys():
if planet['resource'][key] - (buildings.buildings[building]["cost"][key] * quantity) > 0:
success +=1
if success == len(buildings.buildings[building]["cost"]):
for key in buildings.buildings[building]["cost"].keys():
planet['resource'][key] -= buildings.buildings[building]["cost"][key]*quantity
planet['building'][building] +=quantity
return True
else:
return False
buildings = {"mine": {
"cost": {
"REE": 200,
"water": 100
},
"output": 100,
"requirements": None
},
"farm": {
"cost": {
"REE": 200,
"water": 100
},
"output": 100,
"requirements": None
},
"desalination": {
"cost": {
"REE": 200,
"water": 100
},
"output": 100,
"requirements": None
}
}
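# Example (hypothetical planet dict): building one extra "mine" on a planet with
# {'resource': {'REE': 500, 'water': 300}, 'building': {'mine': 2}}
# deducts 200 REE and 100 water, leaves planet['building']['mine'] == 3 and
# returns True; if any resource would drop to zero or below, nothing is
# deducted and builds() returns False.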
| mit |
hynnet/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_transformer.py | 136 | 1120 | import unittest
from test import test_support
# Silence Py3k warning
test_support.import_module('compiler', deprecated=True)
from compiler import transformer, ast
from compiler import compile
class Tests(unittest.TestCase):
def testMultipleLHS(self):
""" Test multiple targets on the left hand side. """
snippets = ['a, b = 1, 2',
'(a, b) = 1, 2',
'((a, b), c) = (1, 2), 3']
for s in snippets:
a = transformer.parse(s)
self.assertIsInstance(a, ast.Module)
child1 = a.getChildNodes()[0]
self.assertIsInstance(child1, ast.Stmt)
child2 = child1.getChildNodes()[0]
self.assertIsInstance(child2, ast.Assign)
# This actually tests the compiler, but it's a way to assure the ast
# is correct
c = compile(s, '<string>', 'single')
vals = {}
exec c in vals
assert vals['a'] == 1
assert vals['b'] == 2
def test_main():
test_support.run_unittest(Tests)
if __name__ == "__main__":
test_main()
| gpl-2.0 |
furbrain/Coherence | coherence/backends/feed_storage.py | 3 | 7861 | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2009, Dominik Ruf <dominikruf at googlemail dot com>
from coherence.backend import BackendItem
from coherence.backend import BackendStore
from coherence.upnp.core import DIDLLite
from coherence.upnp.core.utils import ReverseProxyUriResource
from xml.etree.ElementTree import ElementTree
import urllib
import httplib
from urlparse import urlsplit
try:
import feedparser
except:
raise ImportError("""
This backend depends on the feedparser module.
You can get it at http://www.feedparser.org/.""")
MIME_TYPES_EXTENTION_MAPPING = {'mp3': 'audio/mpeg', }
ROOT_CONTAINER_ID = 0
AUDIO_ALL_CONTAINER_ID = 51
AUDIO_ARTIST_CONTAINER_ID = 52
AUDIO_ALBUM_CONTAINER_ID = 53
VIDEO_FOLDER_CONTAINER_ID = 54
class RedirectingReverseProxyUriResource(ReverseProxyUriResource):
def render(self, request):
self.uri = self.follow_redirect(self.uri)
self.resetUri(self.uri)
return ReverseProxyUriResource.render(self, request)
def follow_redirect(self, uri):
netloc, path, query, fragment = urlsplit(uri)[1:]
conn = httplib.HTTPConnection(netloc)
conn.request('HEAD', '%s?%s#%s' % (path, query, fragment))
res = conn.getresponse()
if(res.status == 301 or res.status == 302):
return self.follow_redirect(res.getheader('location'))
else:
return uri
class FeedStorageConfigurationException(Exception):
pass
class FeedContainer(BackendItem):
def __init__(self, parent_id, id, title):
BackendItem.__init__(self)
self.id = id
self.parent_id = parent_id
self.name = title
self.mimetype = 'directory'
self.item = DIDLLite.Container(self.id, self.parent_id, self.name)
self.children = []
def get_children(self, start=0, end=0):
"""returns all the chidlren of this container"""
if end != 0:
return self.children[start:end]
return self.children[start:]
def get_child_count(self):
"""returns the number of children in this container"""
return len(self.children)
class FeedEnclosure(BackendItem):
def __init__(self, store, parent, id, title, enclosure):
BackendItem.__init__(self)
self.store = store
self.parent = parent
self.external_id = id
self.name = title
self.location = RedirectingReverseProxyUriResource(enclosure.url.encode('latin-1'))
# doing this because some (Fraunhofer Podcast) feeds say their mime type is audio/x-mpeg
# which at least my XBOX doesn't like
ext = enclosure.url.rsplit('.', 1)[-1]  # the part after the last dot, i.e. the file extension
if ext in MIME_TYPES_EXTENTION_MAPPING:
mime_type = MIME_TYPES_EXTENTION_MAPPING[ext]
else:
mime_type = enclosure.type
if(enclosure.type.startswith('audio')):
self.item = DIDLLite.AudioItem(id, parent, self.name)
elif(enclosure.type.startswith('video')):
self.item = DIDLLite.VideoItem(id, parent, self.name)
elif(enclosure.type.startswith('image')):
self.item = DIDLLite.ImageItem(id, parent, self.name)
res = DIDLLite.Resource("%s%d" % (store.urlbase, id), 'http-get:*:%s:*' % mime_type)
self.item.res.append(res)
class FeedStore(BackendStore):
"""a general feed store"""
logCategory = 'feed_store'
implements = ['MediaServer']
def __init__(self, server, **kwargs):
BackendStore.__init__(self, server, **kwargs)
self.name = kwargs.get('name', 'Feed Store')
self.urlbase = kwargs.get('urlbase', '')
if(len(self.urlbase) > 0 and
self.urlbase[len(self.urlbase) - 1] != '/'):
self.urlbase += '/'
self.feed_urls = kwargs.get('feed_urls')
self.opml_url = kwargs.get('opml_url')
if(not(self.feed_urls or self.opml_url)):
raise FeedStorageConfigurationException("either feed_urls or opml_url has to be set")
if(self.feed_urls and self.opml_url):
raise FeedStorageConfigurationException("only feed_urls OR opml_url can be set")
self.server = server
self.refresh = int(kwargs.get('refresh', 1)) * (60 * 60) # TODO: not used yet
self.store = {}
self.wmc_mapping = {'4': str(AUDIO_ALL_CONTAINER_ID), # all tracks
'7': str(AUDIO_ALBUM_CONTAINER_ID), # all albums
'6': str(AUDIO_ARTIST_CONTAINER_ID), # all artists
'15': str(VIDEO_FOLDER_CONTAINER_ID), # all videos
}
self.store[ROOT_CONTAINER_ID] = FeedContainer(-1, ROOT_CONTAINER_ID, self.name)
self.store[AUDIO_ALL_CONTAINER_ID] = FeedContainer(-1, AUDIO_ALL_CONTAINER_ID, 'AUDIO_ALL_CONTAINER')
self.store[AUDIO_ALBUM_CONTAINER_ID] = FeedContainer(-1, AUDIO_ALBUM_CONTAINER_ID, 'AUDIO_ALBUM_CONTAINER')
self.store[VIDEO_FOLDER_CONTAINER_ID] = FeedContainer(-1, VIDEO_FOLDER_CONTAINER_ID, 'VIDEO_FOLDER_CONTAINER')
try:
self._update_data()
except Exception, e:
self.error('error while updating the feed content for %s: %s', self.name, str(e))
self.init_completed()
def get_by_id(self, id):
"""returns the item according to the DIDLite id"""
if isinstance(id, basestring):
id = id.split('@', 1)
id = id[0]
try:
return self.store[int(id)]
except (ValueError, KeyError):
self.info("can't get item %d from %s feed storage", int(id), self.name)
return None
def _update_data(self):
"""get the feed xml, parse it, etc."""
feed_urls = []
if(self.opml_url):
tree = ElementTree(file=urllib.urlopen(self.opml_url))
body = tree.find('body')
for outline in body.findall('outline'):
feed_urls.append(outline.attrib['url'])
if(self.feed_urls):
feed_urls = self.feed_urls.split()
container_id = 100
item_id = 1001
for feed_url in feed_urls:
netloc, path, query, fragment = urlsplit(feed_url)[1:]
conn = httplib.HTTPConnection(netloc)
conn.request('HEAD', '%s?%s#%s' % (path, query, fragment))
res = conn.getresponse()
if res.status >= 400:
self.warning('error getting %s status code: %d', feed_url, res.status)
continue
fp_dict = feedparser.parse(feed_url)
name = fp_dict.feed.title
self.store[container_id] = FeedContainer(ROOT_CONTAINER_ID, container_id, name)
self.store[ROOT_CONTAINER_ID].children.append(self.store[container_id])
self.store[VIDEO_FOLDER_CONTAINER_ID].children.append(self.store[container_id])
self.store[AUDIO_ALBUM_CONTAINER_ID].children.append(self.store[container_id])
for item in fp_dict.entries:
for enclosure in item.enclosures:
self.store[item_id] = FeedEnclosure(self, container_id, item_id, '%04d - %s' % (item_id, item.title), enclosure)
self.store[container_id].children.append(self.store[item_id])
if enclosure.type.startswith('audio'):
self.store[AUDIO_ALL_CONTAINER_ID].children.append(self.store[item_id])
if not isinstance(self.store[container_id].item, DIDLLite.MusicAlbum):
self.store[container_id].item = DIDLLite.MusicAlbum(container_id, AUDIO_ALBUM_CONTAINER_ID, name)
item_id += 1
if container_id <= 1000:
container_id += 1
else:
raise Exception('too many containers')
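# Configuration sketch (hypothetical values): Coherence instantiates the store
# with keyword arguments, roughly like
#   FeedStore(server, name='Podcasts', urlbase='http://192.168.0.10:8080/',
#             feed_urls='http://example.org/a.rss http://example.org/b.rss')
# Exactly one of feed_urls (whitespace separated) or opml_url must be given,
# otherwise FeedStorageConfigurationException is raised in __init__.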
| mit |
etherkit/OpenBeacon2 | client/linux-x86/venv/lib/python3.8/site-packages/pkg_resources/_vendor/appdirs.py | 177 | 24701 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 4, 3)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith('java'):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
system = 'win32'
elif os_name.startswith('Mac'): # "Mac OS X", etc.
system = 'darwin'
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = 'linux2'
else:
system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
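# Illustrative sketch (the helper name is ours, not part of the appdirs API):
# typical calls to user_data_dir(). The exact result depends on the platform,
# the current user and environment variables such as XDG_DATA_HOME.
def _example_user_data_dir():
    # e.g. "~/.local/share/MyApp" on Unix, "~/Library/Application Support/MyApp"
    # on Mac OS X, "C:\Users\<user>\AppData\Local\MyCompany\MyApp" on Windows
    print(user_data_dir("MyApp", "MyCompany"))
    # versioned, roaming variant (roaming only has an effect on Windows)
    print(user_data_dir("MyApp", "MyCompany", version="1.0", roaming=True))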
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
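# Illustrative sketch (helper name is ours, not part of the appdirs API):
# site_data_dir() returns the first XDG data dir by default, or all of them
# joined with os.pathsep when multipath=True. Actual values depend on
# XDG_DATA_DIRS and the platform.
def _example_site_data_dir():
    # e.g. "/usr/local/share/MyApp" on Unix
    print(site_data_dir("MyApp", "MyCompany"))
    # e.g. "/usr/local/share/MyApp:/usr/share/MyApp" on Unix
    print(site_data_dir("MyApp", "MyCompany", multipath=True))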
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user config directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.config/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical site config directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system in ["win32", "darwin"]:
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
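# Illustrative sketch (helper name is ours, not part of the appdirs API):
# user_cache_dir() with and without the opinionated "Cache" suffix that is
# appended on Windows. Results vary by platform and environment
# (e.g. XDG_CACHE_HOME on Unix).
def _example_user_cache_dir():
    # e.g. "~/.cache/MyApp" on Unix, "~/Library/Caches/MyApp" on Mac OS X
    print(user_cache_dir("MyApp", "MyCompany"))
    # opinion=False drops the trailing "Cache" component on Windows
    print(user_cache_dir("MyApp", "MyCompany", opinion=False))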
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific state dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user state directories are:
Mac OS X: same as user_data_dir
Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
to extend the XDG spec and support $XDG_STATE_HOME.
That means, by default "~/.local/state/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if system == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname=None, appauthor=None, version=None,
roaming=False, multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_state_dir(self):
return user_state_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
if PY3:
import winreg as _winreg
else:
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"site_data_dir",
"site_config_dir")
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
| gpl-3.0 |
CyrilPeponnet/Archipel | ArchipelAgent/archipel-agent-hypervisor-geolocalization/archipelagenthypervisorgeolocalization/geoloc.py | 4 | 7374 | # -*- coding: utf-8 -*-
#
# geoloc.py
#
# Copyright (C) 2010 Antoine Mercadal <[email protected]>
# Copyright, 2011 - Franck Villaume <[email protected]>
# This file is part of ArchipelProject
# http://archipelproject.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import httplib
from archipelcore.archipelPlugin import TNArchipelPlugin
from archipelcore.utils import build_error_iq
from archipelcore import xmpp
ARCHIPEL_NS_HYPERVISOR_GEOLOC = "archipel:hypervisor:geolocalization"
ARCHIPEL_ERROR_CODE_LOCALIZATION_GET = -9001
class TNHypervisorGeolocalization (TNArchipelPlugin):
"""
This plugin allow to geolocalize the hypervisor.
"""
def __init__(self, configuration, entity, entry_point_group):
"""
Initialize the module.
@type configuration: Configuration object
@param configuration: the configuration
@type entity: L{TNArchipelEntity}
@param entity: the entity that owns the plugin
@type entry_point_group: string
@param entry_point_group: the group name of plugin entry_point
"""
TNArchipelPlugin.__init__(self, configuration=configuration, entity=entity, entry_point_group=entry_point_group)
self.plugin_deactivated = False
try:
mode = self.configuration.get("GEOLOCALIZATION", "localization_mode")
lat = ""
lon = ""
if mode == "auto":
service = self.configuration.get("GEOLOCALIZATION", "localization_service_url")
request = self.configuration.get("GEOLOCALIZATION", "localization_service_request")
method = self.configuration.get("GEOLOCALIZATION", "localization_service_method")
conn = httplib.HTTPConnection(service)
conn.request(method, request)
data_node = xmpp.simplexml.NodeBuilder(data=str(conn.getresponse().read())).getDom()
lat = data_node.getTagData("Latitude")
lon = data_node.getTagData("Longitude")
else:
lat = self.configuration.getfloat("GEOLOCALIZATION", "localization_latitude")
lon = self.configuration.getfloat("GEOLOCALIZATION", "localization_longitude")
string = "<gelocalization><Latitude>"+str(lat)+"</Latitude>\n<Longitude>"+str(lon)+"</Longitude></gelocalization>"
self.localization_information = xmpp.simplexml.NodeBuilder(data=string).getDom()
registrar_item = { "commands" : ["where are you", "localize"],
"parameters": {},
"method": self.message_get,
"permissions": ["geolocalization_get"],
"description": "give my the latitude and longitude." }
self.entity.add_message_registrar_item(registrar_item)
# permissions
self.entity.permission_center.create_permission("geolocalization_get", "Authorizes user to get the entity location coordinates", False)
except Exception as ex:
            self.plugin_deactivated = True
self.entity.log.error("Cannot initialize geolocalization. plugin deactivated. Exception: %s" % str(ex))
### Plugin interface
def register_handlers(self):
"""
This method will be called by the plugin user when it will be
necessary to register module for listening to stanza.
"""
if self.plugin_deactivated:
return
self.entity.xmppclient.RegisterHandler('iq', self.process_iq, ns=ARCHIPEL_NS_HYPERVISOR_GEOLOC)
def unregister_handlers(self):
"""
Unregister the handlers.
"""
if self.plugin_deactivated:
return
self.entity.xmppclient.UnregisterHandler('iq', self.process_iq, ns=ARCHIPEL_NS_HYPERVISOR_GEOLOC)
@staticmethod
def plugin_info():
"""
Return informations about the plugin.
@rtype: dict
@return: dictionary contaning plugin informations
"""
plugin_friendly_name = "Hypervisor Geolocalization"
plugin_identifier = "geolocalization"
plugin_configuration_section = "GEOLOCALIZATION"
plugin_configuration_tokens = [ "localization_mode",
"localization_latitude",
"localization_longitude",
"localization_service_url",
"localization_service_request",
"localization_service_method",
"localization_service_response_root_node"]
return { "common-name" : plugin_friendly_name,
"identifier" : plugin_identifier,
"configuration-section" : plugin_configuration_section,
"configuration-tokens" : plugin_configuration_tokens }
### XMPP Processing
def process_iq(self, conn, iq):
"""
This method is invoked when a ARCHIPEL_NS_HYPERVISOR_GEOLOC IQ is received.
It understands IQ of type:
- get
@type conn: xmpp.Dispatcher
        @param conn: the instance of the current connection that sent the stanza
@type iq: xmpp.Protocol.Iq
@param iq: the received IQ
"""
reply = None
action = self.entity.check_acp(conn, iq)
self.entity.check_perm(conn, iq, action, -1, prefix="geolocalization_")
if action == "get":
reply = self.iq_get(iq)
if reply:
conn.send(reply)
raise xmpp.protocol.NodeProcessed
def iq_get(self, iq):
"""
Return the geolocalization information.
@type iq: xmpp.Protocol.Iq
@param iq: the received IQ
"""
reply = iq.buildReply("result")
try:
reply.setQueryPayload([self.localization_information])
except Exception as ex:
reply = build_error_iq(self, ex, iq, ARCHIPEL_ERROR_CODE_LOCALIZATION_GET)
return reply
def message_get(self, msg):
"""
Return the geolocalization information asked by message.
@type msg: xmpp.Protocol.Message
@param msg: the received message
@rtype: string
@return: string containing the answer to send
"""
lat = self.localization_information.getTagData("Latitude")
lon = self.localization_information.getTagData("Longitude")
return "I'm localized at longitude: %s latitude: %s" % (lon, lat) | agpl-3.0 |
JeffRoy/mi-dataset | mi/dataset/driver/adcpt_acfgm/dcl/pd8/adcpt_acfgm_dcl_pd8_driver_common.py | 1 | 1392 | #!/usr/bin/env python
# ##
# OOIPLACEHOLDER
#
# Copyright 2014 Raytheon Co.
##
__author__ = "Ronald Ronquillo"
from mi.core.log import get_logger
log = get_logger()
from mi.dataset.dataset_driver import DataSetDriver
from mi.dataset.parser.adcpt_acfgm_dcl_pd8 import AdcptAcfgmPd8Parser
MODULE_NAME = 'mi.dataset.parser.adcpt_acfgm_dcl_pd8'
ADCPT_ACFGM_DCL_PD8_RECOVERED_PARTICLE_CLASS = 'AdcptAcfgmPd8DclInstrumentRecoveredParticle'
ADCPT_ACFGM_DCL_PD8_TELEMETERED_PARTICLE_CLASS = 'AdcptAcfgmPd8DclInstrumentParticle'
class AdcptAcfgmDclPd8Driver:
def __init__(self, sourceFilePath, particleDataHdlrObj, parser_config):
self._sourceFilePath = sourceFilePath
self._particleDataHdlrObj = particleDataHdlrObj
self._parser_config = parser_config
def process(self):
with open(self._sourceFilePath, "r") as file_handle:
def exception_callback(exception):
log.trace("Exception: %s", exception)
self._particleDataHdlrObj.setParticleDataCaptureFailure()
parser = AdcptAcfgmPd8Parser(self._parser_config,
file_handle,
exception_callback)
driver = DataSetDriver(parser, self._particleDataHdlrObj)
driver.processFileStream()
return self._particleDataHdlrObj
| bsd-2-clause |
adobecs5/urp2015 | lib/python3.4/site-packages/pip/_vendor/cachecontrol/controller.py | 69 | 9047 | """
The httplib2 algorithms ported for use with requests.
"""
import re
import calendar
import time
from email.utils import parsedate_tz
from pip._vendor.requests.structures import CaseInsensitiveDict
from .cache import DictCache
from .serialize import Serializer
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
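# Illustrative sketch (helper name is ours, not part of cachecontrol): what
# parse_uri() returns for an absolute URI.
def _example_parse_uri():
    # -> ('http', 'example.com', '/path', 'q=1', 'frag')
    return parse_uri("http://example.com/path?q=1#frag")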
class CacheController(object):
"""An interface to see if request should cached or not.
"""
def __init__(self, cache=None, cache_etags=True, serializer=None):
self.cache = cache or DictCache()
self.cache_etags = cache_etags
self.serializer = serializer or Serializer()
def _urlnorm(self, uri):
"""Normalize the URL to create a safe key for the cache"""
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
scheme = scheme.lower()
authority = authority.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
defrag_uri = scheme + "://" + authority + request_uri
return defrag_uri
def cache_url(self, uri):
return self._urlnorm(uri)
def parse_cache_control(self, headers):
"""
Parse the cache control headers returning a dictionary with values
for the different directives.
"""
retval = {}
cc_header = 'cache-control'
if 'Cache-Control' in headers:
cc_header = 'Cache-Control'
if cc_header in headers:
parts = headers[cc_header].split(',')
parts_with_args = [
tuple([x.strip().lower() for x in part.split("=", 1)])
for part in parts if -1 != part.find("=")
]
parts_wo_args = [
(name.strip().lower(), 1)
for name in parts if -1 == name.find("=")
]
retval = dict(parts_with_args + parts_wo_args)
return retval
def cached_request(self, request):
"""
Return a cached response if it exists in the cache, otherwise
return False.
"""
cache_url = self.cache_url(request.url)
cc = self.parse_cache_control(request.headers)
# non-caching states
no_cache = True if 'no-cache' in cc else False
if 'max-age' in cc and cc['max-age'] == 0:
no_cache = True
# Bail out if no-cache was set
if no_cache:
return False
# It is in the cache, so lets see if it is going to be
# fresh enough
resp = self.serializer.loads(request, self.cache.get(cache_url))
# Check to see if we have a cached object
if not resp:
return False
headers = CaseInsensitiveDict(resp.headers)
now = time.time()
date = calendar.timegm(
parsedate_tz(headers['date'])
)
current_age = max(0, now - date)
# TODO: There is an assumption that the result will be a
# urllib3 response object. This may not be best since we
# could probably avoid instantiating or constructing the
# response until we know we need it.
resp_cc = self.parse_cache_control(headers)
# determine freshness
freshness_lifetime = 0
# Check the max-age pragma in the cache control header
if 'max-age' in resp_cc and resp_cc['max-age'].isdigit():
freshness_lifetime = int(resp_cc['max-age'])
# If there isn't a max-age, check for an expires header
elif 'expires' in headers:
expires = parsedate_tz(headers['expires'])
if expires is not None:
expire_time = calendar.timegm(expires) - date
freshness_lifetime = max(0, expire_time)
# determine if we are setting freshness limit in the req
if 'max-age' in cc:
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if 'min-fresh' in cc:
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
# adjust our current age by our min fresh
current_age += min_fresh
# see how fresh we actually are
fresh = (freshness_lifetime > current_age)
if fresh:
return resp
# we're not fresh. If we don't have an Etag, clear it out
if 'etag' not in headers:
self.cache.delete(cache_url)
# return the original handler
return False
def conditional_headers(self, request):
cache_url = self.cache_url(request.url)
resp = self.serializer.loads(request, self.cache.get(cache_url))
new_headers = {}
if resp:
headers = CaseInsensitiveDict(resp.headers)
if 'etag' in headers:
new_headers['If-None-Match'] = headers['ETag']
if 'last-modified' in headers:
new_headers['If-Modified-Since'] = headers['Last-Modified']
return new_headers
def cache_response(self, request, response, body=None):
"""
Algorithm for caching requests.
This assumes a requests Response object.
"""
# From httplib2: Don't cache 206's since we aren't going to
# handle byte range requests
if response.status not in [200, 203]:
return
response_headers = CaseInsensitiveDict(response.headers)
cc_req = self.parse_cache_control(request.headers)
cc = self.parse_cache_control(response_headers)
cache_url = self.cache_url(request.url)
# Delete it from the cache if we happen to have it stored there
no_store = cc.get('no-store') or cc_req.get('no-store')
if no_store and self.cache.get(cache_url):
self.cache.delete(cache_url)
# If we've been given an etag, then keep the response
if self.cache_etags and 'etag' in response_headers:
self.cache.set(
cache_url,
self.serializer.dumps(request, response, body=body),
)
# Add to the cache if the response headers demand it. If there
# is no date header then we can't do anything about expiring
# the cache.
elif 'date' in response_headers:
# cache when there is a max-age > 0
if cc and cc.get('max-age'):
if int(cc['max-age']) > 0:
self.cache.set(
cache_url,
self.serializer.dumps(request, response, body=body),
)
# If the request can expire, it means we should cache it
# in the meantime.
elif 'expires' in response_headers:
if response_headers['expires']:
self.cache.set(
cache_url,
self.serializer.dumps(request, response, body=body),
)
def update_cached_response(self, request, response):
"""On a 304 we will get a new set of headers that we want to
update our cached value with, assuming we have one.
This should only ever be called when we've sent an ETag and
gotten a 304 as the response.
"""
cache_url = self.cache_url(request.url)
cached_response = self.serializer.loads(request, self.cache.get(cache_url))
if not cached_response:
# we didn't have a cached response
return response
# Lets update our headers with the headers from the new request:
# http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
#
# The server isn't supposed to send headers that would make
# the cached body invalid. But... just in case, we'll be sure
        # to strip out ones we know that might be problematic due to
# typical assumptions.
excluded_headers = [
"content-length",
]
cached_response.headers.update(
dict((k, v) for k, v in response.headers.items()
if k.lower() not in excluded_headers)
)
# we want a 200 b/c we have content via the cache
cached_response.status = 200
# update our cache
self.cache.set(
cache_url,
self.serializer.dumps(request, cached_response),
)
return cached_response
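# Illustrative sketch (helper name is ours, not part of cachecontrol): building
# a controller with the default DictCache and inspecting Cache-Control
# directives the way cached_request()/cache_response() do internally.
def _example_controller():
    controller = CacheController()
    # normalized key used to store and look up responses in the cache
    key = controller.cache_url("http://example.com/resource")
    # -> {'max-age': '3600', 'no-store': 1}
    directives = controller.parse_cache_control(
        {"cache-control": "max-age=3600, no-store"})
    return key, directives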
| apache-2.0 |
vismartltd/edx-platform | common/djangoapps/heartbeat/tests/test_heartbeat.py | 114 | 1460 | """
Test the heartbeat
"""
import json
from django.core.urlresolvers import reverse
from django.db.utils import DatabaseError
from django.test.client import Client
from django.test.testcases import TestCase
from mock import patch
from xmodule.exceptions import HeartbeatFailure
@patch('heartbeat.views.modulestore')
class HeartbeatTestCase(TestCase):
"""
Test the heartbeat
"""
def setUp(self):
self.client = Client()
self.heartbeat_url = reverse('heartbeat')
return super(HeartbeatTestCase, self).setUp()
def test_success(self, mock_modulestore): # pylint: disable=unused-argument
response = self.client.get(self.heartbeat_url)
self.assertEqual(response.status_code, 200)
@patch('heartbeat.views.connection')
def test_sql_fail(self, mock_connection, mock_modulestore): # pylint: disable=unused-argument
mock_connection.cursor.return_value.execute.side_effect = DatabaseError
response = self.client.get(self.heartbeat_url)
self.assertEqual(response.status_code, 503)
response_dict = json.loads(response.content)
self.assertIn('SQL', response_dict)
def test_modulestore_fail(self, mock_modulestore): # pylint: disable=unused-argument
mock_modulestore.return_value.heartbeat.side_effect = HeartbeatFailure('msg', 'service')
response = self.client.get(self.heartbeat_url)
self.assertEqual(response.status_code, 503)
| agpl-3.0 |
amyvmiwei/chromium | tools/grit/grit/clique_unittest.py | 6 | 7941 | #!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.clique'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..'))
import re
import StringIO
import unittest
from grit import clique
from grit import exception
from grit import pseudo
from grit import tclib
from grit import grd_reader
from grit import util
class MessageCliqueUnittest(unittest.TestCase):
def testClique(self):
factory = clique.UberClique()
msg = tclib.Message(text='Hello USERNAME, how are you?',
placeholders=[
tclib.Placeholder('USERNAME', '%s', 'Joi')])
c = factory.MakeClique(msg)
self.failUnless(c.GetMessage() == msg)
self.failUnless(c.GetId() == msg.GetId())
msg_fr = tclib.Translation(text='Bonjour USERNAME, comment ca va?',
id=msg.GetId(), placeholders=[
tclib.Placeholder('USERNAME', '%s', 'Joi')])
msg_de = tclib.Translation(text='Guten tag USERNAME, wie geht es dir?',
id=msg.GetId(), placeholders=[
tclib.Placeholder('USERNAME', '%s', 'Joi')])
c.AddTranslation(msg_fr, 'fr')
factory.FindCliqueAndAddTranslation(msg_de, 'de')
# sort() sorts lists in-place and does not return them
for lang in ('en', 'fr', 'de'):
self.failUnless(lang in c.clique)
self.failUnless(c.MessageForLanguage('fr').GetRealContent() ==
msg_fr.GetRealContent())
try:
c.MessageForLanguage('zh-CN', False)
self.fail('Should have gotten exception')
except:
pass
self.failUnless(c.MessageForLanguage('zh-CN', True) != None)
rex = re.compile('fr|de|bingo')
self.failUnless(len(c.AllMessagesThatMatch(rex, False)) == 2)
self.failUnless(c.AllMessagesThatMatch(rex, True)[pseudo.PSEUDO_LANG] != None)
def testBestClique(self):
factory = clique.UberClique()
factory.MakeClique(tclib.Message(text='Alfur', description='alfaholl'))
factory.MakeClique(tclib.Message(text='Alfur', description=''))
factory.MakeClique(tclib.Message(text='Vaettur', description=''))
factory.MakeClique(tclib.Message(text='Vaettur', description=''))
factory.MakeClique(tclib.Message(text='Troll', description=''))
factory.MakeClique(tclib.Message(text='Gryla', description='ID: IDS_GRYLA'))
factory.MakeClique(tclib.Message(text='Gryla', description='vondakerling'))
factory.MakeClique(tclib.Message(text='Leppaludi', description='ID: IDS_LL'))
factory.MakeClique(tclib.Message(text='Leppaludi', description=''))
count_best_cliques = 0
for c in factory.BestCliquePerId():
count_best_cliques += 1
msg = c.GetMessage()
text = msg.GetRealContent()
description = msg.GetDescription()
if text == 'Alfur':
self.failUnless(description == 'alfaholl')
elif text == 'Gryla':
self.failUnless(description == 'vondakerling')
elif text == 'Leppaludi':
self.failUnless(description == 'ID: IDS_LL')
self.failUnless(count_best_cliques == 5)
def testAllInUberClique(self):
resources = grd_reader.Parse(util.WrapInputStream(
StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<messages>
<message name="IDS_GREETING" desc="Printed to greet the currently logged in user">
Hello <ph name="USERNAME">%s<ex>Joi</ex></ph>, how are you doing today?
</message>
</messages>
<structures>
<structure type="dialog" name="IDD_ABOUTBOX" encoding="utf-16" file="grit/test/data/klonk.rc" />
<structure type="tr_html" name="ID_HTML" file="grit/test/data/simple.html" />
</structures>
</release>
</grit>''')), util.PathFromRoot('.'))
resources.RunGatherers(True)
content_list = []
for clique_list in resources.UberClique().cliques_.values():
for clique in clique_list:
content_list.append(clique.GetMessage().GetRealContent())
self.failUnless('Hello %s, how are you doing today?' in content_list)
self.failUnless('Jack "Black" Daniels' in content_list)
self.failUnless('Hello!' in content_list)
def testCorrectExceptionIfWrongEncodingOnResourceFile(self):
'''This doesn't really belong in this unittest file, but what the heck.'''
resources = grd_reader.Parse(util.WrapInputStream(
StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<release seq="3">
<structures>
<structure type="dialog" name="IDD_ABOUTBOX" file="grit/test/data/klonk.rc" />
</structures>
</release>
</grit>''')), util.PathFromRoot('.'))
self.assertRaises(exception.SectionNotFound, resources.RunGatherers, True)
def testSemiIdenticalCliques(self):
messages = [
tclib.Message(text='Hello USERNAME',
placeholders=[tclib.Placeholder('USERNAME', '$1', 'Joi')]),
tclib.Message(text='Hello USERNAME',
placeholders=[tclib.Placeholder('USERNAME', '%s', 'Joi')]),
]
self.failUnless(messages[0].GetId() == messages[1].GetId())
# Both of the above would share a translation.
translation = tclib.Translation(id=messages[0].GetId(),
text='Bonjour USERNAME',
placeholders=[tclib.Placeholder(
'USERNAME', '$1', 'Joi')])
factory = clique.UberClique()
cliques = [factory.MakeClique(msg) for msg in messages]
for clq in cliques:
clq.AddTranslation(translation, 'fr')
self.failUnless(cliques[0].MessageForLanguage('fr').GetRealContent() ==
'Bonjour $1')
self.failUnless(cliques[1].MessageForLanguage('fr').GetRealContent() ==
'Bonjour %s')
def testMissingTranslations(self):
messages = [ tclib.Message(text='Hello'), tclib.Message(text='Goodbye') ]
factory = clique.UberClique()
cliques = [factory.MakeClique(msg) for msg in messages]
cliques[1].MessageForLanguage('fr', False, True)
self.failUnless(not factory.HasMissingTranslations())
cliques[0].MessageForLanguage('de', False, False)
self.failUnless(factory.HasMissingTranslations())
report = factory.MissingTranslationsReport()
self.failUnless(report.count('WARNING') == 1)
self.failUnless(report.count('8053599568341804890 "Goodbye" fr') == 1)
self.failUnless(report.count('ERROR') == 1)
self.failUnless(report.count('800120468867715734 "Hello" de') == 1)
def testCustomTypes(self):
factory = clique.UberClique()
message = tclib.Message(text='Bingo bongo')
c = factory.MakeClique(message)
try:
c.SetCustomType(DummyCustomType())
self.fail()
except:
pass # expected case - 'Bingo bongo' does not start with 'jjj'
message = tclib.Message(text='jjjBingo bongo')
c = factory.MakeClique(message)
c.SetCustomType(util.NewClassInstance(
'grit.clique_unittest.DummyCustomType', clique.CustomType))
translation = tclib.Translation(id=message.GetId(), text='Bilingo bolongo')
c.AddTranslation(translation, 'fr')
self.failUnless(c.MessageForLanguage('fr').GetRealContent().startswith('jjj'))
class DummyCustomType(clique.CustomType):
def Validate(self, message):
return message.GetRealContent().startswith('jjj')
def ValidateAndModify(self, lang, translation):
is_ok = self.Validate(translation)
self.ModifyEachTextPart(lang, translation)
def ModifyTextPart(self, lang, text):
return 'jjj%s' % text
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
hujingguang/OpsSystem | users/views.py | 1 | 3093 | from django.shortcuts import render,render_to_response
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.http import HttpResponse
from django.contrib.auth.models import User
from datetime import datetime
import json
@login_required(login_url='/')
def list_all_users(request):
users=User.objects.all()
for u in users:
if u.last_login:
u.last_login=datetime.strftime(u.last_login,'%Y-%m-%d %H:%M:%S')
else:
u.last_login=''
if u.date_joined:
u.date_joined=datetime.strftime(u.date_joined,'%Y-%m-%d %H:%M:%S')
else:
u.date_joined=''
return render_to_response('user_list.html',RequestContext(request,{'users':users}))
@login_required(login_url='/')
def add_user(request):
if request.method=='POST':
username=request.POST.get('username','').replace(' ','')
password=request.POST.get('password','').replace(' ','')
email=request.POST.get('email','').replace(' ','')
isadmin=request.POST.get('isadmin','').replace(' ','')
if not request.user.is_superuser:
return HttpResponse(json.dumps({'code':400,'info':'No Permission'}))
if username == '' or password == '' or email== '' or isadmin == '':
return HttpResponse(json.dumps({'code':400,'info':'Error Form Params'}))
if isadmin == 'Y':
user=User.objects.create_superuser(username=username,email=email,password=password)
else:
user=User.objects.create_user(username=username,email=email,password=password)
try:
user.save()
except Exception as e:
return HttpResponse(json.dumps({'code':400,'info':'add user failed'}))
return HttpResponse(json.dumps({'code':200,'info':'add user success'}))
return HttpResponse(json.dumps({'code':400,'info':'error http method'}))
@login_required(login_url='/')
def change_user_api(request):
if request.method=='POST':
if not request.user.is_superuser:
return HttpResponse(json.dumps({'code':400,'info':'No Permission !'}))
userid=request.POST.get('userid','').replace(' ','')
opt=request.POST.get('opt','').replace(' ','')
if not userid or not opt:
return HttpResponse(json.dumps({'code':400,'info':'Error Params !'}))
u=User.objects.get(id=userid)
if not u:
            return HttpResponse(json.dumps({'code':400,'info':'User does not exist !'}))
if opt == 'delete':
try:
u.delete()
except Exception as e:
return HttpResponse(json.dumps({'code':400,'info':'Delete Failed'}))
elif opt == 'active':
if not u.is_active:
u.is_active=1
try:
u.save()
except Exception as e:
return HttpResponse(json.dumps({'code':400,'info':'Active Failed'}))
elif opt == 'freeze':
if u.is_active:
u.is_active=0
try:
u.save()
except Exception as e:
return HttpResponse(json.dumps({'code':400,'info':'Freeze Failed'}))
else:
return HttpResponse(json.dumps({'code':400,'info':'Error Params'}))
return HttpResponse(json.dumps({'code':200,'info':'Change User Status Success'}))
else:
return HttpResponse(json.dumps({'code':400,'info':'Error Http Method'}))
| mit |
ahmetabdi/SickRage | tornado/locale.py | 160 | 21946 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Translation methods for generating localized strings.
To load a locale and generate a translated string::
user_locale = tornado.locale.get("es_LA")
print user_locale.translate("Sign out")
`tornado.locale.get()` returns the closest matching locale, not necessarily the
specific locale you requested. You can support pluralization with
additional arguments to `~Locale.translate()`, e.g.::
people = [...]
message = user_locale.translate(
"%(list)s is online", "%(list)s are online", len(people))
print message % {"list": user_locale.list(people)}
The first string is chosen if ``len(people) == 1``, otherwise the second
string is chosen.
Applications should call one of `load_translations` (which uses a simple
CSV format) or `load_gettext_translations` (which uses the ``.mo`` format
supported by `gettext` and related tools). If neither method is called,
the `Locale.translate` method will simply return the original string.
"""
from __future__ import absolute_import, division, print_function, with_statement
import csv
import datetime
import numbers
import os
import re
from tornado import escape
from tornado.log import gen_log
from tornado.util import u
_default_locale = "en_US"
_translations = {}
_supported_locales = frozenset([_default_locale])
_use_gettext = False
def get(*locale_codes):
"""Returns the closest match for the given locale codes.
We iterate over all given locale codes in order. If we have a tight
or a loose match for the code (e.g., "en" for "en_US"), we return
the locale. Otherwise we move to the next code in the list.
By default we return ``en_US`` if no translations are found for any of
the specified locales. You can change the default locale with
`set_default_locale()`.
"""
return Locale.get_closest(*locale_codes)
def set_default_locale(code):
"""Sets the default locale.
The default locale is assumed to be the language used for all strings
in the system. The translations loaded from disk are mappings from
the default locale to the destination locale. Consequently, you don't
need to create a translation file for the default locale.
"""
global _default_locale
global _supported_locales
_default_locale = code
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
def load_translations(directory):
"""Loads translations from CSV files in a directory.
Translations are strings with optional Python-style named placeholders
(e.g., ``My name is %(name)s``) and their associated translations.
The directory should have translation files of the form ``LOCALE.csv``,
e.g. ``es_GT.csv``. The CSV files should have two or three columns: string,
translation, and an optional plural indicator. Plural indicators should
be one of "plural" or "singular". A given string can have both singular
and plural forms. For example ``%(name)s liked this`` may have a
different verb conjugation depending on whether %(name)s is one
name or a list of names. There should be two rows in the CSV file for
that string, one with plural indicator "singular", and one "plural".
For strings with no verbs that would change on translation, simply
use "unknown" or the empty string (or don't include the column at all).
The file is read using the `csv` module in the default "excel" dialect.
In this format there should not be spaces after the commas.
Example translation ``es_LA.csv``::
"I love you","Te amo"
"%(name)s liked this","A %(name)s les gustó esto","plural"
"%(name)s liked this","A %(name)s le gustó esto","singular"
"""
global _translations
global _supported_locales
_translations = {}
for path in os.listdir(directory):
if not path.endswith(".csv"):
continue
locale, extension = path.split(".")
if not re.match("[a-z]+(_[A-Z]+)?$", locale):
gen_log.error("Unrecognized locale %r (path: %s)", locale,
os.path.join(directory, path))
continue
full_path = os.path.join(directory, path)
try:
# python 3: csv.reader requires a file open in text mode.
# Force utf8 to avoid dependence on $LANG environment variable.
f = open(full_path, "r", encoding="utf-8")
except TypeError:
# python 2: files return byte strings, which are decoded below.
f = open(full_path, "r")
_translations[locale] = {}
for i, row in enumerate(csv.reader(f)):
if not row or len(row) < 2:
continue
row = [escape.to_unicode(c).strip() for c in row]
english, translation = row[:2]
if len(row) > 2:
plural = row[2] or "unknown"
else:
plural = "unknown"
if plural not in ("plural", "singular", "unknown"):
gen_log.error("Unrecognized plural indicator %r in %s line %d",
plural, path, i + 1)
continue
_translations[locale].setdefault(plural, {})[english] = translation
f.close()
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
gen_log.debug("Supported locales: %s", sorted(_supported_locales))
def load_gettext_translations(directory, domain):
"""Loads translations from `gettext`'s locale tree
Locale tree is similar to system's ``/usr/share/locale``, like::
{directory}/{lang}/LC_MESSAGES/{domain}.mo
Three steps are required to have you app translated:
1. Generate POT translation file::
xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc
2. Merge against existing POT file::
msgmerge old.po mydomain.po > new.po
3. Compile::
msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
"""
import gettext
global _translations
global _supported_locales
global _use_gettext
_translations = {}
for lang in os.listdir(directory):
if lang.startswith('.'):
continue # skip .svn, etc
if os.path.isfile(os.path.join(directory, lang)):
continue
try:
os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo"))
_translations[lang] = gettext.translation(domain, directory,
languages=[lang])
except Exception as e:
gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
continue
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
_use_gettext = True
gen_log.debug("Supported locales: %s", sorted(_supported_locales))
def get_supported_locales():
"""Returns a list of all the supported locale codes."""
return _supported_locales
class Locale(object):
"""Object representing a locale.
After calling one of `load_translations` or `load_gettext_translations`,
call `get` or `get_closest` to get a Locale object.
"""
@classmethod
def get_closest(cls, *locale_codes):
"""Returns the closest match for the given locale code."""
for code in locale_codes:
if not code:
continue
code = code.replace("-", "_")
parts = code.split("_")
if len(parts) > 2:
continue
elif len(parts) == 2:
code = parts[0].lower() + "_" + parts[1].upper()
if code in _supported_locales:
return cls.get(code)
if parts[0].lower() in _supported_locales:
return cls.get(parts[0].lower())
return cls.get(_default_locale)
@classmethod
def get(cls, code):
"""Returns the Locale for the given locale code.
If it is not supported, we raise an exception.
"""
if not hasattr(cls, "_cache"):
cls._cache = {}
if code not in cls._cache:
assert code in _supported_locales
translations = _translations.get(code, None)
if translations is None:
locale = CSVLocale(code, {})
elif _use_gettext:
locale = GettextLocale(code, translations)
else:
locale = CSVLocale(code, translations)
cls._cache[code] = locale
return cls._cache[code]
def __init__(self, code, translations):
self.code = code
self.name = LOCALE_NAMES.get(code, {}).get("name", u("Unknown"))
self.rtl = False
for prefix in ["fa", "ar", "he"]:
if self.code.startswith(prefix):
self.rtl = True
break
self.translations = translations
# Initialize strings for date formatting
_ = self.translate
self._months = [
_("January"), _("February"), _("March"), _("April"),
_("May"), _("June"), _("July"), _("August"),
_("September"), _("October"), _("November"), _("December")]
self._weekdays = [
_("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"),
_("Friday"), _("Saturday"), _("Sunday")]
def translate(self, message, plural_message=None, count=None):
"""Returns the translation for the given message for this locale.
If ``plural_message`` is given, you must also provide
``count``. We return ``plural_message`` when ``count != 1``,
and we return the singular form for the given message when
``count == 1``.
"""
raise NotImplementedError()
def format_date(self, date, gmt_offset=0, relative=True, shorter=False,
full_format=False):
"""Formats the given date (which should be GMT).
By default, we return a relative time (e.g., "2 minutes ago"). You
can return an absolute date string with ``relative=False``.
You can force a full format date ("July 10, 1980") with
``full_format=True``.
This method is primarily intended for dates in the past.
For dates in the future, we fall back to full format.
"""
if isinstance(date, numbers.Real):
date = datetime.datetime.utcfromtimestamp(date)
now = datetime.datetime.utcnow()
if date > now:
if relative and (date - now).seconds < 60:
                # Due to clock skew, some things are slightly
# in the future. Round timestamps in the immediate
# future down to now in relative mode.
date = now
else:
# Otherwise, future dates always use the full format.
full_format = True
local_date = date - datetime.timedelta(minutes=gmt_offset)
local_now = now - datetime.timedelta(minutes=gmt_offset)
local_yesterday = local_now - datetime.timedelta(hours=24)
difference = now - date
seconds = difference.seconds
days = difference.days
_ = self.translate
format = None
if not full_format:
if relative and days == 0:
if seconds < 50:
return _("1 second ago", "%(seconds)d seconds ago",
seconds) % {"seconds": seconds}
if seconds < 50 * 60:
minutes = round(seconds / 60.0)
return _("1 minute ago", "%(minutes)d minutes ago",
minutes) % {"minutes": minutes}
hours = round(seconds / (60.0 * 60))
return _("1 hour ago", "%(hours)d hours ago",
hours) % {"hours": hours}
if days == 0:
format = _("%(time)s")
elif days == 1 and local_date.day == local_yesterday.day and \
relative:
format = _("yesterday") if shorter else \
_("yesterday at %(time)s")
elif days < 5:
format = _("%(weekday)s") if shorter else \
_("%(weekday)s at %(time)s")
elif days < 334: # 11mo, since confusing for same month last year
format = _("%(month_name)s %(day)s") if shorter else \
_("%(month_name)s %(day)s at %(time)s")
if format is None:
format = _("%(month_name)s %(day)s, %(year)s") if shorter else \
_("%(month_name)s %(day)s, %(year)s at %(time)s")
tfhour_clock = self.code not in ("en", "en_US", "zh_CN")
if tfhour_clock:
str_time = "%d:%02d" % (local_date.hour, local_date.minute)
elif self.code == "zh_CN":
str_time = "%s%d:%02d" % (
(u('\u4e0a\u5348'), u('\u4e0b\u5348'))[local_date.hour >= 12],
local_date.hour % 12 or 12, local_date.minute)
else:
str_time = "%d:%02d %s" % (
local_date.hour % 12 or 12, local_date.minute,
("am", "pm")[local_date.hour >= 12])
return format % {
"month_name": self._months[local_date.month - 1],
"weekday": self._weekdays[local_date.weekday()],
"day": str(local_date.day),
"year": str(local_date.year),
"time": str_time
}
def format_day(self, date, gmt_offset=0, dow=True):
"""Formats the given date as a day of week.
Example: "Monday, January 22". You can remove the day of week with
``dow=False``.
"""
local_date = date - datetime.timedelta(minutes=gmt_offset)
_ = self.translate
if dow:
return _("%(weekday)s, %(month_name)s %(day)s") % {
"month_name": self._months[local_date.month - 1],
"weekday": self._weekdays[local_date.weekday()],
"day": str(local_date.day),
}
else:
return _("%(month_name)s %(day)s") % {
"month_name": self._months[local_date.month - 1],
"day": str(local_date.day),
}
def list(self, parts):
"""Returns a comma-separated list for the given list of parts.
The format is, e.g., "A, B and C", "A and B" or just "A" for lists
of size 1.
"""
_ = self.translate
if len(parts) == 0:
return ""
if len(parts) == 1:
return parts[0]
comma = u(' \u0648 ') if self.code.startswith("fa") else u(", ")
return _("%(commas)s and %(last)s") % {
"commas": comma.join(parts[:-1]),
"last": parts[len(parts) - 1],
}
def friendly_number(self, value):
"""Returns a comma-separated number for the given integer."""
if self.code not in ("en", "en_US"):
return str(value)
value = str(value)
parts = []
while value:
parts.append(value[-3:])
value = value[:-3]
return ",".join(reversed(parts))
class CSVLocale(Locale):
"""Locale implementation using tornado's CSV translation format."""
def translate(self, message, plural_message=None, count=None):
if plural_message is not None:
assert count is not None
if count != 1:
message = plural_message
message_dict = self.translations.get("plural", {})
else:
message_dict = self.translations.get("singular", {})
else:
message_dict = self.translations.get("unknown", {})
return message_dict.get(message, message)
class GettextLocale(Locale):
"""Locale implementation using the `gettext` module."""
def __init__(self, code, translations):
try:
# python 2
self.ngettext = translations.ungettext
self.gettext = translations.ugettext
except AttributeError:
# python 3
self.ngettext = translations.ngettext
self.gettext = translations.gettext
# self.gettext must exist before __init__ is called, since it
# calls into self.translate
super(GettextLocale, self).__init__(code, translations)
def translate(self, message, plural_message=None, count=None):
if plural_message is not None:
assert count is not None
return self.ngettext(message, plural_message, count)
else:
return self.gettext(message)
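# Illustrative sketch (helper name is ours): the formatting helpers work even
# without any translations loaded, falling back to the default en_US strings.
def _example_formatting():
    locale = Locale.get("en_US")
    assert locale.friendly_number(1234567) == "1,234,567"
    assert locale.list(["Alice", "Bob", "Carol"]) == "Alice, Bob and Carol"
    # relative formatting of a timestamp a few minutes in the past
    return locale.format_date(datetime.datetime.utcnow() -
                              datetime.timedelta(minutes=5))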
LOCALE_NAMES = {
"af_ZA": {"name_en": u("Afrikaans"), "name": u("Afrikaans")},
"am_ET": {"name_en": u("Amharic"), "name": u('\u12a0\u121b\u122d\u129b')},
"ar_AR": {"name_en": u("Arabic"), "name": u("\u0627\u0644\u0639\u0631\u0628\u064a\u0629")},
"bg_BG": {"name_en": u("Bulgarian"), "name": u("\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438")},
"bn_IN": {"name_en": u("Bengali"), "name": u("\u09ac\u09be\u0982\u09b2\u09be")},
"bs_BA": {"name_en": u("Bosnian"), "name": u("Bosanski")},
"ca_ES": {"name_en": u("Catalan"), "name": u("Catal\xe0")},
"cs_CZ": {"name_en": u("Czech"), "name": u("\u010ce\u0161tina")},
"cy_GB": {"name_en": u("Welsh"), "name": u("Cymraeg")},
"da_DK": {"name_en": u("Danish"), "name": u("Dansk")},
"de_DE": {"name_en": u("German"), "name": u("Deutsch")},
"el_GR": {"name_en": u("Greek"), "name": u("\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac")},
"en_GB": {"name_en": u("English (UK)"), "name": u("English (UK)")},
"en_US": {"name_en": u("English (US)"), "name": u("English (US)")},
"es_ES": {"name_en": u("Spanish (Spain)"), "name": u("Espa\xf1ol (Espa\xf1a)")},
"es_LA": {"name_en": u("Spanish"), "name": u("Espa\xf1ol")},
"et_EE": {"name_en": u("Estonian"), "name": u("Eesti")},
"eu_ES": {"name_en": u("Basque"), "name": u("Euskara")},
"fa_IR": {"name_en": u("Persian"), "name": u("\u0641\u0627\u0631\u0633\u06cc")},
"fi_FI": {"name_en": u("Finnish"), "name": u("Suomi")},
"fr_CA": {"name_en": u("French (Canada)"), "name": u("Fran\xe7ais (Canada)")},
"fr_FR": {"name_en": u("French"), "name": u("Fran\xe7ais")},
"ga_IE": {"name_en": u("Irish"), "name": u("Gaeilge")},
"gl_ES": {"name_en": u("Galician"), "name": u("Galego")},
"he_IL": {"name_en": u("Hebrew"), "name": u("\u05e2\u05d1\u05e8\u05d9\u05ea")},
"hi_IN": {"name_en": u("Hindi"), "name": u("\u0939\u093f\u0928\u094d\u0926\u0940")},
"hr_HR": {"name_en": u("Croatian"), "name": u("Hrvatski")},
"hu_HU": {"name_en": u("Hungarian"), "name": u("Magyar")},
"id_ID": {"name_en": u("Indonesian"), "name": u("Bahasa Indonesia")},
"is_IS": {"name_en": u("Icelandic"), "name": u("\xcdslenska")},
"it_IT": {"name_en": u("Italian"), "name": u("Italiano")},
"ja_JP": {"name_en": u("Japanese"), "name": u("\u65e5\u672c\u8a9e")},
"ko_KR": {"name_en": u("Korean"), "name": u("\ud55c\uad6d\uc5b4")},
"lt_LT": {"name_en": u("Lithuanian"), "name": u("Lietuvi\u0173")},
"lv_LV": {"name_en": u("Latvian"), "name": u("Latvie\u0161u")},
"mk_MK": {"name_en": u("Macedonian"), "name": u("\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438")},
"ml_IN": {"name_en": u("Malayalam"), "name": u("\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02")},
"ms_MY": {"name_en": u("Malay"), "name": u("Bahasa Melayu")},
"nb_NO": {"name_en": u("Norwegian (bokmal)"), "name": u("Norsk (bokm\xe5l)")},
"nl_NL": {"name_en": u("Dutch"), "name": u("Nederlands")},
"nn_NO": {"name_en": u("Norwegian (nynorsk)"), "name": u("Norsk (nynorsk)")},
"pa_IN": {"name_en": u("Punjabi"), "name": u("\u0a2a\u0a70\u0a1c\u0a3e\u0a2c\u0a40")},
"pl_PL": {"name_en": u("Polish"), "name": u("Polski")},
"pt_BR": {"name_en": u("Portuguese (Brazil)"), "name": u("Portugu\xeas (Brasil)")},
"pt_PT": {"name_en": u("Portuguese (Portugal)"), "name": u("Portugu\xeas (Portugal)")},
"ro_RO": {"name_en": u("Romanian"), "name": u("Rom\xe2n\u0103")},
"ru_RU": {"name_en": u("Russian"), "name": u("\u0420\u0443\u0441\u0441\u043a\u0438\u0439")},
"sk_SK": {"name_en": u("Slovak"), "name": u("Sloven\u010dina")},
"sl_SI": {"name_en": u("Slovenian"), "name": u("Sloven\u0161\u010dina")},
"sq_AL": {"name_en": u("Albanian"), "name": u("Shqip")},
"sr_RS": {"name_en": u("Serbian"), "name": u("\u0421\u0440\u043f\u0441\u043a\u0438")},
"sv_SE": {"name_en": u("Swedish"), "name": u("Svenska")},
"sw_KE": {"name_en": u("Swahili"), "name": u("Kiswahili")},
"ta_IN": {"name_en": u("Tamil"), "name": u("\u0ba4\u0bae\u0bbf\u0bb4\u0bcd")},
"te_IN": {"name_en": u("Telugu"), "name": u("\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41")},
"th_TH": {"name_en": u("Thai"), "name": u("\u0e20\u0e32\u0e29\u0e32\u0e44\u0e17\u0e22")},
"tl_PH": {"name_en": u("Filipino"), "name": u("Filipino")},
"tr_TR": {"name_en": u("Turkish"), "name": u("T\xfcrk\xe7e")},
"uk_UA": {"name_en": u("Ukraini "), "name": u("\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430")},
"vi_VN": {"name_en": u("Vietnamese"), "name": u("Ti\u1ebfng Vi\u1ec7t")},
"zh_CN": {"name_en": u("Chinese (Simplified)"), "name": u("\u4e2d\u6587(\u7b80\u4f53)")},
"zh_TW": {"name_en": u("Chinese (Traditional)"), "name": u("\u4e2d\u6587(\u7e41\u9ad4)")},
}
| gpl-3.0 |
marcuskelly/recover | Lib/site-packages/Crypto/Cipher/_mode_cfb.py | 9 | 9008 | # -*- coding: utf-8 -*-
#
# Cipher/mode_cfb.py : CFB mode
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""
Cipher FeedBack (CFB) mode.
"""
__all__ = ['CfbMode']
from Crypto.Util._raw_api import (load_pycryptodome_raw_lib, VoidPointer,
create_string_buffer, get_raw_buffer,
SmartPointer, c_size_t, expect_byte_string)
from Crypto.Random import get_random_bytes
raw_cfb_lib = load_pycryptodome_raw_lib("Crypto.Cipher._raw_cfb","""
int CFB_start_operation(void *cipher,
const uint8_t iv[],
size_t iv_len,
size_t segment_len, /* In bytes */
void **pResult);
int CFB_encrypt(void *cfbState,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int CFB_decrypt(void *cfbState,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int CFB_stop_operation(void *state);"""
)
class CfbMode(object):
"""*Cipher FeedBack (CFB)*.
    This mode transforms the underlying block cipher
    into a stream cipher.
Plaintext and ciphertext are processed in *segments*
of **s** bits. The mode is therefore sometimes
labelled **s**-bit CFB.
An Initialization Vector (*IV*) is required.
See `NIST SP800-38A`_ , Section 6.3.
.. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
:undocumented: __init__
"""
def __init__(self, block_cipher, iv, segment_size):
"""Create a new block cipher, configured in CFB mode.
:Parameters:
block_cipher : C pointer
A smart pointer to the low-level block cipher instance.
iv : byte string
The initialization vector to use for encryption or decryption.
It is as long as the cipher block.
**The IV must be unpredictable**. Ideally it is picked randomly.
Reusing the *IV* for encryptions performed with the same key
compromises confidentiality.
segment_size : integer
The number of bytes the plaintext and ciphertext are segmented in.
"""
expect_byte_string(iv)
self._state = VoidPointer()
result = raw_cfb_lib.CFB_start_operation(block_cipher.get(),
iv,
c_size_t(len(iv)),
c_size_t(segment_size),
self._state.address_of())
if result:
raise ValueError("Error %d while instatiating the CFB mode" % result)
# Ensure that object disposal of this Python object will (eventually)
# free the memory allocated by the raw library for the cipher mode
self._state = SmartPointer(self._state.get(),
raw_cfb_lib.CFB_stop_operation)
        # Memory allocated for the underlying block cipher is now owned
        # by the cipher mode
block_cipher.release()
self.block_size = len(iv)
"""The block size of the underlying cipher, in bytes."""
self.iv = iv
"""The Initialization Vector originally used to create the object.
The value does not change."""
self.IV = iv
"""Alias for `iv`"""
self._next = [ self.encrypt, self.decrypt ]
def encrypt(self, plaintext):
"""Encrypt data with the key and the parameters set at initialization.
A cipher object is stateful: once you have encrypted a message
you cannot encrypt (or decrypt) another message using the same
object.
The data to encrypt can be broken up in two or
more pieces and `encrypt` can be called multiple times.
That is, the statement:
>>> c.encrypt(a) + c.encrypt(b)
is equivalent to:
>>> c.encrypt(a+b)
This function does not add any padding to the plaintext.
:Parameters:
plaintext : byte string
The piece of data to encrypt.
It can be of any length.
:Return:
the encrypted data, as a byte string.
It is as long as *plaintext*.
"""
if self.encrypt not in self._next:
raise TypeError("encrypt() cannot be called after decrypt()")
self._next = [ self.encrypt ]
expect_byte_string(plaintext)
ciphertext = create_string_buffer(len(plaintext))
result = raw_cfb_lib.CFB_encrypt(self._state.get(),
plaintext,
ciphertext,
c_size_t(len(plaintext)))
if result:
raise ValueError("Error %d while encrypting in CFB mode" % result)
return get_raw_buffer(ciphertext)
def decrypt(self, ciphertext):
"""Decrypt data with the key and the parameters set at initialization.
A cipher object is stateful: once you have decrypted a message
you cannot decrypt (or encrypt) another message with the same
object.
The data to decrypt can be broken up in two or
more pieces and `decrypt` can be called multiple times.
That is, the statement:
>>> c.decrypt(a) + c.decrypt(b)
is equivalent to:
>>> c.decrypt(a+b)
This function does not remove any padding from the plaintext.
:Parameters:
ciphertext : byte string
The piece of data to decrypt.
It can be of any length.
:Return: the decrypted data (byte string).
"""
if self.decrypt not in self._next:
raise TypeError("decrypt() cannot be called after encrypt()")
self._next = [ self.decrypt ]
expect_byte_string(ciphertext)
plaintext = create_string_buffer(len(ciphertext))
result = raw_cfb_lib.CFB_decrypt(self._state.get(),
ciphertext,
plaintext,
c_size_t(len(ciphertext)))
if result:
raise ValueError("Error %d while decrypting in CFB mode" % result)
return get_raw_buffer(plaintext)
def _create_cfb_cipher(factory, **kwargs):
"""Instantiate a cipher object that performs CFB encryption/decryption.
:Parameters:
factory : module
The underlying block cipher, a module from ``Crypto.Cipher``.
:Keywords:
iv : byte string
The IV to use for CFB.
IV : byte string
Alias for ``iv``.
segment_size : integer
        The number of bits the plaintext and ciphertext are segmented in.
If not present, the default is 8.
Any other keyword will be passed to the underlying block cipher.
See the relevant documentation for details (at least ``key`` will need
to be present).
"""
cipher_state = factory._create_base_cipher(kwargs)
iv = kwargs.pop("IV", None)
IV = kwargs.pop("iv", None)
if (None, None) == (iv, IV):
iv = get_random_bytes(factory.block_size)
if iv is not None:
if IV is not None:
raise TypeError("You must either use 'iv' or 'IV', not both")
else:
iv = IV
segment_size_bytes, rem = divmod(kwargs.pop("segment_size", 8), 8)
if segment_size_bytes == 0 or rem != 0:
raise ValueError("'segment_size' must be positive and multiple of 8 bits")
if kwargs:
raise TypeError("Unknown parameters for CFB: %s" % str(kwargs))
return CfbMode(cipher_state, iv, segment_size_bytes)
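# Usage sketch (hedged: this module is internal; the public entry point is the
# cipher factory, e.g. Crypto.Cipher.AES, which forwards 'iv' and 'segment_size'
# keywords down to _create_cfb_cipher above):
#
#   from Crypto.Cipher import AES
#   from Crypto.Random import get_random_bytes
#
#   key = get_random_bytes(16)
#   encryptor = AES.new(key, AES.MODE_CFB, segment_size=8)  # random IV generated
#   ciphertext = encryptor.encrypt(b"attack at dawn")
#   decryptor = AES.new(key, AES.MODE_CFB, iv=encryptor.iv, segment_size=8)
#   assert decryptor.decrypt(ciphertext) == b"attack at dawn"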
| bsd-2-clause |
chauhanmohit/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/leakdetector_unittest.py | 121 | 9172 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.port.leakdetector import LeakDetector
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.executive_mock import MockExecutive
class LeakDetectorTest(unittest.TestCase):
def _mock_port(self):
class MockPort(object):
def __init__(self):
self._filesystem = MockFileSystem()
self._executive = MockExecutive()
return MockPort()
def _make_detector(self):
return LeakDetector(self._mock_port())
def test_leaks_args(self):
detector = self._make_detector()
detector._callstacks_to_exclude_from_leaks = lambda: ['foo bar', 'BAZ']
detector._types_to_exlude_from_leaks = lambda: ['abcdefg', 'hi jklmno']
expected_args = ['--exclude-callstack=foo bar', '--exclude-callstack=BAZ', '--exclude-type=abcdefg', '--exclude-type=hi jklmno', 1234]
self.assertEqual(detector._leaks_args(1234), expected_args)
example_leaks_output = """Process 5122: 663744 nodes malloced for 78683 KB
Process 5122: 337301 leaks for 6525216 total leaked bytes.
Leak: 0x38cb600 size=3072 zone: DefaultMallocZone_0x1d94000 instance of 'NSCFData', type ObjC, implemented in Foundation
0xa033f0b8 0x01001384 0x00000b3a 0x00000b3a ..3.....:...:...
0x00000000 0x038cb620 0x00000000 0x00000000 .... ...........
0x00000000 0x21000000 0x726c6468 0x00000000 .......!hdlr....
0x00000000 0x7269646d 0x6c707061 0x00000000 ....mdirappl....
0x00000000 0x04000000 0x736c69c1 0x00000074 .........ilst...
0x6f74a923 0x0000006f 0x7461641b 0x00000061 #.too....data...
0x00000001 0x76614c00 0x2e323566 0x302e3236 .....Lavf52.62.0
0x37000000 0x6d616ea9 0x2f000000 0x61746164 ...7.nam.../data
...
Leak: 0x2a9c960 size=288 zone: DefaultMallocZone_0x1d94000
0x09a1cc47 0x1bda8560 0x3d472cd1 0xfbe9bccd G...`....,G=....
0x8bcda008 0x9e972a91 0xa892cf63 0x2448bdb0 .....*..c.....H$
0x4736fc34 0xdbe2d94e 0x25f56688 0x839402a4 4.6GN....f.%....
0xd12496b3 0x59c40c12 0x8cfcab2a 0xd20ef9c4 ..$....Y*.......
0xe7c56b1b 0x5835af45 0xc69115de 0x6923e4bb .k..E.5X......#i
0x86f15553 0x15d40fa9 0x681288a4 0xc33298a9 SU.........h..2.
0x439bb535 0xc4fc743d 0x7dfaaff8 0x2cc49a4a 5..C=t.....}J..,
0xdd119df8 0x7e086821 0x3d7d129e 0x2e1b1547 ....!h.~..}=G...
...
Leak: 0x25102fe0 size=176 zone: DefaultMallocZone_0x1d94000 string 'NSException Data'
"""
example_leaks_output_with_exclusions = """
Process 57064: 865808 nodes malloced for 81032 KB
Process 57064: 282 leaks for 21920 total leaked bytes.
Leak: 0x7fc506023960 size=576 zone: DefaultMallocZone_0x107c29000 URLConnectionLoader::LoaderConnectionEventQueue C++ CFNetwork
0x73395460 0x00007fff 0x7488af40 0x00007fff `[email protected]....
0x73395488 0x00007fff 0x46eecd74 0x0001ed83 .T9s....t..F....
0x0100000a 0x00000000 0x7488bfc0 0x00007fff ...........t....
0x00000000 0x00000000 0x46eecd8b 0x0001ed83 ...........F....
0x00000000 0x00000000 0x00000000 0x00000000 ................
0x00000000 0x00000000 0x46eecda3 0x0001ed83 ...........F....
0x00000000 0x00000000 0x00000000 0x00000000 ................
0x00000000 0x00000000 0x46eecdbc 0x0001ed83 ...........F....
...
Leak: 0x7fc506025980 size=432 zone: DefaultMallocZone_0x107c29000 URLConnectionInstanceData CFType CFNetwork
0x74862b28 0x00007fff 0x00012b80 0x00000001 (+.t.....+......
0x73395310 0x00007fff 0x733953f8 0x00007fff .S9s.....S9s....
0x4d555458 0x00000000 0x00000000 0x00002068 XTUM........h ..
0x00000000 0x00000000 0x00000b00 0x00000b00 ................
0x00000000 0x00000000 0x060259b8 0x00007fc5 .........Y......
0x060259bc 0x00007fc5 0x00000000 0x00000000 .Y..............
0x73395418 0x00007fff 0x06025950 0x00007fc5 .T9s....PY......
0x73395440 0x00007fff 0x00005013 0x00000001 @T9s.....P......
...
Binary Images:
0x107ac2000 - 0x107b4aff7 +DumpRenderTree (??? - ???) <5694BE03-A60A-30B2-9D40-27CFFCFB88EE> /Volumes/Data/WebKit-BuildSlave/lion-intel-leaks/build/WebKitBuild/Debug/DumpRenderTree
0x107c2f000 - 0x107c58fff +libWebCoreTestSupport.dylib (535.8.0 - compatibility 1.0.0) <E4F7A13E-5807-30F7-A399-62F8395F9106> /Volumes/Data/WebKit-BuildSlave/lion-intel-leaks/build/WebKitBuild/Debug/libWebCoreTestSupport.dylib
17 leaks excluded (not printed)
"""
def test_parse_leaks_output(self):
self.assertEqual(self._make_detector()._parse_leaks_output(self.example_leaks_output), (337301, 0, 6525216))
self.assertEqual(self._make_detector()._parse_leaks_output(self.example_leaks_output_with_exclusions), (282, 17, 21920))
def test_leaks_files_in_directory(self):
detector = self._make_detector()
self.assertEqual(detector.leaks_files_in_directory('/bogus-directory'), [])
detector._filesystem = MockFileSystem({
'/mock-results/DumpRenderTree-1234-leaks.txt': '',
'/mock-results/DumpRenderTree-23423-leaks.txt': '',
'/mock-results/DumpRenderTree-823-leaks.txt': '',
})
self.assertEqual(len(detector.leaks_files_in_directory('/mock-results')), 3)
def test_count_total_bytes_and_unique_leaks(self):
detector = self._make_detector()
def mock_run_script(name, args, include_configuration_arguments=False):
print "MOCK _run_script: %s %s" % (name, args)
return """1 calls for 16 bytes: -[NSURLRequest mutableCopyWithZone:] | +[NSObject(NSObject) allocWithZone:] | _internal_class_createInstanceFromZone | calloc | malloc_zone_calloc
147 calls for 9,408 bytes: _CFRuntimeCreateInstance | _ZN3WTF24StringWrapperCFAllocatorL8allocateElmPv StringImplCF.cpp:67 | WTF::fastMalloc(unsigned long) FastMalloc.cpp:268 | malloc | malloc_zone_malloc
total: 5,888 bytes (0 bytes excluded)."""
detector._port._run_script = mock_run_script
leak_files = ['/mock-results/DumpRenderTree-1234-leaks.txt', '/mock-results/DumpRenderTree-1235-leaks.txt']
expected_stdout = "MOCK _run_script: parse-malloc-history ['--merge-depth', 5, '/mock-results/DumpRenderTree-1234-leaks.txt', '/mock-results/DumpRenderTree-1235-leaks.txt']\n"
results_tuple = OutputCapture().assert_outputs(self, detector.count_total_bytes_and_unique_leaks, [leak_files], expected_stdout=expected_stdout)
self.assertEqual(results_tuple, ("5,888 bytes", 2))
def test_count_total_leaks(self):
detector = self._make_detector()
detector._filesystem = MockFileSystem({
# The \xff is some non-utf8 characters to make sure we don't blow up trying to parse the file.
'/mock-results/DumpRenderTree-1234-leaks.txt': '\xff\nProcess 1234: 12 leaks for 40 total leaked bytes.\n\xff\n',
'/mock-results/DumpRenderTree-23423-leaks.txt': 'Process 1235: 12341 leaks for 27934 total leaked bytes.\n',
'/mock-results/DumpRenderTree-823-leaks.txt': 'Process 12356: 23412 leaks for 18 total leaked bytes.\n',
})
leak_file_paths = ['/mock-results/DumpRenderTree-1234-leaks.txt', '/mock-results/DumpRenderTree-23423-leaks.txt', '/mock-results/DumpRenderTree-823-leaks.txt']
self.assertEqual(detector.count_total_leaks(leak_file_paths), 35765)
| bsd-3-clause |
dave62/lpod-Chart | src/chart.py | 1 | 34156 | from lpod.element import odf_create_element, register_element_class, odf_element
from lpod.frame import odf_create_frame
from lpod.element import odf_create_element
from lpod.container import odf_new_container
from lpod.table import _get_cell_coordinates, _digit_to_alpha
from lpod.utils import _get_abspath
from string import splitfields
from os import getcwd
from sys import path
from os.path import exists
#
#Chart
#
def odf_create_chart(chart_class, size=('10cm', '10cm'), title=None,
subtitle=None, legend_position=None):
"""
Create a chart structure for basic use
Arguments
class -- 'line' 'area' 'circle' 'ring' 'scatter' 'radar'
'bar' 'stock' 'bubble' 'surface' 'gant'
size -- (str, str)
title -- str
subtitle -- str
legend_position -- None 'start' 'end' 'top' 'bottom'
'top-start' 'bottom-start'
'top-end' 'bottom-end'
"""
element = odf_create_element('chart:chart')
element.set_class(chart_class)
element.set_size(size[0], size[1])
if title is not None:
element.set_title(title)
if subtitle is not None:
element.set_subtitle(subtitle)
if legend_position is not None:
element.set_legend(legend_position)
return element
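# Minimal sketch of direct use (sizes and titles are illustrative only):
#
#   chart = odf_create_chart('bar', size=('12cm', '8cm'),
#                            title='Monthly totals',
#                            legend_position='bottom')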
class odf_chart(odf_element):
""" This class implements an xml element that represents a chart"""
#class
def get_class(self):
return self.get_attribute('chart:class')
def set_class(self, chart_class):
"""
chart_class -- 'line' 'area' 'circle' 'ring' 'scatter' 'radar'
'bar' 'stock' 'bubble' 'surface' 'gant'
"""
self.set_attribute('chart:class', "chart:"+chart_class)
#size
def get_size(self):
h = self.get_attribute('svg:height')
w = self.get_attribute('svg:width')
return (w, h)
def set_size(self, w, h):
""" arguments -- str """
self.set_attribute('svg:width', w)
self.set_attribute('svg:height', h)
#style
def get_style(self):
return self.get_attribute('chart:style-name')
def set_style(self, style):
""" style -- str """
self.set_attribute('chart:style-name', style)
#title
def get_title(self):
elt_title = self.get_element('chart:title')
if elt_title is not None:
return elt_title.get_text_content()
else:
return None
def set_title(self, text):
""" text -- str """
elt_title = self.get_element('chart:title')
if elt_title is None:
elt_title = odf_create_element('chart:title')
self.append(elt_title)
elt_title.set_text_content(text)
def get_title_style(self):
elt_title = self.get_element('chart:title')
if elt_title is not None:
return elt_title.get_attribute('chart:style-name')
else:
return None
def set_title_style(self, style):
""" style -- str """
elt_title = self.get_element('chart:title')
if elt_title is None:
elt_title = odf_create_element('chart:title')
self.append(elt_title)
elt_title.set_attribute('chart:style-name', style)
def get_title_position(self):
elt_title = self.get_element('chart:title')
if elt_title is not None:
x = elt_title.get_attribute('svg:x')
y = elt_title.get_attribute('svg:y')
return (x, y)
else:
return None
def set_title_position(self, x, y):
""" arguments -- str """
elt_title = self.get_element('chart:title')
if elt_title is None:
elt_title = odf_create_element('chart:title')
self.append(elt_title)
elt_title.set_attribute('svg:x', x)
elt_title.set_attribute('svg:y', y)
#subtitle
def get_subtitle(self):
elt_subtitle = self.get_element('chart:subtitle')
if elt_subtitle is not None:
return elt_subtitle.get_text_content()
else:
return None
def set_subtitle(self, text):
""" text -- str """
elt_subtitle = self.get_element('chart:subtitle')
if elt_subtitle is None:
elt_subtitle = odf_create_element('chart:subtitle')
self.append(elt_subtitle)
elt_subtitle.set_text_content(text)
def get_subtitle_style(self):
elt_subtitle = self.get_element('chart:subtitle')
if elt_subtitle is not None:
return elt_subtitle.get_attribute('chart:style-name')
else:
return None
def set_subtitle_style(self, style):
""" style -- str """
elt_subtitle = self.get_element('chart:subtitle')
if elt_subtitle is None:
elt_subtitle = odf_create_element('chart:subtitle')
self.append(elt_subtitle)
elt_subtitle.set_attribute('chart:style-name', style)
def get_subtitle_position(self):
elt_subtitle = self.get_element('chart:subtitle')
if elt_subtitle is not None:
x=elt_subtitle.get_attribute('svg:x')
y=elt_subtitle.get_attribute('svg:y')
return (x, y)
else:
return None
def set_subtitle_position(self, x, y):
""" arguments -- str """
elt_subtitle = self.get_element('chart:subtitle')
if elt_subtitle is None:
elt_subtitle = odf_create_element('chart:subtitle')
self.append(elt_subtitle)
elt_subtitle.set_attribute('svg:x', x)
elt_subtitle.set_attribute('svg:y', y)
#footer
def get_footer(self):
elt_footer = self.get_element('chart:footer')
if elt_footer is not None:
return elt_footer.get_text_content()
else:
return None
def set_footer(self, text):
""" test -- str """
elt_footer = self.get_element('chart:footer')
if elt_footer is None:
elt_footer = odf_create_element('chart:footer')
self.append(elt_footer)
elt_footer.set_text_content(text)
def get_footer_style(self):
elt_footer = self.get_element('chart:footer')
if elt_footer is not None:
return elt_footer.get_attribute('chart:style-name')
else:
return None
def set_footer_style(self, style):
""" style -- str """
elt_footer = self.get_element('chart:footer')
if elt_footer is None:
elt_footer = odf_create_element('chart:footer')
self.append(elt_footer)
elt_footer.set_attribute('chart:style-name', style)
def get_footer_position(self):
elt_footer = self.get_element('chart:footer')
if elt_footer is not None:
x=elt_footer.get_attribute('svg:x')
y=elt_footer.get_attribute('svg:y')
return (x, y)
else:
return None
def set_footer_position(self, x, y):
""" arguments -- str """
elt_footer = self.get_element('chart:footer')
if elt_footer is None:
elt_footer = odf_create_element('chart:footer')
self.append(elt_footer)
elt_footer.set_attribute('svg:x', x)
elt_footer.set_attribute('svg:y', y)
#legend
def set_legend(self, position):
"""
position -- 'start' 'end' 'top' 'bottom' 'top-start' 'bottom-start'
'top-end' 'bottom-end'
"""
elt_legend = self.get_element('chart:legend')
if elt_legend is None:
elt_legend = odf_create_element('chart:legend')
self.append(elt_legend)
elt_legend.set_attribute('chart:legend-position', position)
def get_legend_position(self):
elt_legend = self.get_element('chart:legend')
if elt_legend is not None:
return elt_legend.get_attribute('chart:legend-position')
else:
return None
def set_legend_alignment(self, align):
""" align -- 'start' 'center' 'end' """
elt_legend = self.get_element('chart:legend')
if elt_legend is None:
elt_legend = odf_create_element('chart:legend')
self.append(elt_legend)
elt_legend.set_attribute('chart:legend-align', align)
def get_legend_alignment(self):
elt_legend = self.get_element('chart:legend')
if elt_legend is not None:
return elt_legend.get_attribute('chart:legend-align')
else:
return None
def set_legend_style(self, style):
""" style -- str """
elt_legend = self.get_element('chart:legend')
if elt_legend is None:
elt_legend = odf_create_element('chart:legend')
self.append(elt_legend)
elt_legend.set_attribute('chart:style-name', style)
def get_legend_style(self):
elt_legend = self.get_element('chart:legend')
if elt_legend is not None:
return elt_legend.get_attribute('chart:style-name')
else:
return None
#
#Plot Area
#
def odf_create_plot_area(cell_range):
element = odf_create_element('chart:plot-area')
element.set_cell_range_address(cell_range)
return element
class odf_plot_area(odf_element):
"""
This class implements an xml element for a plot area used
to display the chart
"""
def get_cell_range_address(self):
return self.get_attribute('table:cell-range-address')
def set_cell_range_address(self, range_address):
""" range_address -- str like "Sheet1.A1:Sheet1.A10" """
self.set_attribute('table:cell-range-address', range_address)
def get_style(self):
return self.get_attribute('chart:style-name')
def set_style(self, style):
""" style -- str """
self.set_attribute('chart:style-name', style)
def get_position(self):
x = self.get_attribute('svg:x')
y = self.get_attribute('svg:y')
return (x, y)
def set_position(self, x, y):
""" arguments -- str """
self.set_attribute('svg:x', x)
self.set_attribute('svg:y', y)
def get_size(self):
w = self.get_attribute('svg:width')
h = self.get_attribute('svg:height')
return (w, h)
def set_size(self, w, h):
""" arguments -- str """
self.set_attribute('svg:width', w)
self.set_attribute('svg:height', h)
def set_labels(self, value):
""" value -- 'none' 'column' 'row' 'both' """
self.set_attribute('chart:data-source-has-labels', value)
def get_labels(self):
return self.get_attribute("chart:data-source-has-labels")
#Axis
#It might have three axis elements so they are defined by their dimension
def set_axis(self, dimension, title=None, grid=None):
"""
dimension -- 'x' 'y' 'z'
title -- str
grid -- None 'major' 'minor'
"""
axis = self.get_element("//chart:axis[@chart:dimension='" + dimension +
"']")
if axis is None:
axis = odf_create_element('chart:axis')
axis.set_attribute("chart:dimension", dimension)
self.append(axis)
if title is not None:
self.set_axis_title(dimension, title)
if grid is not None:
self.set_axis_grid(dimension, grid)
def set_axis_title(self, dimension, title):
"""
dimension -- 'x' 'y' 'z'
title -- str
"""
axis = self.get_element("//chart:axis[@chart:dimension='" + dimension +
"']")
if axis is None:
self.set_axis(dimension)
axis = self.get_element("//chart:axis[@chart:dimension='" +
dimension + "']")
axis_title = axis.get_element('chart:title')
if axis_title is None:
axis_title = odf_create_element('chart:title')
axis.append(axis_title)
axis_title.set_text_content(title)
def get_axis_title(self, dimension):
""" dimension -- 'x' 'y' 'z' """
axis = self.get_element("//chart:axis[@chart:dimension='" + dimension +
"']")
if axis is not None:
title = axis.get_element('chart:title')
if title is not None:
return title.get_text_content()
else:
return None
else:
return None
def set_axis_title_style(self, dimension, style):
""" dimension -- 'x' 'y' 'z' """
axis = self.get_element("//chart:axis[@chart:dimension='" + dimension +
"']")
if axis is None:
self.set_axis(dimension)
axis = self.get_element("//chart:axis[@chart:dimension='" +
dimension + "']")
axis_title = axis.get_element('chart:title')
if axis_title is None:
axis_title = odf_create_element('chart:title')
axis.append(axis_title)
axis_title.set_attribute("chart:style-name", style)
def get_axis_title_style(self, dimension):
""" dimension -- 'x' 'y' 'z' """
axis = self.get_element("//chart:axis[@chart:dimension='" + dimension +
"']")
if axis is not None:
title = axis.get_element('chart:title')
if title is not None:
return title.get_attribute("chart:style-name")
else:
return None
else:
return None
def set_axis_grid(self, dimension, grid):
"""
dimension -- 'x' 'y' 'z'
grid -- 'major' 'minor'
"""
axis = self.get_element("//chart:axis[@chart:dimension='" + dimension +
"']")
axis_grid = axis.get_element('chart:grid')
if axis_grid is None:
axis_grid = odf_create_element('chart:grid')
axis.append(axis_grid)
axis_grid.set_attribute('chart:class', grid)
def get_axis_grid(self, dimension):
""" dimension -- 'x' 'y' 'z' """
axis = self.get_element("//chart:axis[@chart:dimension='" + dimension +
"']")
if axis is not None:
grid = axis.get_element('chart:grid')
if grid is not None:
return grid.get_attribute('chart:class')
else:
return None
else:
return None
def get_axis_style(self, dimension):
""" dimension -- 'x' 'y' 'z' """
axis = self.get_element("//chart:axis[@chart:dimension='" + dimension +
"']")
if axis is not None:
return axis.get_attribute("chart:style-name")
else:
return None
def set_axis_style(self, dimension, style):
"""
style -- str
dimension -- 'x' 'y' 'z'
"""
axis = self.get_element("//chart:axis[@chart:dimension='" + dimension +
"']")
if axis is not None:
axis.set_attribute("chart:style-name", style)
else:
raise "axis element not found"
def get_grid_style(self, dimension):
""" dimension -- 'x' 'y' 'z' """
axis = self.get_element("//chart:axis[@chart:dimension='" + dimension +
"']")
if axis is not None:
grid = axis.get_element('chart:grid')
if grid is not None:
return grid.get_attribute("chart:style-name")
else:
return None
else:
return None
def set_grid_style(self, dimension, style):
"""
style -- str
dimension -- 'x' 'y' 'z'
"""
axis = self.get_element("//chart:axis[@chart:dimension='" + dimension +
"']")
if axis is not None:
grid = axis.get_element('chart:grid')
if grid is not None:
grid.set_attribute("chart:style-name", style)
else:
raise "grid element not found"
else:
raise "axis element not found"
def set_categories(self, dimension, range_address):
"""
dimension -- 'x' 'y' 'z'
range_address -- str
"""
axis = self.get_element("//chart:axis[@chart:dimension='" + dimension +
"']")
if axis is not None:
cat = axis.get_element("chart:categories")
if cat is None:
cat = odf_create_element("chart:categories")
axis.append(cat)
cat.set_attribute("table:cell-range-address", range_address)
else:
raise "axis element not found"
def get_categories(self, dimension):
""" dimension -- 'x' 'y' 'z' """
axis = self.get_element("//chart:axis[@chart:dimension='" + dimension +
"']")
if axis is not None:
cat = axis.get_element("chart:categories")
if cat is not None:
return cat.get_attribute("table:cell-range-address")
else:
return None
else:
return None
#Series
#A plot area might have many chart:series element
#chart:series are defined with their values attribute
def set_chart_series(self, values, chart_class):
"""
values -- str like "Sheet1.A1:Sheet1.A2"
chart_class -- 'line' 'area' 'circle' 'ring' 'scatter' 'radar'
'bar' 'stock' 'bubble' 'surface' 'gant'
"""
series = \
self.get_element("chart:series[@chart:values-cell-range-address='"
+ values + "']")
if series is None:
series = odf_create_element('chart:series')
series.set_attribute('chart:values-cell-range-address', values)
self.append(series)
series.set_attribute('chart:class', "chart:"+chart_class)
def get_chart_series_values(self):
series = self.get_elements("//chart:series")
return [s.get_attribute("chart:values-cell-range-address") for s in
series]
def set_chart_series_style(self, values, style):
""" style -- str """
series = \
self.get_element("chart:series[@chart:values-cell-range-address='"
+ values + "']")
if series is not None:
series.set_attribute("chart:style-name", style)
else:
raise "Series Element not found"
def get_chart_series_style(self, values):
series = \
self.get_element("chart:series[@chart:values-cell-range-address='"
+ values + "']")
if series is not None:
return series.get_attribute("chart:style-name")
else:
return None
def set_chart_series_label(self, values, label):
""" label -- str """
series = \
self.get_element("chart:series[@chart:values-cell-range-address='"
+ values + "']")
if series is not None:
series.set_attribute("chart:label-cell-address", label)
else:
raise "Series Element not found"
def get_chart_series_label(self, values):
series = \
self.get_element("chart:series[@chart:values-cell-range-address='"
+ values + "']")
if series is not None:
return series.get_attribute("chart:label-cell-address")
else:
return None
def set_chart_series_domain(self, values, cell_range):
""" cell_range -- str like 'Sheet1.A1:Sheet.A10' """
series = \
self.get_element("chart:series[@chart:values-cell-range-address='"
+ values + "']")
if series is not None:
domain = odf_create_element('chart:domain')
domain.set_attribute("table:cell-range-address", cell_range)
series.append(domain)
else:
raise "Series Element not found"
def get_chart_series_domain(self, values):
domain = \
self.get_element("chart:series[@chart:values-cell-range-address='"
+ values + "']/chart:domain")
if domain is not None:
return domain.get_attribute("table:cell-range-address")
else:
return None
#Floor
def set_floor(self, width=None, style=None):
""" arguments -- str """
floor = self.get_element("chart:floor")
if floor is None:
floor = odf_create_element('chart:floor')
self.append(floor)
if width is not None:
floor.set_attribute("svg:width", width)
if style is not None:
floor.set_attribute("chart:style-name", style)
def get_floor_width(self):
floor = self.get_element("chart:floor")
if floor is not None:
return floor.get_attribute("svg:width")
else:
return None
def get_floor_style(self):
floor = self.get_element("chart:floor")
if floor is not None:
return floor.get_attribute("chart:style-name")
else:
return None
#Wall
def set_wall(self, width=None, style=None):
""" arguments -- str """
wall = self.get_element("chart:wall")
if wall is None:
wall = odf_create_element('chart:wall')
self.append(wall)
if width is not None:
wall.set_attribute("svg:width", width)
if style is not None:
wall.set_attribute("chart:style-name", style)
def get_wall_width(self):
wall = self.get_element("chart:wall")
if wall is not None:
return wall.get_attribute("svg:width")
else:
return None
def get_wall_style(self):
wall = self.get_element("chart:wall")
if wall is not None:
return wall.get_attribute("chart:style-name")
else:
return None
#stock
def set_stock_loss_marker(self, style=None):
elt = self.get_element("chart:stock-loss-marker")
if elt is None:
elt = odf_create_element("chart:stock-loss-marker")
self.append(elt)
if style is not None:
elt.set_attribute("chart:style-name", style)
def get_stock_loss_marker_style(self):
elt = self.get_element("chart:stock-loss-marker")
if elt is not None:
return elt.get_attribute("chart:style-name")
else:
return None
def set_stock_gain_marker(self, style=None):
elt = self.get_element("chart:stock-gain-marker")
if elt is None:
elt = odf_create_element("chart:stock-gain-marker")
self.append(elt)
if style is not None:
elt.set_attribute("chart:style-name", style)
def get_stock_gain_marker_style(self):
elt = self.get_element("chart:stock-gain-marker")
if elt is not None:
return elt.get_attribute("chart:style-name")
else:
return None
def set_stock_range_line(self, style=None):
elt = self.get_element("chart:stock-range-line")
if elt is None:
elt = odf_create_element("chart:stock-range-line")
self.append(elt)
if style is not None:
elt.set_attribute("chart:style-name", style)
def get_stock_range_line_style(self):
elt = self.get_element("chart:stock-range-line")
if elt is not None:
return elt.get_attribute("chart:style-name")
else:
return None
###
#Quick charts
###
def create_simple_chart(chart_type, cell_range, chart_title=None,
data_in_columns=True, legend="none", x_labels="none"):
"""
Create a complete chart element for basics chart creation
Legend and x_labels allows user to inform that the first row
(or column) contain legend (or x_labels) data
chart_type -- 'line' 'area' 'circle' 'ring' 'scatter' 'radar'
'bar' 'stock' 'bubble' 'surface' 'gant'
cell_range -- str like 'Sheet.A1:Sheet.D5'
chart_title -- str
data_in_columns -- boolean
legend, x_labels -- 'none' 'column' 'row'
"""
chart = odf_create_chart(chart_type, title=chart_title,
legend_position='bottom')
po = odf_create_plot_area(cell_range)
po.set_axis('x')
po.set_axis('y', grid='minor')
datas = split_range(cell_range, legend, x_labels)
po.set_categories('x', datas["labels"])
legend_list = ()
if legend == "column":
legend_list = divide_range(datas["legend"], by="rows")
elif legend == "row":
legend_list = divide_range(datas["legend"], by="columns")
i = 0
if data_in_columns:
range_list = divide_range(datas["data"])
for r in range_list:
po.set_chart_series(r, chart_type)
if legend is not "none":
po.set_chart_series_label(r, legend_list[i])
i = i+1
else:
range_list = divide_range(datas["data"], by="rows")
for r in range_list:
po.set_chart_series(r, chart_type)
if legend is not "none":
po.set_chart_series_label(r, legend_list[i])
i = i+1
po.set_floor()
po.set_wall()
chart.append(po)
return chart
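# Example (illustrative; assumes a spreadsheet whose sheet "Sheet1" holds the
# x-axis labels in its first row and the legend entries in its first column):
#
#   chart = create_simple_chart('line', 'Sheet1.A1:Sheet1.D5',
#                               chart_title='Sales per quarter',
#                               legend='column', x_labels='row')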
###
#Useful functions
###
def split_range(cell_range, legend="row", x_labels="column"):
"""
returns a dictionnary with data_range, legend_range and labels for the
x-axis
cell_range - str like Sheet.A1:SheetD5
legend, x_labels - 'none', 'row', 'column'
"""
my_dict = {"data":cell_range, "legend":"", "labels":""}
sheet1=splitfields(splitfields(cell_range, ":")[0],".")[0]
cell1=splitfields(splitfields(cell_range, ":")[0],".")[1]
sheet2=splitfields(splitfields(cell_range, ":")[1],".")[0]
cell2=splitfields(splitfields(cell_range, ":")[1],".")[1]
tmp_coord1 = _get_cell_coordinates(cell1)
tmp_coord2 = _get_cell_coordinates(cell2)
    # we need the coordinates in ascending order
coord1 = (min(tmp_coord1[0], tmp_coord2[0]),min(tmp_coord1[1],
tmp_coord2[1]))
coord2 = (max(tmp_coord1[0], tmp_coord2[0]),max(tmp_coord1[1],
tmp_coord2[1]))
if legend is not 'none' and x_labels is not 'none':
#if we have both conditions, we have to delete the first cell
if legend == "row":
my_dict["legend"] = sheet1 + "." + _digit_to_alpha(coord1[0]+1) + \
str(coord1[1]+1) + ":"+sheet2 + "." + \
_digit_to_alpha(coord2[0]) + str(coord1[1]+1)
elif legend == "column":
my_dict["legend"] = sheet1 + "." + _digit_to_alpha(coord1[0]) + \
str(coord1[1]+2) + ":"+sheet2 + "." + \
_digit_to_alpha(coord1[0]) + str(coord2[1]+1)
if x_labels == "row":
my_dict["labels"] = sheet1 + "." + _digit_to_alpha(coord1[0]+1) + \
str(coord1[1]+1) + ":"+sheet2 + "." + \
_digit_to_alpha(coord2[0]) + str(coord1[1]+1)
elif x_labels == "column":
my_dict["labels"] = sheet1 + "." + _digit_to_alpha(coord1[0]) + \
str(coord1[1]+2) + ":"+sheet2 + "." + \
_digit_to_alpha(coord1[0]) + str(coord2[1]+1)
my_dict["data"] = sheet1 + "." + _digit_to_alpha(coord1[0]+1) + \
str(coord1[1]+2) + ":"+sheet2 + "." + \
_digit_to_alpha(coord2[0]) + str(coord2[1]+1)
else:
if legend == "row":
my_dict["legend"] = sheet1 + "." + _digit_to_alpha(coord1[0]) + \
str(coord1[1]+1) + ":"+sheet2 + "." + \
_digit_to_alpha(coord2[0]) + str(coord1[1]+1)
my_dict["data"] = sheet1 + "." + _digit_to_alpha(coord1[0]) + \
str(coord1[1]+2) + ":" + sheet2 + "." + \
_digit_to_alpha(coord2[0]) + str(coord2[1]+1)
elif legend == "column":
my_dict["legend"] = sheet1 + "." + _digit_to_alpha(coord1[0]) + \
str(coord1[1]+1) + ":"+sheet2 + "." + \
_digit_to_alpha(coord1[0]) + str(coord2[1]+1)
my_dict["data"] = sheet1 + "." + _digit_to_alpha(coord1[0]+1) + \
str(coord1[1]+1) + ":"+sheet2 + \
"."+_digit_to_alpha(coord2[0]) + str(coord2[1]+1)
if x_labels == "row":
my_dict["labels"] = sheet1 + "." + _digit_to_alpha(coord1[0]) + \
str(coord1[1]+1) + ":" + sheet2 + "." + \
_digit_to_alpha(coord2[0]) + str(coord1[1]+1)
my_dict["data"] = sheet1 + "." + _digit_to_alpha(coord1[0]) + \
str(coord1[1]+2) + ":"+sheet2 + "." + \
_digit_to_alpha(coord2[0]) + str(coord2[1]+1)
elif x_labels == "column":
my_dict["labels"] = sheet1 + "." + _digit_to_alpha(coord1[0]) + \
str(coord1[1]+1) + ":"+sheet2 + "." + \
_digit_to_alpha(coord1[0]) + str(coord2[1]+1)
my_dict["data"] = sheet1 + "."+ _digit_to_alpha(coord1[0]+1) + \
str(coord1[1]+1) + ":"+sheet2 + "." + \
_digit_to_alpha(coord2[0]) + str(coord2[1]+1)
return my_dict
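# Example (illustrative, assuming zero-based coordinates from
# _get_cell_coordinates): with legend data in the first row and x-axis labels in
# the first column, split_range("Sheet1.A1:Sheet1.D5", legend="row",
# x_labels="column") strips that row and column from "data" and returns:
#
#   {"data": "Sheet1.B2:Sheet1.D5",
#    "legend": "Sheet1.B1:Sheet1.D1",
#    "labels": "Sheet1.A2:Sheet1.A5"}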
def divide_range(cell_range, by="columns"):
"""
Returns a list of cell range (string) divided in columns or rows
cell_range -- str like Sheet.A1:Sheet.C5
by -- 'columns' 'rows'
"""
my_list = []
sheet1=splitfields(splitfields(cell_range, ":")[0],".")[0]
cell1=splitfields(splitfields(cell_range, ":")[0],".")[1]
sheet2=splitfields(splitfields(cell_range, ":")[1],".")[0]
cell2=splitfields(splitfields(cell_range, ":")[1],".")[1]
tmp_coord1 = _get_cell_coordinates(cell1)
tmp_coord2 = _get_cell_coordinates(cell2)
    # we need the coordinates in ascending order
coord1 = (min(tmp_coord1[0], tmp_coord2[0]),min(tmp_coord1[1],
tmp_coord2[1]))
coord2 = (max(tmp_coord1[0], tmp_coord2[0]),max(tmp_coord1[1],
tmp_coord2[1]))
if by == 'columns':
for i in range(coord1[0], coord2[0]+1):
tmp_cell_range = sheet1 + "." + _digit_to_alpha(i) + \
str(coord1[1]+1) + ":" + sheet2 + "." + \
_digit_to_alpha(i) + str(coord2[1]+1)
my_list.append(tmp_cell_range)
return my_list
elif by == 'rows':
for i in range(coord1[1], coord2[1]+1):
tmp_cell_range = sheet1 + "." + _digit_to_alpha(coord1[0]) + \
str(i+1) + ":" + sheet2 + "." + \
                             _digit_to_alpha(coord2[0]) + str(i+1)
my_list.append(tmp_cell_range)
return my_list
else:
raise AttributeError
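# Example (illustrative, same coordinate assumption as above):
# divide_range("Sheet1.A1:Sheet1.C2", by="columns") yields one sub-range per
# column:
#
#   ["Sheet1.A1:Sheet1.A2", "Sheet1.B1:Sheet1.B2", "Sheet1.C1:Sheet1.C2"]
#
# while by="rows" yields ["Sheet1.A1:Sheet1.C1", "Sheet1.A2:Sheet1.C2"].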
def add_chart_structure_in_document(document):
"""Search the .otc template installed and put xml files in document """
#We search the template
my_template = ""
for p in path:
if exists(p+"/chart/templates/chart.otc"):
my_template=p+"/chart/templates/chart.otc"
break
if my_template == "":
raise IOError, "Template .otc not found"
#we have to create a folder for the chart
i = 1
obj_created = False
name=""
manifest = document.get_part('META-INF/manifest.xml')
while not obj_created:
name="Object "+str(i)
if manifest.get_media_type(name+'/') is None:
manifest.add_full_path(name+'/',
"application/vnd.oasis.opendocument.chart")
document.container.set_part(name+'/', '')
obj_created = True
else:
i = i+1
#We open the template
chart = odf_new_container(my_template)
#we add templates files in the document
document.set_part(name+'/content.xml', chart.get_part('content.xml'))
manifest.add_full_path(name+'/content.xml' , "text/xml")
document.set_part(name+'/styles.xml', chart.get_part('styles.xml'))
manifest.add_full_path(name+'/styles.xml' , "text/xml")
document.set_part(name+'/meta.xml', chart.get_part('meta.xml'))
manifest.add_full_path(name+'/meta.xml' , "text/xml")
return name
def attach_chart_to_cell(name_obj, cell):
"""
create a frame in 'cell' in order to display the chart 'name_obj'
name_obj - str
cell - odf_cell
"""
#We need a frame
frame = odf_create_frame(size=("10cm", "10cm"))
#We need a draw:object element
element = odf_create_element("draw:object")
element.set_attribute("xlink:href", "./"+name_obj)
element.set_attribute("xlink:type", "simple")
element.set_attribute("xlink:show", "embed")
element.set_attribute("xlink:actuate", "onLoad")
frame.append(element)
cell.append(frame)
return cell
#register
register_element_class('chart:chart', odf_chart)
register_element_class('chart:plot-area', odf_plot_area)
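# End-to-end sketch (hedged: assumes an lpod spreadsheet document and a target
# odf_cell already obtained from one of its tables; names are illustrative):
#
#   obj_name = add_chart_structure_in_document(document)
#   chart = create_simple_chart('bar', 'Sheet1.A1:Sheet1.D5',
#                               chart_title='Sales', legend='row',
#                               x_labels='column')
#   # the chart element would then be serialized into obj_name + '/content.xml'
#   # before anchoring it in the sheet:
#   attach_chart_to_cell(obj_name, cell)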
| gpl-3.0 |
Cinntax/home-assistant | homeassistant/components/device_automation/toggle_entity.py | 1 | 5616 | """Device automation helpers for toggle entity."""
from typing import Any, Dict, List
import voluptuous as vol
from homeassistant.core import Context, HomeAssistant, CALLBACK_TYPE
from homeassistant.components.automation import state, AutomationActionType
from homeassistant.components.device_automation.const import (
CONF_IS_OFF,
CONF_IS_ON,
CONF_TOGGLE,
CONF_TURN_OFF,
CONF_TURN_ON,
CONF_TURNED_OFF,
CONF_TURNED_ON,
)
from homeassistant.const import CONF_CONDITION, CONF_ENTITY_ID, CONF_PLATFORM, CONF_TYPE
from homeassistant.helpers.entity_registry import async_entries_for_device
from homeassistant.helpers import condition, config_validation as cv, service
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from . import TRIGGER_BASE_SCHEMA
# mypy: allow-untyped-calls, allow-untyped-defs
ENTITY_ACTIONS = [
{
# Turn entity off
CONF_TYPE: CONF_TURN_OFF
},
{
# Turn entity on
CONF_TYPE: CONF_TURN_ON
},
{
# Toggle entity
CONF_TYPE: CONF_TOGGLE
},
]
ENTITY_CONDITIONS = [
{
# True when entity is turned off
CONF_CONDITION: "device",
CONF_TYPE: CONF_IS_OFF,
},
{
# True when entity is turned on
CONF_CONDITION: "device",
CONF_TYPE: CONF_IS_ON,
},
]
ENTITY_TRIGGERS = [
{
# Trigger when entity is turned off
CONF_PLATFORM: "device",
CONF_TYPE: CONF_TURNED_OFF,
},
{
# Trigger when entity is turned on
CONF_PLATFORM: "device",
CONF_TYPE: CONF_TURNED_ON,
},
]
ACTION_SCHEMA = cv.DEVICE_ACTION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In([CONF_TOGGLE, CONF_TURN_OFF, CONF_TURN_ON]),
}
)
CONDITION_SCHEMA = cv.DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In([CONF_IS_OFF, CONF_IS_ON]),
}
)
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In([CONF_TURNED_OFF, CONF_TURNED_ON]),
}
)
async def async_call_action_from_config(
hass: HomeAssistant,
config: ConfigType,
variables: TemplateVarsType,
context: Context,
domain: str,
) -> None:
"""Change state based on configuration."""
config = ACTION_SCHEMA(config)
action_type = config[CONF_TYPE]
if action_type == CONF_TURN_ON:
action = "turn_on"
elif action_type == CONF_TURN_OFF:
action = "turn_off"
else:
action = "toggle"
service_action = {
service.CONF_SERVICE: "{}.{}".format(domain, action),
CONF_ENTITY_ID: config[CONF_ENTITY_ID],
}
await service.async_call_from_config(
hass, service_action, blocking=True, variables=variables, context=context
)
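# Illustrative action config accepted by ACTION_SCHEMA above (device and entity
# ids are placeholders, not real registry entries):
#
#   {
#       "domain": "light",
#       "device_id": "abc123",
#       "entity_id": "light.kitchen",
#       "type": "turn_on",
#   }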
def async_condition_from_config(
config: ConfigType, config_validation: bool
) -> condition.ConditionCheckerType:
"""Evaluate state based on configuration."""
condition_type = config[CONF_TYPE]
if condition_type == CONF_IS_ON:
stat = "on"
else:
stat = "off"
state_config = {
condition.CONF_CONDITION: "state",
condition.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
condition.CONF_STATE: stat,
}
return condition.state_from_config(state_config, config_validation)
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Listen for state changes based on configuration."""
trigger_type = config[CONF_TYPE]
if trigger_type == CONF_TURNED_ON:
from_state = "off"
to_state = "on"
else:
from_state = "on"
to_state = "off"
state_config = {
state.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
state.CONF_FROM: from_state,
state.CONF_TO: to_state,
}
return await state.async_attach_trigger(
hass, state_config, action, automation_info, platform_type="device"
)
async def _async_get_automations(
hass: HomeAssistant, device_id: str, automation_templates: List[dict], domain: str
) -> List[dict]:
"""List device automations."""
automations: List[Dict[str, Any]] = []
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entries = [
entry
for entry in async_entries_for_device(entity_registry, device_id)
if entry.domain == domain
]
for entry in entries:
automations.extend(
(
{
**template,
"device_id": device_id,
"entity_id": entry.entity_id,
"domain": domain,
}
for template in automation_templates
)
)
return automations
async def async_get_actions(
hass: HomeAssistant, device_id: str, domain: str
) -> List[dict]:
"""List device actions."""
return await _async_get_automations(hass, device_id, ENTITY_ACTIONS, domain)
async def async_get_conditions(
hass: HomeAssistant, device_id: str, domain: str
) -> List[dict]:
"""List device conditions."""
return await _async_get_automations(hass, device_id, ENTITY_CONDITIONS, domain)
async def async_get_triggers(
hass: HomeAssistant, device_id: str, domain: str
) -> List[dict]:
"""List device triggers."""
return await _async_get_automations(hass, device_id, ENTITY_TRIGGERS, domain)
| apache-2.0 |
midori1/midorinoblog | site-packages/django/contrib/gis/geos/tests/test_geos_mutation.py | 112 | 5462 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
import unittest
from unittest import skipUnless
from .. import HAS_GEOS
if HAS_GEOS:
from .. import fromstr, LinearRing, LineString, MultiPoint, Point, Polygon
from ..error import GEOSIndexError
if HAS_GEOS:
def api_get_distance(x):
return x.distance(Point(-200, -200))
def api_get_buffer(x):
return x.buffer(10)
def api_get_geom_typeid(x):
return x.geom_typeid
def api_get_num_coords(x):
return x.num_coords
def api_get_centroid(x):
return x.centroid
def api_get_empty(x):
return x.empty
def api_get_valid(x):
return x.valid
def api_get_simple(x):
return x.simple
def api_get_ring(x):
return x.ring
def api_get_boundary(x):
return x.boundary
def api_get_convex_hull(x):
return x.convex_hull
def api_get_extent(x):
return x.extent
def api_get_area(x):
return x.area
def api_get_length(x):
return x.length
geos_function_tests = [val for name, val in vars().items()
if hasattr(val, '__call__')
and name.startswith('api_get_')]
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSMutationTest(unittest.TestCase):
"""
Tests Pythonic Mutability of Python GEOS geometry wrappers
get/set/delitem on a slice, normal list methods
"""
def test00_GEOSIndexException(self):
'Testing Geometry GEOSIndexError'
p = Point(1, 2)
for i in range(-2, 2):
p._checkindex(i)
self.assertRaises(GEOSIndexError, p._checkindex, 2)
self.assertRaises(GEOSIndexError, p._checkindex, -3)
def test01_PointMutations(self):
'Testing Point mutations'
for p in (Point(1, 2, 3), fromstr('POINT (1 2 3)')):
self.assertEqual(p._get_single_external(1), 2.0, 'Point _get_single_external')
# _set_single
p._set_single(0, 100)
self.assertEqual(p.coords, (100.0, 2.0, 3.0), 'Point _set_single')
# _set_list
p._set_list(2, (50, 3141))
self.assertEqual(p.coords, (50.0, 3141.0), 'Point _set_list')
def test02_PointExceptions(self):
'Testing Point exceptions'
self.assertRaises(TypeError, Point, range(1))
self.assertRaises(TypeError, Point, range(4))
def test03_PointApi(self):
'Testing Point API'
q = Point(4, 5, 3)
for p in (Point(1, 2, 3), fromstr('POINT (1 2 3)')):
p[0:2] = [4, 5]
for f in geos_function_tests:
self.assertEqual(f(q), f(p), 'Point ' + f.__name__)
def test04_LineStringMutations(self):
'Testing LineString mutations'
for ls in (LineString((1, 0), (4, 1), (6, -1)),
fromstr('LINESTRING (1 0,4 1,6 -1)')):
self.assertEqual(ls._get_single_external(1), (4.0, 1.0), 'LineString _get_single_external')
# _set_single
ls._set_single(0, (-50, 25))
self.assertEqual(ls.coords, ((-50.0, 25.0), (4.0, 1.0), (6.0, -1.0)), 'LineString _set_single')
# _set_list
ls._set_list(2, ((-50.0, 25.0), (6.0, -1.0)))
self.assertEqual(ls.coords, ((-50.0, 25.0), (6.0, -1.0)), 'LineString _set_list')
lsa = LineString(ls.coords)
for f in geos_function_tests:
self.assertEqual(f(lsa), f(ls), 'LineString ' + f.__name__)
def test05_Polygon(self):
'Testing Polygon mutations'
for pg in (Polygon(((1, 0), (4, 1), (6, -1), (8, 10), (1, 0)),
((5, 4), (6, 4), (6, 3), (5, 4))),
fromstr('POLYGON ((1 0,4 1,6 -1,8 10,1 0),(5 4,6 4,6 3,5 4))')):
self.assertEqual(pg._get_single_external(0),
LinearRing((1, 0), (4, 1), (6, -1), (8, 10), (1, 0)),
'Polygon _get_single_external(0)')
self.assertEqual(pg._get_single_external(1),
LinearRing((5, 4), (6, 4), (6, 3), (5, 4)),
'Polygon _get_single_external(1)')
# _set_list
pg._set_list(2, (((1, 2), (10, 0), (12, 9), (-1, 15), (1, 2)),
((4, 2), (5, 2), (5, 3), (4, 2))))
self.assertEqual(
pg.coords,
(((1.0, 2.0), (10.0, 0.0), (12.0, 9.0), (-1.0, 15.0), (1.0, 2.0)),
((4.0, 2.0), (5.0, 2.0), (5.0, 3.0), (4.0, 2.0))),
'Polygon _set_list')
lsa = Polygon(*pg.coords)
for f in geos_function_tests:
self.assertEqual(f(lsa), f(pg), 'Polygon ' + f.__name__)
def test06_Collection(self):
'Testing Collection mutations'
for mp in (MultiPoint(*map(Point, ((3, 4), (-1, 2), (5, -4), (2, 8)))),
fromstr('MULTIPOINT (3 4,-1 2,5 -4,2 8)')):
self.assertEqual(mp._get_single_external(2), Point(5, -4), 'Collection _get_single_external')
mp._set_list(3, map(Point, ((5, 5), (3, -2), (8, 1))))
self.assertEqual(mp.coords, ((5.0, 5.0), (3.0, -2.0), (8.0, 1.0)), 'Collection _set_list')
lsa = MultiPoint(*map(Point, ((5, 5), (3, -2), (8, 1))))
for f in geos_function_tests:
self.assertEqual(f(lsa), f(mp), 'MultiPoint ' + f.__name__)
| apache-2.0 |
anomalchik/android_kernel_xiaomi | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
piffey/ansible | lib/ansible/modules/database/proxysql/proxysql_mysql_users.py | 42 | 16189 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: proxysql_mysql_users
version_added: "2.3"
author: "Ben Mildren (@bmildren)"
short_description: Adds or removes mysql users from proxysql admin interface.
description:
- The M(proxysql_mysql_users) module adds or removes mysql users using the
proxysql admin interface.
options:
username:
description:
- Name of the user connecting to the mysqld or ProxySQL instance.
required: True
password:
description:
- Password of the user connecting to the mysqld or ProxySQL instance.
active:
description:
      - A user with I(active) set to C(False) will be tracked in the database,
        but will never be loaded into the in-memory data structures. If omitted
        the proxysql database default for I(active) is C(True).
use_ssl:
description:
- If I(use_ssl) is set to C(True), connections by this user will be made
using SSL connections. If omitted the proxysql database default for
I(use_ssl) is C(False).
default_hostgroup:
description:
- If there is no matching rule for the queries sent by this user, the
traffic it generates is sent to the specified hostgroup.
        If omitted the proxysql database default for I(default_hostgroup) is 0.
default_schema:
description:
- The schema to which the connection should change to by default.
transaction_persistent:
description:
- If this is set for the user with which the MySQL client is connecting
to ProxySQL (thus a "frontend" user), transactions started within a
hostgroup will remain within that hostgroup regardless of any other
rules.
If omitted the proxysql database default for I(transaction_persistent)
is C(False).
fast_forward:
description:
- If I(fast_forward) is set to C(True), I(fast_forward) will bypass the
query processing layer (rewriting, caching) and pass through the query
directly as is to the backend server. If omitted the proxysql database
default for I(fast_forward) is C(False).
backend:
description:
- If I(backend) is set to C(True), this (username, password) pair is
used for authenticating to the ProxySQL instance.
default: True
frontend:
description:
- If I(frontend) is set to C(True), this (username, password) pair is
used for authenticating to the mysqld servers against any hostgroup.
default: True
max_connections:
description:
- The maximum number of connections ProxySQL will open to the backend for
this user. If omitted the proxysql database default for
I(max_connections) is 10000.
state:
description:
- When C(present) - adds the user, when C(absent) - removes the user.
choices: [ "present", "absent" ]
default: present
extends_documentation_fragment:
- proxysql.managing_config
- proxysql.connectivity
'''
EXAMPLES = '''
---
# This example adds a user, it saves the mysql user config to disk, but
# avoids loading the mysql user config to runtime (this might be because
# several users are being added and the user wants to push the config to
# runtime in a single batch using the M(proxysql_manage_config) module). It
# uses supplied credentials to connect to the proxysql admin interface.
- proxysql_mysql_users:
login_user: 'admin'
login_password: 'admin'
username: 'productiondba'
state: present
load_to_runtime: False
# This example removes a user, saves the mysql user config to disk, and
# dynamically loads the mysql user config to runtime. It uses credentials
# in a supplied config file to connect to the proxysql admin interface.
- proxysql_mysql_users:
config_file: '~/proxysql.cnf'
username: 'mysqlboy'
state: absent
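# This example (an illustrative sketch, not part of the original examples)
# adds a read-only reporting user pinned to a specific default hostgroup and
# capped in connections. The credentials, hostgroup id and limit shown here
# are assumptions, not recommended values.
- proxysql_mysql_users:
    login_user: 'admin'
    login_password: 'admin'
    username: 'reporting_ro'
    password: 'report_pass'
    default_hostgroup: 2
    max_connections: 200
    state: present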
'''
RETURN = '''
stdout:
description: The mysql user modified or removed from proxysql
    returned: On create/update it will return the newly modified user; on
              delete it will return the deleted record.
type: dict
sample:
changed: true
msg: Added user to mysql_users
state: present
user:
active: 1
backend: 1
default_hostgroup: 1
default_schema: null
fast_forward: 0
frontend: 1
max_connections: 10000
password: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
schema_locked: 0
transaction_persistent: 0
use_ssl: 0
username: guest_ro
username: guest_ro
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.mysql import mysql_connect
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
try:
import MySQLdb
import MySQLdb.cursors
except ImportError:
MYSQLDB_FOUND = False
else:
MYSQLDB_FOUND = True
# ===========================================
# proxysql module specific support methods.
#
def perform_checks(module):
if module.params["login_port"] < 0 \
or module.params["login_port"] > 65535:
module.fail_json(
msg="login_port must be a valid unix port number (0-65535)"
)
if not MYSQLDB_FOUND:
module.fail_json(
msg="the python mysqldb module is required"
)
def save_config_to_disk(cursor):
cursor.execute("SAVE MYSQL USERS TO DISK")
return True
def load_config_to_runtime(cursor):
cursor.execute("LOAD MYSQL USERS TO RUNTIME")
return True
class ProxySQLUser(object):
def __init__(self, module):
self.state = module.params["state"]
self.save_to_disk = module.params["save_to_disk"]
self.load_to_runtime = module.params["load_to_runtime"]
self.username = module.params["username"]
self.backend = module.params["backend"]
self.frontend = module.params["frontend"]
config_data_keys = ["password",
"active",
"use_ssl",
"default_hostgroup",
"default_schema",
"transaction_persistent",
"fast_forward",
"max_connections"]
self.config_data = dict((k, module.params[k])
for k in config_data_keys)
def check_user_config_exists(self, cursor):
query_string = \
"""SELECT count(*) AS `user_count`
FROM mysql_users
WHERE username = %s
AND backend = %s
AND frontend = %s"""
query_data = \
[self.username,
self.backend,
self.frontend]
cursor.execute(query_string, query_data)
check_count = cursor.fetchone()
return (int(check_count['user_count']) > 0)
def check_user_privs(self, cursor):
query_string = \
"""SELECT count(*) AS `user_count`
FROM mysql_users
WHERE username = %s
AND backend = %s
AND frontend = %s"""
query_data = \
[self.username,
self.backend,
self.frontend]
for col, val in iteritems(self.config_data):
if val is not None:
query_data.append(val)
query_string += "\n AND " + col + " = %s"
cursor.execute(query_string, query_data)
check_count = cursor.fetchone()
return (int(check_count['user_count']) > 0)
def get_user_config(self, cursor):
query_string = \
"""SELECT *
FROM mysql_users
WHERE username = %s
AND backend = %s
AND frontend = %s"""
query_data = \
[self.username,
self.backend,
self.frontend]
cursor.execute(query_string, query_data)
user = cursor.fetchone()
return user
def create_user_config(self, cursor):
query_string = \
"""INSERT INTO mysql_users (
username,
backend,
frontend"""
cols = 3
query_data = \
[self.username,
self.backend,
self.frontend]
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
query_string += ",\n" + col
query_string += \
(")\n" +
"VALUES (" +
"%s ," * cols)
query_string = query_string[:-2]
query_string += ")"
cursor.execute(query_string, query_data)
return True
def update_user_config(self, cursor):
query_string = """UPDATE mysql_users"""
cols = 0
query_data = []
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
if cols == 1:
query_string += "\nSET " + col + "= %s,"
else:
query_string += "\n " + col + " = %s,"
query_string = query_string[:-1]
query_string += ("\nWHERE username = %s\n AND backend = %s" +
"\n AND frontend = %s")
query_data.append(self.username)
query_data.append(self.backend)
query_data.append(self.frontend)
cursor.execute(query_string, query_data)
return True
def delete_user_config(self, cursor):
query_string = \
"""DELETE FROM mysql_users
WHERE username = %s
AND backend = %s
AND frontend = %s"""
query_data = \
[self.username,
self.backend,
self.frontend]
cursor.execute(query_string, query_data)
return True
def manage_config(self, cursor, state):
if state:
if self.save_to_disk:
save_config_to_disk(cursor)
if self.load_to_runtime:
load_config_to_runtime(cursor)
def create_user(self, check_mode, result, cursor):
if not check_mode:
result['changed'] = \
self.create_user_config(cursor)
result['msg'] = "Added user to mysql_users"
result['user'] = \
self.get_user_config(cursor)
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("User would have been added to" +
" mysql_users, however check_mode" +
" is enabled.")
def update_user(self, check_mode, result, cursor):
if not check_mode:
result['changed'] = \
self.update_user_config(cursor)
result['msg'] = "Updated user in mysql_users"
result['user'] = \
self.get_user_config(cursor)
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("User would have been updated in" +
" mysql_users, however check_mode" +
" is enabled.")
def delete_user(self, check_mode, result, cursor):
if not check_mode:
result['user'] = \
self.get_user_config(cursor)
result['changed'] = \
self.delete_user_config(cursor)
result['msg'] = "Deleted user from mysql_users"
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("User would have been deleted from" +
" mysql_users, however check_mode is" +
" enabled.")
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None, type='str'),
login_password=dict(default=None, no_log=True, type='str'),
login_host=dict(default="127.0.0.1"),
login_unix_socket=dict(default=None),
login_port=dict(default=6032, type='int'),
config_file=dict(default='', type='path'),
username=dict(required=True, type='str'),
password=dict(no_log=True, type='str'),
active=dict(type='bool'),
use_ssl=dict(type='bool'),
default_hostgroup=dict(type='int'),
default_schema=dict(type='str'),
transaction_persistent=dict(type='bool'),
fast_forward=dict(type='bool'),
backend=dict(default=True, type='bool'),
frontend=dict(default=True, type='bool'),
max_connections=dict(type='int'),
state=dict(default='present', choices=['present',
'absent']),
save_to_disk=dict(default=True, type='bool'),
load_to_runtime=dict(default=True, type='bool')
),
supports_check_mode=True
)
perform_checks(module)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
config_file = module.params["config_file"]
cursor = None
try:
cursor = mysql_connect(module,
login_user,
login_password,
config_file,
cursor_class=MySQLdb.cursors.DictCursor)
except MySQLdb.Error as e:
module.fail_json(
msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
)
proxysql_user = ProxySQLUser(module)
result = {}
result['state'] = proxysql_user.state
if proxysql_user.username:
result['username'] = proxysql_user.username
if proxysql_user.state == "present":
try:
if not proxysql_user.check_user_privs(cursor):
if not proxysql_user.check_user_config_exists(cursor):
proxysql_user.create_user(module.check_mode,
result,
cursor)
else:
proxysql_user.update_user(module.check_mode,
result,
cursor)
else:
result['changed'] = False
result['msg'] = ("The user already exists in mysql_users" +
" and doesn't need to be updated.")
result['user'] = \
proxysql_user.get_user_config(cursor)
except MySQLdb.Error as e:
module.fail_json(
msg="unable to modify user.. %s" % to_native(e)
)
elif proxysql_user.state == "absent":
try:
if proxysql_user.check_user_config_exists(cursor):
proxysql_user.delete_user(module.check_mode,
result,
cursor)
else:
result['changed'] = False
result['msg'] = ("The user is already absent from the" +
" mysql_users memory configuration")
except MySQLdb.Error as e:
module.fail_json(
msg="unable to remove user.. %s" % to_native(e)
)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
haridsv/pip | pip/_vendor/requests/exceptions.py | 895 | 2517 | # -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from .packages.urllib3.exceptions import HTTPError as BaseHTTPError
class RequestException(IOError):
"""There was an ambiguous exception that occurred while handling your
request."""
def __init__(self, *args, **kwargs):
"""
Initialize RequestException with `request` and `response` objects.
"""
response = kwargs.pop('response', None)
self.response = response
self.request = kwargs.pop('request', None)
if (response is not None and not self.request and
hasattr(response, 'request')):
self.request = self.response.request
super(RequestException, self).__init__(*args, **kwargs)
class HTTPError(RequestException):
"""An HTTP error occurred."""
class ConnectionError(RequestException):
"""A Connection error occurred."""
class ProxyError(ConnectionError):
"""A proxy error occurred."""
class SSLError(ConnectionError):
"""An SSL error occurred."""
class Timeout(RequestException):
"""The request timed out.
Catching this error will catch both
:exc:`~requests.exceptions.ConnectTimeout` and
:exc:`~requests.exceptions.ReadTimeout` errors.
"""
class ConnectTimeout(ConnectionError, Timeout):
"""The request timed out while trying to connect to the remote server.
Requests that produced this error are safe to retry.
"""
class ReadTimeout(Timeout):
"""The server did not send any data in the allotted amount of time."""
class URLRequired(RequestException):
"""A valid URL is required to make a request."""
class TooManyRedirects(RequestException):
"""Too many redirects."""
class MissingSchema(RequestException, ValueError):
"""The URL schema (e.g. http or https) is missing."""
class InvalidSchema(RequestException, ValueError):
"""See defaults.py for valid schemas."""
class InvalidURL(RequestException, ValueError):
""" The URL provided was somehow invalid. """
class ChunkedEncodingError(RequestException):
"""The server declared chunked encoding but sent an invalid chunk."""
class ContentDecodingError(RequestException, BaseHTTPError):
"""Failed to decode response content"""
class StreamConsumedError(RequestException, TypeError):
"""The content for this response was already consumed"""
class RetryError(RequestException):
"""Custom retries logic failed"""
| mit |
dpac-vlsi/SynchroTrace | tests/long/fs/80.solaris-boot/test.py | 90 | 1630 | # Copyright (c) 2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
root.system.readfile = os.path.join(tests_root, 'halt.sh')
| bsd-3-clause |
Pica4x6/megaradrp | megaradrp/recipes/calibration/trace.py | 1 | 3127 | #
# Copyright 2011-2015 Universidad Complutense de Madrid
#
# This file is part of Megara DRP
#
# Megara DRP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Megara DRP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Megara DRP. If not, see <http://www.gnu.org/licenses/>.
#
"""Fiber tracing Recipe."""
from __future__ import division, print_function
import logging
import numpy
from numina.array.trace.traces import trace
from numina.core import Product
from numina.core.requirements import ObservationResultRequirement
from megaradrp.products import MasterFiberFlatFrame, TraceMap
from megaradrp.core.recipe import MegaraBaseRecipe
from megaradrp.requirements import MasterBiasRequirement, MasterBPMRequirement
from megaradrp.requirements import MasterDarkRequirement
from megaradrp.trace.traces import init_traces
_logger = logging.getLogger('numina.recipes.megara')
class TraceMapRecipe(MegaraBaseRecipe):
obresult = ObservationResultRequirement()
master_bias = MasterBiasRequirement()
master_dark = MasterDarkRequirement()
master_bpm = MasterBPMRequirement()
fiberflat_frame = Product(MasterFiberFlatFrame)
master_traces = Product(TraceMap)
def __init__(self):
super(TraceMapRecipe, self).__init__(
version="0.1.0"
)
def run(self, rinput):
parameters = self.get_parameters(rinput)
reduced = self.bias_process_common(rinput.obresult, parameters)
data = reduced[0].data
cstart = 2000
hs = 3
step1 = 2
background1 = 150.0
maxdis1 = 2.0
_logger.info('find peaks in column %i', cstart)
central_peaks = init_traces(data, center=cstart, hs=hs,
background=background1)
_logger.info(' %i peaks found', len(central_peaks))
# The byteswapping is required by the cython module
if data.dtype.byteorder != '=':
_logger.debug('byteswapping image')
image2 = data.byteswap().newbyteorder()
else:
image2 = data
tracelist = []
_logger.info('trace peaks')
for dtrace in central_peaks.values():
mm = trace(image2, x=cstart, y=dtrace.start[1], step=step1,
hs=hs, background=background1, maxdis=maxdis1)
pfit = numpy.polyfit(mm[:,0], mm[:,1], deg=5)
tracelist.append({'fibid': dtrace.fibid, 'boxid': dtrace.boxid,
'start':0, 'stop':4095,
'fitparms': pfit.tolist()})
return self.create_result(fiberflat_frame=reduced,
master_traces=tracelist)
| gpl-3.0 |
rohe/pysaml2-3 | src/saml2/authn_context/timesync.py | 37 | 107827 | #!/usr/bin/env python
#
# Generated Thu May 16 21:05:38 2013 by parse_xsd.py version 0.5.
#
import saml2
from saml2 import SamlBase
NAMESPACE = 'urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken'
class PhysicalVerification(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:
PhysicalVerification element """
c_tag = 'PhysicalVerification'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['credentialLevel'] = ('credential_level', 'None', False)
def __init__(self,
credential_level=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.credential_level = credential_level
def physical_verification_from_string(xml_string):
return saml2.create_class_from_xml_string(PhysicalVerification, xml_string)
class Generation(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:Generation
element """
c_tag = 'Generation'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['mechanism'] = ('mechanism', 'None', True)
def __init__(self,
mechanism=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.mechanism = mechanism
def generation_from_string(xml_string):
return saml2.create_class_from_xml_string(Generation, xml_string)
class NymType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:nymType
element """
c_tag = 'nymType'
c_namespace = NAMESPACE
c_value_type = {'base': 'xs:NMTOKEN',
'enumeration': ['anonymity', 'verinymity', 'pseudonymity']}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def nym_type__from_string(xml_string):
return saml2.create_class_from_xml_string(NymType_, xml_string)
class GoverningAgreementRefType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:
GoverningAgreementRefType element """
c_tag = 'GoverningAgreementRefType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['governingAgreementRef'] = (
'governing_agreement_ref', 'anyURI', True)
def __init__(self,
governing_agreement_ref=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.governing_agreement_ref = governing_agreement_ref
def governing_agreement_ref_type__from_string(xml_string):
return saml2.create_class_from_xml_string(GoverningAgreementRefType_,
xml_string)
class KeySharingType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:KeySharingType
element """
c_tag = 'KeySharingType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['sharing'] = ('sharing', 'boolean', True)
def __init__(self,
sharing=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.sharing = sharing
def key_sharing_type__from_string(xml_string):
return saml2.create_class_from_xml_string(KeySharingType_, xml_string)
class RestrictedLengthType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:
RestrictedLengthType element """
c_tag = 'RestrictedLengthType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['min'] = ('min', 'None', True)
c_attributes['max'] = ('max', 'integer', False)
def __init__(self,
min=None,
max=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.min = min
self.max = max
def restricted_length_type__from_string(xml_string):
return saml2.create_class_from_xml_string(RestrictedLengthType_, xml_string)
class AlphabetType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:AlphabetType
element """
c_tag = 'AlphabetType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['requiredChars'] = ('required_chars', 'string', True)
c_attributes['excludedChars'] = ('excluded_chars', 'string', False)
c_attributes['case'] = ('case', 'string', False)
def __init__(self,
required_chars=None,
excluded_chars=None,
case=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.required_chars = required_chars
self.excluded_chars = excluded_chars
self.case = case
def alphabet_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AlphabetType_, xml_string)
class DeviceTypeType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:DeviceTypeType
element """
c_tag = 'DeviceTypeType'
c_namespace = NAMESPACE
c_value_type = {'base': 'xs:NMTOKEN',
'enumeration': ['hardware', 'software']}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def device_type_type__from_string(xml_string):
return saml2.create_class_from_xml_string(DeviceTypeType_, xml_string)
class BooleanType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:booleanType
element """
c_tag = 'booleanType'
c_namespace = NAMESPACE
c_value_type = {'base': 'xs:NMTOKEN', 'enumeration': ['true', 'false']}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def boolean_type__from_string(xml_string):
return saml2.create_class_from_xml_string(BooleanType_, xml_string)
class ActivationLimitDurationType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:
ActivationLimitDurationType element """
c_tag = 'ActivationLimitDurationType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['duration'] = ('duration', 'duration', True)
def __init__(self,
duration=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.duration = duration
def activation_limit_duration_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ActivationLimitDurationType_,
xml_string)
class ActivationLimitUsagesType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:
ActivationLimitUsagesType element """
c_tag = 'ActivationLimitUsagesType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['number'] = ('number', 'integer', True)
def __init__(self,
number=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.number = number
def activation_limit_usages_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ActivationLimitUsagesType_,
xml_string)
class ActivationLimitSessionType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:
ActivationLimitSessionType element """
c_tag = 'ActivationLimitSessionType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def activation_limit_session_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ActivationLimitSessionType_,
xml_string)
class LengthType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:LengthType
element """
c_tag = 'LengthType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['min'] = ('min', 'integer', True)
c_attributes['max'] = ('max', 'integer', False)
def __init__(self,
min=None,
max=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.min = min
self.max = max
def length_type__from_string(xml_string):
return saml2.create_class_from_xml_string(LengthType_, xml_string)
class MediumType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:mediumType
element """
c_tag = 'mediumType'
c_namespace = NAMESPACE
c_value_type = {'base': 'xs:NMTOKEN',
'enumeration': ['memory', 'smartcard', 'token',
'MobileDevice', 'MobileAuthCard']}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def medium_type__from_string(xml_string):
return saml2.create_class_from_xml_string(MediumType_, xml_string)
class KeyStorageType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:KeyStorageType
element """
c_tag = 'KeyStorageType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['medium'] = ('medium', MediumType_, True)
def __init__(self,
medium=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.medium = medium
def key_storage_type__from_string(xml_string):
return saml2.create_class_from_xml_string(KeyStorageType_, xml_string)
class ExtensionType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:ExtensionType
element """
c_tag = 'ExtensionType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def extension_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ExtensionType_, xml_string)
class TimeSyncTokenType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:
TimeSyncTokenType element """
c_tag = 'TimeSyncTokenType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['DeviceType'] = ('device_type', 'None', True)
c_attributes['SeedLength'] = ('seed_length', 'None', True)
c_attributes['DeviceInHand'] = ('device_in_hand', 'None', True)
def __init__(self,
device_type=None,
seed_length=None,
device_in_hand=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.device_type = device_type
self.seed_length = seed_length
self.device_in_hand = device_in_hand
def time_sync_token_type__from_string(xml_string):
return saml2.create_class_from_xml_string(TimeSyncTokenType_, xml_string)
class KeySharing(KeySharingType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:KeySharing
element """
c_tag = 'KeySharing'
c_namespace = NAMESPACE
c_children = KeySharingType_.c_children.copy()
c_attributes = KeySharingType_.c_attributes.copy()
c_child_order = KeySharingType_.c_child_order[:]
c_cardinality = KeySharingType_.c_cardinality.copy()
def key_sharing_from_string(xml_string):
return saml2.create_class_from_xml_string(KeySharing, xml_string)
class KeyStorage(KeyStorageType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:KeyStorage
element """
c_tag = 'KeyStorage'
c_namespace = NAMESPACE
c_children = KeyStorageType_.c_children.copy()
c_attributes = KeyStorageType_.c_attributes.copy()
c_child_order = KeyStorageType_.c_child_order[:]
c_cardinality = KeyStorageType_.c_cardinality.copy()
def key_storage_from_string(xml_string):
return saml2.create_class_from_xml_string(KeyStorage, xml_string)
class TimeSyncToken(TimeSyncTokenType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:TimeSyncToken
element """
c_tag = 'TimeSyncToken'
c_namespace = NAMESPACE
c_children = TimeSyncTokenType_.c_children.copy()
c_attributes = TimeSyncTokenType_.c_attributes.copy()
c_child_order = TimeSyncTokenType_.c_child_order[:]
c_cardinality = TimeSyncTokenType_.c_cardinality.copy()
def time_sync_token_from_string(xml_string):
return saml2.create_class_from_xml_string(TimeSyncToken, xml_string)
class Length(LengthType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:Length element """
c_tag = 'Length'
c_namespace = NAMESPACE
c_children = LengthType_.c_children.copy()
c_attributes = LengthType_.c_attributes.copy()
c_child_order = LengthType_.c_child_order[:]
c_cardinality = LengthType_.c_cardinality.copy()
def length_from_string(xml_string):
return saml2.create_class_from_xml_string(Length, xml_string)
class GoverningAgreementRef(GoverningAgreementRefType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:GoverningAgreementRef element """
c_tag = 'GoverningAgreementRef'
c_namespace = NAMESPACE
c_children = GoverningAgreementRefType_.c_children.copy()
c_attributes = GoverningAgreementRefType_.c_attributes.copy()
c_child_order = GoverningAgreementRefType_.c_child_order[:]
c_cardinality = GoverningAgreementRefType_.c_cardinality.copy()
def governing_agreement_ref_from_string(xml_string):
return saml2.create_class_from_xml_string(GoverningAgreementRef, xml_string)
class GoverningAgreementsType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:
GoverningAgreementsType element """
c_tag = 'GoverningAgreementsType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}GoverningAgreementRef'] = (
'governing_agreement_ref', [GoverningAgreementRef])
c_cardinality['governing_agreement_ref'] = {"min": 1}
c_child_order.extend(['governing_agreement_ref'])
def __init__(self,
governing_agreement_ref=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.governing_agreement_ref = governing_agreement_ref or []
def governing_agreements_type__from_string(xml_string):
return saml2.create_class_from_xml_string(GoverningAgreementsType_,
xml_string)
class RestrictedPasswordType_Length(RestrictedLengthType_):
c_tag = 'Length'
c_namespace = NAMESPACE
c_children = RestrictedLengthType_.c_children.copy()
c_attributes = RestrictedLengthType_.c_attributes.copy()
c_child_order = RestrictedLengthType_.c_child_order[:]
c_cardinality = RestrictedLengthType_.c_cardinality.copy()
def restricted_password_type__length_from_string(xml_string):
return saml2.create_class_from_xml_string(RestrictedPasswordType_Length,
xml_string)
class Alphabet(AlphabetType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:Alphabet element """
c_tag = 'Alphabet'
c_namespace = NAMESPACE
c_children = AlphabetType_.c_children.copy()
c_attributes = AlphabetType_.c_attributes.copy()
c_child_order = AlphabetType_.c_child_order[:]
c_cardinality = AlphabetType_.c_cardinality.copy()
def alphabet_from_string(xml_string):
return saml2.create_class_from_xml_string(Alphabet, xml_string)
class ActivationLimitDuration(ActivationLimitDurationType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:ActivationLimitDuration element """
c_tag = 'ActivationLimitDuration'
c_namespace = NAMESPACE
c_children = ActivationLimitDurationType_.c_children.copy()
c_attributes = ActivationLimitDurationType_.c_attributes.copy()
c_child_order = ActivationLimitDurationType_.c_child_order[:]
c_cardinality = ActivationLimitDurationType_.c_cardinality.copy()
def activation_limit_duration_from_string(xml_string):
return saml2.create_class_from_xml_string(ActivationLimitDuration,
xml_string)
class ActivationLimitUsages(ActivationLimitUsagesType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:ActivationLimitUsages element """
c_tag = 'ActivationLimitUsages'
c_namespace = NAMESPACE
c_children = ActivationLimitUsagesType_.c_children.copy()
c_attributes = ActivationLimitUsagesType_.c_attributes.copy()
c_child_order = ActivationLimitUsagesType_.c_child_order[:]
c_cardinality = ActivationLimitUsagesType_.c_cardinality.copy()
def activation_limit_usages_from_string(xml_string):
return saml2.create_class_from_xml_string(ActivationLimitUsages, xml_string)
class ActivationLimitSession(ActivationLimitSessionType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:ActivationLimitSession element """
c_tag = 'ActivationLimitSession'
c_namespace = NAMESPACE
c_children = ActivationLimitSessionType_.c_children.copy()
c_attributes = ActivationLimitSessionType_.c_attributes.copy()
c_child_order = ActivationLimitSessionType_.c_child_order[:]
c_cardinality = ActivationLimitSessionType_.c_cardinality.copy()
def activation_limit_session_from_string(xml_string):
return saml2.create_class_from_xml_string(ActivationLimitSession,
xml_string)
class Extension(ExtensionType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:Extension element """
c_tag = 'Extension'
c_namespace = NAMESPACE
c_children = ExtensionType_.c_children.copy()
c_attributes = ExtensionType_.c_attributes.copy()
c_child_order = ExtensionType_.c_child_order[:]
c_cardinality = ExtensionType_.c_cardinality.copy()
def extension_from_string(xml_string):
return saml2.create_class_from_xml_string(Extension, xml_string)
class TokenType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:TokenType element """
c_tag = 'TokenType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}TimeSyncToken'] = (
'time_sync_token', TimeSyncToken)
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_child_order.extend(['time_sync_token', 'extension'])
def __init__(self,
time_sync_token=None,
extension=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.time_sync_token = time_sync_token
self.extension = extension or []
def token_type__from_string(xml_string):
return saml2.create_class_from_xml_string(TokenType_, xml_string)
class Token(TokenType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:Token element """
c_tag = 'Token'
c_namespace = NAMESPACE
c_children = TokenType_.c_children.copy()
c_attributes = TokenType_.c_attributes.copy()
c_child_order = TokenType_.c_child_order[:]
c_cardinality = TokenType_.c_cardinality.copy()
def token_from_string(xml_string):
return saml2.create_class_from_xml_string(Token, xml_string)
class SharedSecretChallengeResponseType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:SharedSecretChallengeResponseType element """
c_tag = 'SharedSecretChallengeResponseType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_attributes['method'] = ('method', 'anyURI', False)
c_child_order.extend(['extension'])
def __init__(self,
extension=None,
method=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.extension = extension or []
self.method = method
def shared_secret_challenge_response_type__from_string(xml_string):
return saml2.create_class_from_xml_string(
SharedSecretChallengeResponseType_, xml_string)
class PublicKeyType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:PublicKeyType element """
c_tag = 'PublicKeyType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_attributes['keyValidation'] = ('key_validation', 'None', False)
c_child_order.extend(['extension'])
def __init__(self,
extension=None,
key_validation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.extension = extension or []
self.key_validation = key_validation
def public_key_type__from_string(xml_string):
return saml2.create_class_from_xml_string(PublicKeyType_, xml_string)
class GoverningAgreements(GoverningAgreementsType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:GoverningAgreements element """
c_tag = 'GoverningAgreements'
c_namespace = NAMESPACE
c_children = GoverningAgreementsType_.c_children.copy()
c_attributes = GoverningAgreementsType_.c_attributes.copy()
c_child_order = GoverningAgreementsType_.c_child_order[:]
c_cardinality = GoverningAgreementsType_.c_cardinality.copy()
def governing_agreements_from_string(xml_string):
return saml2.create_class_from_xml_string(GoverningAgreements, xml_string)
class PasswordType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:PasswordType element """
c_tag = 'PasswordType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Length'] = (
'length', Length)
c_cardinality['length'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Alphabet'] = (
'alphabet', Alphabet)
c_cardinality['alphabet'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Generation'] = (
'generation', Generation)
c_cardinality['generation'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_attributes['ExternalVerification'] = (
'external_verification', 'anyURI', False)
c_child_order.extend(['length', 'alphabet', 'generation', 'extension'])
def __init__(self,
length=None,
alphabet=None,
generation=None,
extension=None,
external_verification=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.length = length
self.alphabet = alphabet
self.generation = generation
self.extension = extension or []
self.external_verification = external_verification
def password_type__from_string(xml_string):
return saml2.create_class_from_xml_string(PasswordType_, xml_string)
class RestrictedPasswordType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:RestrictedPasswordType element """
c_tag = 'RestrictedPasswordType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Length'] = (
'length', RestrictedPasswordType_Length)
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Generation'] = (
'generation', Generation)
c_cardinality['generation'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_attributes['ExternalVerification'] = (
'external_verification', 'anyURI', False)
c_child_order.extend(['length', 'generation', 'extension'])
def __init__(self,
length=None,
generation=None,
extension=None,
external_verification=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.length = length
self.generation = generation
self.extension = extension or []
self.external_verification = external_verification
def restricted_password_type__from_string(xml_string):
return saml2.create_class_from_xml_string(RestrictedPasswordType_,
xml_string)
class ActivationLimitType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:ActivationLimitType element """
c_tag = 'ActivationLimitType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}ActivationLimitDuration'] = (
'activation_limit_duration', ActivationLimitDuration)
c_cardinality['activation_limit_duration'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}ActivationLimitUsages'] = (
'activation_limit_usages', ActivationLimitUsages)
c_cardinality['activation_limit_usages'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}ActivationLimitSession'] = (
'activation_limit_session', ActivationLimitSession)
c_cardinality['activation_limit_session'] = {"min": 0, "max": 1}
c_child_order.extend(
['activation_limit_duration', 'activation_limit_usages',
'activation_limit_session'])
def __init__(self,
activation_limit_duration=None,
activation_limit_usages=None,
activation_limit_session=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.activation_limit_duration = activation_limit_duration
self.activation_limit_usages = activation_limit_usages
self.activation_limit_session = activation_limit_session
def activation_limit_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ActivationLimitType_, xml_string)
class ExtensionOnlyType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:ExtensionOnlyType element """
c_tag = 'ExtensionOnlyType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_child_order.extend(['extension'])
def __init__(self,
extension=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.extension = extension or []
def extension_only_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ExtensionOnlyType_, xml_string)
class PrincipalAuthenticationMechanismType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:PrincipalAuthenticationMechanismType element """
c_tag = 'PrincipalAuthenticationMechanismType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Token'] = (
'token', Token)
c_child_order.extend(['token'])
def __init__(self,
token=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.token = token
def principal_authentication_mechanism_type__from_string(xml_string):
return saml2.create_class_from_xml_string(
PrincipalAuthenticationMechanismType_, xml_string)
class WrittenConsent(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:WrittenConsent element """
c_tag = 'WrittenConsent'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def written_consent_from_string(xml_string):
return saml2.create_class_from_xml_string(WrittenConsent, xml_string)
class SubscriberLineNumber(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:SubscriberLineNumber element """
c_tag = 'SubscriberLineNumber'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def subscriber_line_number_from_string(xml_string):
return saml2.create_class_from_xml_string(SubscriberLineNumber, xml_string)
class UserSuffix(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:UserSuffix element """
c_tag = 'UserSuffix'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def user_suffix_from_string(xml_string):
return saml2.create_class_from_xml_string(UserSuffix, xml_string)
class Password(PasswordType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:Password element """
c_tag = 'Password'
c_namespace = NAMESPACE
c_children = PasswordType_.c_children.copy()
c_attributes = PasswordType_.c_attributes.copy()
c_child_order = PasswordType_.c_child_order[:]
c_cardinality = PasswordType_.c_cardinality.copy()
def password_from_string(xml_string):
return saml2.create_class_from_xml_string(Password, xml_string)
class Smartcard(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:Smartcard element """
c_tag = 'Smartcard'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def smartcard_from_string(xml_string):
return saml2.create_class_from_xml_string(Smartcard, xml_string)
class ActivationLimit(ActivationLimitType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:ActivationLimit element """
c_tag = 'ActivationLimit'
c_namespace = NAMESPACE
c_children = ActivationLimitType_.c_children.copy()
c_attributes = ActivationLimitType_.c_attributes.copy()
c_child_order = ActivationLimitType_.c_child_order[:]
c_cardinality = ActivationLimitType_.c_cardinality.copy()
def activation_limit_from_string(xml_string):
return saml2.create_class_from_xml_string(ActivationLimit, xml_string)
class PrincipalAuthenticationMechanism(PrincipalAuthenticationMechanismType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:PrincipalAuthenticationMechanism element """
c_tag = 'PrincipalAuthenticationMechanism'
c_namespace = NAMESPACE
c_children = PrincipalAuthenticationMechanismType_.c_children.copy()
c_attributes = PrincipalAuthenticationMechanismType_.c_attributes.copy()
c_child_order = PrincipalAuthenticationMechanismType_.c_child_order[:]
c_cardinality = PrincipalAuthenticationMechanismType_.c_cardinality.copy()
def principal_authentication_mechanism_from_string(xml_string):
return saml2.create_class_from_xml_string(PrincipalAuthenticationMechanism,
xml_string)
class PreviousSession(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:PreviousSession element """
c_tag = 'PreviousSession'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def previous_session_from_string(xml_string):
return saml2.create_class_from_xml_string(PreviousSession, xml_string)
class ResumeSession(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:ResumeSession element """
c_tag = 'ResumeSession'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def resume_session_from_string(xml_string):
return saml2.create_class_from_xml_string(ResumeSession, xml_string)
class ZeroKnowledge(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:ZeroKnowledge element """
c_tag = 'ZeroKnowledge'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def zero_knowledge_from_string(xml_string):
return saml2.create_class_from_xml_string(ZeroKnowledge, xml_string)
class SharedSecretChallengeResponse(SharedSecretChallengeResponseType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:SharedSecretChallengeResponse element """
c_tag = 'SharedSecretChallengeResponse'
c_namespace = NAMESPACE
c_children = SharedSecretChallengeResponseType_.c_children.copy()
c_attributes = SharedSecretChallengeResponseType_.c_attributes.copy()
c_child_order = SharedSecretChallengeResponseType_.c_child_order[:]
c_cardinality = SharedSecretChallengeResponseType_.c_cardinality.copy()
def shared_secret_challenge_response_from_string(xml_string):
return saml2.create_class_from_xml_string(SharedSecretChallengeResponse,
xml_string)
class DigSig(PublicKeyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:DigSig element """
c_tag = 'DigSig'
c_namespace = NAMESPACE
c_children = PublicKeyType_.c_children.copy()
c_attributes = PublicKeyType_.c_attributes.copy()
c_child_order = PublicKeyType_.c_child_order[:]
c_cardinality = PublicKeyType_.c_cardinality.copy()
def dig_sig_from_string(xml_string):
return saml2.create_class_from_xml_string(DigSig, xml_string)
class AsymmetricDecryption(PublicKeyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:AsymmetricDecryption element """
c_tag = 'AsymmetricDecryption'
c_namespace = NAMESPACE
c_children = PublicKeyType_.c_children.copy()
c_attributes = PublicKeyType_.c_attributes.copy()
c_child_order = PublicKeyType_.c_child_order[:]
c_cardinality = PublicKeyType_.c_cardinality.copy()
def asymmetric_decryption_from_string(xml_string):
return saml2.create_class_from_xml_string(AsymmetricDecryption, xml_string)
class AsymmetricKeyAgreement(PublicKeyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:AsymmetricKeyAgreement element """
c_tag = 'AsymmetricKeyAgreement'
c_namespace = NAMESPACE
c_children = PublicKeyType_.c_children.copy()
c_attributes = PublicKeyType_.c_attributes.copy()
c_child_order = PublicKeyType_.c_child_order[:]
c_cardinality = PublicKeyType_.c_cardinality.copy()
def asymmetric_key_agreement_from_string(xml_string):
return saml2.create_class_from_xml_string(AsymmetricKeyAgreement,
xml_string)
class IPAddress(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:IPAddress element """
c_tag = 'IPAddress'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def ip_address_from_string(xml_string):
return saml2.create_class_from_xml_string(IPAddress, xml_string)
class SharedSecretDynamicPlaintext(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:SharedSecretDynamicPlaintext element """
c_tag = 'SharedSecretDynamicPlaintext'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def shared_secret_dynamic_plaintext_from_string(xml_string):
return saml2.create_class_from_xml_string(SharedSecretDynamicPlaintext,
xml_string)
class HTTP(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:HTTP element """
c_tag = 'HTTP'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def http_from_string(xml_string):
return saml2.create_class_from_xml_string(HTTP, xml_string)
class IPSec(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:IPSec element """
c_tag = 'IPSec'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def ip_sec_from_string(xml_string):
return saml2.create_class_from_xml_string(IPSec, xml_string)
class WTLS(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:WTLS element """
c_tag = 'WTLS'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def wtls_from_string(xml_string):
return saml2.create_class_from_xml_string(WTLS, xml_string)
class MobileNetworkNoEncryption(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:MobileNetworkNoEncryption element """
c_tag = 'MobileNetworkNoEncryption'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def mobile_network_no_encryption_from_string(xml_string):
return saml2.create_class_from_xml_string(MobileNetworkNoEncryption,
xml_string)
class MobileNetworkRadioEncryption(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:MobileNetworkRadioEncryption element """
c_tag = 'MobileNetworkRadioEncryption'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def mobile_network_radio_encryption_from_string(xml_string):
return saml2.create_class_from_xml_string(MobileNetworkRadioEncryption,
xml_string)
class MobileNetworkEndToEndEncryption(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:MobileNetworkEndToEndEncryption element """
c_tag = 'MobileNetworkEndToEndEncryption'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def mobile_network_end_to_end_encryption_from_string(xml_string):
return saml2.create_class_from_xml_string(MobileNetworkEndToEndEncryption,
xml_string)
class SSL(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:SSL element """
c_tag = 'SSL'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def ssl_from_string(xml_string):
return saml2.create_class_from_xml_string(SSL, xml_string)
class PSTN(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:PSTN element """
c_tag = 'PSTN'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def pstn_from_string(xml_string):
return saml2.create_class_from_xml_string(PSTN, xml_string)
class ISDN(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:ISDN element """
c_tag = 'ISDN'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def isdn_from_string(xml_string):
return saml2.create_class_from_xml_string(ISDN, xml_string)
class ADSL(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:ADSL element """
c_tag = 'ADSL'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def adsl_from_string(xml_string):
return saml2.create_class_from_xml_string(ADSL, xml_string)
class SwitchAudit(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:SwitchAudit element """
c_tag = 'SwitchAudit'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def switch_audit_from_string(xml_string):
return saml2.create_class_from_xml_string(SwitchAudit, xml_string)
class DeactivationCallCenter(ExtensionOnlyType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:DeactivationCallCenter element """
c_tag = 'DeactivationCallCenter'
c_namespace = NAMESPACE
c_children = ExtensionOnlyType_.c_children.copy()
c_attributes = ExtensionOnlyType_.c_attributes.copy()
c_child_order = ExtensionOnlyType_.c_child_order[:]
c_cardinality = ExtensionOnlyType_.c_cardinality.copy()
def deactivation_call_center_from_string(xml_string):
return saml2.create_class_from_xml_string(DeactivationCallCenter,
xml_string)
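# Conventions shared by the generated classes below: c_children maps a
# Clark-notation child tag ('{namespace}LocalName') to an (attribute name,
# class) pair, c_cardinality records the allowed min/max occurrences of that
# child, c_attributes maps an XML attribute to an (attribute name, type,
# required) triple, and c_child_order fixes the order in which children are
# serialised.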
class IdentificationType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:IdentificationType element """
c_tag = 'IdentificationType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}PhysicalVerification'] = (
'physical_verification', PhysicalVerification)
c_cardinality['physical_verification'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}WrittenConsent'] = (
'written_consent', WrittenConsent)
c_cardinality['written_consent'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}GoverningAgreements'] = (
'governing_agreements', GoverningAgreements)
c_cardinality['governing_agreements'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_attributes['nym'] = ('nym', NymType_, False)
c_child_order.extend(
['physical_verification', 'written_consent', 'governing_agreements',
'extension'])
def __init__(self,
physical_verification=None,
written_consent=None,
governing_agreements=None,
extension=None,
nym=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.physical_verification = physical_verification
self.written_consent = written_consent
self.governing_agreements = governing_agreements
self.extension = extension or []
self.nym = nym
def identification_type__from_string(xml_string):
return saml2.create_class_from_xml_string(IdentificationType_, xml_string)
class AuthenticatorTransportProtocolType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:AuthenticatorTransportProtocolType element """
c_tag = 'AuthenticatorTransportProtocolType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}HTTP'] = (
'http', HTTP)
c_cardinality['http'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}SSL'] = (
'ssl', SSL)
c_cardinality['ssl'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}MobileNetworkNoEncryption'] = (
'mobile_network_no_encryption', MobileNetworkNoEncryption)
c_cardinality['mobile_network_no_encryption'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}MobileNetworkRadioEncryption'] = (
'mobile_network_radio_encryption', MobileNetworkRadioEncryption)
c_cardinality['mobile_network_radio_encryption'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}MobileNetworkEndToEndEncryption'] = (
'mobile_network_end_to_end_encryption', MobileNetworkEndToEndEncryption)
c_cardinality['mobile_network_end_to_end_encryption'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}WTLS'] = (
'wtls', WTLS)
c_cardinality['wtls'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}IPSec'] = (
'ip_sec', IPSec)
c_cardinality['ip_sec'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}PSTN'] = (
'pstn', PSTN)
c_cardinality['pstn'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}ISDN'] = (
'isdn', ISDN)
c_cardinality['isdn'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}ADSL'] = (
'adsl', ADSL)
c_cardinality['adsl'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_child_order.extend(['http', 'ssl', 'mobile_network_no_encryption',
'mobile_network_radio_encryption',
'mobile_network_end_to_end_encryption', 'wtls',
'ip_sec', 'pstn', 'isdn', 'adsl', 'extension'])
def __init__(self,
http=None,
ssl=None,
mobile_network_no_encryption=None,
mobile_network_radio_encryption=None,
mobile_network_end_to_end_encryption=None,
wtls=None,
ip_sec=None,
pstn=None,
isdn=None,
adsl=None,
extension=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.http = http
self.ssl = ssl
self.mobile_network_no_encryption = mobile_network_no_encryption
self.mobile_network_radio_encryption = mobile_network_radio_encryption
self.mobile_network_end_to_end_encryption = mobile_network_end_to_end_encryption
self.wtls = wtls
self.ip_sec = ip_sec
self.pstn = pstn
self.isdn = isdn
self.adsl = adsl
self.extension = extension or []
def authenticator_transport_protocol_type__from_string(xml_string):
return saml2.create_class_from_xml_string(
AuthenticatorTransportProtocolType_, xml_string)
class RestrictedPassword(RestrictedPasswordType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:RestrictedPassword element """
c_tag = 'RestrictedPassword'
c_namespace = NAMESPACE
c_children = RestrictedPasswordType_.c_children.copy()
c_attributes = RestrictedPasswordType_.c_attributes.copy()
c_child_order = RestrictedPasswordType_.c_child_order[:]
c_cardinality = RestrictedPasswordType_.c_cardinality.copy()
def restricted_password_from_string(xml_string):
return saml2.create_class_from_xml_string(RestrictedPassword, xml_string)
class ActivationPinType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:ActivationPinType element """
c_tag = 'ActivationPinType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Length'] = (
'length', Length)
c_cardinality['length'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Alphabet'] = (
'alphabet', Alphabet)
c_cardinality['alphabet'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Generation'] = (
'generation', Generation)
c_cardinality['generation'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}ActivationLimit'] = (
'activation_limit', ActivationLimit)
c_cardinality['activation_limit'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_child_order.extend(
['length', 'alphabet', 'generation', 'activation_limit', 'extension'])
def __init__(self,
length=None,
alphabet=None,
generation=None,
activation_limit=None,
extension=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.length = length
self.alphabet = alphabet
self.generation = generation
self.activation_limit = activation_limit
self.extension = extension or []
def activation_pin_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ActivationPinType_, xml_string)
class SecurityAuditType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:SecurityAuditType element """
c_tag = 'SecurityAuditType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}SwitchAudit'] = (
'switch_audit', SwitchAudit)
c_cardinality['switch_audit'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_child_order.extend(['switch_audit', 'extension'])
def __init__(self,
switch_audit=None,
extension=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.switch_audit = switch_audit
self.extension = extension or []
def security_audit_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SecurityAuditType_, xml_string)
class Identification(IdentificationType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:Identification element """
c_tag = 'Identification'
c_namespace = NAMESPACE
c_children = IdentificationType_.c_children.copy()
c_attributes = IdentificationType_.c_attributes.copy()
c_child_order = IdentificationType_.c_child_order[:]
c_cardinality = IdentificationType_.c_cardinality.copy()
def identification_from_string(xml_string):
return saml2.create_class_from_xml_string(Identification, xml_string)
class ActivationPin(ActivationPinType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:ActivationPin element """
c_tag = 'ActivationPin'
c_namespace = NAMESPACE
c_children = ActivationPinType_.c_children.copy()
c_attributes = ActivationPinType_.c_attributes.copy()
c_child_order = ActivationPinType_.c_child_order[:]
c_cardinality = ActivationPinType_.c_cardinality.copy()
def activation_pin_from_string(xml_string):
return saml2.create_class_from_xml_string(ActivationPin, xml_string)
class AuthenticatorTransportProtocol(AuthenticatorTransportProtocolType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:AuthenticatorTransportProtocol element """
c_tag = 'AuthenticatorTransportProtocol'
c_namespace = NAMESPACE
c_children = AuthenticatorTransportProtocolType_.c_children.copy()
c_attributes = AuthenticatorTransportProtocolType_.c_attributes.copy()
c_child_order = AuthenticatorTransportProtocolType_.c_child_order[:]
c_cardinality = AuthenticatorTransportProtocolType_.c_cardinality.copy()
def authenticator_transport_protocol_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthenticatorTransportProtocol,
xml_string)
class SecurityAudit(SecurityAuditType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:SecurityAudit element """
c_tag = 'SecurityAudit'
c_namespace = NAMESPACE
c_children = SecurityAuditType_.c_children.copy()
c_attributes = SecurityAuditType_.c_attributes.copy()
c_child_order = SecurityAuditType_.c_child_order[:]
c_cardinality = SecurityAuditType_.c_cardinality.copy()
def security_audit_from_string(xml_string):
return saml2.create_class_from_xml_string(SecurityAudit, xml_string)
class OperationalProtectionType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:OperationalProtectionType element """
c_tag = 'OperationalProtectionType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}SecurityAudit'] = (
'security_audit', SecurityAudit)
c_cardinality['security_audit'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}DeactivationCallCenter'] = (
'deactivation_call_center', DeactivationCallCenter)
c_cardinality['deactivation_call_center'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_child_order.extend(
['security_audit', 'deactivation_call_center', 'extension'])
def __init__(self,
security_audit=None,
deactivation_call_center=None,
extension=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.security_audit = security_audit
self.deactivation_call_center = deactivation_call_center
self.extension = extension or []
def operational_protection_type__from_string(xml_string):
return saml2.create_class_from_xml_string(OperationalProtectionType_,
xml_string)
class KeyActivationType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:KeyActivationType element """
c_tag = 'KeyActivationType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}ActivationPin'] = (
'activation_pin', ActivationPin)
c_cardinality['activation_pin'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_child_order.extend(['activation_pin', 'extension'])
def __init__(self,
activation_pin=None,
extension=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.activation_pin = activation_pin
self.extension = extension or []
def key_activation_type__from_string(xml_string):
return saml2.create_class_from_xml_string(KeyActivationType_, xml_string)
class KeyActivation(KeyActivationType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:KeyActivation element """
c_tag = 'KeyActivation'
c_namespace = NAMESPACE
c_children = KeyActivationType_.c_children.copy()
c_attributes = KeyActivationType_.c_attributes.copy()
c_child_order = KeyActivationType_.c_child_order[:]
c_cardinality = KeyActivationType_.c_cardinality.copy()
def key_activation_from_string(xml_string):
return saml2.create_class_from_xml_string(KeyActivation, xml_string)
class OperationalProtection(OperationalProtectionType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:OperationalProtection element """
c_tag = 'OperationalProtection'
c_namespace = NAMESPACE
c_children = OperationalProtectionType_.c_children.copy()
c_attributes = OperationalProtectionType_.c_attributes.copy()
c_child_order = OperationalProtectionType_.c_child_order[:]
c_cardinality = OperationalProtectionType_.c_cardinality.copy()
def operational_protection_from_string(xml_string):
return saml2.create_class_from_xml_string(OperationalProtection, xml_string)
class PrivateKeyProtectionType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:PrivateKeyProtectionType element """
c_tag = 'PrivateKeyProtectionType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}KeyActivation'] = (
'key_activation', KeyActivation)
c_cardinality['key_activation'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}KeyStorage'] = (
'key_storage', KeyStorage)
c_cardinality['key_storage'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}KeySharing'] = (
'key_sharing', KeySharing)
c_cardinality['key_sharing'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_child_order.extend(
['key_activation', 'key_storage', 'key_sharing', 'extension'])
def __init__(self,
key_activation=None,
key_storage=None,
key_sharing=None,
extension=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.key_activation = key_activation
self.key_storage = key_storage
self.key_sharing = key_sharing
self.extension = extension or []
def private_key_protection_type__from_string(xml_string):
return saml2.create_class_from_xml_string(PrivateKeyProtectionType_,
xml_string)
class SecretKeyProtectionType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:SecretKeyProtectionType element """
c_tag = 'SecretKeyProtectionType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}KeyActivation'] = (
'key_activation', KeyActivation)
c_cardinality['key_activation'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}KeyStorage'] = (
'key_storage', KeyStorage)
c_cardinality['key_storage'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_child_order.extend(['key_activation', 'key_storage', 'extension'])
def __init__(self,
key_activation=None,
key_storage=None,
extension=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.key_activation = key_activation
self.key_storage = key_storage
self.extension = extension or []
def secret_key_protection_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SecretKeyProtectionType_,
xml_string)
class SecretKeyProtection(SecretKeyProtectionType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:SecretKeyProtection element """
c_tag = 'SecretKeyProtection'
c_namespace = NAMESPACE
c_children = SecretKeyProtectionType_.c_children.copy()
c_attributes = SecretKeyProtectionType_.c_attributes.copy()
c_child_order = SecretKeyProtectionType_.c_child_order[:]
c_cardinality = SecretKeyProtectionType_.c_cardinality.copy()
def secret_key_protection_from_string(xml_string):
return saml2.create_class_from_xml_string(SecretKeyProtection, xml_string)
class PrivateKeyProtection(PrivateKeyProtectionType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:PrivateKeyProtection element """
c_tag = 'PrivateKeyProtection'
c_namespace = NAMESPACE
c_children = PrivateKeyProtectionType_.c_children.copy()
c_attributes = PrivateKeyProtectionType_.c_attributes.copy()
c_child_order = PrivateKeyProtectionType_.c_child_order[:]
c_cardinality = PrivateKeyProtectionType_.c_cardinality.copy()
def private_key_protection_from_string(xml_string):
return saml2.create_class_from_xml_string(PrivateKeyProtection, xml_string)
class TechnicalProtectionBaseType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:TechnicalProtectionBaseType element """
c_tag = 'TechnicalProtectionBaseType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}PrivateKeyProtection'] = (
'private_key_protection', PrivateKeyProtection)
c_cardinality['private_key_protection'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}SecretKeyProtection'] = (
'secret_key_protection', SecretKeyProtection)
c_cardinality['secret_key_protection'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_child_order.extend(
['private_key_protection', 'secret_key_protection', 'extension'])
def __init__(self,
private_key_protection=None,
secret_key_protection=None,
extension=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.private_key_protection = private_key_protection
self.secret_key_protection = secret_key_protection
self.extension = extension or []
def technical_protection_base_type__from_string(xml_string):
return saml2.create_class_from_xml_string(TechnicalProtectionBaseType_,
xml_string)
class TechnicalProtection(TechnicalProtectionBaseType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:TechnicalProtection element """
c_tag = 'TechnicalProtection'
c_namespace = NAMESPACE
c_children = TechnicalProtectionBaseType_.c_children.copy()
c_attributes = TechnicalProtectionBaseType_.c_attributes.copy()
c_child_order = TechnicalProtectionBaseType_.c_child_order[:]
c_cardinality = TechnicalProtectionBaseType_.c_cardinality.copy()
def technical_protection_from_string(xml_string):
return saml2.create_class_from_xml_string(TechnicalProtection, xml_string)
#..................
# ['ComplexAuthenticator', 'Authenticator', 'AuthnMethod', 'ComplexAuthenticatorType', 'AuthenticatorBaseType', 'AuthnContextDeclarationBaseType', 'AuthnMethodBaseType', 'AuthenticationContextDeclaration']
class ComplexAuthenticatorType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:ComplexAuthenticatorType element """
c_tag = 'ComplexAuthenticatorType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}PreviousSession'] = (
'previous_session', PreviousSession)
c_cardinality['previous_session'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}ResumeSession'] = (
'resume_session', ResumeSession)
c_cardinality['resume_session'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}DigSig'] = (
'dig_sig', DigSig)
c_cardinality['dig_sig'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Password'] = (
'password', Password)
c_cardinality['password'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}RestrictedPassword'] = (
'restricted_password', RestrictedPassword)
c_cardinality['restricted_password'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}ZeroKnowledge'] = (
'zero_knowledge', ZeroKnowledge)
c_cardinality['zero_knowledge'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}SharedSecretChallengeResponse'] = (
'shared_secret_challenge_response', SharedSecretChallengeResponse)
c_cardinality['shared_secret_challenge_response'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}SharedSecretDynamicPlaintext'] = (
'shared_secret_dynamic_plaintext', SharedSecretDynamicPlaintext)
c_cardinality['shared_secret_dynamic_plaintext'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}IPAddress'] = (
'ip_address', IPAddress)
c_cardinality['ip_address'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}AsymmetricDecryption'] = (
'asymmetric_decryption', AsymmetricDecryption)
c_cardinality['asymmetric_decryption'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}AsymmetricKeyAgreement'] = (
'asymmetric_key_agreement', AsymmetricKeyAgreement)
c_cardinality['asymmetric_key_agreement'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}SubscriberLineNumber'] = (
'subscriber_line_number', SubscriberLineNumber)
c_cardinality['subscriber_line_number'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}UserSuffix'] = (
'user_suffix', UserSuffix)
c_cardinality['user_suffix'] = {"min": 0, "max": 1}
c_cardinality['complex_authenticator'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_child_order.extend(
['previous_session', 'resume_session', 'dig_sig', 'password',
'restricted_password', 'zero_knowledge',
'shared_secret_challenge_response', 'shared_secret_dynamic_plaintext',
'ip_address', 'asymmetric_decryption', 'asymmetric_key_agreement',
'subscriber_line_number', 'user_suffix', 'complex_authenticator',
'extension'])
def __init__(self,
previous_session=None,
resume_session=None,
dig_sig=None,
password=None,
restricted_password=None,
zero_knowledge=None,
shared_secret_challenge_response=None,
shared_secret_dynamic_plaintext=None,
ip_address=None,
asymmetric_decryption=None,
asymmetric_key_agreement=None,
subscriber_line_number=None,
user_suffix=None,
complex_authenticator=None,
extension=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.previous_session = previous_session
self.resume_session = resume_session
self.dig_sig = dig_sig
self.password = password
self.restricted_password = restricted_password
self.zero_knowledge = zero_knowledge
self.shared_secret_challenge_response = shared_secret_challenge_response
self.shared_secret_dynamic_plaintext = shared_secret_dynamic_plaintext
self.ip_address = ip_address
self.asymmetric_decryption = asymmetric_decryption
self.asymmetric_key_agreement = asymmetric_key_agreement
self.subscriber_line_number = subscriber_line_number
self.user_suffix = user_suffix
self.complex_authenticator = complex_authenticator
self.extension = extension or []
def complex_authenticator_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ComplexAuthenticatorType_,
xml_string)
class ComplexAuthenticator(ComplexAuthenticatorType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:ComplexAuthenticator element """
c_tag = 'ComplexAuthenticator'
c_namespace = NAMESPACE
c_children = ComplexAuthenticatorType_.c_children.copy()
c_attributes = ComplexAuthenticatorType_.c_attributes.copy()
c_child_order = ComplexAuthenticatorType_.c_child_order[:]
c_cardinality = ComplexAuthenticatorType_.c_cardinality.copy()
def complex_authenticator_from_string(xml_string):
return saml2.create_class_from_xml_string(ComplexAuthenticator, xml_string)
class AuthenticatorBaseType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:AuthenticatorBaseType element """
c_tag = 'AuthenticatorBaseType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}PreviousSession'] = (
'previous_session', PreviousSession)
c_cardinality['previous_session'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}ResumeSession'] = (
'resume_session', ResumeSession)
c_cardinality['resume_session'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}DigSig'] = (
'dig_sig', DigSig)
c_cardinality['dig_sig'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Password'] = (
'password', Password)
c_cardinality['password'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}RestrictedPassword'] = (
'restricted_password', RestrictedPassword)
c_cardinality['restricted_password'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}ZeroKnowledge'] = (
'zero_knowledge', ZeroKnowledge)
c_cardinality['zero_knowledge'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}SharedSecretChallengeResponse'] = (
'shared_secret_challenge_response', SharedSecretChallengeResponse)
c_cardinality['shared_secret_challenge_response'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}SharedSecretDynamicPlaintext'] = (
'shared_secret_dynamic_plaintext', SharedSecretDynamicPlaintext)
c_cardinality['shared_secret_dynamic_plaintext'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}IPAddress'] = (
'ip_address', IPAddress)
c_cardinality['ip_address'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}AsymmetricDecryption'] = (
'asymmetric_decryption', AsymmetricDecryption)
c_cardinality['asymmetric_decryption'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}AsymmetricKeyAgreement'] = (
'asymmetric_key_agreement', AsymmetricKeyAgreement)
c_cardinality['asymmetric_key_agreement'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}SubscriberLineNumber'] = (
'subscriber_line_number', SubscriberLineNumber)
c_cardinality['subscriber_line_number'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}UserSuffix'] = (
'user_suffix', UserSuffix)
c_cardinality['user_suffix'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}ComplexAuthenticator'] = (
'complex_authenticator', ComplexAuthenticator)
c_cardinality['complex_authenticator'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_child_order.extend(
['previous_session', 'resume_session', 'dig_sig', 'password',
'restricted_password', 'zero_knowledge',
'shared_secret_challenge_response', 'shared_secret_dynamic_plaintext',
'ip_address', 'asymmetric_decryption', 'asymmetric_key_agreement',
'subscriber_line_number', 'user_suffix', 'complex_authenticator',
'extension'])
def __init__(self,
previous_session=None,
resume_session=None,
dig_sig=None,
password=None,
restricted_password=None,
zero_knowledge=None,
shared_secret_challenge_response=None,
shared_secret_dynamic_plaintext=None,
ip_address=None,
asymmetric_decryption=None,
asymmetric_key_agreement=None,
subscriber_line_number=None,
user_suffix=None,
complex_authenticator=None,
extension=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.previous_session = previous_session
self.resume_session = resume_session
self.dig_sig = dig_sig
self.password = password
self.restricted_password = restricted_password
self.zero_knowledge = zero_knowledge
self.shared_secret_challenge_response = shared_secret_challenge_response
self.shared_secret_dynamic_plaintext = shared_secret_dynamic_plaintext
self.ip_address = ip_address
self.asymmetric_decryption = asymmetric_decryption
self.asymmetric_key_agreement = asymmetric_key_agreement
self.subscriber_line_number = subscriber_line_number
self.user_suffix = user_suffix
self.complex_authenticator = complex_authenticator
self.extension = extension or []
def authenticator_base_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AuthenticatorBaseType_,
xml_string)
class Authenticator(AuthenticatorBaseType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:Authenticator element """
c_tag = 'Authenticator'
c_namespace = NAMESPACE
c_children = AuthenticatorBaseType_.c_children.copy()
c_attributes = AuthenticatorBaseType_.c_attributes.copy()
c_child_order = AuthenticatorBaseType_.c_child_order[:]
c_cardinality = AuthenticatorBaseType_.c_cardinality.copy()
def authenticator_from_string(xml_string):
return saml2.create_class_from_xml_string(Authenticator, xml_string)
class AuthnMethodBaseType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:AuthnMethodBaseType element """
c_tag = 'AuthnMethodBaseType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}PrincipalAuthenticationMechanism'] = (
'principal_authentication_mechanism', PrincipalAuthenticationMechanism)
c_cardinality['principal_authentication_mechanism'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Authenticator'] = (
'authenticator', Authenticator)
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}AuthenticatorTransportProtocol'] = (
'authenticator_transport_protocol', AuthenticatorTransportProtocol)
c_cardinality['authenticator_transport_protocol'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_child_order.extend(['principal_authentication_mechanism', 'authenticator',
'authenticator_transport_protocol', 'extension'])
def __init__(self,
principal_authentication_mechanism=None,
authenticator=None,
authenticator_transport_protocol=None,
extension=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.principal_authentication_mechanism = principal_authentication_mechanism
self.authenticator = authenticator
self.authenticator_transport_protocol = authenticator_transport_protocol
self.extension = extension or []
def authn_method_base_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnMethodBaseType_, xml_string)
class AuthnMethod(AuthnMethodBaseType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:AuthnMethod element """
c_tag = 'AuthnMethod'
c_namespace = NAMESPACE
c_children = AuthnMethodBaseType_.c_children.copy()
c_attributes = AuthnMethodBaseType_.c_attributes.copy()
c_child_order = AuthnMethodBaseType_.c_child_order[:]
c_cardinality = AuthnMethodBaseType_.c_cardinality.copy()
def authn_method_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnMethod, xml_string)
class AuthnContextDeclarationBaseType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:AuthnContextDeclarationBaseType element """
c_tag = 'AuthnContextDeclarationBaseType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Identification'] = (
'identification', Identification)
c_cardinality['identification'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}TechnicalProtection'] = (
'technical_protection', TechnicalProtection)
c_cardinality['technical_protection'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}OperationalProtection'] = (
'operational_protection', OperationalProtection)
c_cardinality['operational_protection'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}AuthnMethod'] = (
'authn_method', AuthnMethod)
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}GoverningAgreements'] = (
'governing_agreements', GoverningAgreements)
c_cardinality['governing_agreements'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}Extension'] = (
'extension', [Extension])
c_cardinality['extension'] = {"min": 0}
c_attributes['ID'] = ('id', 'ID', False)
c_child_order.extend(
['identification', 'technical_protection', 'operational_protection',
'authn_method', 'governing_agreements', 'extension'])
def __init__(self,
identification=None,
technical_protection=None,
operational_protection=None,
authn_method=None,
governing_agreements=None,
extension=None,
id=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.identification = identification
self.technical_protection = technical_protection
self.operational_protection = operational_protection
self.authn_method = authn_method
self.governing_agreements = governing_agreements
self.extension = extension or []
self.id = id
def authn_context_declaration_base_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnContextDeclarationBaseType_,
xml_string)
class AuthenticationContextDeclaration(AuthnContextDeclarationBaseType_):
"""The urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken:AuthenticationContextDeclaration element """
c_tag = 'AuthenticationContextDeclaration'
c_namespace = NAMESPACE
c_children = AuthnContextDeclarationBaseType_.c_children.copy()
c_attributes = AuthnContextDeclarationBaseType_.c_attributes.copy()
c_child_order = AuthnContextDeclarationBaseType_.c_child_order[:]
c_cardinality = AuthnContextDeclarationBaseType_.c_cardinality.copy()
def authentication_context_declaration_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthenticationContextDeclaration,
xml_string)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
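# The ComplexAuthenticator element is recursive: a ComplexAuthenticator may
# itself contain a ComplexAuthenticator child. That child entry cannot be
# registered while ComplexAuthenticatorType_ is being defined (the subclass
# does not exist yet), so it is added here, once both classes are available.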
ComplexAuthenticatorType_.c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}ComplexAuthenticator'] = (
'complex_authenticator', ComplexAuthenticator)
ComplexAuthenticator.c_children[
'{urn:oasis:names:tc:SAML:2.0:ac:classes:TimeSyncToken}ComplexAuthenticator'] = (
'complex_authenticator', ComplexAuthenticator)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
ELEMENT_FROM_STRING = {
AuthenticationContextDeclaration.c_tag: authentication_context_declaration_from_string,
Identification.c_tag: identification_from_string,
PhysicalVerification.c_tag: physical_verification_from_string,
WrittenConsent.c_tag: written_consent_from_string,
TechnicalProtection.c_tag: technical_protection_from_string,
SecretKeyProtection.c_tag: secret_key_protection_from_string,
PrivateKeyProtection.c_tag: private_key_protection_from_string,
KeyActivation.c_tag: key_activation_from_string,
KeySharing.c_tag: key_sharing_from_string,
KeyStorage.c_tag: key_storage_from_string,
SubscriberLineNumber.c_tag: subscriber_line_number_from_string,
UserSuffix.c_tag: user_suffix_from_string,
Password.c_tag: password_from_string,
ActivationPin.c_tag: activation_pin_from_string,
Token.c_tag: token_from_string,
TimeSyncToken.c_tag: time_sync_token_from_string,
Smartcard.c_tag: smartcard_from_string,
Length.c_tag: length_from_string,
ActivationLimit.c_tag: activation_limit_from_string,
Generation.c_tag: generation_from_string,
AuthnMethod.c_tag: authn_method_from_string,
PrincipalAuthenticationMechanism.c_tag: principal_authentication_mechanism_from_string,
Authenticator.c_tag: authenticator_from_string,
ComplexAuthenticator.c_tag: complex_authenticator_from_string,
PreviousSession.c_tag: previous_session_from_string,
ResumeSession.c_tag: resume_session_from_string,
ZeroKnowledge.c_tag: zero_knowledge_from_string,
SharedSecretChallengeResponse.c_tag: shared_secret_challenge_response_from_string,
SharedSecretChallengeResponseType_.c_tag: shared_secret_challenge_response_type__from_string,
DigSig.c_tag: dig_sig_from_string,
AsymmetricDecryption.c_tag: asymmetric_decryption_from_string,
AsymmetricKeyAgreement.c_tag: asymmetric_key_agreement_from_string,
PublicKeyType_.c_tag: public_key_type__from_string,
IPAddress.c_tag: ip_address_from_string,
SharedSecretDynamicPlaintext.c_tag: shared_secret_dynamic_plaintext_from_string,
AuthenticatorTransportProtocol.c_tag: authenticator_transport_protocol_from_string,
HTTP.c_tag: http_from_string,
IPSec.c_tag: ip_sec_from_string,
WTLS.c_tag: wtls_from_string,
MobileNetworkNoEncryption.c_tag: mobile_network_no_encryption_from_string,
MobileNetworkRadioEncryption.c_tag: mobile_network_radio_encryption_from_string,
MobileNetworkEndToEndEncryption.c_tag: mobile_network_end_to_end_encryption_from_string,
SSL.c_tag: ssl_from_string,
PSTN.c_tag: pstn_from_string,
ISDN.c_tag: isdn_from_string,
ADSL.c_tag: adsl_from_string,
OperationalProtection.c_tag: operational_protection_from_string,
SecurityAudit.c_tag: security_audit_from_string,
SwitchAudit.c_tag: switch_audit_from_string,
DeactivationCallCenter.c_tag: deactivation_call_center_from_string,
GoverningAgreements.c_tag: governing_agreements_from_string,
GoverningAgreementRef.c_tag: governing_agreement_ref_from_string,
NymType_.c_tag: nym_type__from_string,
IdentificationType_.c_tag: identification_type__from_string,
TechnicalProtectionBaseType_.c_tag: technical_protection_base_type__from_string,
OperationalProtectionType_.c_tag: operational_protection_type__from_string,
GoverningAgreementsType_.c_tag: governing_agreements_type__from_string,
GoverningAgreementRefType_.c_tag: governing_agreement_ref_type__from_string,
AuthenticatorBaseType_.c_tag: authenticator_base_type__from_string,
ComplexAuthenticatorType_.c_tag: complex_authenticator_type__from_string,
AuthenticatorTransportProtocolType_.c_tag: authenticator_transport_protocol_type__from_string,
KeyActivationType_.c_tag: key_activation_type__from_string,
KeySharingType_.c_tag: key_sharing_type__from_string,
PrivateKeyProtectionType_.c_tag: private_key_protection_type__from_string,
PasswordType_.c_tag: password_type__from_string,
RestrictedPassword.c_tag: restricted_password_from_string,
RestrictedPasswordType_.c_tag: restricted_password_type__from_string,
RestrictedLengthType_.c_tag: restricted_length_type__from_string,
ActivationPinType_.c_tag: activation_pin_type__from_string,
Alphabet.c_tag: alphabet_from_string,
AlphabetType_.c_tag: alphabet_type__from_string,
DeviceTypeType_.c_tag: device_type_type__from_string,
BooleanType_.c_tag: boolean_type__from_string,
ActivationLimitType_.c_tag: activation_limit_type__from_string,
ActivationLimitDuration.c_tag: activation_limit_duration_from_string,
ActivationLimitUsages.c_tag: activation_limit_usages_from_string,
ActivationLimitSession.c_tag: activation_limit_session_from_string,
ActivationLimitDurationType_.c_tag: activation_limit_duration_type__from_string,
ActivationLimitUsagesType_.c_tag: activation_limit_usages_type__from_string,
ActivationLimitSessionType_.c_tag: activation_limit_session_type__from_string,
LengthType_.c_tag: length_type__from_string,
MediumType_.c_tag: medium_type__from_string,
KeyStorageType_.c_tag: key_storage_type__from_string,
SecretKeyProtectionType_.c_tag: secret_key_protection_type__from_string,
SecurityAuditType_.c_tag: security_audit_type__from_string,
ExtensionOnlyType_.c_tag: extension_only_type__from_string,
Extension.c_tag: extension_from_string,
ExtensionType_.c_tag: extension_type__from_string,
AuthnContextDeclarationBaseType_.c_tag: authn_context_declaration_base_type__from_string,
AuthnMethodBaseType_.c_tag: authn_method_base_type__from_string,
PrincipalAuthenticationMechanismType_.c_tag: principal_authentication_mechanism_type__from_string,
TokenType_.c_tag: token_type__from_string,
TimeSyncTokenType_.c_tag: time_sync_token_type__from_string,
}
ELEMENT_BY_TAG = {
'AuthenticationContextDeclaration': AuthenticationContextDeclaration,
'Identification': Identification,
'PhysicalVerification': PhysicalVerification,
'WrittenConsent': WrittenConsent,
'TechnicalProtection': TechnicalProtection,
'SecretKeyProtection': SecretKeyProtection,
'PrivateKeyProtection': PrivateKeyProtection,
'KeyActivation': KeyActivation,
'KeySharing': KeySharing,
'KeyStorage': KeyStorage,
'SubscriberLineNumber': SubscriberLineNumber,
'UserSuffix': UserSuffix,
'Password': Password,
'ActivationPin': ActivationPin,
'Token': Token,
'TimeSyncToken': TimeSyncToken,
'Smartcard': Smartcard,
'Length': Length,
'ActivationLimit': ActivationLimit,
'Generation': Generation,
'AuthnMethod': AuthnMethod,
'PrincipalAuthenticationMechanism': PrincipalAuthenticationMechanism,
'Authenticator': Authenticator,
'ComplexAuthenticator': ComplexAuthenticator,
'PreviousSession': PreviousSession,
'ResumeSession': ResumeSession,
'ZeroKnowledge': ZeroKnowledge,
'SharedSecretChallengeResponse': SharedSecretChallengeResponse,
'SharedSecretChallengeResponseType': SharedSecretChallengeResponseType_,
'DigSig': DigSig,
'AsymmetricDecryption': AsymmetricDecryption,
'AsymmetricKeyAgreement': AsymmetricKeyAgreement,
'PublicKeyType': PublicKeyType_,
'IPAddress': IPAddress,
'SharedSecretDynamicPlaintext': SharedSecretDynamicPlaintext,
'AuthenticatorTransportProtocol': AuthenticatorTransportProtocol,
'HTTP': HTTP,
'IPSec': IPSec,
'WTLS': WTLS,
'MobileNetworkNoEncryption': MobileNetworkNoEncryption,
'MobileNetworkRadioEncryption': MobileNetworkRadioEncryption,
'MobileNetworkEndToEndEncryption': MobileNetworkEndToEndEncryption,
'SSL': SSL,
'PSTN': PSTN,
'ISDN': ISDN,
'ADSL': ADSL,
'OperationalProtection': OperationalProtection,
'SecurityAudit': SecurityAudit,
'SwitchAudit': SwitchAudit,
'DeactivationCallCenter': DeactivationCallCenter,
'GoverningAgreements': GoverningAgreements,
'GoverningAgreementRef': GoverningAgreementRef,
'nymType': NymType_,
'IdentificationType': IdentificationType_,
'TechnicalProtectionBaseType': TechnicalProtectionBaseType_,
'OperationalProtectionType': OperationalProtectionType_,
'GoverningAgreementsType': GoverningAgreementsType_,
'GoverningAgreementRefType': GoverningAgreementRefType_,
'AuthenticatorBaseType': AuthenticatorBaseType_,
'ComplexAuthenticatorType': ComplexAuthenticatorType_,
'AuthenticatorTransportProtocolType': AuthenticatorTransportProtocolType_,
'KeyActivationType': KeyActivationType_,
'KeySharingType': KeySharingType_,
'PrivateKeyProtectionType': PrivateKeyProtectionType_,
'PasswordType': PasswordType_,
'RestrictedPassword': RestrictedPassword,
'RestrictedPasswordType': RestrictedPasswordType_,
'RestrictedLengthType': RestrictedLengthType_,
'ActivationPinType': ActivationPinType_,
'Alphabet': Alphabet,
'AlphabetType': AlphabetType_,
'DeviceTypeType': DeviceTypeType_,
'booleanType': BooleanType_,
'ActivationLimitType': ActivationLimitType_,
'ActivationLimitDuration': ActivationLimitDuration,
'ActivationLimitUsages': ActivationLimitUsages,
'ActivationLimitSession': ActivationLimitSession,
'ActivationLimitDurationType': ActivationLimitDurationType_,
'ActivationLimitUsagesType': ActivationLimitUsagesType_,
'ActivationLimitSessionType': ActivationLimitSessionType_,
'LengthType': LengthType_,
'mediumType': MediumType_,
'KeyStorageType': KeyStorageType_,
'SecretKeyProtectionType': SecretKeyProtectionType_,
'SecurityAuditType': SecurityAuditType_,
'ExtensionOnlyType': ExtensionOnlyType_,
'Extension': Extension,
'ExtensionType': ExtensionType_,
'AuthnContextDeclarationBaseType': AuthnContextDeclarationBaseType_,
'AuthnMethodBaseType': AuthnMethodBaseType_,
'PrincipalAuthenticationMechanismType': PrincipalAuthenticationMechanismType_,
'TokenType': TokenType_,
'TimeSyncTokenType': TimeSyncTokenType_,
}
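# ELEMENT_FROM_STRING maps an element tag to its *_from_string parser, and
# ELEMENT_BY_TAG maps a tag to the corresponding class; factory() below uses
# ELEMENT_BY_TAG to build element instances by tag name.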
def factory(tag, **kwargs):
return ELEMENT_BY_TAG[tag](**kwargs)
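# Example usage (a minimal sketch; the attribute values are illustrative):
#
#   decl = factory('AuthenticationContextDeclaration',
#                  authn_method=factory('AuthnMethod'))
#   xml_text = decl.to_string()
#
#   # ... and back again via the generated parser helpers:
#   decl = authentication_context_declaration_from_string(xml_text)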
| bsd-2-clause |
mydongistiny/kernel_huawei_angler-ak | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
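# One possible workflow (a sketch; the record step below is an assumption and
# may need adjusting for the local perf build):
#
#   perf record -e raw_syscalls:sys_enter -a -- sleep 10
#   perf script -s sctop.py              # system-wide totals, 3 second refresh
#   perf script -s sctop.py firefox 5    # syscalls made by "firefox", 5 second refresh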
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
koljanos/dakoljanos-anotherone | jni/libhpdf-2.3.0RC2/if/python/hpdf_errorcode.py | 32 | 6102 | error_detail={
0x1001: 'Internal error. The consistency of the data was lost.',
0x1002: 'Internal error. The consistency of the data was lost.',
0x1003: 'Internal error. The consistency of the data was lost.',
0x1004: 'The length of the data exceeds HPDF_LIMIT_MAX_STRING_LEN.',
0x1005: 'Cannot get palette data from a PNG image.',
0x1007: 'The count of elements of a dictionary exceeds HPDF_LIMIT_MAX_DICT_ELEMENT',
0x1008: 'Internal error. The consistency of the data was lost.',
0x1009: 'Internal error. The consistency of the data was lost.',
0x100A: 'Internal error. The consistency of the data was lost.',
0x100B: 'HPDF_SetPermission() OR HPDF_SetEncryptMode() was called before a password is set.',
0x100C: 'Internal error. The consistency of the data was lost.',
0x100E: 'Tried to register a font that has been registered.',
0x100F: 'Cannot register a character to the japanese word wrap characters list.',
0x1011: 'Tried to set the owner password to NULL.\nThe owner password and user password is the same.',
0x1013: 'Internal error. The consistency of the data was lost.',
0x1014: 'The depth of the stack exceeded HPDF_LIMIT_MAX_GSTATE.',
0x1015: 'Memory allocation failed.',
0x1016: 'File processing failed. (A detailed code is set.)',
0x1017: 'Cannot open a file. (A detailed code is set.)',
0x1019: 'Tried to load a font that has been registered.',
0x101A: 'The format of a font-file is invalid.\nInternal error. The consistency of the data was lost.',
0x101B: 'Cannot recognize a header of an afm file.',
0x101C: 'The specified annotation handle is invalid.',
0x101E: 'Bit-per-component of a image which was set as mask-image is invalid.',
0x101F: 'Cannot recognize char-matrics-data of an afm file.',
0x1020: '1. The color_space parameter of HPDF_LoadRawImage is invalid.\n2. Color-space of a image which was set as mask-image is invalid.\n3. The function which is invalid in the present color-space was invoked.',
0x1021: 'Invalid value was set when invoking HPDF_SetCommpressionMode().',
0x1022: 'An invalid date-time value was set.',
0x1023: 'An invalid destination handle was set.',
0x1025: 'An invalid document handle is set.',
0x1026: 'The function which is invalid in the present state was invoked.',
0x1027: 'An invalid encoder handle is set.',
0x1028: 'A combination between font and encoder is wrong.',
0x102B: 'An Invalid encoding name is specified.',
0x102C: 'The length of the encryption key is invalid.',
0x102D: '1. An invalid font handle was set.\n2. Unsupported font format.',
0x102E: 'Internal error. The consistency of the data was lost.',
0x102F: 'A font which has the specified name is not found.',
0x1030: 'Unsupported image format.',
0x1031: 'Unsupported image format.',
0x1032: 'Cannot read a postscript-name from an afm file.',
0x1033: '1. An invalid object is set.\n2. Internal error. The consistency of the data was lost.',
0x1034: 'Internal error. The consistency of the data was lost.',
0x1035: '1. Invoked HPDF_Image_SetColorMask() against the image-object which was set a mask-image.',
0x1036: 'An invalid outline-handle was specified.',
0x1037: 'An invalid page-handle was specified.',
0x1038: 'An invalid pages-handle was specified. (internel error)',
0x1039: 'An invalid value is set.',
0x103B: 'Invalid PNG image format.',
0x103C: 'Internal error. The consistency of the data was lost.',
0x103D: 'Internal error. The "_FILE_NAME" entry for delayed loading is missing.',
0x103F: 'Invalid .TTC file format.',
0x1040: 'The index parameter exceeds the number of included fonts',
0x1041: 'Cannot read a width-data from an afm file.',
0x1042: 'Internal error. The consistency of the data was lost.',
0x1043: 'An error has returned from PNGLIB while loading an image.',
0x1044: 'Internal error. The consistency of the data was lost.',
0x1045: 'Internal error. The consistency of the data was lost.',
0x1049: 'Internal error. The consistency of the data was lost.',
0x104A: 'Internal error. The consistency of the data was lost.',
0x104B: 'Internal error. The consistency of the data was lost.',
0x104C: 'There are no graphics-states to be restored.',
0x104D: 'Internal error. The consistency of the data was lost.',
0x104E: 'The current font is not set.',
0x104F: 'An invalid font-handle was spacified.',
0x1050: 'An invalid font-size was set.',
0x1051: 'See Graphics mode.',
0x1052: 'Internal error. The consistency of the data was lost.',
0x1053: 'The specified value is not a multiple of 90.',
0x1054: 'An invalid page-size was set.',
0x1055: 'An invalid image-handle was set.',
0x1056: 'The specified value is out of range.',
0x1057: 'The specified value is out of range.',
0x1058: 'Unexpected EOF marker was detected.',
0x1059: 'Internal error. The consistency of the data was lost.',
0x105B: 'The length of the specified text is too long.',
0x105C: 'The execution of a function was skipped because of other errors.',
0x105D: 'This font cannot be embedded. (restricted by license)',
0x105E: 'Unsupported ttf format. (cannot find unicode cmap.)',
0x105F: 'Unsupported ttf format.',
0x1060: 'Unsupported ttf format. (cannot find a necessary table) ',
0x1061: 'Internal error. The consistency of the data was lost.',
0x1062: '1. The library is not configured to use PNGLIB.\n2. Internal error. The consistency of the data was lost.',
0x1063: 'Unsupported Jpeg format.',
0x1064: 'Failed to parse .PFB file.',
0x1065: 'Internal error. The consistency of the data was lost.',
0x1066: 'An error has occurred while executing a function of Zlib.',
0x1067: 'An error returned from Zlib.',
0x1068: 'An invalid URI was set.',
0x1069: 'An invalid page-layout was set.',
0x1070: 'An invalid page-mode was set.',
0x1071: 'An invalid page-num-style was set.',
0x1072: 'An invalid icon was set.',
0x1073: 'An invalid border-style was set.',
0x1074: 'An invalid page-direction was set.',
0x1075: 'An invalid font-handle was specified.',
} | gpl-3.0 |
ericpre/hyperspy | hyperspy/_signals/lazy.py | 1 | 50395 | # -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
from functools import partial
import warnings
import numpy as np
import dask.array as da
import dask.delayed as dd
import dask
from dask.diagnostics import ProgressBar
from itertools import product
from distutils.version import LooseVersion
from hyperspy.signal import BaseSignal
from hyperspy.defaults_parser import preferences
from hyperspy.docstrings.signal import SHOW_PROGRESSBAR_ARG
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.external.progressbar import progressbar
from hyperspy.misc.array_tools import (_requires_linear_rebin,
get_signal_chunk_slice)
from hyperspy.misc.hist_tools import histogram_dask
from hyperspy.misc.machine_learning import import_sklearn
from hyperspy.misc.utils import (multiply, dummy_context_manager, isiterable,
process_function_blockwise, guess_output_signal_size,)
_logger = logging.getLogger(__name__)
lazyerror = NotImplementedError('This method is not available in lazy signals')
def to_array(thing, chunks=None):
"""Accepts BaseSignal, dask or numpy arrays and always produces either
numpy or dask array.
Parameters
----------
thing : {BaseSignal, dask.array.Array, numpy.ndarray}
the thing to be converted
chunks : {None, tuple of tuples}
If None, the returned value is a numpy array. Otherwise returns dask
array with the chunks as specified.
Returns
-------
res : {numpy.ndarray, dask.array.Array}
"""
if thing is None:
return None
if isinstance(thing, BaseSignal):
thing = thing.data
if chunks is None:
if isinstance(thing, da.Array):
thing = thing.compute()
if isinstance(thing, np.ndarray):
return thing
else:
raise ValueError
else:
if isinstance(thing, np.ndarray):
thing = da.from_array(thing, chunks=chunks)
if isinstance(thing, da.Array):
if thing.chunks != chunks:
thing = thing.rechunk(chunks)
return thing
else:
raise ValueError
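# Minimal usage sketch for `to_array` (the array and chunks below are
# illustrative only):
#   import numpy as np
#   arr = np.arange(100).reshape(10, 10)
#   to_array(arr)                           # returns the numpy array as-is
#   to_array(arr, chunks=((5, 5), (10,)))   # returns a dask array with those chunks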
class LazySignal(BaseSignal):
"""A Lazy Signal instance that delays computation until explicitly saved
(assuming storing the full result of computation in memory is not feasible)
"""
_lazy = True
def compute(self, close_file=False, show_progressbar=None, **kwargs):
"""Attempt to store the full signal in memory.
Parameters
----------
close_file : bool, default False
If True, attempt to close the file associated with the dask
array data if any. Note that closing the file will make all other
associated lazy signals inoperative.
%s
Returns
-------
None
"""
if "progressbar" in kwargs:
warnings.warn(
"The `progressbar` keyword is deprecated and will be removed "
"in HyperSpy 2.0. Use `show_progressbar` instead.",
VisibleDeprecationWarning,
)
show_progressbar = kwargs["progressbar"]
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
cm = ProgressBar if show_progressbar else dummy_context_manager
with cm():
da = self.data
data = da.compute()
if close_file:
self.close_file()
self.data = data
self._lazy = False
self._assign_subclass()
compute.__doc__ %= SHOW_PROGRESSBAR_ARG
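# Usage sketch (assumes a lazy signal `s` loaded elsewhere, e.g. with
# hs.load(..., lazy=True); the file name is hypothetical):
#   s = hs.load("big_dataset.hspy", lazy=True)
#   s.compute(show_progressbar=True)   # s.data is now a numpy array in memory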
def rechunk(self,
nav_chunks="auto",
sig_chunks=-1,
inplace=True,
**kwargs):
"""Rechunks the data using the same rechunking formula from Dask
except that the navigation and signal chunks are defined separately.
Note that for most functions ``sig_chunks`` should be left at its default
so that the chunks span the entire signal axes.
Parameters
----------
nav_chunks : {tuple, int, "auto", None}
The navigation block dimensions to create.
-1 indicates the full size of the corresponding dimension.
Default is “auto” which automatically determines chunk sizes.
sig_chunks : {tuple, int, "auto", None}
The signal block dimensions to create.
-1 indicates the full size of the corresponding dimension.
Default is -1 which automatically spans the full signal dimension
**kwargs : dict
Any other keyword arguments for :py:func:`dask.array.rechunk`.
"""
if not isinstance(sig_chunks, tuple):
sig_chunks = (sig_chunks,)*len(self.axes_manager.signal_shape)
if not isinstance(nav_chunks, tuple):
nav_chunks = (nav_chunks,)*len(self.axes_manager.navigation_shape)
new_chunks = nav_chunks + sig_chunks
if inplace:
self.data = self.data.rechunk(new_chunks,
**kwargs)
else:
return self._deepcopy_with_new_data(self.data.rechunk(new_chunks,
**kwargs)
)
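# Usage sketch (assumes `s` is a lazy signal with a 2D navigation space;
# the chunk sizes are arbitrary examples):
#   s.rechunk(nav_chunks=(32, 32))                 # in place, signal axes kept whole
#   s2 = s.rechunk(nav_chunks="auto", sig_chunks=-1, inplace=False)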
def close_file(self):
"""Closes the associated data file if any.
Currently it only supports closing the file associated with a dask
array created from an h5py DataSet (default HyperSpy hdf5 reader).
"""
arrkey = None
for key in self.data.dask.keys():
if "array-original" in key:
arrkey = key
break
if arrkey:
try:
self.data.dask[arrkey].file.close()
except AttributeError:
_logger.exception("Failed to close lazy Signal file")
def _get_dask_chunks(self, axis=None, dtype=None):
"""Returns dask chunks.
Aims:
- Have at least one signal (or specified axis) in a single chunk,
or as many as fit in memory
Parameters
----------
axis : {int, string, None, axis, tuple}
If axis is None (default), returns chunks for current data shape so
that at least one signal is in the chunk. If an axis is specified,
only that particular axis is guaranteed to be "not sliced".
dtype : {string, np.dtype}
The dtype of target chunks.
Returns
-------
Tuple of tuples, dask chunks
"""
dc = self.data
dcshape = dc.shape
for _axis in self.axes_manager._axes:
if _axis.index_in_array < len(dcshape):
_axis.size = int(dcshape[_axis.index_in_array])
if axis is not None:
need_axes = self.axes_manager[axis]
if not np.iterable(need_axes):
need_axes = [need_axes, ]
else:
need_axes = self.axes_manager.signal_axes
if dtype is None:
dtype = dc.dtype
elif not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
typesize = max(dtype.itemsize, dc.dtype.itemsize)
want_to_keep = multiply([ax.size for ax in need_axes]) * typesize
# @mrocklin recommends having around 100MB chunks, so we do that:
num_that_fit = int(100. * 2.**20 / want_to_keep)
# want to have at least one "signal" per chunk
if num_that_fit < 2:
chunks = [tuple(1 for _ in range(i)) for i in dc.shape]
for ax in need_axes:
chunks[ax.index_in_array] = dc.shape[ax.index_in_array],
return tuple(chunks)
sizes = [
ax.size for ax in self.axes_manager._axes if ax not in need_axes
]
indices = [
ax.index_in_array for ax in self.axes_manager._axes
if ax not in need_axes
]
while True:
if multiply(sizes) <= num_that_fit:
break
i = np.argmax(sizes)
sizes[i] = np.floor(sizes[i] / 2)
chunks = []
ndim = len(dc.shape)
for i in range(ndim):
if i in indices:
size = float(dc.shape[i])
split_array = np.array_split(
np.arange(size), np.ceil(size / sizes[indices.index(i)]))
chunks.append(tuple(len(sp) for sp in split_array))
else:
chunks.append((dc.shape[i], ))
return tuple(chunks)
def _get_navigation_chunk_size(self):
nav_axes = self.axes_manager.navigation_indices_in_array
nav_chunks = tuple([self.data.chunks[i] for i in sorted(nav_axes)])
return nav_chunks
def _make_lazy(self, axis=None, rechunk=False, dtype=None):
self.data = self._lazy_data(axis=axis, rechunk=rechunk, dtype=dtype)
def change_dtype(self, dtype, rechunk=True):
# To be consistent with the rechunk argument of other method, we use
# 'dask_auto' in favour of a chunking which doesn't split signal space.
if rechunk:
rechunk = 'dask_auto'
from hyperspy.misc import rgb_tools
if not isinstance(dtype, np.dtype) and (dtype not in
rgb_tools.rgb_dtypes):
dtype = np.dtype(dtype)
super().change_dtype(dtype)
self._make_lazy(rechunk=rechunk, dtype=dtype)
change_dtype.__doc__ = BaseSignal.change_dtype.__doc__
def _lazy_data(self, axis=None, rechunk=True, dtype=None):
"""Return the data as a dask array, rechunked if necessary.
Parameters
----------
axis: None, DataAxis or tuple of data axes
The data axis that must not be broken into chunks when `rechunk`
is `True`. If None, it defaults to the current signal axes.
rechunk: bool, "dask_auto"
If `True`, it rechunks the data if necessary making sure that the
axes in ``axis`` are not split into chunks. If `False`, it does
not rechunk unless the data is not a dask array, in which case
it chunks as if rechunk was `True`. If "dask_auto", rechunk if
necessary using dask's automatic chunk guessing.
"""
if rechunk == "dask_auto":
new_chunks = "auto"
else:
new_chunks = self._get_dask_chunks(axis=axis, dtype=dtype)
if isinstance(self.data, da.Array):
res = self.data
if self.data.chunks != new_chunks and rechunk:
_logger.info(
"Rechunking.\nOriginal chunks: %s" % str(self.data.chunks))
res = self.data.rechunk(new_chunks)
_logger.info(
"Final chunks: %s " % str(res.chunks))
else:
if isinstance(self.data, np.ma.masked_array):
data = np.where(self.data.mask, np.nan, self.data)
else:
data = self.data
res = da.from_array(data, chunks=new_chunks)
assert isinstance(res, da.Array)
return res
def _apply_function_on_data_and_remove_axis(self, function, axes,
out=None, rechunk=True):
def get_dask_function(numpy_name):
# Translate from the default numpy to dask functions
translations = {'amax': 'max', 'amin': 'min'}
if numpy_name in translations:
numpy_name = translations[numpy_name]
return getattr(da, numpy_name)
function = get_dask_function(function.__name__)
axes = self.axes_manager[axes]
if not np.iterable(axes):
axes = (axes, )
ar_axes = tuple(ax.index_in_array for ax in axes)
if len(ar_axes) == 1:
ar_axes = ar_axes[0]
# For reduce operations the actual signal and navigation
# axes configuration does not matter. Hence we leave
# dask guess the chunks
if rechunk is True:
rechunk = "dask_auto"
current_data = self._lazy_data(rechunk=rechunk)
# Apply reducing function
new_data = function(current_data, axis=ar_axes)
if not new_data.ndim:
new_data = new_data.reshape((1, ))
if out:
if out.data.shape == new_data.shape:
out.data = new_data
out.events.data_changed.trigger(obj=out)
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
else:
s = self._deepcopy_with_new_data(new_data)
s._remove_axis([ax.index_in_axes_manager for ax in axes])
return s
def rebin(self, new_shape=None, scale=None,
crop=False, dtype=None, out=None, rechunk=True):
factors = self._validate_rebin_args_and_get_factors(
new_shape=new_shape,
scale=scale)
if _requires_linear_rebin(arr=self.data, scale=factors):
if new_shape:
raise NotImplementedError(
"Lazy rebin requires that the new shape is a divisor "
"of the original signal shape e.g. if original shape "
"(10| 6), new_shape=(5| 3) is valid, (3 | 4) is not.")
else:
raise NotImplementedError(
"Lazy rebin requires scale to be integer and divisor of the "
"original signal shape")
axis = {ax.index_in_array: ax
for ax in self.axes_manager._axes}[factors.argmax()]
self._make_lazy(axis=axis, rechunk=rechunk)
return super().rebin(new_shape=new_shape, scale=scale, crop=crop,
dtype=dtype, out=out)
rebin.__doc__ = BaseSignal.rebin.__doc__
def __array__(self, dtype=None):
return self.data.__array__(dtype=dtype)
def _make_sure_data_is_contiguous(self):
self._make_lazy(rechunk=True)
def diff(self, axis, order=1, out=None, rechunk=True):
arr_axis = self.axes_manager[axis].index_in_array
def dask_diff(arr, n, axis):
# assume arr is da.Array already
n = int(n)
if n == 0:
return arr
if n < 0:
raise ValueError("order must be positive")
nd = len(arr.shape)
slice1 = [slice(None)] * nd
slice2 = [slice(None)] * nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return dask_diff(arr[slice1] - arr[slice2], n - 1, axis=axis)
else:
return arr[slice1] - arr[slice2]
current_data = self._lazy_data(axis=axis, rechunk=rechunk)
new_data = dask_diff(current_data, order, arr_axis)
if not new_data.ndim:
new_data = new_data.reshape((1, ))
s = out or self._deepcopy_with_new_data(new_data)
if out:
if out.data.shape == new_data.shape:
out.data = new_data
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
axis2 = s.axes_manager[axis]
new_offset = self.axes_manager[axis].offset + (order * axis2.scale / 2)
axis2.offset = new_offset
s.get_dimensions_from_data()
if out is None:
return s
else:
out.events.data_changed.trigger(obj=out)
diff.__doc__ = BaseSignal.diff.__doc__
def integrate_simpson(self, axis, out=None):
axis = self.axes_manager[axis]
from scipy import integrate
axis = self.axes_manager[axis]
data = self._lazy_data(axis=axis, rechunk=True)
new_data = data.map_blocks(
integrate.simps,
x=axis.axis,
axis=axis.index_in_array,
drop_axis=axis.index_in_array,
dtype=data.dtype)
s = out or self._deepcopy_with_new_data(new_data)
if out:
if out.data.shape == new_data.shape:
out.data = new_data
out.events.data_changed.trigger(obj=out)
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
else:
s._remove_axis(axis.index_in_axes_manager)
return s
integrate_simpson.__doc__ = BaseSignal.integrate_simpson.__doc__
def valuemax(self, axis, out=None, rechunk=True):
idx = self.indexmax(axis, rechunk=rechunk)
old_data = idx.data
data = old_data.map_blocks(
lambda x: self.axes_manager[axis].index2value(x))
if out is None:
idx.data = data
return idx
else:
out.data = data
out.events.data_changed.trigger(obj=out)
valuemax.__doc__ = BaseSignal.valuemax.__doc__
def valuemin(self, axis, out=None, rechunk=True):
idx = self.indexmin(axis, rechunk=rechunk)
old_data = idx.data
data = old_data.map_blocks(
lambda x: self.axes_manager[axis].index2value(x))
if out is None:
idx.data = data
return idx
else:
out.data = data
out.events.data_changed.trigger(obj=out)
valuemin.__doc__ = BaseSignal.valuemin.__doc__
def get_histogram(self, bins='fd', out=None, rechunk=True, **kwargs):
if 'range_bins' in kwargs:
_logger.warning("'range_bins' argument not supported for lazy "
"signals")
del kwargs['range_bins']
from hyperspy.signals import Signal1D
data = self._lazy_data(rechunk=rechunk).flatten()
hist, bin_edges = histogram_dask(data, bins=bins, **kwargs)
if out is None:
hist_spec = Signal1D(hist)
hist_spec._lazy = True
hist_spec._assign_subclass()
else:
hist_spec = out
# we always overwrite the data because the computation is lazy ->
# the result signal is lazy. Assume that the `out` is already lazy
hist_spec.data = hist
hist_spec.axes_manager[0].scale = bin_edges[1] - bin_edges[0]
hist_spec.axes_manager[0].offset = bin_edges[0]
hist_spec.axes_manager[0].size = hist.shape[-1]
hist_spec.axes_manager[0].name = 'value'
hist_spec.axes_manager[0].is_binned = True
hist_spec.metadata.General.title = (
self.metadata.General.title + " histogram")
if out is None:
return hist_spec
else:
out.events.data_changed.trigger(obj=out)
get_histogram.__doc__ = BaseSignal.get_histogram.__doc__
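# Usage sketch (assumes a lazy signal `s`; the result is itself lazy until
# computed or plotted):
#   hist = s.get_histogram()
#   hist.compute()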
@staticmethod
def _estimate_poissonian_noise_variance(dc, gain_factor, gain_offset,
correlation_factor):
variance = (dc * gain_factor + gain_offset) * correlation_factor
# The lower bound of the variance is the gaussian noise.
variance = da.clip(variance, gain_offset * correlation_factor, np.inf)
return variance
# def _get_navigation_signal(self, data=None, dtype=None):
# return super()._get_navigation_signal(data=data, dtype=dtype).as_lazy()
# _get_navigation_signal.__doc__ = BaseSignal._get_navigation_signal.__doc__
# def _get_signal_signal(self, data=None, dtype=None):
# return super()._get_signal_signal(data=data, dtype=dtype).as_lazy()
# _get_signal_signal.__doc__ = BaseSignal._get_signal_signal.__doc__
def _calculate_summary_statistics(self, rechunk=True):
if rechunk is True:
# Use dask auto rechunk instead of HyperSpy's one, which should be
# better for these operations
rechunk = "dask_auto"
data = self._lazy_data(rechunk=rechunk)
_raveled = data.ravel()
_mean, _std, _min, _q1, _q2, _q3, _max = da.compute(
da.nanmean(data),
da.nanstd(data),
da.nanmin(data),
da.percentile(_raveled, [25, ]),
da.percentile(_raveled, [50, ]),
da.percentile(_raveled, [75, ]),
da.nanmax(data), )
return _mean, _std, _min, _q1, _q2, _q3, _max
def _map_all(self, function, inplace=True, **kwargs):
calc_result = dd(function)(self.data, **kwargs)
if inplace:
self.data = da.from_delayed(calc_result, shape=self.data.shape,
dtype=self.data.dtype)
return None
return self._deepcopy_with_new_data(calc_result)
def _map_iterate(self,
function,
iterating_kwargs=None,
show_progressbar=None,
parallel=None,
max_workers=None,
ragged=False,
inplace=True,
output_signal_size=None,
output_dtype=None,
**kwargs):
# unpacking keyword arguments
if iterating_kwargs is None:
iterating_kwargs = {}
elif isinstance(iterating_kwargs, (tuple, list)):
iterating_kwargs = dict((k, v) for k, v in iterating_kwargs)
nav_indexes = self.axes_manager.navigation_indices_in_array
if ragged and inplace:
raise ValueError("Ragged and inplace are not compatible with a lazy signal")
chunk_span = np.equal(self.data.chunksize, self.data.shape)
chunk_span = [chunk_span[i] for i in self.axes_manager.signal_indices_in_array]
if not all(chunk_span):
_logger.info("The chunk size needs to span the full signal size, rechunking...")
old_sig = self.rechunk(inplace=False)
else:
old_sig = self
autodetermine = (output_signal_size is None or output_dtype is None) # try to guess output dtype and sig size?
nav_chunks = old_sig._get_navigation_chunk_size()
args = ()
arg_keys = ()
for key in iterating_kwargs:
if not isinstance(iterating_kwargs[key], BaseSignal):
iterating_kwargs[key] = BaseSignal(iterating_kwargs[key].T).T
warnings.warn(
"Passing arrays as keyword arguments can be ambigous. "
"This is deprecated and will be removed in HyperSpy 2.0. "
"Pass signal instances instead.",
VisibleDeprecationWarning)
if iterating_kwargs[key]._lazy:
if iterating_kwargs[key]._get_navigation_chunk_size() != nav_chunks:
iterating_kwargs[key].rechunk(nav_chunks=nav_chunks)
else:
iterating_kwargs[key] = iterating_kwargs[key].as_lazy()
iterating_kwargs[key].rechunk(nav_chunks=nav_chunks)
extra_dims = (len(old_sig.axes_manager.signal_shape) -
len(iterating_kwargs[key].axes_manager.signal_shape))
if extra_dims > 0:
old_shape = iterating_kwargs[key].data.shape
new_shape = old_shape + (1,)*extra_dims
args += (iterating_kwargs[key].data.reshape(new_shape), )
else:
args += (iterating_kwargs[key].data, )
arg_keys += (key,)
if autodetermine: #trying to guess the output d-type and size from one signal
testing_kwargs = {}
for key in iterating_kwargs:
test_ind = (0,) * len(old_sig.axes_manager.navigation_axes)
testing_kwargs[key] = np.squeeze(iterating_kwargs[key].inav[test_ind].data).compute()
testing_kwargs = {**kwargs, **testing_kwargs}
test_data = np.array(old_sig.inav[(0,) * len(old_sig.axes_manager.navigation_shape)].data.compute())
output_signal_size, output_dtype = guess_output_signal_size(test_signal=test_data,
function=function,
ragged=ragged,
**testing_kwargs)
# Dropping/Adding Axes
if output_signal_size == old_sig.axes_manager.signal_shape:
drop_axis = None
new_axis = None
axes_changed = False
else:
axes_changed = True
if len(output_signal_size) != len(old_sig.axes_manager.signal_shape):
drop_axis = old_sig.axes_manager.signal_indices_in_array
new_axis = tuple(range(len(output_signal_size)))
else:
drop_axis = [it for (o, i, it) in zip(output_signal_size,
old_sig.axes_manager.signal_shape,
old_sig.axes_manager.signal_indices_in_array)
if o != i]
new_axis = drop_axis
chunks = tuple([old_sig.data.chunks[i] for i in sorted(nav_indexes)]) + output_signal_size
mapped = da.map_blocks(process_function_blockwise,
old_sig.data,
*args,
function=function,
nav_indexes=nav_indexes,
drop_axis=drop_axis,
new_axis=new_axis,
output_signal_size=output_signal_size,
dtype=output_dtype,
chunks=chunks,
arg_keys=arg_keys,
**kwargs)
if inplace:
self.data = mapped
sig = self
else:
sig = self._deepcopy_with_new_data(mapped)
if ragged:
sig.axes_manager.remove(sig.axes_manager.signal_axes)
sig._lazy = True
sig._assign_subclass()
return sig
# remove if too many axes
if axes_changed:
sig.axes_manager.remove(sig.axes_manager.signal_axes[len(output_signal_size):])
# add additional required axes
for ind in range(
len(output_signal_size) - sig.axes_manager.signal_dimension, 0, -1):
sig.axes_manager._append_axis(output_signal_size[-ind], navigate=False)
if not ragged:
sig.get_dimensions_from_data()
return sig
def _iterate_signal(self):
if self.axes_manager.navigation_size < 2:
yield self()
return
nav_dim = self.axes_manager.navigation_dimension
sig_dim = self.axes_manager.signal_dimension
nav_indices = self.axes_manager.navigation_indices_in_array[::-1]
nav_lengths = np.atleast_1d(
np.array(self.data.shape)[list(nav_indices)])
getitem = [slice(None)] * (nav_dim + sig_dim)
data = self._lazy_data()
for indices in product(*[range(l) for l in nav_lengths]):
for res, ind in zip(indices, nav_indices):
getitem[ind] = res
yield data[tuple(getitem)]
def _block_iterator(self,
flat_signal=True,
get=dask.threaded.get,
navigation_mask=None,
signal_mask=None):
"""A function that allows iterating lazy signal data by blocks,
defining the dask.Array.
Parameters
----------
flat_signal: bool
returns each block flattened, such that the shape (for the
particular block) is (navigation_size, signal_size), with
optionally masked elements missing. If false, returns
the equivalent of s.inav[{blocks}].data, where masked elements are
set to np.nan or 0.
get : dask scheduler
the dask scheduler to use for computations;
default `dask.threaded.get`
navigation_mask : {BaseSignal, numpy array, dask array}
The navigation locations marked as True are not returned (flat) or
set to NaN or 0.
signal_mask : {BaseSignal, numpy array, dask array}
The signal locations marked as True are not returned (flat) or set
to NaN or 0.
"""
self._make_lazy()
data = self._data_aligned_with_axes
nav_chunks = data.chunks[:self.axes_manager.navigation_dimension]
indices = product(*[range(len(c)) for c in nav_chunks])
signalsize = self.axes_manager.signal_size
sig_reshape = (signalsize,) if signalsize else ()
data = data.reshape((self.axes_manager.navigation_shape[::-1] +
sig_reshape))
if signal_mask is None:
signal_mask = slice(None) if flat_signal else \
np.zeros(self.axes_manager.signal_size, dtype='bool')
else:
try:
signal_mask = to_array(signal_mask).ravel()
except ValueError:
# re-raise with a message
raise ValueError("signal_mask has to be a signal, numpy or"
" dask array, but "
"{} was given".format(type(signal_mask)))
if flat_signal:
signal_mask = ~signal_mask
if navigation_mask is None:
nav_mask = da.zeros(
self.axes_manager.navigation_shape[::-1],
chunks=nav_chunks,
dtype='bool')
else:
try:
nav_mask = to_array(navigation_mask, chunks=nav_chunks)
except ValueError:
# re-raise with a message
raise ValueError("navigation_mask has to be a signal, numpy or"
" dask array, but "
"{} was given".format(type(navigation_mask)))
if flat_signal:
nav_mask = ~nav_mask
for ind in indices:
chunk = get(data.dask,
(data.name, ) + ind + (0,) * bool(signalsize))
n_mask = get(nav_mask.dask, (nav_mask.name, ) + ind)
if flat_signal:
yield chunk[n_mask, ...][..., signal_mask]
else:
chunk = chunk.copy()
value = np.nan if np.can_cast('float', chunk.dtype) else 0
chunk[n_mask, ...] = value
chunk[..., signal_mask] = value
yield chunk.reshape(chunk.shape[:-1] +
self.axes_manager.signal_shape[::-1])
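# Usage sketch for the private `_block_iterator` helper (the mask below is an
# illustrative all-False navigation mask):
#   nav_mask = np.zeros(s.axes_manager.navigation_shape[::-1], dtype=bool)
#   for block in s._block_iterator(flat_signal=True, navigation_mask=nav_mask):
#       ...  # each block is a (navigation_size, signal_size) array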
def decomposition(
self,
normalize_poissonian_noise=False,
algorithm="SVD",
output_dimension=None,
signal_mask=None,
navigation_mask=None,
get=dask.threaded.get,
num_chunks=None,
reproject=True,
print_info=True,
**kwargs
):
"""Perform Incremental (Batch) decomposition on the data.
The results are stored in ``self.learning_results``.
Read more in the :ref:`User Guide <big_data.decomposition>`.
Parameters
----------
normalize_poissonian_noise : bool, default False
If True, scale the signal to normalize Poissonian noise using
the approach described in [KeenanKotula2004]_.
algorithm : {'SVD', 'PCA', 'ORPCA', 'ORNMF'}, default 'SVD'
The decomposition algorithm to use.
output_dimension : int or None, default None
Number of components to keep/calculate. If None, keep all
(only valid for 'SVD' algorithm)
get : dask scheduler
the dask scheduler to use for computations;
default `dask.threaded.get`
num_chunks : int or None, default None
the number of dask chunks to pass to the decomposition model.
More chunks require more memory, but should run faster. Will be
increased to contain at least ``output_dimension`` signals.
navigation_mask : {BaseSignal, numpy array, dask array}
The navigation locations marked as True are not used in the
decomposition. Not implemented for the 'SVD' algorithm.
signal_mask : {BaseSignal, numpy array, dask array}
The signal locations marked as True are not used in the
decomposition. Not implemented for the 'SVD' algorithm.
reproject : bool, default True
Reproject data on the learnt components (factors) after learning.
print_info : bool, default True
If True, print information about the decomposition being performed.
In the case of sklearn.decomposition objects, this includes the
values of all arguments of the chosen sklearn algorithm.
**kwargs
passed to the partial_fit/fit functions.
References
----------
.. [KeenanKotula2004] M. Keenan and P. Kotula, "Accounting for Poisson noise
in the multivariate analysis of ToF-SIMS spectrum images", Surf.
Interface Anal 36(3) (2004): 203-212.
See Also
--------
* :py:meth:`~.learn.mva.MVA.decomposition` for non-lazy signals
* :py:func:`dask.array.linalg.svd`
* :py:class:`sklearn.decomposition.IncrementalPCA`
* :py:class:`~.learn.rpca.ORPCA`
* :py:class:`~.learn.ornmf.ORNMF`
"""
if kwargs.get("bounds", False):
warnings.warn(
"The `bounds` keyword is deprecated and will be removed "
"in v2.0. Since version > 1.3 this has no effect.",
VisibleDeprecationWarning,
)
kwargs.pop("bounds", None)
# Deprecate 'ONMF' for 'ORNMF'
if algorithm == "ONMF":
warnings.warn(
"The argument `algorithm='ONMF'` has been deprecated and will "
"be removed in future. Please use `algorithm='ORNMF'` instead.",
VisibleDeprecationWarning,
)
algorithm = "ORNMF"
# Check algorithms requiring output_dimension
algorithms_require_dimension = ["PCA", "ORPCA", "ORNMF"]
if algorithm in algorithms_require_dimension and output_dimension is None:
raise ValueError(
"`output_dimension` must be specified for '{}'".format(algorithm)
)
explained_variance = None
explained_variance_ratio = None
_al_data = self._data_aligned_with_axes
nav_chunks = _al_data.chunks[: self.axes_manager.navigation_dimension]
sig_chunks = _al_data.chunks[self.axes_manager.navigation_dimension :]
num_chunks = 1 if num_chunks is None else num_chunks
blocksize = np.min([multiply(ar) for ar in product(*nav_chunks)])
nblocks = multiply([len(c) for c in nav_chunks])
if output_dimension and blocksize / output_dimension < num_chunks:
num_chunks = np.ceil(blocksize / output_dimension)
blocksize *= num_chunks
# Initialize print_info
to_print = [
"Decomposition info:",
f" normalize_poissonian_noise={normalize_poissonian_noise}",
f" algorithm={algorithm}",
f" output_dimension={output_dimension}"
]
# LEARN
if algorithm == "PCA":
if not import_sklearn.sklearn_installed:
raise ImportError("algorithm='PCA' requires scikit-learn")
obj = import_sklearn.sklearn.decomposition.IncrementalPCA(n_components=output_dimension)
method = partial(obj.partial_fit, **kwargs)
reproject = True
to_print.extend(["scikit-learn estimator:", obj])
elif algorithm == "ORPCA":
from hyperspy.learn.rpca import ORPCA
batch_size = kwargs.pop("batch_size", None)
obj = ORPCA(output_dimension, **kwargs)
method = partial(obj.fit, batch_size=batch_size)
elif algorithm == "ORNMF":
from hyperspy.learn.ornmf import ORNMF
batch_size = kwargs.pop("batch_size", None)
obj = ORNMF(output_dimension, **kwargs)
method = partial(obj.fit, batch_size=batch_size)
elif algorithm != "SVD":
raise ValueError("'algorithm' not recognised")
original_data = self.data
try:
_logger.info("Performing decomposition analysis")
if normalize_poissonian_noise:
_logger.info("Scaling the data to normalize Poissonian noise")
data = self._data_aligned_with_axes
ndim = self.axes_manager.navigation_dimension
sdim = self.axes_manager.signal_dimension
nm = da.logical_not(
da.zeros(self.axes_manager.navigation_shape[::-1], chunks=nav_chunks)
if navigation_mask is None
else to_array(navigation_mask, chunks=nav_chunks)
)
sm = da.logical_not(
da.zeros(self.axes_manager.signal_shape[::-1], chunks=sig_chunks)
if signal_mask is None
else to_array(signal_mask, chunks=sig_chunks)
)
ndim = self.axes_manager.navigation_dimension
sdim = self.axes_manager.signal_dimension
bH, aG = da.compute(
data.sum(axis=tuple(range(ndim))),
data.sum(axis=tuple(range(ndim, ndim + sdim))),
)
bH = da.where(sm, bH, 1)
aG = da.where(nm, aG, 1)
raG = da.sqrt(aG)
rbH = da.sqrt(bH)
coeff = raG[(...,) + (None,) * rbH.ndim] * rbH[(None,) * raG.ndim + (...,)]
coeff = coeff.map_blocks(np.nan_to_num)
coeff = da.where(coeff == 0, 1, coeff)
data = data / coeff
self.data = data
# LEARN
if algorithm == "SVD":
reproject = False
from dask.array.linalg import svd
try:
self._unfolded4decomposition = self.unfold()
# TODO: implement masking
if navigation_mask is not None or signal_mask is not None:
raise NotImplementedError("Masking is not yet implemented for lazy SVD")
U, S, V = svd(self.data)
if output_dimension is None:
min_shape = min(min(U.shape), min(V.shape))
else:
min_shape = output_dimension
U = U[:, :min_shape]
S = S[:min_shape]
V = V[:min_shape]
factors = V.T
explained_variance = S ** 2 / self.data.shape[0]
loadings = U * S
finally:
if self._unfolded4decomposition is True:
self.fold()
self._unfolded4decomposition = False
else:
self._check_navigation_mask(navigation_mask)
self._check_signal_mask(signal_mask)
this_data = []
try:
for chunk in progressbar(
self._block_iterator(
flat_signal=True,
get=get,
signal_mask=signal_mask,
navigation_mask=navigation_mask,
),
total=nblocks,
leave=True,
desc="Learn",
):
this_data.append(chunk)
if len(this_data) == num_chunks:
thedata = np.concatenate(this_data, axis=0)
method(thedata)
this_data = []
if len(this_data):
thedata = np.concatenate(this_data, axis=0)
method(thedata)
except KeyboardInterrupt: # pragma: no cover
pass
# GET ALREADY CALCULATED RESULTS
if algorithm == "PCA":
explained_variance = obj.explained_variance_
explained_variance_ratio = obj.explained_variance_ratio_
factors = obj.components_.T
elif algorithm == "ORPCA":
factors, loadings = obj.finish()
loadings = loadings.T
elif algorithm == "ORNMF":
factors, loadings = obj.finish()
loadings = loadings.T
# REPROJECT
if reproject:
if algorithm == "PCA":
method = obj.transform
def post(a):
return np.concatenate(a, axis=0)
elif algorithm == "ORPCA":
method = obj.project
def post(a):
return np.concatenate(a, axis=1).T
elif algorithm == "ORNMF":
method = obj.project
def post(a):
return np.concatenate(a, axis=1).T
_map = map(
lambda thing: method(thing),
self._block_iterator(
flat_signal=True,
get=get,
signal_mask=signal_mask,
navigation_mask=navigation_mask,
),
)
H = []
try:
for thing in progressbar(_map, total=nblocks, desc="Project"):
H.append(thing)
except KeyboardInterrupt: # pragma: no cover
pass
loadings = post(H)
if explained_variance is not None and explained_variance_ratio is None:
explained_variance_ratio = explained_variance / explained_variance.sum()
# RESHUFFLE "blocked" LOADINGS
ndim = self.axes_manager.navigation_dimension
if algorithm != "SVD": # Only needed for online algorithms
try:
loadings = _reshuffle_mixed_blocks(
loadings, ndim, (output_dimension,), nav_chunks
).reshape((-1, output_dimension))
except ValueError:
# In case the projection step was not finished, it's left
# as scrambled
pass
finally:
self.data = original_data
target = self.learning_results
target.decomposition_algorithm = algorithm
target.output_dimension = output_dimension
if algorithm != "SVD":
target._object = obj
target.factors = factors
target.loadings = loadings
target.explained_variance = explained_variance
target.explained_variance_ratio = explained_variance_ratio
# Rescale the results if the noise was normalized
if normalize_poissonian_noise is True:
target.factors = target.factors * rbH.ravel()[:, np.newaxis]
target.loadings = target.loadings * raG.ravel()[:, np.newaxis]
# Print details about the decomposition we just performed
if print_info:
print("\n".join([str(pr) for pr in to_print]))
def plot(self, navigator='auto', **kwargs):
if isinstance(navigator, str):
if navigator == 'spectrum':
# We don't support the 'spectrum' option to keep it simple
_logger.warning("The `navigator='spectrum'` option is not "
"supported for lazy signals, 'auto' is used "
"instead.")
navigator = 'auto'
if navigator == 'auto':
nav_dim = self.axes_manager.navigation_dimension
if nav_dim in [1, 2]:
if self.navigator is None:
self.compute_navigator()
navigator = self.navigator
elif nav_dim > 2:
navigator = 'slider'
super().plot(navigator=navigator, **kwargs)
def compute_navigator(self, index=None, chunks_number=None,
show_progressbar=None):
"""
Compute the navigator by taking the sum over a single chunk containing
the specified coordinate. Taking the sum over a single chunk is a
computationally efficient approach to compute the navigator. The data
can be rechunked by specifying the ``chunks_number`` argument.
Parameters
----------
index : (int, float, None) or iterable, optional
Specified where to take the sum, follows HyperSpy indexing syntax
for integer and float. If None, the index is the centre of the
signal_space
chunks_number : (int, None) or iterable, optional
Define the number of chunks in the signal space used to rechunk
the data when calculating the navigator. Useful to define the range
over which the sum is calculated.
If None, the existing chunking will be considered when picking the
chunk used in the navigator calculation.
%s
Returns
-------
None.
Note
----
The number of chunks will affect where the sum is taken. If the sum
needs to be taken in the centre of the signal space (for example, in
the case of a diffraction pattern), the number of chunks needs to be an
odd number, so that the middle chunk is centered.
"""
signal_shape = self.axes_manager.signal_shape
if index is None:
index = [round(shape / 2) for shape in signal_shape]
else:
if not isiterable(index):
index = [index] * len(signal_shape)
index = [axis._get_index(_idx)
for _idx, axis in zip(index, self.axes_manager.signal_axes)]
_logger.info(f"Using index: {index}")
if chunks_number is None:
chunks = self.data.chunks
else:
if not isiterable(chunks_number):
chunks_number = [chunks_number] * len(signal_shape)
# Determine the chunk size
signal_chunks = da.core.normalize_chunks(
[int(size / cn) for cn, size in zip(chunks_number, signal_shape)],
shape=signal_shape
)
# Needs to reverse the chunks list to match dask chunking order
signal_chunks = list(signal_chunks)[::-1]
navigation_chunks = ['auto'] * len(self.axes_manager.navigation_shape)
if LooseVersion(dask.__version__) >= LooseVersion("2.30.0"):
kwargs = {'balance':True}
else:
kwargs = {}
chunks = self.data.rechunk([*navigation_chunks, *signal_chunks],
**kwargs).chunks
# Get the slice of the corresponding chunk
signal_size = len(signal_shape)
signal_chunks = tuple(chunks[i-signal_size] for i in range(signal_size))
_logger.info(f"Signal chunks: {signal_chunks}")
isig_slice = get_signal_chunk_slice(index, chunks)
_logger.info(f'Computing sum over signal dimension: {isig_slice}')
axes = [axis.index_in_array for axis in self.axes_manager.signal_axes]
navigator = self.isig[isig_slice].sum(axes)
navigator.compute(show_progressbar=show_progressbar)
navigator.original_metadata.set_item('sum_from', str(isig_slice))
self.navigator = navigator.T
compute_navigator.__doc__ %= SHOW_PROGRESSBAR_ARG
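# Usage sketch (assumes a lazy signal `s` with a 2D signal space, e.g. a
# diffraction dataset; the chunk count is illustrative):
#   s.compute_navigator(chunks_number=9)
#   s.plot()   # reuses the precomputed navigator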
def _reshuffle_mixed_blocks(array, ndim, sshape, nav_chunks):
"""Reshuffles dask block-shuffled array
Parameters
----------
array : np.ndarray
the array to reshuffle
ndim : int
the number of navigation (shuffled) dimensions
sshape : tuple of ints
the shape of the signal (non-shuffled) dimensions
nav_chunks : tuple of tuples
the dask chunks of the navigation dimensions used for the shuffle
"""
splits = np.cumsum([multiply(ar)
for ar in product(*nav_chunks)][:-1]).tolist()
if splits:
all_chunks = [
ar.reshape(shape + sshape)
for shape, ar in zip(
product(*nav_chunks), np.split(array, splits))
]
def split_stack_list(what, step, axis):
total = len(what)
if total != step:
return [
np.concatenate(
what[i:i + step], axis=axis)
for i in range(0, total, step)
]
else:
return np.concatenate(what, axis=axis)
for chunks, axis in zip(nav_chunks[::-1], range(ndim - 1, -1, -1)):
step = len(chunks)
all_chunks = split_stack_list(all_chunks, step, axis)
return all_chunks
else:
return array
| gpl-3.0 |
obnam-mirror/obnam | obnamlib/blob_store.py | 1 | 3369 | # Copyright 2015-2016 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# =*= License: GPL-3+ =*=
import obnamlib
class BlobStore(object):
def __init__(self):
self._bag_store = None
self._bag = None
self._max_bag_size = 0
self._cached_blobs = BlobCache()
self._cached_blobs.set_max_bytes(2**24)
def set_bag_store(self, bag_store):
self._bag_store = bag_store
def set_max_bag_size(self, max_bag_size):
self._max_bag_size = max_bag_size
def set_max_cache_bytes(self, max_bytes): # pragma: no cover
self._cached_blobs.set_max_bytes(max_bytes)
def get_blob(self, blob_id):
bag_id, index = obnamlib.parse_object_id(blob_id)
if self._bag and bag_id == self._bag.get_id():
return self._bag[index]
if blob_id in self._cached_blobs:
return self._cached_blobs.get(blob_id)
if self._bag_store.has_bag(bag_id):
bag = self._bag_store.get_bag(bag_id)
for i, this_blob in enumerate(bag):
this_id = obnamlib.make_object_id(bag_id, i)
self._cached_blobs.put(this_id, this_blob)
return bag[index]
return None
def put_blob(self, blob):
if self._bag is None:
self._bag = self._new_bag()
blob_id = self._bag.append(blob)
if self._bag.get_bytes() >= self._max_bag_size:
self.flush()
return blob_id
def _new_bag(self):
bag = obnamlib.Bag()
bag.set_id(self._bag_store.reserve_bag_id())
return bag
def get_well_known_blob(self, well_known_name):
if self._bag_store.has_bag(well_known_name):
bag = self._bag_store.get_bag(well_known_name)
if len(bag) > 0:
return bag[0]
return None
def put_well_known_blob(self, well_known_name, blob):
bag = obnamlib.Bag()
bag.set_id(well_known_name)
bag.append(blob)
self._bag_store.put_bag(bag)
def flush(self):
if self._bag is not None:
self._bag_store.put_bag(self._bag)
self._bag = None
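# Usage sketch for BlobStore (the bag store instance is an assumption;
# obnamlib provides concrete bag store implementations elsewhere):
#   store = BlobStore()
#   store.set_bag_store(bag_store)
#   store.set_max_bag_size(2 ** 20)
#   blob_id = store.put_blob('some data')
#   store.flush()
#   assert store.get_blob(blob_id) == 'some data'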
class BlobCache(object):
def __init__(self):
self._max_bytes = None
self._clear()
def _clear(self):
self._cache = {}
self._cache_size = 0
def set_max_bytes(self, max_bytes):
self._max_bytes = max_bytes
def put(self, blob_id, blob):
if self._cache_size + len(blob) > self._max_bytes: # pragma: no cover
self._clear()
self._cache[blob_id] = blob
self._cache_size += len(blob)
def get(self, blob_id):
return self._cache[blob_id]
def __contains__(self, blob_id):
return blob_id in self._cache
| gpl-3.0 |
ezotrank/mysql-connector-python | python3/mysql/connector/locales/eng/client_error.py | 4 | 5943 | # -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This file was auto-generated.
_GENERATED_ON = '2013-10-04'
_MYSQL_VERSION = (5, 7, 2)
# Start MySQL Error messages
CR_UNKNOWN_ERROR = "Unknown MySQL error"
CR_SOCKET_CREATE_ERROR = "Can't create UNIX socket (%s)"
CR_CONNECTION_ERROR = "Can't connect to local MySQL server through socket '%-.100s' (%s)"
CR_CONN_HOST_ERROR = "Can't connect to MySQL server on '%-.100s' (%s)"
CR_IPSOCK_ERROR = "Can't create TCP/IP socket (%s)"
CR_UNKNOWN_HOST = "Unknown MySQL server host '%-.100s' (%s)"
CR_SERVER_GONE_ERROR = "MySQL server has gone away"
CR_VERSION_ERROR = "Protocol mismatch; server version = %s, client version = %s"
CR_OUT_OF_MEMORY = "MySQL client ran out of memory"
CR_WRONG_HOST_INFO = "Wrong host info"
CR_LOCALHOST_CONNECTION = "Localhost via UNIX socket"
CR_TCP_CONNECTION = "%-.100s via TCP/IP"
CR_SERVER_HANDSHAKE_ERR = "Error in server handshake"
CR_SERVER_LOST = "Lost connection to MySQL server during query"
CR_COMMANDS_OUT_OF_SYNC = "Commands out of sync; you can't run this command now"
CR_NAMEDPIPE_CONNECTION = "Named pipe: %-.32s"
CR_NAMEDPIPEWAIT_ERROR = "Can't wait for named pipe to host: %-.64s pipe: %-.32s (%s)"
CR_NAMEDPIPEOPEN_ERROR = "Can't open named pipe to host: %-.64s pipe: %-.32s (%s)"
CR_NAMEDPIPESETSTATE_ERROR = "Can't set state of named pipe to host: %-.64s pipe: %-.32s (%s)"
CR_CANT_READ_CHARSET = "Can't initialize character set %-.32s (path: %-.100s)"
CR_NET_PACKET_TOO_LARGE = "Got packet bigger than 'max_allowed_packet' bytes"
CR_EMBEDDED_CONNECTION = "Embedded server"
CR_PROBE_SLAVE_STATUS = "Error on SHOW SLAVE STATUS:"
CR_PROBE_SLAVE_HOSTS = "Error on SHOW SLAVE HOSTS:"
CR_PROBE_SLAVE_CONNECT = "Error connecting to slave:"
CR_PROBE_MASTER_CONNECT = "Error connecting to master:"
CR_SSL_CONNECTION_ERROR = "SSL connection error: %-.100s"
CR_MALFORMED_PACKET = "Malformed packet"
CR_WRONG_LICENSE = "This client library is licensed only for use with MySQL servers having '%s' license"
CR_NULL_POINTER = "Invalid use of null pointer"
CR_NO_PREPARE_STMT = "Statement not prepared"
CR_PARAMS_NOT_BOUND = "No data supplied for parameters in prepared statement"
CR_DATA_TRUNCATED = "Data truncated"
CR_NO_PARAMETERS_EXISTS = "No parameters exist in the statement"
CR_INVALID_PARAMETER_NO = "Invalid parameter number"
CR_INVALID_BUFFER_USE = "Can't send long data for non-string/non-binary data types (parameter: %s)"
CR_UNSUPPORTED_PARAM_TYPE = "Using unsupported buffer type: %s (parameter: %s)"
CR_SHARED_MEMORY_CONNECTION = "Shared memory: %-.100s"
CR_SHARED_MEMORY_CONNECT_REQUEST_ERROR = "Can't open shared memory; client could not create request event (%s)"
CR_SHARED_MEMORY_CONNECT_ANSWER_ERROR = "Can't open shared memory; no answer event received from server (%s)"
CR_SHARED_MEMORY_CONNECT_FILE_MAP_ERROR = "Can't open shared memory; server could not allocate file mapping (%s)"
CR_SHARED_MEMORY_CONNECT_MAP_ERROR = "Can't open shared memory; server could not get pointer to file mapping (%s)"
CR_SHARED_MEMORY_FILE_MAP_ERROR = "Can't open shared memory; client could not allocate file mapping (%s)"
CR_SHARED_MEMORY_MAP_ERROR = "Can't open shared memory; client could not get pointer to file mapping (%s)"
CR_SHARED_MEMORY_EVENT_ERROR = "Can't open shared memory; client could not create %s event (%s)"
CR_SHARED_MEMORY_CONNECT_ABANDONED_ERROR = "Can't open shared memory; no answer from server (%s)"
CR_SHARED_MEMORY_CONNECT_SET_ERROR = "Can't open shared memory; cannot send request event to server (%s)"
CR_CONN_UNKNOW_PROTOCOL = "Wrong or unknown protocol"
CR_INVALID_CONN_HANDLE = "Invalid connection handle"
CR_SECURE_AUTH = "Connection using old (pre-4.1.1) authentication protocol refused (client option 'secure_auth' enabled)"
CR_FETCH_CANCELED = "Row retrieval was canceled by mysql_stmt_close() call"
CR_NO_DATA = "Attempt to read column without prior row fetch"
CR_NO_STMT_METADATA = "Prepared statement contains no metadata"
CR_NO_RESULT_SET = "Attempt to read a row while there is no result set associated with the statement"
CR_NOT_IMPLEMENTED = "This feature is not implemented yet"
CR_SERVER_LOST_EXTENDED = "Lost connection to MySQL server at '%s', system error: %s"
CR_STMT_CLOSED = "Statement closed indirectly because of a preceding %s() call"
CR_NEW_STMT_METADATA = "The number of columns in the result set differs from the number of bound buffers. You must reset the statement, rebind the result set columns, and execute the statement again"
CR_ALREADY_CONNECTED = "This handle is already connected. Use a separate handle for each connection."
CR_AUTH_PLUGIN_CANNOT_LOAD = "Authentication plugin '%s' cannot be loaded: %s"
CR_DUPLICATE_CONNECTION_ATTR = "There is an attribute with the same name already"
CR_AUTH_PLUGIN_ERR = "Authentication plugin '%s' reported error: %s"
# End MySQL Error messages
| gpl-2.0 |
google-code-export/jaikuengine | common/templatetags/test/avatar.py | 35 | 1613 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import settings
from common import models
from common.templatetags import avatar
from common.test import base
class AvatarTest(base.FixturesTestCase):
def setUp(self):
self.popular = models.Actor(nick="[email protected]")
def tearDown(self):
settings.DEBUG = True
def test_avatar_url(self):
self.assertEquals("http://localhost:8080/image/avatar_default_u.jpg",
avatar.avatar_url(self.popular, "u"))
def test_avatar(self):
expected = ('<img src="http://localhost:8080/image/avatar_default_t.jpg"'
' class="photo" alt="popular" width="50" height="50" />')
self.assertEquals(expected, avatar.avatar(self.popular, "t"))
@staticmethod
def _raise_exception():
raise Exception()
def test_safe_avatar_debug(self):
f = avatar.safe_avatar(AvatarTest._raise_exception)
self.assertEquals("FAIL", f())
def test_safe_avatar_non_debug(self):
f = avatar.safe_avatar(AvatarTest._raise_exception)
settings.DEBUG = False
self.assertEquals("", f())
| apache-2.0 |