repo_name (stringlengths 5–100) | path (stringlengths 4–375) | copies (stringclasses 991 values) | size (stringlengths 4–7) | content (stringlengths 666–1M) | license (stringclasses 15 values)
---|---|---|---|---|---
Peddle/hue | apps/sqoop/src/sqoop/urls.py | 33 | 2308 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, url
urlpatterns = patterns('sqoop.views',
url(r'^$', 'app', name='index')
)
urlpatterns += patterns('sqoop.api',
url(r'^api/autocomplete/databases/?$', 'autocomplete', name='autocomplete_databases'),
url(r'^api/autocomplete/databases/(?P<database>.+)/tables/?$', 'autocomplete', name='autocomplete_tables'),
url(r'^api/autocomplete/databases/(?P<database>.+)/tables/(?P<table>.+)/columns/?$', 'autocomplete', name='autocomplete_fields'),
url(r'^api/driver/?$', 'driver', name='driver'),
url(r'^api/connectors/?$', 'connectors', name='connectors'),
url(r'^api/connectors/(?P<connector_id>\d+)/?$', 'connector', name='connector'),
url(r'^api/links/?$', 'links', name='links'),
url(r'^api/links/(?P<link_id>\d+)/?$', 'link', name='link'),
url(r'^api/links/(?P<link_id>\d+)/clone/?$', 'link_clone', name='link_clone'),
url(r'^api/links/(?P<link_id>\d+)/delete/?$', 'link_delete', name='link_delete'),
url(r'^api/jobs/?$', 'jobs', name='jobs'),
url(r'^api/jobs/(?P<job_id>\d+)/?$', 'job', name='job'),
url(r'^api/jobs/(?P<job_id>\d+)/clone/?$', 'job_clone', name='job_clone'),
url(r'^api/jobs/(?P<job_id>\d+)/delete/?$', 'job_delete', name='job_delete'),
url(r'^api/jobs/(?P<job_id>\d+)/start/?$', 'job_start', name='job_start'),
url(r'^api/jobs/(?P<job_id>\d+)/stop/?$', 'job_stop', name='job_stop'),
url(r'^api/jobs/(?P<job_id>\d+)/status/?$', 'job_status', name='job_status'),
url(r'^api/submissions/?$', 'submissions', name='submissions')
)
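# Usage sketch (hedged, not part of the original file): with these named
# patterns, callers would typically build paths via Django's reverse(),
# e.g. reverse('job_start', kwargs={'job_id': 1}) -> '.../api/jobs/1/start'
# (the exact prefix depends on where the project mounts this urlconf).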
| apache-2.0 |
fangxingli/hue | desktop/core/ext-py/django-openid-auth-0.5/django_openid_auth/management/commands/openid_cleanup.py | 45 | 1691 | # django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2009-2013 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from django.core.management.base import NoArgsCommand
from django_openid_auth.store import DjangoOpenIDStore
class Command(NoArgsCommand):
help = 'Clean up stale OpenID associations and nonces'
def handle_noargs(self, **options):
store = DjangoOpenIDStore()
store.cleanup()
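# Invocation sketch (assumes a standard Django project with
# django_openid_auth in INSTALLED_APPS):
#   python manage.py openid_cleanup
# handle_noargs() then builds a DjangoOpenIDStore and calls cleanup()
# to purge stale OpenID associations and nonces.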
| apache-2.0 |
Vishluck/sympy | sympy/simplify/tests/test_cse.py | 47 | 12493 | import itertools
from sympy import (Add, Pow, Symbol, exp, sqrt, symbols, sympify, cse,
Matrix, S, cos, sin, Eq, Function, Tuple, RootOf,
IndexedBase, Idx, Piecewise, O)
from sympy.simplify.cse_opts import sub_pre, sub_post
from sympy.functions.special.hyper import meijerg
from sympy.simplify import cse_main, cse_opts
from sympy.utilities.pytest import XFAIL, raises
from sympy.matrices import (eye, SparseMatrix, MutableDenseMatrix,
MutableSparseMatrix, ImmutableDenseMatrix, ImmutableSparseMatrix)
from sympy.core.compatibility import range
w, x, y, z = symbols('w,x,y,z')
x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12 = symbols('x:13')
def test_numbered_symbols():
ns = cse_main.numbered_symbols(prefix='y')
assert list(itertools.islice(
ns, 0, 10)) == [Symbol('y%s' % i) for i in range(0, 10)]
ns = cse_main.numbered_symbols(prefix='y')
assert list(itertools.islice(
ns, 10, 20)) == [Symbol('y%s' % i) for i in range(10, 20)]
ns = cse_main.numbered_symbols()
assert list(itertools.islice(
ns, 0, 10)) == [Symbol('x%s' % i) for i in range(0, 10)]
# Dummy "optimization" functions for testing.
def opt1(expr):
return expr + y
def opt2(expr):
return expr*z
def test_preprocess_for_cse():
assert cse_main.preprocess_for_cse(x, [(opt1, None)]) == x + y
assert cse_main.preprocess_for_cse(x, [(None, opt1)]) == x
assert cse_main.preprocess_for_cse(x, [(None, None)]) == x
assert cse_main.preprocess_for_cse(x, [(opt1, opt2)]) == x + y
assert cse_main.preprocess_for_cse(
x, [(opt1, None), (opt2, None)]) == (x + y)*z
def test_postprocess_for_cse():
assert cse_main.postprocess_for_cse(x, [(opt1, None)]) == x
assert cse_main.postprocess_for_cse(x, [(None, opt1)]) == x + y
assert cse_main.postprocess_for_cse(x, [(None, None)]) == x
assert cse_main.postprocess_for_cse(x, [(opt1, opt2)]) == x*z
# Note the reverse order of application.
assert cse_main.postprocess_for_cse(
x, [(None, opt1), (None, opt2)]) == x*z + y
def test_cse_single():
# Simple substitution.
e = Add(Pow(x + y, 2), sqrt(x + y))
substs, reduced = cse([e])
assert substs == [(x0, x + y)]
assert reduced == [sqrt(x0) + x0**2]
def test_cse_single2():
# Simple substitution, test for being able to pass the expression directly
e = Add(Pow(x + y, 2), sqrt(x + y))
substs, reduced = cse(e)
assert substs == [(x0, x + y)]
assert reduced == [sqrt(x0) + x0**2]
substs, reduced = cse(Matrix([[1]]))
assert isinstance(reduced[0], Matrix)
def test_cse_not_possible():
# No substitution possible.
e = Add(x, y)
substs, reduced = cse([e])
assert substs == []
assert reduced == [x + y]
# issue 6329
eq = (meijerg((1, 2), (y, 4), (5,), [], x) +
meijerg((1, 3), (y, 4), (5,), [], x))
assert cse(eq) == ([], [eq])
def test_nested_substitution():
# Substitution within a substitution.
e = Add(Pow(w*x + y, 2), sqrt(w*x + y))
substs, reduced = cse([e])
assert substs == [(x0, w*x + y)]
assert reduced == [sqrt(x0) + x0**2]
def test_subtraction_opt():
# Make sure subtraction is optimized.
e = (x - y)*(z - y) + exp((x - y)*(z - y))
substs, reduced = cse(
[e], optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)])
assert substs == [(x0, (x - y)*(y - z))]
assert reduced == [-x0 + exp(-x0)]
e = -(x - y)*(z - y) + exp(-(x - y)*(z - y))
substs, reduced = cse(
[e], optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)])
assert substs == [(x0, (x - y)*(y - z))]
assert reduced == [x0 + exp(x0)]
# issue 4077
n = -1 + 1/x
e = n/x/(-n)**2 - 1/n/x
assert cse(e, optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)]) == \
([], [0])
def test_multiple_expressions():
e1 = (x + y)*z
e2 = (x + y)*w
substs, reduced = cse([e1, e2])
assert substs == [(x0, x + y)]
assert reduced == [x0*z, x0*w]
l = [w*x*y + z, w*y]
substs, reduced = cse(l)
rsubsts, _ = cse(reversed(l))
assert substs == rsubsts
assert reduced == [z + x*x0, x0]
l = [w*x*y, w*x*y + z, w*y]
substs, reduced = cse(l)
rsubsts, _ = cse(reversed(l))
assert substs == rsubsts
assert reduced == [x1, x1 + z, x0]
l = [(x - z)*(y - z), x - z, y - z]
substs, reduced = cse(l)
rsubsts, _ = cse(reversed(l))
assert substs == [(x0, -z), (x1, x + x0), (x2, x0 + y)]
assert rsubsts == [(x0, -z), (x1, x0 + y), (x2, x + x0)]
assert reduced == [x1*x2, x1, x2]
l = [w*y + w + x + y + z, w*x*y]
assert cse(l) == ([(x0, w*y)], [w + x + x0 + y + z, x*x0])
assert cse([x + y, x + y + z]) == ([(x0, x + y)], [x0, z + x0])
assert cse([x + y, x + z]) == ([], [x + y, x + z])
assert cse([x*y, z + x*y, x*y*z + 3]) == \
([(x0, x*y)], [x0, z + x0, 3 + x0*z])
@XFAIL # CSE of non-commutative Mul terms is disabled
def test_non_commutative_cse():
A, B, C = symbols('A B C', commutative=False)
l = [A*B*C, A*C]
assert cse(l) == ([], l)
l = [A*B*C, A*B]
assert cse(l) == ([(x0, A*B)], [x0*C, x0])
# Test if CSE of non-commutative Mul terms is disabled
def test_bypass_non_commutatives():
A, B, C = symbols('A B C', commutative=False)
l = [A*B*C, A*C]
assert cse(l) == ([], l)
l = [A*B*C, A*B]
assert cse(l) == ([], l)
l = [B*C, A*B*C]
assert cse(l) == ([], l)
@XFAIL # CSE fails when replacing non-commutative sub-expressions
def test_non_commutative_order():
A, B, C = symbols('A B C', commutative=False)
x0 = symbols('x0', commutative=False)
l = [B+C, A*(B+C)]
assert cse(l) == ([(x0, B+C)], [x0, A*x0])
@XFAIL
def test_powers():
assert cse(x*y**2 + x*y) == ([(x0, x*y)], [x0*y + x0])
def test_issue_4498():
assert cse(w/(x - y) + z/(y - x), optimizations='basic') == \
([], [(w - z)/(x - y)])
def test_issue_4020():
assert cse(x**5 + x**4 + x**3 + x**2, optimizations='basic') \
== ([(x0, x**2)], [x0*(x**3 + x + x0 + 1)])
def test_issue_4203():
assert cse(sin(x**x)/x**x) == ([(x0, x**x)], [sin(x0)/x0])
def test_issue_6263():
e = Eq(x*(-x + 1) + x*(x - 1), 0)
assert cse(e, optimizations='basic') == ([], [True])
def test_dont_cse_tuples():
from sympy import Subs
f = Function("f")
g = Function("g")
name_val, (expr,) = cse(
Subs(f(x, y), (x, y), (0, 1))
+ Subs(g(x, y), (x, y), (0, 1)))
assert name_val == []
assert expr == (Subs(f(x, y), (x, y), (0, 1))
+ Subs(g(x, y), (x, y), (0, 1)))
name_val, (expr,) = cse(
Subs(f(x, y), (x, y), (0, x + y))
+ Subs(g(x, y), (x, y), (0, x + y)))
assert name_val == [(x0, x + y)]
assert expr == Subs(f(x, y), (x, y), (0, x0)) + \
Subs(g(x, y), (x, y), (0, x0))
def test_pow_invpow():
assert cse(1/x**2 + x**2) == \
([(x0, x**2)], [x0 + 1/x0])
assert cse(x**2 + (1 + 1/x**2)/x**2) == \
([(x0, x**2), (x1, 1/x0)], [x0 + x1*(x1 + 1)])
assert cse(1/x**2 + (1 + 1/x**2)*x**2) == \
([(x0, x**2), (x1, 1/x0)], [x0*(x1 + 1) + x1])
assert cse(cos(1/x**2) + sin(1/x**2)) == \
([(x0, x**(-2))], [sin(x0) + cos(x0)])
assert cse(cos(x**2) + sin(x**2)) == \
([(x0, x**2)], [sin(x0) + cos(x0)])
assert cse(y/(2 + x**2) + z/x**2/y) == \
([(x0, x**2)], [y/(x0 + 2) + z/(x0*y)])
assert cse(exp(x**2) + x**2*cos(1/x**2)) == \
([(x0, x**2)], [x0*cos(1/x0) + exp(x0)])
assert cse((1 + 1/x**2)/x**2) == \
([(x0, x**(-2))], [x0*(x0 + 1)])
assert cse(x**(2*y) + x**(-2*y)) == \
([(x0, x**(2*y))], [x0 + 1/x0])
def test_postprocess():
eq = (x + 1 + exp((x + 1)/(y + 1)) + cos(y + 1))
assert cse([eq, Eq(x, z + 1), z - 2, (z + 1)*(x + 1)],
postprocess=cse_main.cse_separate) == \
[[(x1, y + 1), (x2, z + 1), (x, x2), (x0, x + 1)],
[x0 + exp(x0/x1) + cos(x1), z - 2, x0*x2]]
def test_issue_4499():
# previously, this gave 16 constants
from sympy.abc import a, b
B = Function('B')
G = Function('G')
t = Tuple(*
(a, a + S(1)/2, 2*a, b, 2*a - b + 1, (sqrt(z)/2)**(-2*a + 1)*B(2*a -
b, sqrt(z))*B(b - 1, sqrt(z))*G(b)*G(2*a - b + 1),
sqrt(z)*(sqrt(z)/2)**(-2*a + 1)*B(b, sqrt(z))*B(2*a - b,
sqrt(z))*G(b)*G(2*a - b + 1), sqrt(z)*(sqrt(z)/2)**(-2*a + 1)*B(b - 1,
sqrt(z))*B(2*a - b + 1, sqrt(z))*G(b)*G(2*a - b + 1),
(sqrt(z)/2)**(-2*a + 1)*B(b, sqrt(z))*B(2*a - b + 1,
sqrt(z))*G(b)*G(2*a - b + 1), 1, 0, S(1)/2, z/2, -b + 1, -2*a + b,
-2*a))
c = cse(t)
ans = (
[(x0, 2*a), (x1, -b), (x2, x1 + 1), (x3, x0 + x2), (x4, sqrt(z)), (x5,
B(x0 + x1, x4)), (x6, G(b)), (x7, G(x3)), (x8, -x0), (x9,
(x4/2)**(x8 + 1)), (x10, x6*x7*x9*B(b - 1, x4)), (x11, x6*x7*x9*B(b,
x4)), (x12, B(x3, x4))], [(a, a + S(1)/2, x0, b, x3, x10*x5,
x11*x4*x5, x10*x12*x4, x11*x12, 1, 0, S(1)/2, z/2, x2, b + x8, x8)])
assert ans == c
def test_issue_6169():
r = RootOf(x**6 - 4*x**5 - 2, 1)
assert cse(r) == ([], [r])
# and a check that the right thing is done with the new
# mechanism
assert sub_post(sub_pre((-x - y)*z - x - y)) == -z*(x + y) - x - y
def test_cse_Indexed():
len_y = 5
y = IndexedBase('y', shape=(len_y,))
x = IndexedBase('x', shape=(len_y,))
Dy = IndexedBase('Dy', shape=(len_y-1,))
i = Idx('i', len_y-1)
expr1 = (y[i+1]-y[i])/(x[i+1]-x[i])
expr2 = 1/(x[i+1]-x[i])
replacements, reduced_exprs = cse([expr1, expr2])
assert len(replacements) > 0
@XFAIL
def test_cse_MatrixSymbol():
from sympy import MatrixSymbol
A = MatrixSymbol('A', 3, 3)
y = MatrixSymbol('y', 3, 1)
expr1 = (A.T*A).I * A * y
expr2 = (A.T*A) * A * y
replacements, reduced_exprs = cse([expr1, expr2])
assert len(replacements) > 0
def test_Piecewise():
f = Piecewise((-z + x*y, Eq(y, 0)), (-z - x*y, True))
ans = cse(f)
actual_ans = ([(x0, -z), (x1, x*y)], [Piecewise((x0+x1, Eq(y, 0)), (x0 - x1, True))])
assert ans == actual_ans
def test_ignore_order_terms():
eq = exp(x).series(x,0,3) + sin(y+x**3) - 1
assert cse(eq) == ([], [sin(x**3 + y) + x + x**2/2 + O(x**3)])
def test_name_conflict():
z1 = x0 + y
z2 = x2 + x3
l = [cos(z1) + z1, cos(z2) + z2, x0 + x2]
substs, reduced = cse(l)
assert [e.subs(reversed(substs)) for e in reduced] == l
def test_name_conflict_cust_symbols():
z1 = x0 + y
z2 = x2 + x3
l = [cos(z1) + z1, cos(z2) + z2, x0 + x2]
substs, reduced = cse(l, symbols("x:10"))
assert [e.subs(reversed(substs)) for e in reduced] == l
def test_symbols_exhausted_error():
l = cos(x+y)+x+y+cos(w+y)+sin(w+y)
sym = [x, y, z]
with raises(ValueError) as excinfo:
cse(l, symbols=sym)
def test_issue_7840():
# daveknippers' example
C393 = sympify( \
'Piecewise((C391 - 1.65, C390 < 0.5), (Piecewise((C391 - 1.65, \
C391 > 2.35), (C392, True)), True))'
)
C391 = sympify( \
'Piecewise((2.05*C390**(-1.03), C390 < 0.5), (2.5*C390**(-0.625), True))'
)
C393 = C393.subs('C391',C391)
# simple substitution
sub = {}
sub['C390'] = 0.703451854
sub['C392'] = 1.01417794
ss_answer = C393.subs(sub)
# cse
substitutions,new_eqn = cse(C393)
for pair in substitutions:
sub[pair[0].name] = pair[1].subs(sub)
cse_answer = new_eqn[0].subs(sub)
# both methods should be the same
assert ss_answer == cse_answer
# GitRay's example
expr = sympify(
"Piecewise((Symbol('ON'), Equality(Symbol('mode'), Symbol('ON'))), \
(Piecewise((Piecewise((Symbol('OFF'), StrictLessThan(Symbol('x'), \
Symbol('threshold'))), (Symbol('ON'), S.true)), Equality(Symbol('mode'), \
Symbol('AUTO'))), (Symbol('OFF'), S.true)), S.true))"
)
substitutions, new_eqn = cse(expr)
# this Piecewise should be exactly the same
assert new_eqn[0] == expr
# there should not be any replacements
assert len(substitutions) < 1
def test_issue_8891():
for cls in (MutableDenseMatrix, MutableSparseMatrix,
ImmutableDenseMatrix, ImmutableSparseMatrix):
m = cls(2, 2, [x + y, 0, 0, 0])
res = cse([x + y, m])
ans = ([(x0, x + y)], [x0, cls([[x0, 0], [0, 0]])])
assert res == ans
assert isinstance(res[1][-1], cls)
| bsd-3-clause |
ritchyteam/odoo | openerp/addons/test_workflow/tests/test_workflow.py | 392 | 6232 | # -*- coding: utf-8 -*-
import openerp
from openerp import SUPERUSER_ID
from openerp.tests import common
class test_workflows(common.TransactionCase):
def check_activities(self, model_name, i, names):
""" Check that the record i has workitems in the given activity names.
"""
instance = self.registry('workflow.instance')
workitem = self.registry('workflow.workitem')
# Given the workflow instance associated to the record ...
instance_id = instance.search(
self.cr, SUPERUSER_ID,
[('res_type', '=', model_name), ('res_id', '=', i)])
self.assertTrue(instance_id, 'A workflow instance is expected.')
# ... get all its workitems ...
workitem_ids = workitem.search(
self.cr, SUPERUSER_ID,
[('inst_id', '=', instance_id[0])])
self.assertTrue(
workitem_ids,
'The workflow instance should have workitems.')
# ... and check the activities they are in against the provided names.
workitem_records = workitem.browse(
self.cr, SUPERUSER_ID, workitem_ids)
self.assertEqual(
sorted([item.act_id.name for item in workitem_records]),
sorted(names))
def check_value(self, model_name, i, value):
""" Check that the record i has the given value.
"""
model = self.registry(model_name)
record = model.read(self.cr, SUPERUSER_ID, [i], ['value'])[0]
self.assertEqual(record['value'], value)
def test_workflow(self):
model = self.registry('test.workflow.model')
trigger = self.registry('test.workflow.trigger')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
# a -> b is just a signal.
model.signal_workflow(self.cr, SUPERUSER_ID, [i], 'a-b')
self.check_activities(model._name, i, ['b'])
# b -> c is a trigger (which is False),
# so we remain in the b activity.
model.trigger(self.cr, SUPERUSER_ID)
self.check_activities(model._name, i, ['b'])
# b -> c is a trigger (which is set to True).
# so we go in c when the trigger is called.
trigger.write(self.cr, SUPERUSER_ID, [1], {'value': True})
model.trigger(self.cr, SUPERUSER_ID)
self.check_activities(model._name, i, ['c'])
self.assertEqual(
True,
True)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_a(self):
model = self.registry('test.workflow.model.a')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 0)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_b(self):
model = self.registry('test.workflow.model.b')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 1)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_c(self):
model = self.registry('test.workflow.model.c')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 0)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_d(self):
model = self.registry('test.workflow.model.d')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 1)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_e(self):
model = self.registry('test.workflow.model.e')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['b'])
self.check_value(model._name, i, 2)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_f(self):
model = self.registry('test.workflow.model.f')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 1)
model.signal_workflow(self.cr, SUPERUSER_ID, [i], 'a-b')
self.check_activities(model._name, i, ['b'])
self.check_value(model._name, i, 2)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_g(self):
model = self.registry('test.workflow.model.g')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 1)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_h(self):
model = self.registry('test.workflow.model.h')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['b', 'c'])
self.check_value(model._name, i, 2)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_i(self):
model = self.registry('test.workflow.model.i')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['b'])
self.check_value(model._name, i, 2)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_j(self):
model = self.registry('test.workflow.model.j')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 1)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_k(self):
model = self.registry('test.workflow.model.k')
i = model.create(self.cr, SUPERUSER_ID, {})
# Non-deterministic: can be b or c
# self.check_activities(model._name, i, ['b'])
# self.check_activities(model._name, i, ['c'])
self.check_value(model._name, i, 2)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_l(self):
model = self.registry('test.workflow.model.l')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['c', 'c', 'd'])
self.check_value(model._name, i, 3)
model.unlink(self.cr, SUPERUSER_ID, [i])
| agpl-3.0 |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/idlelib/rpc.py | 9 | 20150 | """RPC Implementation, originally written for the Python Idle IDE
For security reasons, GvR requested that Idle's Python execution server process
connect to the Idle process, which listens for the connection. Since Idle has
only one client per server, this was not a limitation.
+---------------------------------+ +-------------+
| SocketServer.BaseRequestHandler | | SocketIO |
+---------------------------------+ +-------------+
^ | register() |
| | unregister()|
| +-------------+
| ^ ^
| | |
| + -------------------+ |
| | |
+-------------------------+ +-----------------+
| RPCHandler | | RPCClient |
| [attribute of RPCServer]| | |
+-------------------------+ +-----------------+
The RPCServer handler class is expected to provide register/unregister methods.
RPCHandler inherits the mix-in class SocketIO, which provides these methods.
See the Idle run.main() docstring for further information on how this was
accomplished in Idle.
"""
import sys
import os
import socket
import select
import SocketServer
import struct
import cPickle as pickle
import threading
import Queue
import traceback
import copy_reg
import types
import marshal
def unpickle_code(ms):
co = marshal.loads(ms)
assert isinstance(co, types.CodeType)
return co
def pickle_code(co):
assert isinstance(co, types.CodeType)
ms = marshal.dumps(co)
return unpickle_code, (ms,)
# XXX KBK 24Aug02 function pickling capability not used in Idle
# def unpickle_function(ms):
# return ms
# def pickle_function(fn):
# assert isinstance(fn, type.FunctionType)
# return repr(fn)
copy_reg.pickle(types.CodeType, pickle_code, unpickle_code)
# copy_reg.pickle(types.FunctionType, pickle_function, unpickle_function)
BUFSIZE = 8*1024
LOCALHOST = '127.0.0.1'
class RPCServer(SocketServer.TCPServer):
def __init__(self, addr, handlerclass=None):
if handlerclass is None:
handlerclass = RPCHandler
SocketServer.TCPServer.__init__(self, addr, handlerclass)
def server_bind(self):
"Override TCPServer method, no bind() phase for connecting entity"
pass
def server_activate(self):
"""Override TCPServer method, connect() instead of listen()
Due to the reversed connection, self.server_address is actually the
address of the Idle Client to which we are connecting.
"""
self.socket.connect(self.server_address)
def get_request(self):
"Override TCPServer method, return already connected socket"
return self.socket, self.server_address
def handle_error(self, request, client_address):
"""Override TCPServer method
Error message goes to __stderr__. No error message if exiting
normally or socket raised EOF. Other exceptions not handled in
server code will cause os._exit.
"""
try:
raise
except SystemExit:
raise
except:
erf = sys.__stderr__
print>>erf, '\n' + '-'*40
print>>erf, 'Unhandled server exception!'
print>>erf, 'Thread: %s' % threading.currentThread().getName()
print>>erf, 'Client Address: ', client_address
print>>erf, 'Request: ', repr(request)
traceback.print_exc(file=erf)
print>>erf, '\n*** Unrecoverable, server exiting!'
print>>erf, '-'*40
os._exit(0)
#----------------- end class RPCServer --------------------
objecttable = {}
request_queue = Queue.Queue(0)
response_queue = Queue.Queue(0)
class SocketIO(object):
nextseq = 0
def __init__(self, sock, objtable=None, debugging=None):
self.sockthread = threading.currentThread()
if debugging is not None:
self.debugging = debugging
self.sock = sock
if objtable is None:
objtable = objecttable
self.objtable = objtable
self.responses = {}
self.cvars = {}
def close(self):
sock = self.sock
self.sock = None
if sock is not None:
sock.close()
def exithook(self):
"override for specific exit action"
os._exit(0)
def debug(self, *args):
if not self.debugging:
return
s = self.location + " " + str(threading.currentThread().getName())
for a in args:
s = s + " " + str(a)
print>>sys.__stderr__, s
def register(self, oid, object):
self.objtable[oid] = object
def unregister(self, oid):
try:
del self.objtable[oid]
except KeyError:
pass
def localcall(self, seq, request):
self.debug("localcall:", request)
try:
how, (oid, methodname, args, kwargs) = request
except TypeError:
return ("ERROR", "Bad request format")
if oid not in self.objtable:
return ("ERROR", "Unknown object id: %r" % (oid,))
obj = self.objtable[oid]
if methodname == "__methods__":
methods = {}
_getmethods(obj, methods)
return ("OK", methods)
if methodname == "__attributes__":
attributes = {}
_getattributes(obj, attributes)
return ("OK", attributes)
if not hasattr(obj, methodname):
return ("ERROR", "Unsupported method name: %r" % (methodname,))
method = getattr(obj, methodname)
try:
if how == 'CALL':
ret = method(*args, **kwargs)
if isinstance(ret, RemoteObject):
ret = remoteref(ret)
return ("OK", ret)
elif how == 'QUEUE':
request_queue.put((seq, (method, args, kwargs)))
return("QUEUED", None)
else:
return ("ERROR", "Unsupported message type: %s" % how)
except SystemExit:
raise
except socket.error:
raise
except:
msg = "*** Internal Error: rpc.py:SocketIO.localcall()\n\n"\
" Object: %s \n Method: %s \n Args: %s\n"
print>>sys.__stderr__, msg % (oid, method, args)
traceback.print_exc(file=sys.__stderr__)
return ("EXCEPTION", None)
def remotecall(self, oid, methodname, args, kwargs):
self.debug("remotecall:asynccall: ", oid, methodname)
seq = self.asynccall(oid, methodname, args, kwargs)
return self.asyncreturn(seq)
def remotequeue(self, oid, methodname, args, kwargs):
self.debug("remotequeue:asyncqueue: ", oid, methodname)
seq = self.asyncqueue(oid, methodname, args, kwargs)
return self.asyncreturn(seq)
def asynccall(self, oid, methodname, args, kwargs):
request = ("CALL", (oid, methodname, args, kwargs))
seq = self.newseq()
if threading.currentThread() != self.sockthread:
cvar = threading.Condition()
self.cvars[seq] = cvar
self.debug(("asynccall:%d:" % seq), oid, methodname, args, kwargs)
self.putmessage((seq, request))
return seq
def asyncqueue(self, oid, methodname, args, kwargs):
request = ("QUEUE", (oid, methodname, args, kwargs))
seq = self.newseq()
if threading.currentThread() != self.sockthread:
cvar = threading.Condition()
self.cvars[seq] = cvar
self.debug(("asyncqueue:%d:" % seq), oid, methodname, args, kwargs)
self.putmessage((seq, request))
return seq
def asyncreturn(self, seq):
self.debug("asyncreturn:%d:call getresponse(): " % seq)
response = self.getresponse(seq, wait=0.05)
self.debug(("asyncreturn:%d:response: " % seq), response)
return self.decoderesponse(response)
def decoderesponse(self, response):
how, what = response
if how == "OK":
return what
if how == "QUEUED":
return None
if how == "EXCEPTION":
self.debug("decoderesponse: EXCEPTION")
return None
if how == "EOF":
self.debug("decoderesponse: EOF")
self.decode_interrupthook()
return None
if how == "ERROR":
self.debug("decoderesponse: Internal ERROR:", what)
raise RuntimeError, what
raise SystemError, (how, what)
def decode_interrupthook(self):
""
raise EOFError
def mainloop(self):
"""Listen on socket until I/O not ready or EOF
pollresponse() will loop looking for seq number None, which
never comes, and exit on EOFError.
"""
try:
self.getresponse(myseq=None, wait=0.05)
except EOFError:
self.debug("mainloop:return")
return
def getresponse(self, myseq, wait):
response = self._getresponse(myseq, wait)
if response is not None:
how, what = response
if how == "OK":
response = how, self._proxify(what)
return response
def _proxify(self, obj):
if isinstance(obj, RemoteProxy):
return RPCProxy(self, obj.oid)
if isinstance(obj, types.ListType):
return map(self._proxify, obj)
# XXX Check for other types -- not currently needed
return obj
def _getresponse(self, myseq, wait):
self.debug("_getresponse:myseq:", myseq)
if threading.currentThread() is self.sockthread:
# this thread does all reading of requests or responses
while 1:
response = self.pollresponse(myseq, wait)
if response is not None:
return response
else:
# wait for notification from socket handling thread
cvar = self.cvars[myseq]
cvar.acquire()
while myseq not in self.responses:
cvar.wait()
response = self.responses[myseq]
self.debug("_getresponse:%s: thread woke up: response: %s" %
(myseq, response))
del self.responses[myseq]
del self.cvars[myseq]
cvar.release()
return response
def newseq(self):
self.nextseq = seq = self.nextseq + 2
return seq
def putmessage(self, message):
self.debug("putmessage:%d:" % message[0])
try:
s = pickle.dumps(message)
except pickle.PicklingError:
print >>sys.__stderr__, "Cannot pickle:", repr(message)
raise
s = struct.pack("<i", len(s)) + s
while len(s) > 0:
try:
r, w, x = select.select([], [self.sock], [])
n = self.sock.send(s[:BUFSIZE])
except (AttributeError, TypeError):
raise IOError, "socket no longer exists"
s = s[n:]
buffer = ""
bufneed = 4
bufstate = 0 # meaning: 0 => reading count; 1 => reading data
def pollpacket(self, wait):
self._stage0()
if len(self.buffer) < self.bufneed:
r, w, x = select.select([self.sock.fileno()], [], [], wait)
if len(r) == 0:
return None
try:
s = self.sock.recv(BUFSIZE)
except socket.error:
raise EOFError
if len(s) == 0:
raise EOFError
self.buffer += s
self._stage0()
return self._stage1()
def _stage0(self):
if self.bufstate == 0 and len(self.buffer) >= 4:
s = self.buffer[:4]
self.buffer = self.buffer[4:]
self.bufneed = struct.unpack("<i", s)[0]
self.bufstate = 1
def _stage1(self):
if self.bufstate == 1 and len(self.buffer) >= self.bufneed:
packet = self.buffer[:self.bufneed]
self.buffer = self.buffer[self.bufneed:]
self.bufneed = 4
self.bufstate = 0
return packet
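# Framing sketch (illustrative payload): a message pickled to b'abc' is
# sent as struct.pack("<i", 3) + b'abc'. _stage0 consumes the 4-byte
# little-endian length prefix; _stage1 returns the 3-byte payload once
# self.buffer holds at least that many bytes.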
def pollmessage(self, wait):
packet = self.pollpacket(wait)
if packet is None:
return None
try:
message = pickle.loads(packet)
except pickle.UnpicklingError:
print >>sys.__stderr__, "-----------------------"
print >>sys.__stderr__, "cannot unpickle packet:", repr(packet)
traceback.print_stack(file=sys.__stderr__)
print >>sys.__stderr__, "-----------------------"
raise
return message
def pollresponse(self, myseq, wait):
"""Handle messages received on the socket.
Some messages received may be asynchronous 'call' or 'queue' requests,
and some may be responses for other threads.
'call' requests are passed to self.localcall() with the expectation of
immediate execution, during which time the socket is not serviced.
'queue' requests are used for tasks (which may block or hang) to be
processed in a different thread. These requests are fed into
request_queue by self.localcall(). Responses to queued requests are
taken from response_queue and sent across the link with the associated
sequence numbers. Messages in the queues are (sequence_number,
request/response) tuples and code using this module removing messages
from the request_queue is responsible for returning the correct
sequence number in the response_queue.
pollresponse() will loop until a response message with the myseq
sequence number is received, and will save other responses in
self.responses and notify the owning thread.
"""
while 1:
# send queued response if there is one available
try:
qmsg = response_queue.get(0)
except Queue.Empty:
pass
else:
seq, response = qmsg
message = (seq, ('OK', response))
self.putmessage(message)
# poll for message on link
try:
message = self.pollmessage(wait)
if message is None: # socket not ready
return None
except EOFError:
self.handle_EOF()
return None
except AttributeError:
return None
seq, resq = message
how = resq[0]
self.debug("pollresponse:%d:myseq:%s" % (seq, myseq))
# process or queue a request
if how in ("CALL", "QUEUE"):
self.debug("pollresponse:%d:localcall:call:" % seq)
response = self.localcall(seq, resq)
self.debug("pollresponse:%d:localcall:response:%s"
% (seq, response))
if how == "CALL":
self.putmessage((seq, response))
elif how == "QUEUE":
# don't acknowledge the 'queue' request!
pass
continue
# return if completed message transaction
elif seq == myseq:
return resq
# must be a response for a different thread:
else:
cv = self.cvars.get(seq, None)
# response involving unknown sequence number is discarded,
# probably intended for prior incarnation of server
if cv is not None:
cv.acquire()
self.responses[seq] = resq
cv.notify()
cv.release()
continue
def handle_EOF(self):
"action taken upon link being closed by peer"
self.EOFhook()
self.debug("handle_EOF")
for key in self.cvars:
cv = self.cvars[key]
cv.acquire()
self.responses[key] = ('EOF', None)
cv.notify()
cv.release()
# call our (possibly overridden) exit function
self.exithook()
def EOFhook(self):
"Classes using rpc client/server can override to augment EOF action"
pass
#----------------- end class SocketIO --------------------
class RemoteObject(object):
# Token mix-in class
pass
def remoteref(obj):
oid = id(obj)
objecttable[oid] = obj
return RemoteProxy(oid)
class RemoteProxy(object):
def __init__(self, oid):
self.oid = oid
class RPCHandler(SocketServer.BaseRequestHandler, SocketIO):
debugging = False
location = "#S" # Server
def __init__(self, sock, addr, svr):
svr.current_handler = self ## cgt xxx
SocketIO.__init__(self, sock)
SocketServer.BaseRequestHandler.__init__(self, sock, addr, svr)
def handle(self):
"handle() method required by SocketServer"
self.mainloop()
def get_remote_proxy(self, oid):
return RPCProxy(self, oid)
class RPCClient(SocketIO):
debugging = False
location = "#C" # Client
nextseq = 1 # Requests coming from the client are odd numbered
def __init__(self, address, family=socket.AF_INET, type=socket.SOCK_STREAM):
self.listening_sock = socket.socket(family, type)
self.listening_sock.bind(address)
self.listening_sock.listen(1)
def accept(self):
working_sock, address = self.listening_sock.accept()
if self.debugging:
print>>sys.__stderr__, "****** Connection request from ", address
if address[0] == LOCALHOST:
SocketIO.__init__(self, working_sock)
else:
print>>sys.__stderr__, "** Invalid host: ", address
raise socket.error
def get_remote_proxy(self, oid):
return RPCProxy(self, oid)
class RPCProxy(object):
__methods = None
__attributes = None
def __init__(self, sockio, oid):
self.sockio = sockio
self.oid = oid
def __getattr__(self, name):
if self.__methods is None:
self.__getmethods()
if self.__methods.get(name):
return MethodProxy(self.sockio, self.oid, name)
if self.__attributes is None:
self.__getattributes()
if name in self.__attributes:
value = self.sockio.remotecall(self.oid, '__getattribute__',
(name,), {})
return value
else:
raise AttributeError, name
def __getattributes(self):
self.__attributes = self.sockio.remotecall(self.oid,
"__attributes__", (), {})
def __getmethods(self):
self.__methods = self.sockio.remotecall(self.oid,
"__methods__", (), {})
def _getmethods(obj, methods):
# Helper to get a list of methods from an object
# Adds names to dictionary argument 'methods'
for name in dir(obj):
attr = getattr(obj, name)
if hasattr(attr, '__call__'):
methods[name] = 1
if type(obj) == types.InstanceType:
_getmethods(obj.__class__, methods)
if type(obj) == types.ClassType:
for super in obj.__bases__:
_getmethods(super, methods)
def _getattributes(obj, attributes):
for name in dir(obj):
attr = getattr(obj, name)
if not hasattr(attr, '__call__'):
attributes[name] = 1
class MethodProxy(object):
def __init__(self, sockio, oid, name):
self.sockio = sockio
self.oid = oid
self.name = name
def __call__(self, *args, **kwargs):
value = self.sockio.remotecall(self.oid, self.name, args, kwargs)
return value
# XXX KBK 09Sep03 We need a proper unit test for this module. Previously
# existing test code was removed at Rev 1.27 (r34098).
| bsd-3-clause |
gemalto/pycryptoki | pycryptoki/cryptoki/ck_defs.py | 1 | 25581 | """
Structure & PKCS11-specific definitions.
"""
from ctypes import CFUNCTYPE, Structure
from pycryptoki.cryptoki.c_defs import *
from pycryptoki.cryptoki.helpers import struct_def
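# Note (hedged assumption about the helper): struct_def(S, fields) is taken
# here to behave like assigning the ctypes field list, roughly equivalent to:
#   class CK_VERSION(Structure):
#       _fields_ = [("major", CK_BYTE), ("minor", CK_BYTE)]
# Declaring the class first and filling _fields_ afterwards also lets
# structures reference each other before their layout is fixed.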
# values for unnamed enumeration
CK_MECHANISM_TYPE = CK_ULONG
CK_MECHANISM_TYPE_PTR = POINTER(CK_MECHANISM_TYPE)
CK_USER_TYPE = CK_ULONG
CK_SESSION_HANDLE = CK_ULONG
CK_SESSION_HANDLE_PTR = POINTER(CK_SESSION_HANDLE)
CK_OBJECT_HANDLE = CK_ULONG
CK_OBJECT_HANDLE_PTR = POINTER(CK_OBJECT_HANDLE)
CK_STATE = CK_ULONG
CK_OBJECT_CLASS = CK_ULONG
CK_OBJECT_CLASS_PTR = POINTER(CK_OBJECT_CLASS)
CK_HW_FEATURE_TYPE = CK_ULONG
CK_KEY_TYPE = CK_ULONG
CK_CERTIFICATE_TYPE = CK_ULONG
CK_ATTRIBUTE_TYPE = CK_ULONG
class CK_MECHANISM(Structure):
pass
class CK_ATTRIBUTE(Structure):
pass
CK_MECHANISM_PTR = POINTER(CK_MECHANISM)
CK_ATTRIBUTE_PTR = POINTER(CK_ATTRIBUTE)
class CK_AES_GCM_PARAMS(Structure):
pass
struct_def(
CK_AES_GCM_PARAMS,
[
("pIv", CK_BYTE_PTR),
("ulIvLen", CK_ULONG),
("ulIvBits", CK_ULONG),
("pAAD", CK_BYTE_PTR),
("ulAADLen", CK_ULONG),
("ulTagBits", CK_ULONG),
],
)
CK_AES_GCM_PARAMS_PTR = POINTER(CK_AES_GCM_PARAMS)
class CK_XOR_BASE_DATA_KDF_PARAMS(Structure):
pass
CK_EC_KDF_TYPE = CK_ULONG
struct_def(
CK_XOR_BASE_DATA_KDF_PARAMS,
[("kdf", CK_EC_KDF_TYPE), ("ulSharedDataLen", CK_ULONG), ("pSharedData", CK_BYTE_PTR)],
)
CK_XOR_BASE_DATA_KDF_PARAMS_PTR = POINTER(CK_XOR_BASE_DATA_KDF_PARAMS)
class CK_AES_XTS_PARAMS(Structure):
pass
struct_def(CK_AES_XTS_PARAMS, [("hTweakKey", CK_OBJECT_HANDLE), ("cb", CK_BYTE * 16)])
CK_AES_XTS_PARAMS_PTR = POINTER(CK_AES_XTS_PARAMS)
CK_EC_DH_PRIMITIVE = CK_ULONG
CK_EC_ENC_SCHEME = CK_ULONG
CK_EC_MAC_SCHEME = CK_ULONG
class CK_ECIES_PARAMS(Structure):
pass
struct_def(
CK_ECIES_PARAMS,
[
("dhPrimitive", CK_EC_DH_PRIMITIVE),
("kdf", CK_EC_KDF_TYPE),
("ulSharedDataLen1", CK_ULONG),
("pSharedData1", CK_BYTE_PTR),
("encScheme", CK_EC_ENC_SCHEME),
("ulEncKeyLenInBits", CK_ULONG),
("macScheme", CK_EC_MAC_SCHEME),
("ulMacKeyLenInBits", CK_ULONG),
("ulMacLenInBits", CK_ULONG),
("ulSharedDataLen2", CK_ULONG),
("pSharedData2", CK_BYTE_PTR),
],
)
CK_ECIES_PARAMS_PTR = POINTER(CK_ECIES_PARAMS)
CK_KDF_PRF_TYPE = CK_ULONG
CK_KDF_PRF_ENCODING_SCHEME = CK_ULONG
class CK_KDF_PRF_PARAMS(Structure):
pass
struct_def(
CK_KDF_PRF_PARAMS,
[
("prfType", CK_KDF_PRF_TYPE),
("pLabel", CK_BYTE_PTR),
("ulLabelLen", CK_ULONG),
("pContext", CK_BYTE_PTR),
("ulContextLen", CK_ULONG),
("ulCounter", CK_ULONG),
("ulEncodingScheme", CK_KDF_PRF_ENCODING_SCHEME),
],
)
CK_PRF_KDF_PARAMS = CK_KDF_PRF_PARAMS
CK_KDF_PRF_PARAMS_PTR = POINTER(CK_PRF_KDF_PARAMS)
class CK_AES_CTR_PARAMS(Structure):
pass
CK_SEED_CTR_PARAMS = CK_AES_CTR_PARAMS
CK_SEED_CTR_PARAMS_PTR = POINTER(CK_SEED_CTR_PARAMS)
CK_ARIA_CTR_PARAMS = CK_AES_CTR_PARAMS
CK_ARIA_CTR_PARAMS_PTR = POINTER(CK_ARIA_CTR_PARAMS)
class CK_DES_CTR_PARAMS(Structure):
pass
struct_def(CK_DES_CTR_PARAMS, [("ulCounterBits", CK_ULONG), ("cb", CK_BYTE * 8)])
CK_DES_CTR_PARAMS_PTR = POINTER(CK_DES_CTR_PARAMS)
CK_AES_GMAC_PARAMS = CK_AES_GCM_PARAMS
CK_AES_GMAC_PARAMS_PTR = POINTER(CK_AES_GMAC_PARAMS)
class HSM_STATS_PARAMS(Structure):
pass
struct_def(
HSM_STATS_PARAMS, [("ulId", CK_ULONG), ("ulHighValue", CK_ULONG), ("ulLowValue", CK_ULONG)]
)
class CA_ROLE_STATE(Structure):
pass
struct_def(
CA_ROLE_STATE,
[
("flags", CK_BYTE),
("loginAttemptsLeft", CK_BYTE),
("primaryAuthMech", CK_BYTE),
("secondaryAuthMech", CK_BYTE),
],
)
class CA_MOFN_GENERATION(Structure):
pass
struct_def(
CA_MOFN_GENERATION,
[("ulWeight", CK_ULONG), ("pVector", CK_BYTE_PTR), ("ulVectorLen", CK_ULONG)],
)
CA_MOFN_GENERATION_PTR = POINTER(CA_MOFN_GENERATION)
class CA_MOFN_ACTIVATION(Structure):
pass
struct_def(CA_MOFN_ACTIVATION, [("pVector", CK_BYTE_PTR), ("ulVectorLen", CK_ULONG)])
CA_MOFN_ACTIVATION_PTR = POINTER(CA_MOFN_ACTIVATION)
class CA_M_OF_N_STATUS(Structure):
pass
struct_def(
CA_M_OF_N_STATUS,
[
("ulID", CK_ULONG),
("ulM", CK_ULONG),
("ulN", CK_ULONG),
("ulSecretSize", CK_ULONG),
("ulFlag", CK_ULONG),
],
)
CA_MOFN_STATUS = CA_M_OF_N_STATUS
CA_MOFN_STATUS_PTR = POINTER(CA_MOFN_STATUS)
CKCA_MODULE_ID = CK_ULONG
CKCA_MODULE_ID_PTR = POINTER(CKCA_MODULE_ID)
class CKCA_MODULE_INFO(Structure):
pass
class CK_VERSION(Structure):
pass
struct_def(CK_VERSION, [("major", CK_BYTE), ("minor", CK_BYTE)])
struct_def(
CKCA_MODULE_INFO,
[
("ulModuleSize", CK_ULONG),
("developerName", CK_CHAR * 32),
("moduleDescription", CK_CHAR * 32),
("moduleVersion", CK_VERSION),
],
)
CKCA_MODULE_INFO_PTR = POINTER(CKCA_MODULE_INFO)
class CK_HA_MEMBER(Structure):
pass
struct_def(CK_HA_MEMBER, [("memberSerial", CK_CHAR * 20), ("memberStatus", CK_RV)])
class CK_HA_STATUS(Structure):
pass
struct_def(
CK_HA_STATUS,
[("groupSerial", CK_CHAR * 20), ("memberList", CK_HA_MEMBER * 32), ("listSize", CK_ULONG)],
)
CK_HA_MEMBER_PTR = POINTER(CK_HA_MEMBER)
CK_HA_STATE_PTR = POINTER(CK_HA_STATUS)
CKA_SIM_AUTH_FORM = CK_ULONG
class CT_Token(Structure):
pass
struct_def(CT_Token, [])
CT_TokenHndle = POINTER(CT_Token)
class CK_AES_CBC_PAD_EXTRACT_PARAMS(Structure):
pass
struct_def(
CK_AES_CBC_PAD_EXTRACT_PARAMS,
[
("ulType", CK_ULONG),
("ulHandle", CK_ULONG),
("ulDeleteAfterExtract", CK_ULONG),
("pBuffer", CK_BYTE_PTR),
("pulBufferLen", CK_ULONG_PTR),
("ulStorage", CK_ULONG),
("pedId", CK_ULONG),
("pbFileName", CK_BYTE_PTR),
("ctxID", CK_ULONG),
],
)
CK_AES_CBC_PAD_EXTRACT_PARAMS_PTR = POINTER(CK_AES_CBC_PAD_EXTRACT_PARAMS)
class CK_AES_CBC_PAD_INSERT_PARAMS(Structure):
pass
struct_def(
CK_AES_CBC_PAD_INSERT_PARAMS,
[
("ulStorageType", CK_ULONG),
("ulContainerState", CK_ULONG),
("pBuffer", CK_BYTE_PTR),
("ulBufferLen", CK_ULONG),
("pulType", CK_ULONG_PTR),
("pulHandle", CK_ULONG_PTR),
("ulStorage", CK_ULONG),
("pedId", CK_ULONG),
("pbFileName", CK_BYTE_PTR),
("ctxID", CK_ULONG),
],
)
CK_AES_CBC_PAD_INSERT_PARAMS_PTR = POINTER(CK_AES_CBC_PAD_INSERT_PARAMS)
class CK_CLUSTER_STATE(Structure):
pass
struct_def(CK_CLUSTER_STATE, [("bMembers", CK_BYTE * 32 * 8), ("ulMemberStatus", CK_ULONG * 8)])
CK_CLUSTER_STATE_PTR = POINTER(CK_CLUSTER_STATE)
class CK_LKM_TOKEN_ID_S(Structure):
pass
struct_def(CK_LKM_TOKEN_ID_S, [("id", CK_BYTE * 20)])
CK_LKM_TOKEN_ID = CK_LKM_TOKEN_ID_S
CK_LKM_TOKEN_ID_PTR = POINTER(CK_LKM_TOKEN_ID)
class CK_UTILIZATION_COUNTER(Structure):
pass
struct_def(
CK_UTILIZATION_COUNTER,
[
("ullSerialNumber", CK_ULONGLONG),
("label", CK_CHAR * 66),
("ulBindId", CK_ULONG),
("ulCounterId", CK_ULONG),
("ullCount", CK_ULONGLONG),
],
)
CK_UTILIZATION_COUNTER_PTR = POINTER(CK_UTILIZATION_COUNTER)
# pka
class CK_KEY_STATUS(Structure):
pass
struct_def(
CK_KEY_STATUS,
[
("flags", CK_BYTE),
("failedAuthCountLimit", CK_BYTE),
("reserved1", CK_BYTE),
("reserved2", CK_BYTE),
],
)
class CK_FUNCTION_LIST(Structure):
pass
class CK_INFO(Structure):
pass
CK_INFO_PTR = POINTER(CK_INFO)
class CK_SLOT_INFO(Structure):
pass
CK_SLOT_INFO_PTR = POINTER(CK_SLOT_INFO)
class CK_TOKEN_INFO(Structure):
pass
CK_TOKEN_INFO_PTR = POINTER(CK_TOKEN_INFO)
class CK_MECHANISM_INFO(Structure):
pass
CK_MECHANISM_INFO_PTR = POINTER(CK_MECHANISM_INFO)
class CK_SESSION_INFO(Structure):
pass
CK_SESSION_INFO_PTR = POINTER(CK_SESSION_INFO)
CK_VERSION_PTR = POINTER(CK_VERSION)
struct_def(
CK_INFO,
[
("cryptokiVersion", CK_VERSION),
("manufacturerID", CK_UTF8CHAR * 32),
("flags", CK_FLAGS),
("libraryDescription", CK_UTF8CHAR * 32),
("libraryVersion", CK_VERSION),
],
)
struct_def(
CK_SLOT_INFO,
[
("slotDescription", CK_UTF8CHAR * 64),
("manufacturerID", CK_UTF8CHAR * 32),
("flags", CK_FLAGS),
("hardwareVersion", CK_VERSION),
("firmwareVersion", CK_VERSION),
],
)
struct_def(
CK_TOKEN_INFO,
[
("label", CK_UTF8CHAR * 32),
("manufacturerID", CK_UTF8CHAR * 32),
("model", CK_UTF8CHAR * 16),
("serialNumber", CK_CHAR * 16),
("flags", CK_FLAGS),
("usMaxSessionCount", CK_ULONG),
("usSessionCount", CK_ULONG),
("usMaxRwSessionCount", CK_ULONG),
("usRwSessionCount", CK_ULONG),
("usMaxPinLen", CK_ULONG),
("usMinPinLen", CK_ULONG),
("ulTotalPublicMemory", CK_ULONG),
("ulFreePublicMemory", CK_ULONG),
("ulTotalPrivateMemory", CK_ULONG),
("ulFreePrivateMemory", CK_ULONG),
("hardwareVersion", CK_VERSION),
("firmwareVersion", CK_VERSION),
("utcTime", CK_CHAR * 16),
],
)
struct_def(
CK_SESSION_INFO,
[("slotID", CK_SLOT_ID), ("state", CK_STATE), ("flags", CK_FLAGS), ("usDeviceError", CK_ULONG)],
)
struct_def(
CK_ATTRIBUTE, [("type", CK_ATTRIBUTE_TYPE), ("pValue", CK_VOID_PTR), ("usValueLen", CK_ULONG)]
)
class CK_DATE(Structure):
pass
struct_def(CK_DATE, [("year", CK_CHAR * 4), ("month", CK_CHAR * 2), ("day", CK_CHAR * 2)])
struct_def(
CK_MECHANISM,
[("mechanism", CK_MECHANISM_TYPE), ("pParameter", CK_VOID_PTR), ("usParameterLen", CK_ULONG)],
)
struct_def(
CK_MECHANISM_INFO, [("ulMinKeySize", CK_ULONG), ("ulMaxKeySize", CK_ULONG), ("flags", CK_FLAGS)]
)
CK_CREATEMUTEX = CFUNCTYPE(CK_RV, CK_VOID_PTR_PTR)
CK_DESTROYMUTEX = CFUNCTYPE(CK_RV, CK_VOID_PTR)
CK_LOCKMUTEX = CFUNCTYPE(CK_RV, CK_VOID_PTR)
CK_UNLOCKMUTEX = CFUNCTYPE(CK_RV, CK_VOID_PTR)
class CK_C_INITIALIZE_ARGS(Structure):
pass
struct_def(
CK_C_INITIALIZE_ARGS,
[
("CreateMutex", CK_CREATEMUTEX),
("DestroyMutex", CK_DESTROYMUTEX),
("LockMutex", CK_LOCKMUTEX),
("UnlockMutex", CK_UNLOCKMUTEX),
("flags", CK_FLAGS),
("pReserved", CK_VOID_PTR),
],
)
CK_C_INITIALIZE_ARGS_PTR = POINTER(CK_C_INITIALIZE_ARGS)
CK_RSA_PKCS_MGF_TYPE = CK_ULONG
CK_RSA_PKCS_MGF_TYPE_PTR = POINTER(CK_RSA_PKCS_MGF_TYPE)
CK_RSA_PKCS_OAEP_SOURCE_TYPE = CK_ULONG
CK_RSA_PKCS_OAEP_SOURCE_TYPE_PTR = POINTER(CK_RSA_PKCS_OAEP_SOURCE_TYPE)
class CK_RSA_PKCS_OAEP_PARAMS(Structure):
pass
struct_def(
CK_RSA_PKCS_OAEP_PARAMS,
[
("hashAlg", CK_MECHANISM_TYPE),
("mgf", CK_RSA_PKCS_MGF_TYPE),
("source", CK_RSA_PKCS_OAEP_SOURCE_TYPE),
("pSourceData", CK_VOID_PTR),
("ulSourceDataLen", CK_ULONG),
],
)
CK_RSA_PKCS_OAEP_PARAMS_PTR = POINTER(CK_RSA_PKCS_OAEP_PARAMS)
class CK_RSA_PKCS_PSS_PARAMS(Structure):
pass
struct_def(
CK_RSA_PKCS_PSS_PARAMS,
[("hashAlg", CK_MECHANISM_TYPE), ("mgf", CK_RSA_PKCS_MGF_TYPE), ("usSaltLen", CK_ULONG)],
)
CK_RSA_PKCS_PSS_PARAMS_PTR = POINTER(CK_RSA_PKCS_PSS_PARAMS)
class CK_ECDH1_DERIVE_PARAMS(Structure):
pass
struct_def(
CK_ECDH1_DERIVE_PARAMS,
[
("kdf", CK_EC_KDF_TYPE),
("ulSharedDataLen", CK_ULONG),
("pSharedData", CK_BYTE_PTR),
("ulPublicDataLen", CK_ULONG),
("pPublicData", CK_BYTE_PTR),
],
)
CK_ECDH1_DERIVE_PARAMS_PTR = POINTER(CK_ECDH1_DERIVE_PARAMS)
class CK_ECDH2_DERIVE_PARAMS(Structure):
pass
struct_def(
CK_ECDH2_DERIVE_PARAMS,
[
("kdf", CK_EC_KDF_TYPE),
("ulSharedDataLen", CK_ULONG),
("pSharedData", CK_BYTE_PTR),
("ulPublicDataLen", CK_ULONG),
("pPublicData", CK_BYTE_PTR),
("ulPrivateDataLen", CK_ULONG),
("hPrivateData", CK_OBJECT_HANDLE),
("ulPublicDataLen2", CK_ULONG),
("pPublicData2", CK_BYTE_PTR),
],
)
CK_ECDH2_DERIVE_PARAMS_PTR = POINTER(CK_ECDH2_DERIVE_PARAMS)
class CK_ECMQV_DERIVE_PARAMS(Structure):
pass
struct_def(
CK_ECMQV_DERIVE_PARAMS,
[
("kdf", CK_EC_KDF_TYPE),
("ulSharedDataLen", CK_ULONG),
("pSharedData", CK_BYTE_PTR),
("ulPublicDataLen", CK_ULONG),
("pPublicData", CK_BYTE_PTR),
("ulPrivateDataLen", CK_ULONG),
("hPrivateData", CK_OBJECT_HANDLE),
("ulPublicDataLen2", CK_ULONG),
("pPublicData2", CK_BYTE_PTR),
("publicKey", CK_OBJECT_HANDLE),
],
)
CK_ECMQV_DERIVE_PARAMS_PTR = POINTER(CK_ECMQV_DERIVE_PARAMS)
CK_X9_42_DH_KDF_TYPE = CK_ULONG
CK_X9_42_DH_KDF_TYPE_PTR = POINTER(CK_X9_42_DH_KDF_TYPE)
class CK_X9_42_DH1_DERIVE_PARAMS(Structure):
pass
struct_def(
CK_X9_42_DH1_DERIVE_PARAMS,
[
("kdf", CK_X9_42_DH_KDF_TYPE),
("ulOtherInfoLen", CK_ULONG),
("pOtherInfo", CK_BYTE_PTR),
("ulPublicDataLen", CK_ULONG),
("pPublicData", CK_BYTE_PTR),
],
)
CK_X9_42_DH1_DERIVE_PARAMS_PTR = POINTER(CK_X9_42_DH1_DERIVE_PARAMS)
class CK_X9_42_DH2_DERIVE_PARAMS(Structure):
pass
struct_def(
CK_X9_42_DH2_DERIVE_PARAMS,
[
("kdf", CK_X9_42_DH_KDF_TYPE),
("ulOtherInfoLen", CK_ULONG),
("pOtherInfo", CK_BYTE_PTR),
("ulPublicDataLen", CK_ULONG),
("pPublicData", CK_BYTE_PTR),
("ulPrivateDataLen", CK_ULONG),
("hPrivateData", CK_OBJECT_HANDLE),
("ulPublicDataLen2", CK_ULONG),
("pPublicData2", CK_BYTE_PTR),
],
)
CK_X9_42_DH2_DERIVE_PARAMS_PTR = POINTER(CK_X9_42_DH2_DERIVE_PARAMS)
class CK_X9_42_MQV_DERIVE_PARAMS(Structure):
pass
struct_def(
CK_X9_42_MQV_DERIVE_PARAMS,
[
("kdf", CK_X9_42_DH_KDF_TYPE),
("ulOtherInfoLen", CK_ULONG),
("pOtherInfo", CK_BYTE_PTR),
("ulPublicDataLen", CK_ULONG),
("pPublicData", CK_BYTE_PTR),
("ulPrivateDataLen", CK_ULONG),
("hPrivateData", CK_OBJECT_HANDLE),
("ulPublicDataLen2", CK_ULONG),
("pPublicData2", CK_BYTE_PTR),
("publicKey", CK_OBJECT_HANDLE),
],
)
CK_X9_42_MQV_DERIVE_PARAMS_PTR = POINTER(CK_X9_42_MQV_DERIVE_PARAMS)
class CK_KEA_DERIVE_PARAMS(Structure):
pass
struct_def(
CK_KEA_DERIVE_PARAMS,
[
("isSender", CK_BBOOL),
("ulRandomLen", CK_ULONG),
("pRandomA", CK_BYTE_PTR),
("pRandomB", CK_BYTE_PTR),
("ulPublicDataLen", CK_ULONG),
("pPublicData", CK_BYTE_PTR),
],
)
CK_KEA_DERIVE_PARAMS_PTR = POINTER(CK_KEA_DERIVE_PARAMS)
CK_RC2_PARAMS = CK_ULONG
CK_RC2_PARAMS_PTR = POINTER(CK_RC2_PARAMS)
class CK_RC2_CBC_PARAMS(Structure):
pass
struct_def(CK_RC2_CBC_PARAMS, [("usEffectiveBits", CK_ULONG), ("iv", CK_BYTE * 8)])
CK_RC2_CBC_PARAMS_PTR = POINTER(CK_RC2_CBC_PARAMS)
class CK_RC2_MAC_GENERAL_PARAMS(Structure):
pass
struct_def(CK_RC2_MAC_GENERAL_PARAMS, [("usEffectiveBits", CK_ULONG), ("ulMacLength", CK_ULONG)])
CK_RC2_MAC_GENERAL_PARAMS_PTR = POINTER(CK_RC2_MAC_GENERAL_PARAMS)
class CK_RC5_PARAMS(Structure):
pass
struct_def(CK_RC5_PARAMS, [("ulWordsize", CK_ULONG), ("ulRounds", CK_ULONG)])
CK_RC5_PARAMS_PTR = POINTER(CK_RC5_PARAMS)
class CK_RC5_CBC_PARAMS(Structure):
pass
struct_def(
CK_RC5_CBC_PARAMS,
[("ulWordsize", CK_ULONG), ("ulRounds", CK_ULONG), ("pIv", CK_BYTE_PTR), ("ulIvLen", CK_ULONG)],
)
CK_RC5_CBC_PARAMS_PTR = POINTER(CK_RC5_CBC_PARAMS)
class CK_RC5_MAC_GENERAL_PARAMS(Structure):
pass
struct_def(
CK_RC5_MAC_GENERAL_PARAMS,
[("ulWordsize", CK_ULONG), ("ulRounds", CK_ULONG), ("ulMacLength", CK_ULONG)],
)
CK_RC5_MAC_GENERAL_PARAMS_PTR = POINTER(CK_RC5_MAC_GENERAL_PARAMS)
CK_MAC_GENERAL_PARAMS = CK_ULONG
CK_MAC_GENERAL_PARAMS_PTR = POINTER(CK_MAC_GENERAL_PARAMS)
class CK_DES_CBC_ENCRYPT_DATA_PARAMS(Structure):
pass
struct_def(
CK_DES_CBC_ENCRYPT_DATA_PARAMS,
[("iv", CK_BYTE * 8), ("pData", CK_BYTE_PTR), ("length", CK_ULONG)],
)
CK_DES_CBC_ENCRYPT_DATA_PARAMS_PTR = POINTER(CK_DES_CBC_ENCRYPT_DATA_PARAMS)
class CK_AES_CBC_ENCRYPT_DATA_PARAMS(Structure):
pass
struct_def(
CK_AES_CBC_ENCRYPT_DATA_PARAMS,
[("iv", CK_BYTE * 16), ("pData", CK_BYTE_PTR), ("length", CK_ULONG)],
)
CK_AES_CBC_ENCRYPT_DATA_PARAMS_PTR = POINTER(CK_AES_CBC_ENCRYPT_DATA_PARAMS)
class CK_SKIPJACK_PRIVATE_WRAP_PARAMS(Structure):
pass
struct_def(
CK_SKIPJACK_PRIVATE_WRAP_PARAMS,
[
("usPasswordLen", CK_ULONG),
("pPassword", CK_BYTE_PTR),
("ulPublicDataLen", CK_ULONG),
("pPublicData", CK_BYTE_PTR),
("ulPAndGLen", CK_ULONG),
("ulQLen", CK_ULONG),
("ulRandomLen", CK_ULONG),
("pRandomA", CK_BYTE_PTR),
("pPrimeP", CK_BYTE_PTR),
("pBaseG", CK_BYTE_PTR),
("pSubprimeQ", CK_BYTE_PTR),
],
)
CK_SKIPJACK_PRIVATE_WRAP_PTR = POINTER(CK_SKIPJACK_PRIVATE_WRAP_PARAMS)
class CK_SKIPJACK_RELAYX_PARAMS(Structure):
pass
struct_def(
CK_SKIPJACK_RELAYX_PARAMS,
[
("ulOldWrappedXLen", CK_ULONG),
("pOldWrappedX", CK_BYTE_PTR),
("ulOldPasswordLen", CK_ULONG),
("pOldPassword", CK_BYTE_PTR),
("ulOldPublicDataLen", CK_ULONG),
("pOldPublicData", CK_BYTE_PTR),
("ulOldRandomLen", CK_ULONG),
("pOldRandomA", CK_BYTE_PTR),
("ulNewPasswordLen", CK_ULONG),
("pNewPassword", CK_BYTE_PTR),
("ulNewPublicDataLen", CK_ULONG),
("pNewPublicData", CK_BYTE_PTR),
("ulNewRandomLen", CK_ULONG),
("pNewRandomA", CK_BYTE_PTR),
],
)
CK_SKIPJACK_RELAYX_PARAMS_PTR = POINTER(CK_SKIPJACK_RELAYX_PARAMS)
class CK_PBE_PARAMS(Structure):
pass
struct_def(
CK_PBE_PARAMS,
[
("pInitVector", CK_BYTE_PTR),
("pPassword", CK_UTF8CHAR_PTR),
("usPasswordLen", CK_ULONG),
("pSalt", CK_BYTE_PTR),
("usSaltLen", CK_ULONG),
("usIteration", CK_ULONG),
],
)
CK_PBE_PARAMS_PTR = POINTER(CK_PBE_PARAMS)
class CK_KEY_WRAP_SET_OAEP_PARAMS(Structure):
pass
struct_def(
CK_KEY_WRAP_SET_OAEP_PARAMS, [("bBC", CK_BYTE), ("pX", CK_BYTE_PTR), ("ulXLen", CK_ULONG)]
)
CK_KEY_WRAP_SET_OAEP_PARAMS_PTR = POINTER(CK_KEY_WRAP_SET_OAEP_PARAMS)
class CK_SSL3_RANDOM_DATA(Structure):
pass
struct_def(
CK_SSL3_RANDOM_DATA,
[
("pClientRandom", CK_BYTE_PTR),
("ulClientRandomLen", CK_ULONG),
("pServerRandom", CK_BYTE_PTR),
("ulServerRandomLen", CK_ULONG),
],
)
class CK_SSL3_MASTER_KEY_DERIVE_PARAMS(Structure):
pass
struct_def(
CK_SSL3_MASTER_KEY_DERIVE_PARAMS,
[("RandomInfo", CK_SSL3_RANDOM_DATA), ("pVersion", CK_VERSION_PTR)],
)
CK_SSL3_MASTER_KEY_DERIVE_PARAMS_PTR = POINTER(CK_SSL3_MASTER_KEY_DERIVE_PARAMS)
class CK_SSL3_KEY_MAT_OUT(Structure):
pass
struct_def(
CK_SSL3_KEY_MAT_OUT,
[
("hClientMacSecret", CK_OBJECT_HANDLE),
("hServerMacSecret", CK_OBJECT_HANDLE),
("hClientKey", CK_OBJECT_HANDLE),
("hServerKey", CK_OBJECT_HANDLE),
("pIVClient", CK_BYTE_PTR),
("pIVServer", CK_BYTE_PTR),
],
)
CK_SSL3_KEY_MAT_OUT_PTR = POINTER(CK_SSL3_KEY_MAT_OUT)
class CK_SSL3_KEY_MAT_PARAMS(Structure):
pass
struct_def(
CK_SSL3_KEY_MAT_PARAMS,
[
("ulMacSizeInBits", CK_ULONG),
("ulKeySizeInBits", CK_ULONG),
("ulIVSizeInBits", CK_ULONG),
("bIsExport", CK_BBOOL),
("RandomInfo", CK_SSL3_RANDOM_DATA),
("pReturnedKeyMaterial", CK_SSL3_KEY_MAT_OUT_PTR),
],
)
CK_SSL3_KEY_MAT_PARAMS_PTR = POINTER(CK_SSL3_KEY_MAT_PARAMS)
class CK_TLS_PRF_PARAMS(Structure):
pass
struct_def(
CK_TLS_PRF_PARAMS,
[
("pSeed", CK_BYTE_PTR),
("ulSeedLen", CK_ULONG),
("pLabel", CK_BYTE_PTR),
("ulLabelLen", CK_ULONG),
("pOutput", CK_BYTE_PTR),
("pulOutputLen", CK_ULONG_PTR),
],
)
CK_TLS_PRF_PARAMS_PTR = POINTER(CK_TLS_PRF_PARAMS)
class CK_WTLS_RANDOM_DATA(Structure):
pass
struct_def(
CK_WTLS_RANDOM_DATA,
[
("pClientRandom", CK_BYTE_PTR),
("ulClientRandomLen", CK_ULONG),
("pServerRandom", CK_BYTE_PTR),
("ulServerRandomLen", CK_ULONG),
],
)
CK_WTLS_RANDOM_DATA_PTR = POINTER(CK_WTLS_RANDOM_DATA)
class CK_WTLS_MASTER_KEY_DERIVE_PARAMS(Structure):
pass
struct_def(
CK_WTLS_MASTER_KEY_DERIVE_PARAMS,
[
("DigestMechanism", CK_MECHANISM_TYPE),
("RandomInfo", CK_WTLS_RANDOM_DATA),
("pVersion", CK_BYTE_PTR),
],
)
CK_WTLS_MASTER_KEY_DERIVE_PARAMS_PTR = POINTER(CK_WTLS_MASTER_KEY_DERIVE_PARAMS)
class CK_WTLS_PRF_PARAMS(Structure):
pass
struct_def(
CK_WTLS_PRF_PARAMS,
[
("DigestMechanism", CK_MECHANISM_TYPE),
("pSeed", CK_BYTE_PTR),
("ulSeedLen", CK_ULONG),
("pLabel", CK_BYTE_PTR),
("ulLabelLen", CK_ULONG),
("pOutput", CK_BYTE_PTR),
("pulOutputLen", CK_ULONG_PTR),
],
)
CK_WTLS_PRF_PARAMS_PTR = POINTER(CK_WTLS_PRF_PARAMS)
class CK_WTLS_KEY_MAT_OUT(Structure):
pass
struct_def(
CK_WTLS_KEY_MAT_OUT,
[("hMacSecret", CK_OBJECT_HANDLE), ("hKey", CK_OBJECT_HANDLE), ("pIV", CK_BYTE_PTR)],
)
CK_WTLS_KEY_MAT_OUT_PTR = POINTER(CK_WTLS_KEY_MAT_OUT)
class CK_WTLS_KEY_MAT_PARAMS(Structure):
pass
struct_def(
CK_WTLS_KEY_MAT_PARAMS,
[
("DigestMechanism", CK_MECHANISM_TYPE),
("ulMacSizeInBits", CK_ULONG),
("ulKeySizeInBits", CK_ULONG),
("ulIVSizeInBits", CK_ULONG),
("ulSequenceNumber", CK_ULONG),
("bIsExport", CK_BBOOL),
("RandomInfo", CK_WTLS_RANDOM_DATA),
("pReturnedKeyMaterial", CK_WTLS_KEY_MAT_OUT_PTR),
],
)
CK_WTLS_KEY_MAT_PARAMS_PTR = POINTER(CK_WTLS_KEY_MAT_PARAMS)
class CK_CMS_SIG_PARAMS(Structure):
pass
struct_def(
CK_CMS_SIG_PARAMS,
[
("certificateHandle", CK_OBJECT_HANDLE),
("pSigningMechanism", CK_MECHANISM_PTR),
("pDigestMechanism", CK_MECHANISM_PTR),
("pContentType", CK_UTF8CHAR_PTR),
("pRequestedAttributes", CK_BYTE_PTR),
("ulRequestedAttributesLen", CK_ULONG),
("pRequiredAttributes", CK_BYTE_PTR),
("ulRequiredAttributesLen", CK_ULONG),
],
)
CK_CMS_SIG_PARAMS_PTR = POINTER(CK_CMS_SIG_PARAMS)
class CK_KEY_DERIVATION_STRING_DATA(Structure):
pass
struct_def(CK_KEY_DERIVATION_STRING_DATA, [("pData", CK_BYTE_PTR), ("ulLen", CK_ULONG)])
CK_KEY_DERIVATION_STRING_DATA_PTR = POINTER(CK_KEY_DERIVATION_STRING_DATA)
CK_EXTRACT_PARAMS = CK_ULONG
CK_EXTRACT_PARAMS_PTR = POINTER(CK_EXTRACT_PARAMS)
CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE = CK_ULONG
CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE_PTR = POINTER(CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE)
CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE = CK_ULONG
CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE_PTR = POINTER(CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE)
class CK_PKCS5_PBKD2_PARAMS(Structure):
pass
struct_def(
CK_PKCS5_PBKD2_PARAMS,
[
("saltSource", CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE),
("pSaltSourceData", CK_VOID_PTR),
("ulSaltSourceDataLen", CK_ULONG),
("iterations", CK_ULONG),
("prf", CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE),
("pPrfData", CK_VOID_PTR),
("ulPrfDataLen", CK_ULONG),
("pPassword", CK_UTF8CHAR_PTR),
("usPasswordLen", CK_ULONG),
],
)
CK_PKCS5_PBKD2_PARAMS_PTR = POINTER(CK_PKCS5_PBKD2_PARAMS)
CK_OTP_PARAM_TYPE = CK_ULONG
CK_PARAM_TYPE = CK_OTP_PARAM_TYPE
class CK_OTP_PARAM(Structure):
pass
struct_def(
CK_OTP_PARAM, [("type", CK_OTP_PARAM_TYPE), ("pValue", CK_VOID_PTR), ("usValueLen", CK_ULONG)]
)
CK_OTP_PARAM_PTR = POINTER(CK_OTP_PARAM)
class CK_OTP_PARAMS(Structure):
pass
struct_def(CK_OTP_PARAMS, [("pParams", CK_OTP_PARAM_PTR), ("ulCount", CK_ULONG)])
CK_OTP_PARAMS_PTR = POINTER(CK_OTP_PARAMS)
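# --- Editorial usage sketch (not part of the original module): a minimal,
# hedged example of building a one-entry CK_OTP_PARAMS block.  The param
# type value below is an illustrative placeholder, not a constant defined
# in this excerpt; real code should use the CK_OTP_* constants from the
# PKCS#11 OTP profile.
def _example_otp_params():
    from ctypes import cast, sizeof
    raw = (CK_BYTE * 4)(*b"1234")
    entries = (CK_OTP_PARAM * 1)()
    entries[0].type = 0  # e.g. CK_OTP_PIN (assumed value)
    entries[0].pValue = cast(raw, CK_VOID_PTR)
    entries[0].usValueLen = sizeof(raw)
    otp = CK_OTP_PARAMS()
    otp.pParams = cast(entries, CK_OTP_PARAM_PTR)
    otp.ulCount = 1
    # Note: keep `raw` and `entries` alive for as long as `otp` is used.
    return otp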
class CK_OTP_SIGNATURE_INFO(Structure):
pass
struct_def(CK_OTP_SIGNATURE_INFO, [("pParams", CK_OTP_PARAM_PTR), ("ulCount", CK_ULONG)])
CK_OTP_SIGNATURE_INFO_PTR = POINTER(CK_OTP_SIGNATURE_INFO)
class CK_KIP_PARAMS(Structure):
pass
struct_def(
CK_KIP_PARAMS,
[
("pMechanism", CK_MECHANISM_PTR),
("hKey", CK_OBJECT_HANDLE),
("pSeed", CK_BYTE_PTR),
("ulSeedLen", CK_ULONG),
],
)
CK_KIP_PARAMS_PTR = POINTER(CK_KIP_PARAMS)
struct_def(CK_AES_CTR_PARAMS, [("ulCounterBits", CK_ULONG), ("cb", CK_BYTE * 16)])
CK_AES_CTR_PARAMS_PTR = POINTER(CK_AES_CTR_PARAMS)
class CK_CAMELLIA_CTR_PARAMS(Structure):
pass
struct_def(CK_CAMELLIA_CTR_PARAMS, [("ulCounterBits", CK_ULONG), ("cb", CK_BYTE * 16)])
CK_CAMELLIA_CTR_PARAMS_PTR = POINTER(CK_CAMELLIA_CTR_PARAMS)
class CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS(Structure):
pass
struct_def(
CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS,
[("iv", CK_BYTE * 16), ("pData", CK_BYTE_PTR), ("length", CK_ULONG)],
)
CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS_PTR = POINTER(CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS)
class CK_ARIA_CBC_ENCRYPT_DATA_PARAMS(Structure):
pass
struct_def(
CK_ARIA_CBC_ENCRYPT_DATA_PARAMS,
[("iv", CK_BYTE * 16), ("pData", CK_BYTE_PTR), ("length", CK_ULONG)],
)
CK_ARIA_CBC_ENCRYPT_DATA_PARAMS_PTR = POINTER(CK_ARIA_CBC_ENCRYPT_DATA_PARAMS)
class CK_APPLICATION_ID(Structure):
def __init__(self, aid=None):
if aid is None:
aid = []
self.id = (CK_BYTE * 16)(*aid)
struct_def(CK_APPLICATION_ID, [("id", CK_BYTE * 16)])
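# --- Editorial usage sketch (not part of the original module): filling in
# CK_PKCS5_PBKD2_PARAMS for a PBKDF2 key derivation.  The numeric values
# assigned to saltSource and prf follow PKCS#11 v2.20 (CKZ_SALT_SPECIFIED
# == 1, CKP_PKCS5_PBKD2_HMAC_SHA1 == 1); if this module defines named
# constants for them elsewhere, prefer those names.
def _example_pbkd2_params():
    from ctypes import cast, c_char_p
    salt = (CK_BYTE * 8)(*b"\x00\x01\x02\x03\x04\x05\x06\x07")
    password = b"hunter2"  # illustrative only
    params = CK_PKCS5_PBKD2_PARAMS()
    params.saltSource = 1  # CKZ_SALT_SPECIFIED (assumed)
    params.pSaltSourceData = cast(salt, CK_VOID_PTR)
    params.ulSaltSourceDataLen = len(salt)
    params.iterations = 10000
    params.prf = 1  # CKP_PKCS5_PBKD2_HMAC_SHA1 (assumed)
    params.pPassword = cast(c_char_p(password), CK_UTF8CHAR_PTR)
    params.usPasswordLen = len(password)
    # Note: keep `salt` and `password` alive for as long as `params` is used.
    return params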
| apache-2.0 |
zx8/youtube-dl | youtube_dl/extractor/comedycentral.py | 92 | 12008 | from __future__ import unicode_literals
import re
from .mtv import MTVServicesInfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
)
from ..utils import (
ExtractorError,
float_or_none,
unified_strdate,
)
class ComedyCentralIE(MTVServicesInfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
(video-clips|episodes|cc-studios|video-collections|full-episodes)
/(?P<title>.*)'''
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
_TEST = {
'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
'info_dict': {
'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
'ext': 'mp4',
'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
'description': 'After a certain point, breastfeeding becomes c**kblocking.',
},
}
class ComedyCentralShowsIE(MTVServicesInfoExtractor):
IE_DESC = 'The Daily Show / The Colbert Report'
# urls can be abbreviations like :thedailyshow
# urls for episodes like:
# or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
# or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
# or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
_VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow)
|https?://(:www\.)?
(?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
(?P<clip>
(?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+))
|(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
|(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))
)|
(?P<interview>
extended-interviews/(?P<interID>[0-9a-z]+)/
(?:playlist_tds_extended_)?(?P<interview_title>[^/?#]*?)
(?:/[^/?#]?|[?#]|$))))
'''
_TESTS = [{
'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
'info_dict': {
'id': 'ab9ab3e7-5a98-4dbe-8b21-551dc0523d55',
'ext': 'mp4',
'upload_date': '20121213',
'description': 'Kristen Stewart learns to let loose in "On the Road."',
'uploader': 'thedailyshow',
'title': 'thedailyshow kristen-stewart part 1',
}
}, {
'url': 'http://thedailyshow.cc.com/extended-interviews/b6364d/sarah-chayes-extended-interview',
'info_dict': {
'id': 'sarah-chayes-extended-interview',
'description': 'Carnegie Endowment Senior Associate Sarah Chayes discusses how corrupt institutions function throughout the world in her book "Thieves of State: Why Corruption Threatens Global Security."',
'title': 'thedailyshow Sarah Chayes Extended Interview',
},
'playlist': [
{
'info_dict': {
'id': '0baad492-cbec-4ec1-9e50-ad91c291127f',
'ext': 'mp4',
'upload_date': '20150129',
'description': 'Carnegie Endowment Senior Associate Sarah Chayes discusses how corrupt institutions function throughout the world in her book "Thieves of State: Why Corruption Threatens Global Security."',
'uploader': 'thedailyshow',
'title': 'thedailyshow sarah-chayes-extended-interview part 1',
},
},
{
'info_dict': {
'id': '1e4fb91b-8ce7-4277-bd7c-98c9f1bbd283',
'ext': 'mp4',
'upload_date': '20150129',
'description': 'Carnegie Endowment Senior Associate Sarah Chayes discusses how corrupt institutions function throughout the world in her book "Thieves of State: Why Corruption Threatens Global Security."',
'uploader': 'thedailyshow',
'title': 'thedailyshow sarah-chayes-extended-interview part 2',
},
},
],
'params': {
'skip_download': True,
},
}, {
'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/video-playlists/t6d9sg/the-daily-show-20038-highlights/be3cwo',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel',
'only_matching': True,
}]
_available_formats = ['3500', '2200', '1700', '1200', '750', '400']
_video_extensions = {
'3500': 'mp4',
'2200': 'mp4',
'1700': 'mp4',
'1200': 'mp4',
'750': 'mp4',
'400': 'mp4',
}
_video_dimensions = {
'3500': (1280, 720),
'2200': (960, 540),
'1700': (768, 432),
'1200': (640, 360),
'750': (512, 288),
'400': (384, 216),
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj.group('shortname'):
if mobj.group('shortname') in ('tds', 'thedailyshow'):
url = 'http://thedailyshow.cc.com/full-episodes/'
else:
url = 'http://thecolbertreport.cc.com/full-episodes/'
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
assert mobj is not None
if mobj.group('clip'):
if mobj.group('videotitle'):
epTitle = mobj.group('videotitle')
elif mobj.group('showname') == 'thedailyshow':
epTitle = mobj.group('tdstitle')
else:
epTitle = mobj.group('cntitle')
dlNewest = False
elif mobj.group('interview'):
epTitle = mobj.group('interview_title')
dlNewest = False
else:
dlNewest = not mobj.group('episode')
if dlNewest:
epTitle = mobj.group('showname')
else:
epTitle = mobj.group('episode')
show_name = mobj.group('showname')
webpage, htmlHandle = self._download_webpage_handle(url, epTitle)
if dlNewest:
url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid redirected URL: ' + url)
if mobj.group('episode') == '':
raise ExtractorError('Redirected URL is still not specific: ' + url)
epTitle = (mobj.group('episode') or mobj.group('videotitle')).rpartition('/')[-1]
mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
if len(mMovieParams) == 0:
            # The Colbert Report embeds the information in a data-mgid
            # attribute without a URL prefix, so extract the alternate
            # reference and then add the URL prefix manually.
altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video|playlist).*?:.*?)"', webpage)
if len(altMovieParams) == 0:
raise ExtractorError('unable to find Flash URL in webpage ' + url)
else:
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
uri = mMovieParams[0][1]
# Correct cc.com in uri
uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.com', uri)
index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
idoc = self._download_xml(
index_url, epTitle,
'Downloading show index', 'Unable to download episode index')
title = idoc.find('./channel/title').text
description = idoc.find('./channel/description').text
entries = []
item_els = idoc.findall('.//item')
for part_num, itemEl in enumerate(item_els):
upload_date = unified_strdate(itemEl.findall('./pubDate')[0].text)
thumbnail = itemEl.find('.//{http://search.yahoo.com/mrss/}thumbnail').attrib.get('url')
content = itemEl.find('.//{http://search.yahoo.com/mrss/}content')
duration = float_or_none(content.attrib.get('duration'))
mediagen_url = content.attrib['url']
guid = itemEl.find('./guid').text.rpartition(':')[-1]
cdoc = self._download_xml(
mediagen_url, epTitle,
'Downloading configuration for segment %d / %d' % (part_num + 1, len(item_els)))
turls = []
for rendition in cdoc.findall('.//rendition'):
finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
turls.append(finfo)
formats = []
for format, rtmp_video_url in turls:
w, h = self._video_dimensions.get(format, (None, None))
formats.append({
'format_id': 'vhttp-%s' % format,
'url': self._transform_rtmp_url(rtmp_video_url),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
})
formats.append({
'format_id': 'rtmp-%s' % format,
'url': rtmp_video_url.replace('viacomccstrm', 'viacommtvstrm'),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
})
self._sort_formats(formats)
subtitles = self._extract_subtitles(cdoc, guid)
virtual_id = show_name + ' ' + epTitle + ' part ' + compat_str(part_num + 1)
entries.append({
'id': guid,
'title': virtual_id,
'formats': formats,
'uploader': show_name,
'upload_date': upload_date,
'duration': duration,
'thumbnail': thumbnail,
'description': description,
'subtitles': subtitles,
})
return {
'_type': 'playlist',
'id': epTitle,
'entries': entries,
'title': show_name + ' ' + title,
'description': description,
}
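# --- Editorial usage sketch (not part of the original file): exercising the
# extractor through youtube-dl's public API.  The URL is one of the _TESTS
# entries above; this is illustrative only, and whether it still works
# depends on the legacy cc.com feeds being served.
#
#     import youtube_dl
#     with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
#         info = ydl.extract_info(
#             'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
#             download=False)
#         print(info.get('title'))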
| unlicense |
ArcherSys/ArcherSys | Lib/test/test_strptime.py | 1 | 79523 | """PyUnit testing against strptime"""
import unittest
import time
import locale
import re
import sys
from test import support
from datetime import date as datetime_date
import _strptime
class getlang_Tests(unittest.TestCase):
"""Test _getlang"""
def test_basic(self):
self.assertEqual(_strptime._getlang(), locale.getlocale(locale.LC_TIME))
class LocaleTime_Tests(unittest.TestCase):
"""Tests for _strptime.LocaleTime.
All values are lower-cased when stored in LocaleTime, so make sure to
compare values after running ``lower`` on them.
"""
def setUp(self):
"""Create time tuple based on current time."""
self.time_tuple = time.localtime()
self.LT_ins = _strptime.LocaleTime()
def compare_against_time(self, testing, directive, tuple_position,
error_msg):
"""Helper method that tests testing against directive based on the
tuple_position of time_tuple. Uses error_msg as error message.
"""
strftime_output = time.strftime(directive, self.time_tuple).lower()
comparison = testing[self.time_tuple[tuple_position]]
self.assertIn(strftime_output, testing,
"%s: not found in tuple" % error_msg)
self.assertEqual(comparison, strftime_output,
"%s: position within tuple incorrect; %s != %s" %
(error_msg, comparison, strftime_output))
def test_weekday(self):
# Make sure that full and abbreviated weekday names are correct in
# both string and position with tuple
self.compare_against_time(self.LT_ins.f_weekday, '%A', 6,
"Testing of full weekday name failed")
self.compare_against_time(self.LT_ins.a_weekday, '%a', 6,
"Testing of abbreviated weekday name failed")
def test_month(self):
# Test full and abbreviated month names; both string and position
# within the tuple
self.compare_against_time(self.LT_ins.f_month, '%B', 1,
"Testing against full month name failed")
self.compare_against_time(self.LT_ins.a_month, '%b', 1,
"Testing against abbreviated month name failed")
def test_am_pm(self):
# Make sure AM/PM representation done properly
strftime_output = time.strftime("%p", self.time_tuple).lower()
self.assertIn(strftime_output, self.LT_ins.am_pm,
"AM/PM representation not in tuple")
if self.time_tuple[3] < 12: position = 0
else: position = 1
self.assertEqual(self.LT_ins.am_pm[position], strftime_output,
"AM/PM representation in the wrong position within the tuple")
def test_timezone(self):
# Make sure timezone is correct
timezone = time.strftime("%Z", self.time_tuple).lower()
if timezone:
self.assertTrue(timezone in self.LT_ins.timezone[0] or
timezone in self.LT_ins.timezone[1],
"timezone %s not found in %s" %
(timezone, self.LT_ins.timezone))
def test_date_time(self):
        # Check that LC_date_time, LC_date, and LC_time are correct.
        # The magic date is used to avoid issues with %c when the day of
        # the month is a single digit and has a leading space; this is not an
        # issue since strptime still parses it correctly.  The problem is
        # testing these directives for correctness by comparing strftime
        # output.
magic_date = (1999, 3, 17, 22, 44, 55, 2, 76, 0)
strftime_output = time.strftime("%c", magic_date)
self.assertEqual(time.strftime(self.LT_ins.LC_date_time, magic_date),
strftime_output, "LC_date_time incorrect")
strftime_output = time.strftime("%x", magic_date)
self.assertEqual(time.strftime(self.LT_ins.LC_date, magic_date),
strftime_output, "LC_date incorrect")
strftime_output = time.strftime("%X", magic_date)
self.assertEqual(time.strftime(self.LT_ins.LC_time, magic_date),
strftime_output, "LC_time incorrect")
LT = _strptime.LocaleTime()
LT.am_pm = ('', '')
self.assertTrue(LT.LC_time, "LocaleTime's LC directives cannot handle "
"empty strings")
def test_lang(self):
# Make sure lang is set to what _getlang() returns
# Assuming locale has not changed between now and when self.LT_ins was created
self.assertEqual(self.LT_ins.lang, _strptime._getlang())
class TimeRETests(unittest.TestCase):
"""Tests for TimeRE."""
def setUp(self):
"""Construct generic TimeRE object."""
self.time_re = _strptime.TimeRE()
self.locale_time = _strptime.LocaleTime()
def test_pattern(self):
# Test TimeRE.pattern
pattern_string = self.time_re.pattern(r"%a %A %d")
self.assertTrue(pattern_string.find(self.locale_time.a_weekday[2]) != -1,
"did not find abbreviated weekday in pattern string '%s'" %
pattern_string)
self.assertTrue(pattern_string.find(self.locale_time.f_weekday[4]) != -1,
"did not find full weekday in pattern string '%s'" %
pattern_string)
self.assertTrue(pattern_string.find(self.time_re['d']) != -1,
"did not find 'd' directive pattern string '%s'" %
pattern_string)
def test_pattern_escaping(self):
        # Make sure any characters in the format string that might be taken
        # as regex syntax are escaped.
        pattern_string = self.time_re.pattern(r"\d+")
self.assertIn(r"\\d\+", pattern_string,
"%s does not have re characters escaped properly" %
pattern_string)
def test_compile(self):
# Check that compiled regex is correct
found = self.time_re.compile(r"%A").match(self.locale_time.f_weekday[6])
self.assertTrue(found and found.group('A') == self.locale_time.f_weekday[6],
"re object for '%A' failed")
compiled = self.time_re.compile(r"%a %b")
found = compiled.match("%s %s" % (self.locale_time.a_weekday[4],
self.locale_time.a_month[4]))
self.assertTrue(found,
"Match failed with '%s' regex and '%s' string" %
(compiled.pattern, "%s %s" % (self.locale_time.a_weekday[4],
self.locale_time.a_month[4])))
self.assertTrue(found.group('a') == self.locale_time.a_weekday[4] and
found.group('b') == self.locale_time.a_month[4],
"re object couldn't find the abbreviated weekday month in "
"'%s' using '%s'; group 'a' = '%s', group 'b' = %s'" %
(found.string, found.re.pattern, found.group('a'),
found.group('b')))
for directive in ('a','A','b','B','c','d','H','I','j','m','M','p','S',
'U','w','W','x','X','y','Y','Z','%'):
compiled = self.time_re.compile("%" + directive)
found = compiled.match(time.strftime("%" + directive))
self.assertTrue(found, "Matching failed on '%s' using '%s' regex" %
(time.strftime("%" + directive),
compiled.pattern))
def test_blankpattern(self):
        # Make sure no regex is generated when a locale value tuple is empty.
        # Fixes bug #661354.
test_locale = _strptime.LocaleTime()
test_locale.timezone = (frozenset(), frozenset())
self.assertEqual(_strptime.TimeRE(test_locale).pattern("%Z"), '',
"with timezone == ('',''), TimeRE().pattern('%Z') != ''")
def test_matching_with_escapes(self):
# Make sure a format that requires escaping of characters works
        compiled_re = self.time_re.compile(r"\w+ %m")
        found = compiled_re.match(r"\w+ 10")
        self.assertTrue(found, r"Escaping of format '\w+ 10' failed")
def test_locale_data_w_regex_metacharacters(self):
# Check that if locale data contains regex metacharacters they are
# escaped properly.
# Discovered by bug #1039270 .
locale_time = _strptime.LocaleTime()
locale_time.timezone = (frozenset(("utc", "gmt",
"Tokyo (standard time)")),
frozenset("Tokyo (daylight time)"))
time_re = _strptime.TimeRE(locale_time)
self.assertTrue(time_re.compile("%Z").match("Tokyo (standard time)"),
"locale data that contains regex metacharacters is not"
" properly escaped")
def test_whitespace_substitution(self):
        # When the pattern contains whitespace, make sure it is taken into
        # account so as not to allow subpatterns to end up next to each other
        # and "steal" characters from each other.
pattern = self.time_re.pattern('%j %H')
self.assertFalse(re.match(pattern, "180"))
self.assertTrue(re.match(pattern, "18 0"))
class StrptimeTests(unittest.TestCase):
"""Tests for _strptime.strptime."""
def setUp(self):
"""Create testing time tuple."""
self.time_tuple = time.gmtime()
def test_ValueError(self):
# Make sure ValueError is raised when match fails or format is bad
self.assertRaises(ValueError, _strptime._strptime_time, data_string="%d",
format="%A")
for bad_format in ("%", "% ", "%e"):
try:
_strptime._strptime_time("2005", bad_format)
except ValueError:
continue
except Exception as err:
self.fail("'%s' raised %s, not ValueError" %
(bad_format, err.__class__.__name__))
else:
self.fail("'%s' did not raise ValueError" % bad_format)
def test_strptime_exception_context(self):
# check that this doesn't chain exceptions needlessly (see #17572)
with self.assertRaises(ValueError) as e:
_strptime._strptime_time('', '%D')
self.assertIs(e.exception.__suppress_context__, True)
# additional check for IndexError branch (issue #19545)
with self.assertRaises(ValueError) as e:
_strptime._strptime_time('19', '%Y %')
self.assertIs(e.exception.__suppress_context__, True)
def test_unconverteddata(self):
# Check ValueError is raised when there is unconverted data
self.assertRaises(ValueError, _strptime._strptime_time, "10 12", "%m")
def helper(self, directive, position):
"""Helper fxn in testing."""
strf_output = time.strftime("%" + directive, self.time_tuple)
strp_output = _strptime._strptime_time(strf_output, "%" + directive)
self.assertTrue(strp_output[position] == self.time_tuple[position],
"testing of '%s' directive failed; '%s' -> %s != %s" %
(directive, strf_output, strp_output[position],
self.time_tuple[position]))
def test_year(self):
# Test that the year is handled properly
for directive in ('y', 'Y'):
self.helper(directive, 0)
# Must also make sure %y values are correct for bounds set by Open Group
for century, bounds in ((1900, ('69', '99')), (2000, ('00', '68'))):
for bound in bounds:
strp_output = _strptime._strptime_time(bound, '%y')
expected_result = century + int(bound)
self.assertTrue(strp_output[0] == expected_result,
"'y' test failed; passed in '%s' "
"and returned '%s'" % (bound, strp_output[0]))
def test_month(self):
# Test for month directives
for directive in ('B', 'b', 'm'):
self.helper(directive, 1)
def test_day(self):
# Test for day directives
self.helper('d', 2)
def test_hour(self):
# Test hour directives
self.helper('H', 3)
strf_output = time.strftime("%I %p", self.time_tuple)
strp_output = _strptime._strptime_time(strf_output, "%I %p")
self.assertTrue(strp_output[3] == self.time_tuple[3],
"testing of '%%I %%p' directive failed; '%s' -> %s != %s" %
(strf_output, strp_output[3], self.time_tuple[3]))
def test_minute(self):
# Test minute directives
self.helper('M', 4)
def test_second(self):
# Test second directives
self.helper('S', 5)
def test_fraction(self):
# Test microseconds
import datetime
d = datetime.datetime(2012, 12, 20, 12, 34, 56, 78987)
tup, frac = _strptime._strptime(str(d), format="%Y-%m-%d %H:%M:%S.%f")
self.assertEqual(frac, d.microsecond)
def test_weekday(self):
# Test weekday directives
for directive in ('A', 'a', 'w'):
self.helper(directive,6)
def test_julian(self):
# Test julian directives
self.helper('j', 7)
def test_timezone(self):
# Test timezone directives.
        # When gmtime() is used with %Z, the entire result of strftime() is
        # empty.  The check for equal timezone names deals with bad locale
        # info when this occurs; first found in FreeBSD 4.4.
strp_output = _strptime._strptime_time("UTC", "%Z")
self.assertEqual(strp_output.tm_isdst, 0)
strp_output = _strptime._strptime_time("GMT", "%Z")
self.assertEqual(strp_output.tm_isdst, 0)
time_tuple = time.localtime()
strf_output = time.strftime("%Z") #UTC does not have a timezone
strp_output = _strptime._strptime_time(strf_output, "%Z")
locale_time = _strptime.LocaleTime()
if time.tzname[0] != time.tzname[1] or not time.daylight:
self.assertTrue(strp_output[8] == time_tuple[8],
"timezone check failed; '%s' -> %s != %s" %
(strf_output, strp_output[8], time_tuple[8]))
else:
self.assertTrue(strp_output[8] == -1,
"LocaleTime().timezone has duplicate values and "
"time.daylight but timezone value not set to -1")
def test_bad_timezone(self):
# Explicitly test possibility of bad timezone;
# when time.tzname[0] == time.tzname[1] and time.daylight
tz_name = time.tzname[0]
if tz_name.upper() in ("UTC", "GMT"):
self.skipTest('need non-UTC/GMT timezone')
try:
original_tzname = time.tzname
original_daylight = time.daylight
time.tzname = (tz_name, tz_name)
time.daylight = 1
tz_value = _strptime._strptime_time(tz_name, "%Z")[8]
self.assertEqual(tz_value, -1,
"%s lead to a timezone value of %s instead of -1 when "
"time.daylight set to %s and passing in %s" %
(time.tzname, tz_value, time.daylight, tz_name))
finally:
time.tzname = original_tzname
time.daylight = original_daylight
def test_date_time(self):
# Test %c directive
for position in range(6):
self.helper('c', position)
def test_date(self):
# Test %x directive
for position in range(0,3):
self.helper('x', position)
def test_time(self):
# Test %X directive
for position in range(3,6):
self.helper('X', position)
def test_percent(self):
# Make sure % signs are handled properly
strf_output = time.strftime("%m %% %Y", self.time_tuple)
strp_output = _strptime._strptime_time(strf_output, "%m %% %Y")
self.assertTrue(strp_output[0] == self.time_tuple[0] and
strp_output[1] == self.time_tuple[1],
"handling of percent sign failed")
def test_caseinsensitive(self):
# Should handle names case-insensitively.
strf_output = time.strftime("%B", self.time_tuple)
self.assertTrue(_strptime._strptime_time(strf_output.upper(), "%B"),
"strptime does not handle ALL-CAPS names properly")
self.assertTrue(_strptime._strptime_time(strf_output.lower(), "%B"),
"strptime does not handle lowercase names properly")
self.assertTrue(_strptime._strptime_time(strf_output.capitalize(), "%B"),
"strptime does not handle capword names properly")
def test_defaults(self):
        # Default return value should be (1900, 1, 1, 0, 0, 0, 0, 1, -1)
defaults = (1900, 1, 1, 0, 0, 0, 0, 1, -1)
strp_output = _strptime._strptime_time('1', '%m')
self.assertTrue(strp_output == defaults,
"Default values for strptime() are incorrect;"
" %s != %s" % (strp_output, defaults))
def test_escaping(self):
# Make sure all characters that have regex significance are escaped.
# Parentheses are in a purposeful order; will cause an error of
# unbalanced parentheses when the regex is compiled if they are not
# escaped.
# Test instigated by bug #796149 .
need_escaping = ".^$*+?{}\[]|)("
self.assertTrue(_strptime._strptime_time(need_escaping, need_escaping))
def test_feb29_on_leap_year_without_year(self):
time.strptime("Feb 29", "%b %d")
def test_mar1_comes_after_feb29_even_when_omitting_the_year(self):
self.assertLess(
time.strptime("Feb 29", "%b %d"),
time.strptime("Mar 1", "%b %d"))
class Strptime12AMPMTests(unittest.TestCase):
"""Test a _strptime regression in '%I %p' at 12 noon (12 PM)"""
def test_twelve_noon_midnight(self):
eq = self.assertEqual
eq(time.strptime('12 PM', '%I %p')[3], 12)
eq(time.strptime('12 AM', '%I %p')[3], 0)
eq(_strptime._strptime_time('12 PM', '%I %p')[3], 12)
eq(_strptime._strptime_time('12 AM', '%I %p')[3], 0)
class JulianTests(unittest.TestCase):
"""Test a _strptime regression that all julian (1-366) are accepted"""
def test_all_julian_days(self):
eq = self.assertEqual
for i in range(1, 367):
            # use 2004: since it is a leap year, it has 366 days
eq(_strptime._strptime_time('%d 2004' % i, '%j %Y')[7], i)
class CalculationTests(unittest.TestCase):
"""Test that strptime() fills in missing info correctly"""
def setUp(self):
self.time_tuple = time.gmtime()
def test_julian_calculation(self):
# Make sure that when Julian is missing that it is calculated
format_string = "%Y %m %d %H %M %S %w %Z"
result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
format_string)
self.assertTrue(result.tm_yday == self.time_tuple.tm_yday,
"Calculation of tm_yday failed; %s != %s" %
(result.tm_yday, self.time_tuple.tm_yday))
def test_gregorian_calculation(self):
# Test that Gregorian date can be calculated from Julian day
format_string = "%Y %H %M %S %w %j %Z"
result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
format_string)
self.assertTrue(result.tm_year == self.time_tuple.tm_year and
result.tm_mon == self.time_tuple.tm_mon and
result.tm_mday == self.time_tuple.tm_mday,
"Calculation of Gregorian date failed;"
"%s-%s-%s != %s-%s-%s" %
(result.tm_year, result.tm_mon, result.tm_mday,
self.time_tuple.tm_year, self.time_tuple.tm_mon,
self.time_tuple.tm_mday))
def test_day_of_week_calculation(self):
# Test that the day of the week is calculated as needed
format_string = "%Y %m %d %H %S %j %Z"
result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
format_string)
self.assertTrue(result.tm_wday == self.time_tuple.tm_wday,
"Calculation of day of the week failed;"
"%s != %s" % (result.tm_wday, self.time_tuple.tm_wday))
def test_week_of_year_and_day_of_week_calculation(self):
# Should be able to infer date if given year, week of year (%U or %W)
# and day of the week
def test_helper(ymd_tuple, test_reason):
for directive in ('W', 'U'):
format_string = "%%Y %%%s %%w" % directive
dt_date = datetime_date(*ymd_tuple)
strp_input = dt_date.strftime(format_string)
strp_output = _strptime._strptime_time(strp_input, format_string)
self.assertTrue(strp_output[:3] == ymd_tuple,
"%s(%s) test failed w/ '%s': %s != %s (%s != %s)" %
(test_reason, directive, strp_input,
strp_output[:3], ymd_tuple,
strp_output[7], dt_date.timetuple()[7]))
test_helper((1901, 1, 3), "week 0")
test_helper((1901, 1, 8), "common case")
test_helper((1901, 1, 13), "day on Sunday")
test_helper((1901, 1, 14), "day on Monday")
test_helper((1905, 1, 1), "Jan 1 on Sunday")
test_helper((1906, 1, 1), "Jan 1 on Monday")
test_helper((1906, 1, 7), "first Sunday in a year starting on Monday")
test_helper((1905, 12, 31), "Dec 31 on Sunday")
test_helper((1906, 12, 31), "Dec 31 on Monday")
test_helper((2008, 12, 29), "Monday in the last week of the year")
test_helper((2008, 12, 22), "Monday in the second-to-last week of the "
"year")
test_helper((1978, 10, 23), "randomly chosen date")
test_helper((2004, 12, 18), "randomly chosen date")
test_helper((1978, 10, 23), "year starting and ending on Monday while "
"date not on Sunday or Monday")
test_helper((1917, 12, 17), "year starting and ending on Monday with "
"a Monday not at the beginning or end "
"of the year")
test_helper((1917, 12, 31), "Dec 31 on Monday with year starting and "
"ending on Monday")
test_helper((2007, 1, 7), "First Sunday of 2007")
test_helper((2007, 1, 14), "Second Sunday of 2007")
test_helper((2006, 12, 31), "Last Sunday of 2006")
test_helper((2006, 12, 24), "Second to last Sunday of 2006")
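        # --- Editorial worked example (not part of the original test): the
        # same inference done directly.  2008-12-29 is the Monday of week 52
        # when weeks start on Monday (%W), and %w uses 0 for Sunday, so
        # Monday is 1:
        #     >>> import time
        #     >>> time.strptime('2008 52 1', '%Y %W %w')[:3]
        #     (2008, 12, 29)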
class CacheTests(unittest.TestCase):
"""Test that caching works properly."""
def test_time_re_recreation(self):
        # Make sure the cache is recreated when the current locale does not
        # match the one the cached object was created with.
_strptime._strptime_time("10", "%d")
_strptime._strptime_time("2005", "%Y")
_strptime._TimeRE_cache.locale_time.lang = "Ni"
original_time_re = _strptime._TimeRE_cache
_strptime._strptime_time("10", "%d")
self.assertIsNot(original_time_re, _strptime._TimeRE_cache)
self.assertEqual(len(_strptime._regex_cache), 1)
def test_regex_cleanup(self):
# Make sure cached regexes are discarded when cache becomes "full".
try:
del _strptime._regex_cache['%d']
except KeyError:
pass
bogus_key = 0
while len(_strptime._regex_cache) <= _strptime._CACHE_MAX_SIZE:
_strptime._regex_cache[bogus_key] = None
bogus_key += 1
_strptime._strptime_time("10", "%d")
self.assertEqual(len(_strptime._regex_cache), 1)
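    # --- Editorial illustration (not part of the original test): _strptime's
    # regex cache is a plain dict that is flushed wholesale once it grows
    # past _CACHE_MAX_SIZE, rather than evicting one entry at a time.  A
    # minimal analogue of that policy:
    #     _cache = {}
    #     def cached_compile(fmt, compile_fn, max_size=5):
    #         if len(_cache) > max_size:
    #             _cache.clear()  # flush everything, as _strptime does
    #         if fmt not in _cache:
    #             _cache[fmt] = compile_fn(fmt)
    #         return _cache[fmt]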
def test_new_localetime(self):
# A new LocaleTime instance should be created when a new TimeRE object
# is created.
locale_time_id = _strptime._TimeRE_cache.locale_time
_strptime._TimeRE_cache.locale_time.lang = "Ni"
_strptime._strptime_time("10", "%d")
self.assertIsNot(locale_time_id, _strptime._TimeRE_cache.locale_time)
def test_TimeRE_recreation(self):
# The TimeRE instance should be recreated upon changing the locale.
locale_info = locale.getlocale(locale.LC_TIME)
try:
locale.setlocale(locale.LC_TIME, ('en_US', 'UTF8'))
except locale.Error:
self.skipTest('test needs en_US.UTF8 locale')
try:
_strptime._strptime_time('10', '%d')
# Get id of current cache object.
first_time_re = _strptime._TimeRE_cache
try:
# Change the locale and force a recreation of the cache.
locale.setlocale(locale.LC_TIME, ('de_DE', 'UTF8'))
_strptime._strptime_time('10', '%d')
# Get the new cache object's id.
second_time_re = _strptime._TimeRE_cache
# They should not be equal.
self.assertIsNot(first_time_re, second_time_re)
            # Possibly the test locale is not supported while the initial
            # locale is.  If this is the case, just suppress the exception
            # and fall through to resetting the original locale.
except locale.Error:
self.skipTest('test needs de_DE.UTF8 locale')
# Make sure we don't trample on the locale setting once we leave the
# test.
finally:
locale.setlocale(locale.LC_TIME, locale_info)
def test_main():
support.run_unittest(
getlang_Tests,
LocaleTime_Tests,
TimeRETests,
StrptimeTests,
Strptime12AMPMTests,
JulianTests,
CalculationTests,
CacheTests
)
if __name__ == '__main__':
test_main()
=======
"""PyUnit testing against strptime"""
import unittest
import time
import locale
import re
import sys
from test import support
from datetime import date as datetime_date
import _strptime
class getlang_Tests(unittest.TestCase):
"""Test _getlang"""
def test_basic(self):
self.assertEqual(_strptime._getlang(), locale.getlocale(locale.LC_TIME))
class LocaleTime_Tests(unittest.TestCase):
"""Tests for _strptime.LocaleTime.
All values are lower-cased when stored in LocaleTime, so make sure to
compare values after running ``lower`` on them.
"""
def setUp(self):
"""Create time tuple based on current time."""
self.time_tuple = time.localtime()
self.LT_ins = _strptime.LocaleTime()
def compare_against_time(self, testing, directive, tuple_position,
error_msg):
"""Helper method that tests testing against directive based on the
tuple_position of time_tuple. Uses error_msg as error message.
"""
strftime_output = time.strftime(directive, self.time_tuple).lower()
comparison = testing[self.time_tuple[tuple_position]]
self.assertIn(strftime_output, testing,
"%s: not found in tuple" % error_msg)
self.assertEqual(comparison, strftime_output,
"%s: position within tuple incorrect; %s != %s" %
(error_msg, comparison, strftime_output))
def test_weekday(self):
# Make sure that full and abbreviated weekday names are correct in
# both string and position with tuple
self.compare_against_time(self.LT_ins.f_weekday, '%A', 6,
"Testing of full weekday name failed")
self.compare_against_time(self.LT_ins.a_weekday, '%a', 6,
"Testing of abbreviated weekday name failed")
def test_month(self):
# Test full and abbreviated month names; both string and position
# within the tuple
self.compare_against_time(self.LT_ins.f_month, '%B', 1,
"Testing against full month name failed")
self.compare_against_time(self.LT_ins.a_month, '%b', 1,
"Testing against abbreviated month name failed")
def test_am_pm(self):
# Make sure AM/PM representation done properly
strftime_output = time.strftime("%p", self.time_tuple).lower()
self.assertIn(strftime_output, self.LT_ins.am_pm,
"AM/PM representation not in tuple")
if self.time_tuple[3] < 12: position = 0
else: position = 1
self.assertEqual(self.LT_ins.am_pm[position], strftime_output,
"AM/PM representation in the wrong position within the tuple")
def test_timezone(self):
# Make sure timezone is correct
timezone = time.strftime("%Z", self.time_tuple).lower()
if timezone:
self.assertTrue(timezone in self.LT_ins.timezone[0] or
timezone in self.LT_ins.timezone[1],
"timezone %s not found in %s" %
(timezone, self.LT_ins.timezone))
def test_date_time(self):
# Check that LC_date_time, LC_date, and LC_time are correct
# the magic date is used so as to not have issues with %c when day of
# the month is a single digit and has a leading space. This is not an
# issue since strptime still parses it correctly. The problem is
# testing these directives for correctness by comparing strftime
# output.
magic_date = (1999, 3, 17, 22, 44, 55, 2, 76, 0)
strftime_output = time.strftime("%c", magic_date)
self.assertEqual(time.strftime(self.LT_ins.LC_date_time, magic_date),
strftime_output, "LC_date_time incorrect")
strftime_output = time.strftime("%x", magic_date)
self.assertEqual(time.strftime(self.LT_ins.LC_date, magic_date),
strftime_output, "LC_date incorrect")
strftime_output = time.strftime("%X", magic_date)
self.assertEqual(time.strftime(self.LT_ins.LC_time, magic_date),
strftime_output, "LC_time incorrect")
LT = _strptime.LocaleTime()
LT.am_pm = ('', '')
self.assertTrue(LT.LC_time, "LocaleTime's LC directives cannot handle "
"empty strings")
def test_lang(self):
# Make sure lang is set to what _getlang() returns
# Assuming locale has not changed between now and when self.LT_ins was created
self.assertEqual(self.LT_ins.lang, _strptime._getlang())
class TimeRETests(unittest.TestCase):
"""Tests for TimeRE."""
def setUp(self):
"""Construct generic TimeRE object."""
self.time_re = _strptime.TimeRE()
self.locale_time = _strptime.LocaleTime()
def test_pattern(self):
# Test TimeRE.pattern
pattern_string = self.time_re.pattern(r"%a %A %d")
self.assertTrue(pattern_string.find(self.locale_time.a_weekday[2]) != -1,
"did not find abbreviated weekday in pattern string '%s'" %
pattern_string)
self.assertTrue(pattern_string.find(self.locale_time.f_weekday[4]) != -1,
"did not find full weekday in pattern string '%s'" %
pattern_string)
self.assertTrue(pattern_string.find(self.time_re['d']) != -1,
"did not find 'd' directive pattern string '%s'" %
pattern_string)
def test_pattern_escaping(self):
# Make sure any characters in the format string that might be taken as
# regex syntax is escaped.
pattern_string = self.time_re.pattern("\d+")
self.assertIn(r"\\d\+", pattern_string,
"%s does not have re characters escaped properly" %
pattern_string)
def test_compile(self):
# Check that compiled regex is correct
found = self.time_re.compile(r"%A").match(self.locale_time.f_weekday[6])
self.assertTrue(found and found.group('A') == self.locale_time.f_weekday[6],
"re object for '%A' failed")
compiled = self.time_re.compile(r"%a %b")
found = compiled.match("%s %s" % (self.locale_time.a_weekday[4],
self.locale_time.a_month[4]))
self.assertTrue(found,
"Match failed with '%s' regex and '%s' string" %
(compiled.pattern, "%s %s" % (self.locale_time.a_weekday[4],
self.locale_time.a_month[4])))
self.assertTrue(found.group('a') == self.locale_time.a_weekday[4] and
found.group('b') == self.locale_time.a_month[4],
"re object couldn't find the abbreviated weekday month in "
"'%s' using '%s'; group 'a' = '%s', group 'b' = %s'" %
(found.string, found.re.pattern, found.group('a'),
found.group('b')))
for directive in ('a','A','b','B','c','d','H','I','j','m','M','p','S',
'U','w','W','x','X','y','Y','Z','%'):
compiled = self.time_re.compile("%" + directive)
found = compiled.match(time.strftime("%" + directive))
self.assertTrue(found, "Matching failed on '%s' using '%s' regex" %
(time.strftime("%" + directive),
compiled.pattern))
def test_blankpattern(self):
# Make sure when tuple or something has no values no regex is generated.
# Fixes bug #661354
test_locale = _strptime.LocaleTime()
test_locale.timezone = (frozenset(), frozenset())
self.assertEqual(_strptime.TimeRE(test_locale).pattern("%Z"), '',
"with timezone == ('',''), TimeRE().pattern('%Z') != ''")
def test_matching_with_escapes(self):
# Make sure a format that requires escaping of characters works
compiled_re = self.time_re.compile("\w+ %m")
found = compiled_re.match("\w+ 10")
self.assertTrue(found, "Escaping failed of format '\w+ 10'")
def test_locale_data_w_regex_metacharacters(self):
# Check that if locale data contains regex metacharacters they are
# escaped properly.
# Discovered by bug #1039270 .
locale_time = _strptime.LocaleTime()
locale_time.timezone = (frozenset(("utc", "gmt",
"Tokyo (standard time)")),
frozenset("Tokyo (daylight time)"))
time_re = _strptime.TimeRE(locale_time)
self.assertTrue(time_re.compile("%Z").match("Tokyo (standard time)"),
"locale data that contains regex metacharacters is not"
" properly escaped")
def test_whitespace_substitution(self):
# When pattern contains whitespace, make sure it is taken into account
# so as to not allow to subpatterns to end up next to each other and
# "steal" characters from each other.
pattern = self.time_re.pattern('%j %H')
self.assertFalse(re.match(pattern, "180"))
self.assertTrue(re.match(pattern, "18 0"))
class StrptimeTests(unittest.TestCase):
"""Tests for _strptime.strptime."""
def setUp(self):
"""Create testing time tuple."""
self.time_tuple = time.gmtime()
def test_ValueError(self):
# Make sure ValueError is raised when match fails or format is bad
self.assertRaises(ValueError, _strptime._strptime_time, data_string="%d",
format="%A")
for bad_format in ("%", "% ", "%e"):
try:
_strptime._strptime_time("2005", bad_format)
except ValueError:
continue
except Exception as err:
self.fail("'%s' raised %s, not ValueError" %
(bad_format, err.__class__.__name__))
else:
self.fail("'%s' did not raise ValueError" % bad_format)
def test_strptime_exception_context(self):
# check that this doesn't chain exceptions needlessly (see #17572)
with self.assertRaises(ValueError) as e:
_strptime._strptime_time('', '%D')
self.assertIs(e.exception.__suppress_context__, True)
# additional check for IndexError branch (issue #19545)
with self.assertRaises(ValueError) as e:
_strptime._strptime_time('19', '%Y %')
self.assertIs(e.exception.__suppress_context__, True)
def test_unconverteddata(self):
# Check ValueError is raised when there is unconverted data
self.assertRaises(ValueError, _strptime._strptime_time, "10 12", "%m")
def helper(self, directive, position):
"""Helper fxn in testing."""
strf_output = time.strftime("%" + directive, self.time_tuple)
strp_output = _strptime._strptime_time(strf_output, "%" + directive)
self.assertTrue(strp_output[position] == self.time_tuple[position],
"testing of '%s' directive failed; '%s' -> %s != %s" %
(directive, strf_output, strp_output[position],
self.time_tuple[position]))
def test_year(self):
# Test that the year is handled properly
for directive in ('y', 'Y'):
self.helper(directive, 0)
# Must also make sure %y values are correct for bounds set by Open Group
for century, bounds in ((1900, ('69', '99')), (2000, ('00', '68'))):
for bound in bounds:
strp_output = _strptime._strptime_time(bound, '%y')
expected_result = century + int(bound)
self.assertTrue(strp_output[0] == expected_result,
"'y' test failed; passed in '%s' "
"and returned '%s'" % (bound, strp_output[0]))
def test_month(self):
# Test for month directives
for directive in ('B', 'b', 'm'):
self.helper(directive, 1)
def test_day(self):
# Test for day directives
self.helper('d', 2)
def test_hour(self):
# Test hour directives
self.helper('H', 3)
strf_output = time.strftime("%I %p", self.time_tuple)
strp_output = _strptime._strptime_time(strf_output, "%I %p")
self.assertTrue(strp_output[3] == self.time_tuple[3],
"testing of '%%I %%p' directive failed; '%s' -> %s != %s" %
(strf_output, strp_output[3], self.time_tuple[3]))
def test_minute(self):
# Test minute directives
self.helper('M', 4)
def test_second(self):
# Test second directives
self.helper('S', 5)
def test_fraction(self):
# Test microseconds
import datetime
d = datetime.datetime(2012, 12, 20, 12, 34, 56, 78987)
tup, frac = _strptime._strptime(str(d), format="%Y-%m-%d %H:%M:%S.%f")
self.assertEqual(frac, d.microsecond)
def test_weekday(self):
# Test weekday directives
for directive in ('A', 'a', 'w'):
self.helper(directive,6)
def test_julian(self):
# Test julian directives
self.helper('j', 7)
def test_timezone(self):
# Test timezone directives.
# When gmtime() is used with %Z, entire result of strftime() is empty.
# Check for equal timezone names deals with bad locale info when this
# occurs; first found in FreeBSD 4.4.
strp_output = _strptime._strptime_time("UTC", "%Z")
self.assertEqual(strp_output.tm_isdst, 0)
strp_output = _strptime._strptime_time("GMT", "%Z")
self.assertEqual(strp_output.tm_isdst, 0)
time_tuple = time.localtime()
strf_output = time.strftime("%Z") #UTC does not have a timezone
strp_output = _strptime._strptime_time(strf_output, "%Z")
locale_time = _strptime.LocaleTime()
if time.tzname[0] != time.tzname[1] or not time.daylight:
self.assertTrue(strp_output[8] == time_tuple[8],
"timezone check failed; '%s' -> %s != %s" %
(strf_output, strp_output[8], time_tuple[8]))
else:
self.assertTrue(strp_output[8] == -1,
"LocaleTime().timezone has duplicate values and "
"time.daylight but timezone value not set to -1")
def test_bad_timezone(self):
# Explicitly test possibility of bad timezone;
# when time.tzname[0] == time.tzname[1] and time.daylight
tz_name = time.tzname[0]
if tz_name.upper() in ("UTC", "GMT"):
self.skipTest('need non-UTC/GMT timezone')
try:
original_tzname = time.tzname
original_daylight = time.daylight
time.tzname = (tz_name, tz_name)
time.daylight = 1
tz_value = _strptime._strptime_time(tz_name, "%Z")[8]
self.assertEqual(tz_value, -1,
"%s lead to a timezone value of %s instead of -1 when "
"time.daylight set to %s and passing in %s" %
(time.tzname, tz_value, time.daylight, tz_name))
finally:
time.tzname = original_tzname
time.daylight = original_daylight
def test_date_time(self):
# Test %c directive
for position in range(6):
self.helper('c', position)
def test_date(self):
# Test %x directive
for position in range(0,3):
self.helper('x', position)
def test_time(self):
# Test %X directive
for position in range(3,6):
self.helper('X', position)
def test_percent(self):
# Make sure % signs are handled properly
strf_output = time.strftime("%m %% %Y", self.time_tuple)
strp_output = _strptime._strptime_time(strf_output, "%m %% %Y")
self.assertTrue(strp_output[0] == self.time_tuple[0] and
strp_output[1] == self.time_tuple[1],
"handling of percent sign failed")
def test_caseinsensitive(self):
# Should handle names case-insensitively.
strf_output = time.strftime("%B", self.time_tuple)
self.assertTrue(_strptime._strptime_time(strf_output.upper(), "%B"),
"strptime does not handle ALL-CAPS names properly")
self.assertTrue(_strptime._strptime_time(strf_output.lower(), "%B"),
"strptime does not handle lowercase names properly")
self.assertTrue(_strptime._strptime_time(strf_output.capitalize(), "%B"),
"strptime does not handle capword names properly")
def test_defaults(self):
# Default return value should be (1900, 1, 1, 0, 0, 0, 0, 1, 0)
defaults = (1900, 1, 1, 0, 0, 0, 0, 1, -1)
strp_output = _strptime._strptime_time('1', '%m')
self.assertTrue(strp_output == defaults,
"Default values for strptime() are incorrect;"
" %s != %s" % (strp_output, defaults))
def test_escaping(self):
# Make sure all characters that have regex significance are escaped.
# Parentheses are in a purposeful order; will cause an error of
# unbalanced parentheses when the regex is compiled if they are not
# escaped.
# Test instigated by bug #796149 .
need_escaping = ".^$*+?{}\[]|)("
self.assertTrue(_strptime._strptime_time(need_escaping, need_escaping))
def test_feb29_on_leap_year_without_year(self):
time.strptime("Feb 29", "%b %d")
def test_mar1_comes_after_feb29_even_when_omitting_the_year(self):
self.assertLess(
time.strptime("Feb 29", "%b %d"),
time.strptime("Mar 1", "%b %d"))
class Strptime12AMPMTests(unittest.TestCase):
"""Test a _strptime regression in '%I %p' at 12 noon (12 PM)"""
def test_twelve_noon_midnight(self):
eq = self.assertEqual
eq(time.strptime('12 PM', '%I %p')[3], 12)
eq(time.strptime('12 AM', '%I %p')[3], 0)
eq(_strptime._strptime_time('12 PM', '%I %p')[3], 12)
eq(_strptime._strptime_time('12 AM', '%I %p')[3], 0)
class JulianTests(unittest.TestCase):
"""Test a _strptime regression that all julian (1-366) are accepted"""
def test_all_julian_days(self):
eq = self.assertEqual
for i in range(1, 367):
# use 2004, since it is a leap year, we have 366 days
eq(_strptime._strptime_time('%d 2004' % i, '%j %Y')[7], i)
class CalculationTests(unittest.TestCase):
"""Test that strptime() fills in missing info correctly"""
def setUp(self):
self.time_tuple = time.gmtime()
def test_julian_calculation(self):
# Make sure that when Julian is missing that it is calculated
format_string = "%Y %m %d %H %M %S %w %Z"
result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
format_string)
self.assertTrue(result.tm_yday == self.time_tuple.tm_yday,
"Calculation of tm_yday failed; %s != %s" %
(result.tm_yday, self.time_tuple.tm_yday))
def test_gregorian_calculation(self):
# Test that Gregorian date can be calculated from Julian day
format_string = "%Y %H %M %S %w %j %Z"
result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
format_string)
self.assertTrue(result.tm_year == self.time_tuple.tm_year and
result.tm_mon == self.time_tuple.tm_mon and
result.tm_mday == self.time_tuple.tm_mday,
"Calculation of Gregorian date failed;"
"%s-%s-%s != %s-%s-%s" %
(result.tm_year, result.tm_mon, result.tm_mday,
self.time_tuple.tm_year, self.time_tuple.tm_mon,
self.time_tuple.tm_mday))
def test_day_of_week_calculation(self):
# Test that the day of the week is calculated as needed
format_string = "%Y %m %d %H %S %j %Z"
result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
format_string)
self.assertTrue(result.tm_wday == self.time_tuple.tm_wday,
"Calculation of day of the week failed;"
"%s != %s" % (result.tm_wday, self.time_tuple.tm_wday))
def test_week_of_year_and_day_of_week_calculation(self):
# Should be able to infer date if given year, week of year (%U or %W)
# and day of the week
def test_helper(ymd_tuple, test_reason):
for directive in ('W', 'U'):
format_string = "%%Y %%%s %%w" % directive
dt_date = datetime_date(*ymd_tuple)
strp_input = dt_date.strftime(format_string)
strp_output = _strptime._strptime_time(strp_input, format_string)
self.assertTrue(strp_output[:3] == ymd_tuple,
"%s(%s) test failed w/ '%s': %s != %s (%s != %s)" %
(test_reason, directive, strp_input,
strp_output[:3], ymd_tuple,
strp_output[7], dt_date.timetuple()[7]))
test_helper((1901, 1, 3), "week 0")
test_helper((1901, 1, 8), "common case")
test_helper((1901, 1, 13), "day on Sunday")
test_helper((1901, 1, 14), "day on Monday")
test_helper((1905, 1, 1), "Jan 1 on Sunday")
test_helper((1906, 1, 1), "Jan 1 on Monday")
test_helper((1906, 1, 7), "first Sunday in a year starting on Monday")
test_helper((1905, 12, 31), "Dec 31 on Sunday")
test_helper((1906, 12, 31), "Dec 31 on Monday")
test_helper((2008, 12, 29), "Monday in the last week of the year")
test_helper((2008, 12, 22), "Monday in the second-to-last week of the "
"year")
test_helper((1978, 10, 23), "randomly chosen date")
test_helper((2004, 12, 18), "randomly chosen date")
test_helper((1978, 10, 23), "year starting and ending on Monday while "
"date not on Sunday or Monday")
test_helper((1917, 12, 17), "year starting and ending on Monday with "
"a Monday not at the beginning or end "
"of the year")
test_helper((1917, 12, 31), "Dec 31 on Monday with year starting and "
"ending on Monday")
test_helper((2007, 1, 7), "First Sunday of 2007")
test_helper((2007, 1, 14), "Second Sunday of 2007")
test_helper((2006, 12, 31), "Last Sunday of 2006")
test_helper((2006, 12, 24), "Second to last Sunday of 2006")
class CacheTests(unittest.TestCase):
"""Test that caching works properly."""
def test_time_re_recreation(self):
# Make sure cache is recreated when current locale does not match what
# cached object was created with.
_strptime._strptime_time("10", "%d")
_strptime._strptime_time("2005", "%Y")
_strptime._TimeRE_cache.locale_time.lang = "Ni"
original_time_re = _strptime._TimeRE_cache
_strptime._strptime_time("10", "%d")
self.assertIsNot(original_time_re, _strptime._TimeRE_cache)
self.assertEqual(len(_strptime._regex_cache), 1)
def test_regex_cleanup(self):
# Make sure cached regexes are discarded when cache becomes "full".
try:
del _strptime._regex_cache['%d']
except KeyError:
pass
bogus_key = 0
while len(_strptime._regex_cache) <= _strptime._CACHE_MAX_SIZE:
_strptime._regex_cache[bogus_key] = None
bogus_key += 1
_strptime._strptime_time("10", "%d")
self.assertEqual(len(_strptime._regex_cache), 1)
def test_new_localetime(self):
# A new LocaleTime instance should be created when a new TimeRE object
# is created.
locale_time_id = _strptime._TimeRE_cache.locale_time
_strptime._TimeRE_cache.locale_time.lang = "Ni"
_strptime._strptime_time("10", "%d")
self.assertIsNot(locale_time_id, _strptime._TimeRE_cache.locale_time)
def test_TimeRE_recreation(self):
# The TimeRE instance should be recreated upon changing the locale.
locale_info = locale.getlocale(locale.LC_TIME)
try:
locale.setlocale(locale.LC_TIME, ('en_US', 'UTF8'))
except locale.Error:
self.skipTest('test needs en_US.UTF8 locale')
try:
_strptime._strptime_time('10', '%d')
# Get id of current cache object.
first_time_re = _strptime._TimeRE_cache
try:
# Change the locale and force a recreation of the cache.
locale.setlocale(locale.LC_TIME, ('de_DE', 'UTF8'))
_strptime._strptime_time('10', '%d')
# Get the new cache object's id.
second_time_re = _strptime._TimeRE_cache
# They should not be equal.
self.assertIsNot(first_time_re, second_time_re)
# Possible test locale is not supported while initial locale is.
# If this is the case just suppress the exception and fall-through
# to the resetting to the original locale.
except locale.Error:
self.skipTest('test needs de_DE.UTF8 locale')
# Make sure we don't trample on the locale setting once we leave the
# test.
finally:
locale.setlocale(locale.LC_TIME, locale_info)
def test_main():
support.run_unittest(
getlang_Tests,
LocaleTime_Tests,
TimeRETests,
StrptimeTests,
Strptime12AMPMTests,
JulianTests,
CalculationTests,
CacheTests
)
if __name__ == '__main__':
test_main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
"""PyUnit testing against strptime"""
import unittest
import time
import locale
import re
import sys
from test import support
from datetime import date as datetime_date
import _strptime
class getlang_Tests(unittest.TestCase):
"""Test _getlang"""
def test_basic(self):
self.assertEqual(_strptime._getlang(), locale.getlocale(locale.LC_TIME))
class LocaleTime_Tests(unittest.TestCase):
"""Tests for _strptime.LocaleTime.
All values are lower-cased when stored in LocaleTime, so make sure to
compare values after running ``lower`` on them.
"""
def setUp(self):
"""Create time tuple based on current time."""
self.time_tuple = time.localtime()
self.LT_ins = _strptime.LocaleTime()
def compare_against_time(self, testing, directive, tuple_position,
error_msg):
"""Helper method that tests testing against directive based on the
tuple_position of time_tuple. Uses error_msg as error message.
"""
strftime_output = time.strftime(directive, self.time_tuple).lower()
comparison = testing[self.time_tuple[tuple_position]]
self.assertIn(strftime_output, testing,
"%s: not found in tuple" % error_msg)
self.assertEqual(comparison, strftime_output,
"%s: position within tuple incorrect; %s != %s" %
(error_msg, comparison, strftime_output))
def test_weekday(self):
# Make sure that full and abbreviated weekday names are correct in
# both string and position with tuple
self.compare_against_time(self.LT_ins.f_weekday, '%A', 6,
"Testing of full weekday name failed")
self.compare_against_time(self.LT_ins.a_weekday, '%a', 6,
"Testing of abbreviated weekday name failed")
def test_month(self):
# Test full and abbreviated month names; both string and position
# within the tuple
self.compare_against_time(self.LT_ins.f_month, '%B', 1,
"Testing against full month name failed")
self.compare_against_time(self.LT_ins.a_month, '%b', 1,
"Testing against abbreviated month name failed")
def test_am_pm(self):
# Make sure AM/PM representation done properly
strftime_output = time.strftime("%p", self.time_tuple).lower()
self.assertIn(strftime_output, self.LT_ins.am_pm,
"AM/PM representation not in tuple")
if self.time_tuple[3] < 12: position = 0
else: position = 1
self.assertEqual(self.LT_ins.am_pm[position], strftime_output,
"AM/PM representation in the wrong position within the tuple")
def test_timezone(self):
# Make sure timezone is correct
timezone = time.strftime("%Z", self.time_tuple).lower()
if timezone:
self.assertTrue(timezone in self.LT_ins.timezone[0] or
timezone in self.LT_ins.timezone[1],
"timezone %s not found in %s" %
(timezone, self.LT_ins.timezone))
def test_date_time(self):
# Check that LC_date_time, LC_date, and LC_time are correct
# the magic date is used so as to not have issues with %c when day of
# the month is a single digit and has a leading space. This is not an
# issue since strptime still parses it correctly. The problem is
# testing these directives for correctness by comparing strftime
# output.
magic_date = (1999, 3, 17, 22, 44, 55, 2, 76, 0)
strftime_output = time.strftime("%c", magic_date)
self.assertEqual(time.strftime(self.LT_ins.LC_date_time, magic_date),
strftime_output, "LC_date_time incorrect")
strftime_output = time.strftime("%x", magic_date)
self.assertEqual(time.strftime(self.LT_ins.LC_date, magic_date),
strftime_output, "LC_date incorrect")
strftime_output = time.strftime("%X", magic_date)
self.assertEqual(time.strftime(self.LT_ins.LC_time, magic_date),
strftime_output, "LC_time incorrect")
LT = _strptime.LocaleTime()
LT.am_pm = ('', '')
self.assertTrue(LT.LC_time, "LocaleTime's LC directives cannot handle "
"empty strings")
def test_lang(self):
# Make sure lang is set to what _getlang() returns
# Assuming locale has not changed between now and when self.LT_ins was created
self.assertEqual(self.LT_ins.lang, _strptime._getlang())
class TimeRETests(unittest.TestCase):
"""Tests for TimeRE."""
def setUp(self):
"""Construct generic TimeRE object."""
self.time_re = _strptime.TimeRE()
self.locale_time = _strptime.LocaleTime()
def test_pattern(self):
# Test TimeRE.pattern
pattern_string = self.time_re.pattern(r"%a %A %d")
self.assertTrue(pattern_string.find(self.locale_time.a_weekday[2]) != -1,
"did not find abbreviated weekday in pattern string '%s'" %
pattern_string)
self.assertTrue(pattern_string.find(self.locale_time.f_weekday[4]) != -1,
"did not find full weekday in pattern string '%s'" %
pattern_string)
self.assertTrue(pattern_string.find(self.time_re['d']) != -1,
"did not find 'd' directive pattern string '%s'" %
pattern_string)
def test_pattern_escaping(self):
        # Make sure any characters in the format string that might be taken as
        # regex syntax are escaped.
pattern_string = self.time_re.pattern("\d+")
self.assertIn(r"\\d\+", pattern_string,
"%s does not have re characters escaped properly" %
pattern_string)
def test_compile(self):
# Check that compiled regex is correct
found = self.time_re.compile(r"%A").match(self.locale_time.f_weekday[6])
self.assertTrue(found and found.group('A') == self.locale_time.f_weekday[6],
"re object for '%A' failed")
compiled = self.time_re.compile(r"%a %b")
found = compiled.match("%s %s" % (self.locale_time.a_weekday[4],
self.locale_time.a_month[4]))
self.assertTrue(found,
"Match failed with '%s' regex and '%s' string" %
(compiled.pattern, "%s %s" % (self.locale_time.a_weekday[4],
self.locale_time.a_month[4])))
self.assertTrue(found.group('a') == self.locale_time.a_weekday[4] and
found.group('b') == self.locale_time.a_month[4],
"re object couldn't find the abbreviated weekday month in "
"'%s' using '%s'; group 'a' = '%s', group 'b' = %s'" %
(found.string, found.re.pattern, found.group('a'),
found.group('b')))
for directive in ('a','A','b','B','c','d','H','I','j','m','M','p','S',
'U','w','W','x','X','y','Y','Z','%'):
compiled = self.time_re.compile("%" + directive)
found = compiled.match(time.strftime("%" + directive))
self.assertTrue(found, "Matching failed on '%s' using '%s' regex" %
(time.strftime("%" + directive),
compiled.pattern))
def test_blankpattern(self):
        # Make sure that when a tuple or other sequence has no values, no regex is generated.
# Fixes bug #661354
test_locale = _strptime.LocaleTime()
test_locale.timezone = (frozenset(), frozenset())
self.assertEqual(_strptime.TimeRE(test_locale).pattern("%Z"), '',
"with timezone == ('',''), TimeRE().pattern('%Z') != ''")
def test_matching_with_escapes(self):
# Make sure a format that requires escaping of characters works
compiled_re = self.time_re.compile("\w+ %m")
found = compiled_re.match("\w+ 10")
self.assertTrue(found, "Escaping failed of format '\w+ 10'")
def test_locale_data_w_regex_metacharacters(self):
# Check that if locale data contains regex metacharacters they are
# escaped properly.
# Discovered by bug #1039270 .
locale_time = _strptime.LocaleTime()
locale_time.timezone = (frozenset(("utc", "gmt",
"Tokyo (standard time)")),
frozenset("Tokyo (daylight time)"))
time_re = _strptime.TimeRE(locale_time)
self.assertTrue(time_re.compile("%Z").match("Tokyo (standard time)"),
"locale data that contains regex metacharacters is not"
" properly escaped")
def test_whitespace_substitution(self):
# When pattern contains whitespace, make sure it is taken into account
        # so as not to allow subpatterns to end up next to each other and
        # "steal" characters from each other.
pattern = self.time_re.pattern('%j %H')
self.assertFalse(re.match(pattern, "180"))
self.assertTrue(re.match(pattern, "18 0"))
class StrptimeTests(unittest.TestCase):
"""Tests for _strptime.strptime."""
def setUp(self):
"""Create testing time tuple."""
self.time_tuple = time.gmtime()
def test_ValueError(self):
# Make sure ValueError is raised when match fails or format is bad
self.assertRaises(ValueError, _strptime._strptime_time, data_string="%d",
format="%A")
for bad_format in ("%", "% ", "%e"):
try:
_strptime._strptime_time("2005", bad_format)
except ValueError:
continue
except Exception as err:
self.fail("'%s' raised %s, not ValueError" %
(bad_format, err.__class__.__name__))
else:
self.fail("'%s' did not raise ValueError" % bad_format)
def test_strptime_exception_context(self):
# check that this doesn't chain exceptions needlessly (see #17572)
with self.assertRaises(ValueError) as e:
_strptime._strptime_time('', '%D')
self.assertIs(e.exception.__suppress_context__, True)
# additional check for IndexError branch (issue #19545)
with self.assertRaises(ValueError) as e:
_strptime._strptime_time('19', '%Y %')
self.assertIs(e.exception.__suppress_context__, True)
def test_unconverteddata(self):
# Check ValueError is raised when there is unconverted data
self.assertRaises(ValueError, _strptime._strptime_time, "10 12", "%m")
def helper(self, directive, position):
"""Helper fxn in testing."""
strf_output = time.strftime("%" + directive, self.time_tuple)
strp_output = _strptime._strptime_time(strf_output, "%" + directive)
self.assertTrue(strp_output[position] == self.time_tuple[position],
"testing of '%s' directive failed; '%s' -> %s != %s" %
(directive, strf_output, strp_output[position],
self.time_tuple[position]))
def test_year(self):
# Test that the year is handled properly
for directive in ('y', 'Y'):
self.helper(directive, 0)
# Must also make sure %y values are correct for bounds set by Open Group
for century, bounds in ((1900, ('69', '99')), (2000, ('00', '68'))):
for bound in bounds:
strp_output = _strptime._strptime_time(bound, '%y')
expected_result = century + int(bound)
self.assertTrue(strp_output[0] == expected_result,
"'y' test failed; passed in '%s' "
"and returned '%s'" % (bound, strp_output[0]))
def test_month(self):
# Test for month directives
for directive in ('B', 'b', 'm'):
self.helper(directive, 1)
def test_day(self):
# Test for day directives
self.helper('d', 2)
def test_hour(self):
# Test hour directives
self.helper('H', 3)
strf_output = time.strftime("%I %p", self.time_tuple)
strp_output = _strptime._strptime_time(strf_output, "%I %p")
self.assertTrue(strp_output[3] == self.time_tuple[3],
"testing of '%%I %%p' directive failed; '%s' -> %s != %s" %
(strf_output, strp_output[3], self.time_tuple[3]))
def test_minute(self):
# Test minute directives
self.helper('M', 4)
def test_second(self):
# Test second directives
self.helper('S', 5)
def test_fraction(self):
# Test microseconds
import datetime
d = datetime.datetime(2012, 12, 20, 12, 34, 56, 78987)
tup, frac = _strptime._strptime(str(d), format="%Y-%m-%d %H:%M:%S.%f")
self.assertEqual(frac, d.microsecond)
def test_weekday(self):
# Test weekday directives
for directive in ('A', 'a', 'w'):
self.helper(directive,6)
def test_julian(self):
# Test julian directives
self.helper('j', 7)
def test_timezone(self):
# Test timezone directives.
        # When gmtime() is used with %Z, the entire result of strftime() is
        # empty. The check for equal timezone names deals with bad locale info
        # when this occurs; first found on FreeBSD 4.4.
strp_output = _strptime._strptime_time("UTC", "%Z")
self.assertEqual(strp_output.tm_isdst, 0)
strp_output = _strptime._strptime_time("GMT", "%Z")
self.assertEqual(strp_output.tm_isdst, 0)
time_tuple = time.localtime()
strf_output = time.strftime("%Z") #UTC does not have a timezone
strp_output = _strptime._strptime_time(strf_output, "%Z")
locale_time = _strptime.LocaleTime()
if time.tzname[0] != time.tzname[1] or not time.daylight:
self.assertTrue(strp_output[8] == time_tuple[8],
"timezone check failed; '%s' -> %s != %s" %
(strf_output, strp_output[8], time_tuple[8]))
else:
self.assertTrue(strp_output[8] == -1,
"LocaleTime().timezone has duplicate values and "
"time.daylight but timezone value not set to -1")
def test_bad_timezone(self):
# Explicitly test possibility of bad timezone;
# when time.tzname[0] == time.tzname[1] and time.daylight
tz_name = time.tzname[0]
if tz_name.upper() in ("UTC", "GMT"):
self.skipTest('need non-UTC/GMT timezone')
try:
original_tzname = time.tzname
original_daylight = time.daylight
time.tzname = (tz_name, tz_name)
time.daylight = 1
tz_value = _strptime._strptime_time(tz_name, "%Z")[8]
self.assertEqual(tz_value, -1,
"%s lead to a timezone value of %s instead of -1 when "
"time.daylight set to %s and passing in %s" %
(time.tzname, tz_value, time.daylight, tz_name))
finally:
time.tzname = original_tzname
time.daylight = original_daylight
def test_date_time(self):
# Test %c directive
for position in range(6):
self.helper('c', position)
def test_date(self):
# Test %x directive
for position in range(0,3):
self.helper('x', position)
def test_time(self):
# Test %X directive
for position in range(3,6):
self.helper('X', position)
def test_percent(self):
# Make sure % signs are handled properly
strf_output = time.strftime("%m %% %Y", self.time_tuple)
strp_output = _strptime._strptime_time(strf_output, "%m %% %Y")
self.assertTrue(strp_output[0] == self.time_tuple[0] and
strp_output[1] == self.time_tuple[1],
"handling of percent sign failed")
def test_caseinsensitive(self):
# Should handle names case-insensitively.
strf_output = time.strftime("%B", self.time_tuple)
self.assertTrue(_strptime._strptime_time(strf_output.upper(), "%B"),
"strptime does not handle ALL-CAPS names properly")
self.assertTrue(_strptime._strptime_time(strf_output.lower(), "%B"),
"strptime does not handle lowercase names properly")
self.assertTrue(_strptime._strptime_time(strf_output.capitalize(), "%B"),
"strptime does not handle capword names properly")
def test_defaults(self):
        # Default return value should be (1900, 1, 1, 0, 0, 0, 0, 1, -1)
defaults = (1900, 1, 1, 0, 0, 0, 0, 1, -1)
strp_output = _strptime._strptime_time('1', '%m')
self.assertTrue(strp_output == defaults,
"Default values for strptime() are incorrect;"
" %s != %s" % (strp_output, defaults))
def test_escaping(self):
# Make sure all characters that have regex significance are escaped.
# Parentheses are in a purposeful order; will cause an error of
# unbalanced parentheses when the regex is compiled if they are not
# escaped.
# Test instigated by bug #796149 .
need_escaping = ".^$*+?{}\[]|)("
self.assertTrue(_strptime._strptime_time(need_escaping, need_escaping))
def test_feb29_on_leap_year_without_year(self):
time.strptime("Feb 29", "%b %d")
def test_mar1_comes_after_feb29_even_when_omitting_the_year(self):
self.assertLess(
time.strptime("Feb 29", "%b %d"),
time.strptime("Mar 1", "%b %d"))
class Strptime12AMPMTests(unittest.TestCase):
"""Test a _strptime regression in '%I %p' at 12 noon (12 PM)"""
def test_twelve_noon_midnight(self):
eq = self.assertEqual
eq(time.strptime('12 PM', '%I %p')[3], 12)
eq(time.strptime('12 AM', '%I %p')[3], 0)
eq(_strptime._strptime_time('12 PM', '%I %p')[3], 12)
eq(_strptime._strptime_time('12 AM', '%I %p')[3], 0)
class JulianTests(unittest.TestCase):
"""Test a _strptime regression that all julian (1-366) are accepted"""
def test_all_julian_days(self):
eq = self.assertEqual
for i in range(1, 367):
# use 2004, since it is a leap year, we have 366 days
eq(_strptime._strptime_time('%d 2004' % i, '%j %Y')[7], i)
class CalculationTests(unittest.TestCase):
"""Test that strptime() fills in missing info correctly"""
def setUp(self):
self.time_tuple = time.gmtime()
def test_julian_calculation(self):
# Make sure that when Julian is missing that it is calculated
format_string = "%Y %m %d %H %M %S %w %Z"
result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
format_string)
self.assertTrue(result.tm_yday == self.time_tuple.tm_yday,
"Calculation of tm_yday failed; %s != %s" %
(result.tm_yday, self.time_tuple.tm_yday))
def test_gregorian_calculation(self):
# Test that Gregorian date can be calculated from Julian day
format_string = "%Y %H %M %S %w %j %Z"
result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
format_string)
self.assertTrue(result.tm_year == self.time_tuple.tm_year and
result.tm_mon == self.time_tuple.tm_mon and
result.tm_mday == self.time_tuple.tm_mday,
"Calculation of Gregorian date failed;"
"%s-%s-%s != %s-%s-%s" %
(result.tm_year, result.tm_mon, result.tm_mday,
self.time_tuple.tm_year, self.time_tuple.tm_mon,
self.time_tuple.tm_mday))
def test_day_of_week_calculation(self):
# Test that the day of the week is calculated as needed
format_string = "%Y %m %d %H %S %j %Z"
result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
format_string)
self.assertTrue(result.tm_wday == self.time_tuple.tm_wday,
"Calculation of day of the week failed;"
"%s != %s" % (result.tm_wday, self.time_tuple.tm_wday))
def test_week_of_year_and_day_of_week_calculation(self):
# Should be able to infer date if given year, week of year (%U or %W)
# and day of the week
def test_helper(ymd_tuple, test_reason):
for directive in ('W', 'U'):
format_string = "%%Y %%%s %%w" % directive
dt_date = datetime_date(*ymd_tuple)
strp_input = dt_date.strftime(format_string)
strp_output = _strptime._strptime_time(strp_input, format_string)
self.assertTrue(strp_output[:3] == ymd_tuple,
"%s(%s) test failed w/ '%s': %s != %s (%s != %s)" %
(test_reason, directive, strp_input,
strp_output[:3], ymd_tuple,
strp_output[7], dt_date.timetuple()[7]))
test_helper((1901, 1, 3), "week 0")
test_helper((1901, 1, 8), "common case")
test_helper((1901, 1, 13), "day on Sunday")
test_helper((1901, 1, 14), "day on Monday")
test_helper((1905, 1, 1), "Jan 1 on Sunday")
test_helper((1906, 1, 1), "Jan 1 on Monday")
test_helper((1906, 1, 7), "first Sunday in a year starting on Monday")
test_helper((1905, 12, 31), "Dec 31 on Sunday")
test_helper((1906, 12, 31), "Dec 31 on Monday")
test_helper((2008, 12, 29), "Monday in the last week of the year")
test_helper((2008, 12, 22), "Monday in the second-to-last week of the "
"year")
test_helper((1978, 10, 23), "randomly chosen date")
test_helper((2004, 12, 18), "randomly chosen date")
test_helper((1978, 10, 23), "year starting and ending on Monday while "
"date not on Sunday or Monday")
test_helper((1917, 12, 17), "year starting and ending on Monday with "
"a Monday not at the beginning or end "
"of the year")
test_helper((1917, 12, 31), "Dec 31 on Monday with year starting and "
"ending on Monday")
test_helper((2007, 1, 7), "First Sunday of 2007")
test_helper((2007, 1, 14), "Second Sunday of 2007")
test_helper((2006, 12, 31), "Last Sunday of 2006")
test_helper((2006, 12, 24), "Second to last Sunday of 2006")
class CacheTests(unittest.TestCase):
"""Test that caching works properly."""
def test_time_re_recreation(self):
        # Make sure the cache is recreated when the current locale does not
        # match what the cached object was created with.
_strptime._strptime_time("10", "%d")
_strptime._strptime_time("2005", "%Y")
_strptime._TimeRE_cache.locale_time.lang = "Ni"
original_time_re = _strptime._TimeRE_cache
_strptime._strptime_time("10", "%d")
self.assertIsNot(original_time_re, _strptime._TimeRE_cache)
self.assertEqual(len(_strptime._regex_cache), 1)
def test_regex_cleanup(self):
        # Make sure cached regexes are discarded when the cache becomes "full".
try:
del _strptime._regex_cache['%d']
except KeyError:
pass
bogus_key = 0
while len(_strptime._regex_cache) <= _strptime._CACHE_MAX_SIZE:
_strptime._regex_cache[bogus_key] = None
bogus_key += 1
_strptime._strptime_time("10", "%d")
self.assertEqual(len(_strptime._regex_cache), 1)
def test_new_localetime(self):
# A new LocaleTime instance should be created when a new TimeRE object
# is created.
locale_time_id = _strptime._TimeRE_cache.locale_time
_strptime._TimeRE_cache.locale_time.lang = "Ni"
_strptime._strptime_time("10", "%d")
self.assertIsNot(locale_time_id, _strptime._TimeRE_cache.locale_time)
def test_TimeRE_recreation(self):
# The TimeRE instance should be recreated upon changing the locale.
locale_info = locale.getlocale(locale.LC_TIME)
try:
locale.setlocale(locale.LC_TIME, ('en_US', 'UTF8'))
except locale.Error:
self.skipTest('test needs en_US.UTF8 locale')
try:
_strptime._strptime_time('10', '%d')
# Get id of current cache object.
first_time_re = _strptime._TimeRE_cache
try:
# Change the locale and force a recreation of the cache.
locale.setlocale(locale.LC_TIME, ('de_DE', 'UTF8'))
_strptime._strptime_time('10', '%d')
# Get the new cache object's id.
second_time_re = _strptime._TimeRE_cache
# They should not be equal.
self.assertIsNot(first_time_re, second_time_re)
            # It is possible that the test locale is not supported while the
            # initial locale is. If this is the case, just suppress the
            # exception and fall through to resetting the original locale.
except locale.Error:
self.skipTest('test needs de_DE.UTF8 locale')
# Make sure we don't trample on the locale setting once we leave the
# test.
finally:
locale.setlocale(locale.LC_TIME, locale_info)
def test_main():
support.run_unittest(
getlang_Tests,
LocaleTime_Tests,
TimeRETests,
StrptimeTests,
Strptime12AMPMTests,
JulianTests,
CalculationTests,
CacheTests
)
if __name__ == '__main__':
test_main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| mit |
cython-testbed/pandas | pandas/tests/test_downstream.py | 4 | 3443 | # -*- coding: utf-8 -*-
"""
Testing that we work in the downstream packages
"""
import subprocess
import sys
import pytest
import numpy as np # noqa
from pandas import DataFrame
from pandas.compat import PY36
from pandas.util import testing as tm
import importlib
def import_module(name):
# we *only* want to skip if the module is truly not available
# and NOT just an actual import error because of pandas changes
if PY36:
try:
return importlib.import_module(name)
except ModuleNotFoundError: # noqa
pytest.skip("skipping as {} not available".format(name))
else:
try:
return importlib.import_module(name)
except ImportError as e:
if "No module named" in str(e) and name in str(e):
pytest.skip("skipping as {} not available".format(name))
raise
@pytest.fixture
def df():
return DataFrame({'A': [1, 2, 3]})
def test_dask(df):
toolz = import_module('toolz') # noqa
dask = import_module('dask') # noqa
import dask.dataframe as dd
ddf = dd.from_pandas(df, npartitions=3)
assert ddf.A is not None
assert ddf.compute() is not None
def test_xarray(df):
xarray = import_module('xarray') # noqa
assert df.to_xarray() is not None
def test_oo_optimizable():
# GH 21071
subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"])
@tm.network
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_statsmodels():
statsmodels = import_module('statsmodels') # noqa
import statsmodels.api as sm
import statsmodels.formula.api as smf
df = sm.datasets.get_rdataset("Guerry", "HistData").data
smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=df).fit()
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_scikit_learn(df):
sklearn = import_module('sklearn') # noqa
from sklearn import svm, datasets
digits = datasets.load_digits()
clf = svm.SVC(gamma=0.001, C=100.)
clf.fit(digits.data[:-1], digits.target[:-1])
clf.predict(digits.data[-1:])
# Cython import warning and traitlets
@tm.network
@pytest.mark.filterwarnings("ignore")
def test_seaborn():
seaborn = import_module('seaborn')
tips = seaborn.load_dataset("tips")
seaborn.stripplot(x="day", y="total_bill", data=tips)
def test_pandas_gbq(df):
pandas_gbq = import_module('pandas_gbq') # noqa
@pytest.mark.xfail(reason="0.7.0 pending", strict=True)
@tm.network
def test_pandas_datareader():
pandas_datareader = import_module('pandas_datareader') # noqa
pandas_datareader.DataReader(
'F', 'quandl', '2017-01-01', '2017-02-01')
# importing from pandas, Cython import warning
@pytest.mark.filterwarnings("ignore:The 'warn':DeprecationWarning")
@pytest.mark.filterwarnings("ignore:pandas.util:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
def test_geopandas():
geopandas = import_module('geopandas') # noqa
fp = geopandas.datasets.get_path('naturalearth_lowres')
assert geopandas.read_file(fp) is not None
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
def test_pyarrow(df):
pyarrow = import_module('pyarrow') # noqa
table = pyarrow.Table.from_pandas(df)
result = table.to_pandas()
tm.assert_frame_equal(result, df)
| bsd-3-clause |
chatchavan/pcl | doc/tutorials/content/conf.py | 66 | 4604 | # All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.pngmath', 'sphinxcontrib.doxylink.doxylink']
pngmath_dvipng_args = ['-gamma 1.5', '-D 110', '-bg Transparent']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PCL'
copyright = ''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = { 'rightsidebar' : 'true' }
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Home'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = '.html'
html_sidebars = {
'**': [],
'using/windows': [],
}
html_show_copyright = False
html_show_sphinx = False
html_add_permalinks = None
needs_sphinx = 1.0
file_insertion_enabled = True
raw_enabled = True
# Set up doxylink
doxylink = {'pcl' : ('../../../build/doc/doxygen/pcl.tag', 'http://docs.pointclouds.org/trunk/')}
| bsd-3-clause |
goldshtn/windbg-extensions | heap_stat.py | 1 | 4876 | from pykd import *
import re
import sys
import pickle
# TODO list:
# 1) better parameter parsing and validation
# 2) cleaner printing code
stats_only = False
save_info = False
short_output = False
pointer_size = ptrSize()
pointer_format = "%016x" if pointer_size == 8 else "%08x"
if '-stat' in sys.argv:
stats_only = True
potential_save_file = ""
if '-save' in sys.argv:
save_info = True
potential_save_file = sys.argv[sys.argv.index('-save')+1]
if '-load' in sys.argv:
if save_info:
dprintln("Error: can't use -load together with -save")
exit()
potential_save_file = sys.argv[sys.argv.index('-load')+1]
if '-short' in sys.argv:
short_output = True
min_count = 0
if '-min' in sys.argv:
min_count = int(sys.argv[sys.argv.index('-min')+1])
type_filter = ""
if '-type' in sys.argv:
type_filter = sys.argv[sys.argv.index('-type')+1]
if '-help' in sys.argv:
dprintln("")
dprintln("Usage:")
dprintln(" !py %s [-stat] [-help] [-short] [-type <typename>] [-min <count>] [-<save|load> <cache_filename>]" % sys.argv[0])
dprintln("")
dprintln(" -stat displays statistics only at the end of the run")
dprintln(" -short displays only object addresses, for scripting with .foreach and similar commands")
dprintln(" -help displays usage information")
dprintln(" -type filters output to the specified type(s) only - accepts a regular expression")
dprintln(" -min filters statistics output to types that have at least that many instances")
dprintln(" -save at the end of the run, saves type information to a file to make subsequent runs faster")
dprintln(" -load read type information from a file to make the run faster")
dprintln("")
exit()
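# Illustrative invocations (the cache file path below is hypothetical; flags
# match the parsing above):
#   !py heap_stat.py -stat -min 10
#   !py heap_stat.py -type "CFoo.*" -save c:\temp\typecache.bin
#   !py heap_stat.py -load c:\temp\typecache.bin -short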
vftables_by_address = {}
vftables_by_type = {}
typesize_by_type = {}
if not save_info and len(potential_save_file) > 0:
if not short_output:
dprint("Loading type information from save file %s..." % potential_save_file)
file = open(potential_save_file, 'rb')
(vftables_by_address, vftables_by_type, typesize_by_type) = pickle.load(file)
file.close()
if not short_output:
dprintln("DONE")
else:
if not short_output:
dprint("Running x /2 *!*`vftable' command...")
vftables = dbgCommand("x /2 *!*`vftable'").split('\n')
if not short_output:
dprintln("DONE")
for vftable in vftables:
parts = [w.lstrip().rstrip() for w in vftable.replace("::`vftable'", "").split(' ', 1)]
if len(parts) < 2: continue
(address, type) = parts
address = address.replace('`', '')
address_ptr = long(address,16)
vftables_by_address[address_ptr] = type
vftables_by_type[type] = address_ptr
if not short_output:
dprint("Running !heap -h 0 command...")
heap_output = dbgCommand('!heap -h 0').split('\n')
if not short_output:
dprintln("DONE")
stats = {}
if not short_output:
dprintln("Enumerating %d heap blocks" % len(heap_output))
blocks_done = 0
for heap_block in heap_output:
blocks_done += 1
if stats_only and blocks_done % 100 == 0 and not short_output:
dprintln(" Enumerated %d heap blocks" % blocks_done)
# example block output: 00e3f088: 00080 . 00078 [101] - busy (70)
match = re.match(r'\s+([0-9a-f]+): [0-9a-f]+ \. [0-9a-f]+ \[[0-9a-f]+\] - busy \(([0-9a-f]+)\)', heap_block)
if match:
address = long(match.group(1),16)
size = long(match.group(2),16)
ptr = address - pointer_size
while ptr < address+size:
ptr += pointer_size
try:
vftable_candidate = ptrPtr(ptr)
except:
continue
if vftable_candidate in vftables_by_address:
type_name = vftables_by_address[vftable_candidate]
if len(type_filter) > 0 and not re.match(type_filter, type_name):
continue
if not stats_only:
if short_output:
dprintln(pointer_format % ptr)
else:
dprintln((pointer_format + "\t%s") % (ptr, type_name))
if type_name in stats:
stats[type_name] += 1
else:
stats[type_name] = 1
if not short_output:
dprintln("")
dprintln("Statistics:")
dprintln("%50s\t%10s\t%s" % ("Type name", "Count", "Size"))
for type in sorted(stats, key=stats.get, reverse=True):
if stats[type] < min_count or (len(type_filter) > 0 and not re.match(type_filter, type)):
continue
if not type in typesize_by_type:
try:
type_info = typeInfo(type)
typesize_by_type[type] = type_info.size()
except:
            # some types aren't included in public symbols, so we can't get their size
typesize_by_type[type] = None
size = "Unknown"
if typesize_by_type[type] is not None:
size = stats[type]*typesize_by_type[type]
dprintln("%50s\t%10d\t%s" % (type, stats[type], size))
if not short_output:
dprintln("")
if save_info and len(potential_save_file) > 0:
if not short_output:
dprint("Saving type information and vtables to file %s..." % potential_save_file)
file = open(potential_save_file, 'wb')
pickle.dump((vftables_by_address, vftables_by_type, typesize_by_type), file)
file.close()
if not short_output:
dprintln("DONE")
| apache-2.0 |
parinporecha/backend_gtgonline | GTG/tests/test_backend_tomboy.py | 1 | 15680 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2012 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
""" Tests for the tomboy backend """
from datetime import datetime
from dbus.mainloop.glib import DBusGMainLoop
import dbus
import dbus.glib
import dbus.service
import errno
import gobject
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
import unittest
import uuid
from GTG.backends import BackendFactory
from GTG.backends.genericbackend import GenericBackend
from GTG.core.datastore import DataStore
PID_TOMBOY = False
class TestBackendTomboy(unittest.TestCase):
""" Tests for the tomboy backend """
def setUp(self):
thread_tomboy = threading.Thread(target=self.spawn_fake_tomboy_server)
thread_tomboy.start()
thread_tomboy.join()
# only the test process should go further, the dbus server one should
# stop here
if not PID_TOMBOY:
return
# we create a custom dictionary listening to the server, and register
# it in GTG.
additional_dic = {}
additional_dic["use this fake connection instead"] = (
FakeTomboy.BUS_NAME, FakeTomboy.BUS_PATH, FakeTomboy.BUS_INTERFACE)
additional_dic[GenericBackend.KEY_ATTACHED_TAGS] = \
[GenericBackend.ALLTASKS_TAG]
additional_dic[GenericBackend.KEY_DEFAULT_BACKEND] = True
dic = BackendFactory().get_new_backend_dict('backend_tomboy',
additional_dic)
self.datastore = DataStore()
self.backend = self.datastore.register_backend(dic)
# waiting for the "start_get_tasks" to settle
time.sleep(1)
# we create a dbus session to speak with the server
self.bus = dbus.SessionBus()
obj = self.bus.get_object(FakeTomboy.BUS_NAME, FakeTomboy.BUS_PATH)
self.tomboy = dbus.Interface(obj, FakeTomboy.BUS_INTERFACE)
def spawn_fake_tomboy_server(self):
# the fake tomboy server has to be in a different process,
# otherwise it will lock on the GIL.
# For details, see
# http://lists.freedesktop.org/archives/dbus/2007-January/006921.html
# we use a lockfile to make sure the server is running before we start
# the test
global PID_TOMBOY
lockfile_fd, lockfile_path = tempfile.mkstemp()
PID_TOMBOY = os.fork()
if PID_TOMBOY:
# we wait in polling that the server has been started
while True:
try:
fd = os.open(lockfile_path,
os.O_CREAT | os.O_EXCL | os.O_RDWR)
except OSError, e:
if e.errno != errno.EEXIST:
raise
time.sleep(0.3)
continue
os.close(fd)
break
else:
FakeTomboy()
os.close(lockfile_fd)
os.unlink(lockfile_path)
def tearDown(self):
if not PID_TOMBOY:
return
self.datastore.save(quit=True)
time.sleep(0.5)
self.tomboy.FakeQuit()
# FIXME: self.bus.close()
os.kill(PID_TOMBOY, signal.SIGKILL)
os.waitpid(PID_TOMBOY, 0)
def test_everything(self):
# we cannot use separate test functions because we only want a single
# FakeTomboy dbus server running
if not PID_TOMBOY:
return
for function in dir(self):
if function.startswith("TEST_"):
getattr(self, function)()
self.tomboy.Reset()
for tid in self.datastore.get_all_tasks():
self.datastore.request_task_deletion(tid)
time.sleep(0.1)
def TEST_processing_tomboy_notes(self):
self.backend.set_attached_tags([GenericBackend.ALLTASKS_TAG])
# adding a note
note = self.tomboy.CreateNamedNote(str(uuid.uuid4()))
self.backend._process_tomboy_note(note)
self.assertEqual(len(self.datastore.get_all_tasks()), 1)
tid = self.backend.sync_engine.sync_memes.get_local_id(note)
task = self.datastore.get_task(tid)
# re-adding that (should not change anything)
self.backend._process_tomboy_note(note)
self.assertEqual(len(self.datastore.get_all_tasks()), 1)
self.assertEqual(
self.backend.sync_engine.sync_memes.get_local_id(note), tid)
# removing the note and updating gtg
self.tomboy.DeleteNote(note)
self.backend.set_task(task)
self.assertEqual(len(self.datastore.get_all_tasks()), 0)
def TEST_set_task(self):
self.backend.set_attached_tags([GenericBackend.ALLTASKS_TAG])
# adding a task
task = self.datastore.requester.new_task()
task.set_title("title")
self.backend.set_task(task)
self.assertEqual(len(self.tomboy.ListAllNotes()), 1)
note = self.tomboy.ListAllNotes()[0]
self.assertEqual(str(self.tomboy.GetNoteTitle(note)), task.get_title())
# re-adding that (should not change anything)
self.backend.set_task(task)
self.assertEqual(len(self.tomboy.ListAllNotes()), 1)
self.assertEqual(note, self.tomboy.ListAllNotes()[0])
# removing the task and updating tomboy
self.datastore.request_task_deletion(task.get_id())
self.backend._process_tomboy_note(note)
self.assertEqual(len(self.tomboy.ListAllNotes()), 0)
def TEST_update_newest(self):
self.backend.set_attached_tags([GenericBackend.ALLTASKS_TAG])
task = self.datastore.requester.new_task()
task.set_title("title")
self.backend.set_task(task)
note = self.tomboy.ListAllNotes()[0]
gtg_modified = task.get_modified()
tomboy_modified = self._modified_string_to_datetime(
self.tomboy.GetNoteChangeDate(note))
# no-one updated, nothing should happen
self.backend.set_task(task)
self.assertEqual(gtg_modified, task.get_modified())
self.assertEqual(tomboy_modified,
self._modified_string_to_datetime(
self.tomboy.GetNoteChangeDate(note)))
# we update the GTG task
UPDATED_GTG_TITLE = "UPDATED_GTG_TITLE"
task.set_title(UPDATED_GTG_TITLE)
self.backend.set_task(task)
self.assertTrue(gtg_modified < task.get_modified())
self.assertTrue(tomboy_modified <=
self._modified_string_to_datetime(
self.tomboy.GetNoteChangeDate(note)))
self.assertEqual(task.get_title(), UPDATED_GTG_TITLE)
self.assertEqual(self.tomboy.GetNoteTitle(note), UPDATED_GTG_TITLE)
gtg_modified = task.get_modified()
tomboy_modified = self._modified_string_to_datetime(
self.tomboy.GetNoteChangeDate(note))
# we update the TOMBOY task
UPDATED_TOMBOY_TITLE = "UPDATED_TOMBOY_TITLE"
        # The resolution of tomboy notes' change time is 1 second, so we need
# to wait. This *shouldn't* be needed in the actual code because
# tomboy signals are always a few seconds late.
time.sleep(1)
self.tomboy.SetNoteContents(note, UPDATED_TOMBOY_TITLE)
self.backend._process_tomboy_note(note)
self.assertTrue(gtg_modified <= task.get_modified())
self.assertTrue(tomboy_modified <=
self._modified_string_to_datetime(
self.tomboy.GetNoteChangeDate(note)))
self.assertEqual(task.get_title(), UPDATED_TOMBOY_TITLE)
self.assertEqual(self.tomboy.GetNoteTitle(note), UPDATED_TOMBOY_TITLE)
def TEST_processing_tomboy_notes_with_tags(self):
self.backend.set_attached_tags(['@a'])
# adding a not syncable note
note = self.tomboy.CreateNamedNote("title" + str(uuid.uuid4()))
self.backend._process_tomboy_note(note)
self.assertEqual(len(self.datastore.get_all_tasks()), 0)
# re-adding that (should not change anything)
self.backend._process_tomboy_note(note)
self.assertEqual(len(self.datastore.get_all_tasks()), 0)
# adding a tag to that note
self.tomboy.SetNoteContents(note, "something with @a")
self.backend._process_tomboy_note(note)
self.assertEqual(len(self.datastore.get_all_tasks()), 1)
# removing the tag and resyncing
self.tomboy.SetNoteContents(note, "something with no tags")
self.backend._process_tomboy_note(note)
self.assertEqual(len(self.datastore.get_all_tasks()), 0)
# adding a syncable note
note = self.tomboy.CreateNamedNote("title @a" + str(uuid.uuid4()))
self.backend._process_tomboy_note(note)
self.assertEqual(len(self.datastore.get_all_tasks()), 1)
tid = self.backend.sync_engine.sync_memes.get_local_id(note)
task = self.datastore.get_task(tid)
# re-adding that (should not change anything)
self.backend._process_tomboy_note(note)
self.assertEqual(len(self.datastore.get_all_tasks()), 1)
self.assertEqual(
self.backend.sync_engine.sync_memes.get_local_id(note), tid)
# removing the note and updating gtg
self.tomboy.DeleteNote(note)
self.backend.set_task(task)
self.assertEqual(len(self.datastore.get_all_tasks()), 0)
def TEST_set_task_with_tags(self):
self.backend.set_attached_tags(['@a'])
# adding a not syncable task
task = self.datastore.requester.new_task()
task.set_title("title")
self.backend.set_task(task)
self.assertEqual(len(self.tomboy.ListAllNotes()), 0)
# making that task syncable
task.set_title("something else")
task.add_tag("@a")
self.backend.set_task(task)
self.assertEqual(len(self.tomboy.ListAllNotes()), 1)
note = self.tomboy.ListAllNotes()[0]
self.assertEqual(str(self.tomboy.GetNoteTitle(note)), task.get_title())
# re-adding that (should not change anything)
self.backend.set_task(task)
self.assertEqual(len(self.tomboy.ListAllNotes()), 1)
self.assertEqual(note, self.tomboy.ListAllNotes()[0])
# removing the syncable property and updating tomboy
task.remove_tag("@a")
self.backend.set_task(task)
self.assertEqual(len(self.tomboy.ListAllNotes()), 0)
def TEST_multiple_task_same_title(self):
self.backend.set_attached_tags(['@a'])
how_many_tasks = int(math.ceil(20 * random.random()))
for iteration in xrange(0, how_many_tasks):
task = self.datastore.requester.new_task()
task.set_title("title")
task.add_tag('@a')
self.backend.set_task(task)
self.assertEqual(len(self.tomboy.ListAllNotes()), how_many_tasks)
def _modified_string_to_datetime(self, modified_string):
return datetime.fromtimestamp(modified_string)
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(TestBackendTomboy)
class FakeTomboy(dbus.service.Object):
"""
D-Bus service object that mimics TOMBOY
"""
# We don't directly use the tomboy dbus path to avoid conflicts
# if tomboy is running during the test
BUS_NAME = "Fake.Tomboy"
BUS_PATH = "/Fake/Tomboy"
BUS_INTERFACE = "Fake.Tomboy.RemoteControl"
def __init__(self):
# Attach the object to D-Bus
DBusGMainLoop(set_as_default=True)
self.bus = dbus.SessionBus()
bus_name = dbus.service.BusName(self.BUS_NAME, bus=self.bus)
dbus.service.Object.__init__(self, bus_name, self.BUS_PATH)
self.notes = {}
threading.Thread(target=self.fake_main_loop).start()
@dbus.service.method(BUS_INTERFACE, in_signature="s", out_signature="s")
def GetNoteContents(self, note):
return self.notes[note]['content']
@dbus.service.method(BUS_INTERFACE, in_signature="s", out_signature="b")
def NoteExists(self, note):
return note in self.notes
@dbus.service.method(BUS_INTERFACE, in_signature="s", out_signature="d")
def GetNoteChangeDate(self, note):
return self.notes[note]['changed']
@dbus.service.method(BUS_INTERFACE, in_signature="ss")
def SetNoteContents(self, note, text):
self.fake_update_note(note)
self.notes[note]['content'] = text
@dbus.service.method(BUS_INTERFACE, in_signature="s", out_signature="s")
def GetNoteTitle(self, note):
return self._GetNoteTitle(note)
def _GetNoteTitle(self, note):
content = self.notes[note]['content']
try:
end_of_title = content.index('\n')
except ValueError:
return content
return content[:end_of_title]
@dbus.service.method(BUS_INTERFACE, in_signature="s")
def DeleteNote(self, note):
del self.notes[note]
@dbus.service.method(BUS_INTERFACE, in_signature="s", out_signature="s")
def CreateNamedNote(self, title):
# this is to mimic the way tomboy handles title clashes
if self._FindNote(title) != '':
return ''
note = str(uuid.uuid4())
self.notes[note] = {'content': title}
self.fake_update_note(note)
return note
@dbus.service.method(BUS_INTERFACE, in_signature="s", out_signature="s")
def FindNote(self, title):
return self._FindNote(title)
def _FindNote(self, title):
for note in self.notes:
if self._GetNoteTitle(note) == title:
return note
return ''
@dbus.service.method(BUS_INTERFACE, out_signature="as")
def ListAllNotes(self):
return list(self.notes)
@dbus.service.signal(BUS_INTERFACE, signature='s')
def NoteSaved(self, note):
pass
@dbus.service.signal(BUS_INTERFACE, signature='s')
def NoteDeleted(self, note):
pass
###############################################################################
### Function with the fake_ prefix are here to assist in testing, they do not
### need to be present in the real class
###############################################################################
def fake_update_note(self, note):
self.notes[note]['changed'] = time.mktime(datetime.now().timetuple())
def fake_main_loop(self):
gobject.threads_init()
dbus.glib.init_threads()
self.main_loop = gobject.MainLoop()
self.main_loop.run()
@dbus.service.method(BUS_INTERFACE)
def Reset(self):
self.notes = {}
@dbus.service.method(BUS_INTERFACE)
def FakeQuit(self):
threading.Timer(0.2, self._fake_quit).start()
def _fake_quit(self):
self.main_loop.quit()
sys.exit(0)
| gpl-3.0 |
avlach/univbris-ocf | optin_manager/src/python/scripts/setup_ch.py | 2 | 3214 | '''
Created on Jul 19, 2010
@author: jnaous
'''
from django.core.urlresolvers import reverse
from django.test import Client
from expedient.common.tests.client import test_get_and_post_form
from django.contrib.auth.models import User
from pyquery import PyQuery as pq
from openflow.plugin.models import OpenFlowInterface, NonOpenFlowConnection
from geni.planetlab.models import PlanetLabNode
try:
from setup_expedient_params import \
SUPERUSER_USERNAME, SUPERUSER_PASSWORD,\
USER_INFO,\
PL_AGGREGATE_INFO,\
OF_AGGREGATE_INFO,\
OF_PL_CONNECTIONS
except ImportError:
print """
Could not import setup_om_params module. Make sure this
module exists and that it contains the following variables:
SUPERUSER_USERNAME, SUPERUSER_PASSWORD,
CH_PASSWORD, CH_USERNAME
"""
raise
def run():
client = Client()
client.login(username=SUPERUSER_USERNAME,
password=SUPERUSER_PASSWORD)
# Add all planetlab aggregates
for pl_agg in PL_AGGREGATE_INFO:
print "adding pl agg %s" % pl_agg["url"]
response = test_get_and_post_form(
client,
reverse("planetlab_aggregate_create"),
pl_agg,
)
print "got response %s" % response
assert response.status_code == 302
for of_agg in OF_AGGREGATE_INFO:
print "adding of agg %s" % of_agg["url"]
response = test_get_and_post_form(
client,
reverse("openflow_aggregate_create"),
of_agg,
del_params=["verify_certs"],
)
assert response.status_code == 302
for cnxn_tuple in OF_PL_CONNECTIONS:
print "adding cnxn %s" % (cnxn_tuple,)
NonOpenFlowConnection.objects.get_or_create(
of_iface=OpenFlowInterface.objects.get(
switch__datapath_id=cnxn_tuple[0],
port_num=cnxn_tuple[1],
),
resource=PlanetLabNode.objects.get(name=cnxn_tuple[2]),
)
client.logout()
for username, info in USER_INFO.items():
# create user
User.objects.create_user(
username=username, email=info["email"], password=info["password"])
client.login(username=username, password=info["password"])
# create project and slice
for project in info["projects"]:
response = test_get_and_post_form(
client, reverse("project_create"),
params=dict(
name=project["name"],
description=project["description"],
),
)
assert response.status_code == 302
        # This code is missing the project id, which is needed to use reverse.
# for slice in project["slices"]:
# response = test_get_and_post_form(
# client, reverse("slice_create"),
# params=dict(
# name=slice["name"],
# description=slice["description"],
# ),
# )
# assert response.status_code == 302
client.logout()
| bsd-3-clause |
julien78910/CouchPotatoServer | libs/apscheduler/jobstores/redis_store.py | 98 | 2815 | """
Stores jobs in a Redis database.
"""
from uuid import uuid4
from datetime import datetime
import logging
from apscheduler.jobstores.base import JobStore
from apscheduler.job import Job
try:
import cPickle as pickle
except ImportError: # pragma: nocover
import pickle
try:
from redis import StrictRedis
except ImportError: # pragma: nocover
raise ImportError('RedisJobStore requires redis installed')
try:
long = long
except NameError:
long = int
logger = logging.getLogger(__name__)
class RedisJobStore(JobStore):
def __init__(self, db=0, key_prefix='jobs.',
pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
self.jobs = []
self.pickle_protocol = pickle_protocol
self.key_prefix = key_prefix
if db is None:
raise ValueError('The "db" parameter must not be empty')
if not key_prefix:
raise ValueError('The "key_prefix" parameter must not be empty')
self.redis = StrictRedis(db=db, **connect_args)
def add_job(self, job):
job.id = str(uuid4())
job_state = job.__getstate__()
job_dict = {
'job_state': pickle.dumps(job_state, self.pickle_protocol),
'runs': '0',
'next_run_time': job_state.pop('next_run_time').isoformat()}
self.redis.hmset(self.key_prefix + job.id, job_dict)
self.jobs.append(job)
def remove_job(self, job):
self.redis.delete(self.key_prefix + job.id)
self.jobs.remove(job)
def load_jobs(self):
jobs = []
keys = self.redis.keys(self.key_prefix + '*')
pipeline = self.redis.pipeline()
for key in keys:
pipeline.hgetall(key)
results = pipeline.execute()
for job_dict in results:
job_state = {}
try:
job = Job.__new__(Job)
job_state = pickle.loads(job_dict['job_state'.encode()])
job_state['runs'] = long(job_dict['runs'.encode()])
dateval = job_dict['next_run_time'.encode()].decode()
job_state['next_run_time'] = datetime.strptime(
dateval, '%Y-%m-%dT%H:%M:%S')
job.__setstate__(job_state)
jobs.append(job)
except Exception:
job_name = job_state.get('name', '(unknown)')
logger.exception('Unable to restore job "%s"', job_name)
self.jobs = jobs
def update_job(self, job):
attrs = {
'next_run_time': job.next_run_time.isoformat(),
'runs': job.runs}
self.redis.hmset(self.key_prefix + job.id, attrs)
def close(self):
self.redis.connection_pool.disconnect()
def __repr__(self):
return '<%s>' % self.__class__.__name__
| gpl-3.0 |
hello-base/web | apps/events/models.py | 1 | 5284 | # -*- coding: utf-8 -*-
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from model_utils import Choices
from apps.accounts.models import ContributorMixin
from apps.people.models import ParticipationMixin
class Event(ContributorMixin, ParticipationMixin):
CATEGORIES = Choices(
('birthday', 'Birthday'),
('bustour', 'Bus Tour'),
('concert', 'Concert'),
('convention', 'Convention'),
('dinnershow', 'Dinner Show'),
('general', 'General'),
('hawaii', 'Hawaii'),
('live', 'Live'),
('release', 'Release'),
('promotional', 'Promotional'),
('other', 'Other'),
)
# Details.
category = models.CharField(choices=CATEGORIES, default=CATEGORIES.general, max_length=16)
romanized_name = models.CharField(max_length=200)
name = models.CharField(max_length=200)
nickname = models.CharField(max_length=200)
slug = models.SlugField()
start_date = models.DateField(blank=True, null=True)
end_date = models.DateField(blank=True, null=True)
info_link_name = models.CharField(max_length=200, blank=True,
help_text='Separate multiple link names by comma (must have accompanying info link).')
info_link = models.URLField(blank=True, max_length=500,
        help_text='Separate multiple links by comma (must have accompanying link name).')
# Imagery.
logo = models.ImageField(blank=True, null=True, upload_to='events/events/')
promotional_image = models.ImageField(blank=True, null=True, upload_to='events/events/')
stage = models.ImageField(blank=True, null=True, upload_to='events/events/')
# Booleans.
has_handshake = models.BooleanField('has handshake?', default=False)
is_fanclub = models.BooleanField('fanclub?', default=False)
is_international = models.BooleanField('international?', default=False)
class Meta:
get_latest_by = 'start_date'
def __unicode__(self):
return u'%s' % (self.romanized_name)
def get_absolute_url(self):
return reverse('event-detail', kwargs={'slug': self.slug})
def clean(self, *args, **kwargs):
# Make sure that we have an equal number of info links and info link
# names, so that we can zip() them properly.
if (len(self.info_link.split(',')) != len(self.info_link_name.split(','))):
message = u'There need to be the same number of info links and info link names.'
raise ValidationError(message)
super(Event, self).clean(*args, **kwargs)
def save(self, *args, **kwargs):
self.full_clean()
super(Event, self).save(*args, **kwargs)
def get_info_links(self):
info_links = self.info_link.split(',')
info_link_names = self.info_link_name.split(',')
return zip(info_links, info_link_names)
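    # Illustrative pairing (hypothetical values):
    #   info_link="http://a.example,http://b.example", info_link_name="A,B"
    #   -> [("http://a.example", "A"), ("http://b.example", "B")]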
@staticmethod
def autocomplete_search_fields():
return ('id__iexact', 'name__icontains', 'romanized_name__icontains')
class Activity(ContributorMixin):
event = models.ForeignKey(Event, related_name='activities')
# Details.
day = models.DateField()
romanized_name = models.CharField(max_length=200, blank=True)
name = models.CharField(max_length=200, blank=True)
start_time = models.TimeField(blank=True, null=True)
description = models.TextField(blank=True,
help_text='If multiple activities took place on the same day/event, it can be specified here.')
is_performance = models.BooleanField('is a performance?', default=False)
# Venue.
venue = models.ForeignKey('Venue', blank=True, null=True, related_name='activities')
venue_known_as = models.CharField(max_length=200, blank=True,
help_text='Did the venue go by another name at the time of this activity?')
# Add 'set list' field with convoluted ordering and everything...
class Meta:
get_latest_by = 'day'
ordering = ('day', 'start_time')
verbose_name_plural = 'activities'
def __unicode__(self):
if self.start_time:
return u'%s %s at %s' % (self.day, self.event.nickname, self.start_time)
return u'%s %s' % (self.day, self.event.nickname)
class Venue(ContributorMixin):
romanized_name = models.CharField(max_length=200)
name = models.CharField(max_length=200)
other_names = models.CharField(max_length=200, blank=True, null=True)
capacity = models.IntegerField(blank=True, null=True)
url = models.URLField('URL', blank=True)
slug = models.SlugField()
# Location.
romanized_address = models.CharField(max_length=200, blank=True, null=True)
address = models.CharField(max_length=200, blank=True, null=True)
country = models.CharField(max_length=200, blank=True, null=True) # Only filled if outside of Japan (maybe unnecessary).
# Imagery.
photo = models.ImageField(blank=True, null=True, upload_to='events/venues/')
def __unicode__(self):
return u'%s' % (self.romanized_name)
def get_absolute_url(self):
return reverse('venue-detail', kwargs={'slug': self.slug})
@staticmethod
def autocomplete_search_fields():
return ('id__iexact', 'name__icontains', 'romanized_name__icontains')
| apache-2.0 |
ohsu-computational-biology/common-workflow-language | draft-3/salad/schema_salad/makedoc.py | 10 | 15362 | import mistune
import schema
import json
import yaml
import os
import copy
import re
import sys
import StringIO
import logging
import urlparse
from aslist import aslist
import re
import argparse
_logger = logging.getLogger("salad")
def has_types(items):
r = []
if isinstance(items, dict):
if items["type"] == "https://w3id.org/cwl/salad#record":
return [items["name"]]
for n in ("type", "items", "values"):
if n in items:
r.extend(has_types(items[n]))
return r
if isinstance(items, list):
for i in items:
r.extend(has_types(i))
return r
if isinstance(items, basestring):
return [items]
return []
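# Illustrative results (input shapes assumed for the sketch):
#   has_types({"type": "https://w3id.org/cwl/salad#record", "name": "Foo"}) -> ["Foo"]
#   has_types({"type": "array", "items": "string"}) -> ["array", "string"]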
def linkto(item):
_, frg = urlparse.urldefrag(item)
return "[%s](#%s)" % (frg, to_id(frg))
class MyRenderer(mistune.Renderer):
def header(self, text, level, raw=None):
return """<h%i id="%s">%s</h1>""" % (level, to_id(text), text)
def to_id(text):
textid = text
if text[0] in ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9"):
try:
textid = text[text.index(" ")+1:]
except ValueError:
pass
textid = textid.replace(" ", "_")
return textid
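# Illustrative anchors: to_id("2.1 Field names") -> "Field_names" (the leading
# section number is dropped); to_id("Introduction") -> "Introduction"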
class ToC(object):
def __init__(self):
self.first_toc_entry = True
self.numbering = [0]
self.toc = ""
self.start_numbering = True
def add_entry(self, thisdepth, title):
depth = len(self.numbering)
if thisdepth < depth:
self.toc += "</ol>"
for n in range(0, depth-thisdepth):
self.numbering.pop()
self.toc += "</li></ol>"
self.numbering[-1] += 1
elif thisdepth == depth:
if not self.first_toc_entry:
self.toc += "</ol>"
else:
self.first_toc_entry = False
self.numbering[-1] += 1
elif thisdepth > depth:
self.numbering.append(1)
if self.start_numbering:
num = "%i.%s" % (self.numbering[0], ".".join([str(n) for n in self.numbering[1:]]))
else:
num = ""
self.toc += """<li><a href="#%s">%s %s</a><ol>\n""" %(to_id(title),
num, title)
return num
def contents(self, id):
c = """<h1 id="%s">Table of contents</h1>
<nav class="tocnav"><ol>%s""" % (id, self.toc)
c += "</ol>"
for i in range(0, len(self.numbering)):
c += "</li></ol>"
c += """</nav>"""
return c
basicTypes = ("https://w3id.org/cwl/salad#null",
"http://www.w3.org/2001/XMLSchema#boolean",
"http://www.w3.org/2001/XMLSchema#int",
"http://www.w3.org/2001/XMLSchema#long",
"http://www.w3.org/2001/XMLSchema#float",
"http://www.w3.org/2001/XMLSchema#double",
"http://www.w3.org/2001/XMLSchema#string",
"https://w3id.org/cwl/salad#record",
"https://w3id.org/cwl/salad#enum",
"https://w3id.org/cwl/salad#array")
def add_dictlist(di, key, val):
if key not in di:
di[key] = []
di[key].append(val)
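# e.g. d = {}; add_dictlist(d, "k", 1); add_dictlist(d, "k", 2) -> d == {"k": [1, 2]}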
def number_headings(toc, maindoc):
mdlines = []
skip = False
for line in maindoc.splitlines():
if line.strip() == "# Introduction":
toc.start_numbering = True
toc.numbering = [0]
if line == "```":
skip = not skip
if not skip:
m = re.match(r'^(#+) (.*)', line)
if m:
num = toc.add_entry(len(m.group(1)), m.group(2))
line = "%s %s %s" % (m.group(1), num, m.group(2))
line = re.sub(r'^(https?://\S+)', r'[\1](\1)', line)
mdlines.append(line)
maindoc = '\n'.join(mdlines)
return maindoc
def fix_doc(doc):
if isinstance(doc, list):
doc = "".join(doc)
return "\n".join([re.sub(r"<([^>@]+@[^>]+)>", r"[\1](mailto:\1)", d) for d in doc.splitlines()])
class RenderType(object):
def __init__(self, toc, j, renderlist, redirects):
self.typedoc = StringIO.StringIO()
self.toc = toc
self.subs = {}
self.docParent = {}
self.docAfter = {}
self.rendered = set()
self.redirects = redirects
self.title = None
for t in j:
if "extends" in t:
for e in aslist(t["extends"]):
add_dictlist(self.subs, e, t["name"])
#if "docParent" not in t and "docAfter" not in t:
# add_dictlist(self.docParent, e, t["name"])
if t.get("docParent"):
add_dictlist(self.docParent, t["docParent"], t["name"])
if t.get("docChild"):
for c in aslist(t["docChild"]):
add_dictlist(self.docParent, t["name"], c)
if t.get("docAfter"):
add_dictlist(self.docAfter, t["docAfter"], t["name"])
_, _, metaschema_loader = schema.get_metaschema()
alltypes = schema.extend_and_specialize(j, metaschema_loader)
self.typemap = {}
self.uses = {}
self.record_refs = {}
for t in alltypes:
self.typemap[t["name"]] = t
try:
if t["type"] == "record":
self.record_refs[t["name"]] = []
for f in t.get("fields", []):
p = has_types(f)
for tp in p:
if tp not in self.uses:
self.uses[tp] = []
if (t["name"], f["name"]) not in self.uses[tp]:
_, frg1 = urlparse.urldefrag(t["name"])
_, frg2 = urlparse.urldefrag(f["name"])
self.uses[tp].append((frg1, frg2))
if tp not in basicTypes and tp not in self.record_refs[t["name"]]:
self.record_refs[t["name"]].append(tp)
except KeyError as e:
_logger.error("Did not find 'type' in %s", t)
raise
for f in alltypes:
if (f["name"] in renderlist or
((not renderlist) and
("extends" not in f) and
("docParent" not in f) and
("docAfter" not in f))):
self.render_type(f, 1)
def typefmt(self, tp, redirects, nbsp=False):
global primitiveType
if isinstance(tp, list):
if nbsp and len(tp) <= 3:
return " | ".join([self.typefmt(n, redirects) for n in tp])
else:
return " | ".join([self.typefmt(n, redirects) for n in tp])
if isinstance(tp, dict):
if tp["type"] == "https://w3id.org/cwl/salad#array":
return "array<%s>" % (self.typefmt(tp["items"], redirects, nbsp=True))
if tp["type"] in ("https://w3id.org/cwl/salad#record", "https://w3id.org/cwl/salad#enum"):
frg = schema.avro_name(tp["name"])
if tp["name"] in redirects:
return """<a href="%s">%s</a>""" % (redirects[tp["name"]], frg)
elif tp["name"] in self.typemap:
return """<a href="#%s">%s</a>""" % (to_id(frg), frg)
else:
return frg
if isinstance(tp["type"], dict):
return self.typefmt(tp["type"], redirects)
else:
if str(tp) in redirects:
return """<a href="%s">%s</a>""" % (redirects[tp], redirects[tp])
elif str(tp) in basicTypes:
return """<a href="%s">%s</a>""" % (primitiveType, schema.avro_name(str(tp)))
else:
_, frg = urlparse.urldefrag(tp)
if frg:
tp = frg
return """<a href="#%s">%s</a>""" % (to_id(tp), tp)
def render_type(self, f, depth):
if f["name"] in self.rendered or f["name"] in self.redirects:
return
self.rendered.add(f["name"])
if "doc" not in f:
f["doc"] = ""
f["type"] = copy.deepcopy(f)
f["doc"] = ""
f = f["type"]
if "doc" not in f:
f["doc"] = ""
def extendsfrom(item, ex):
if "extends" in item:
for e in aslist(item["extends"]):
ex.insert(0, self.typemap[e])
extendsfrom(self.typemap[e], ex)
ex = [f]
extendsfrom(f, ex)
enumDesc = {}
if f["type"] == "enum" and isinstance(f["doc"], list):
for e in ex:
for i in e["doc"]:
idx = i.find(":")
if idx > -1:
enumDesc[i[:idx]] = i[idx+1:]
e["doc"] = [i for i in e["doc"] if i.find(":") == -1 or i.find(" ") < i.find(":")]
f["doc"] = fix_doc(f["doc"])
if f["type"] == "record":
for field in f.get("fields", []):
if "doc" not in field:
field["doc"] = ""
if f["type"] != "documentation":
lines = []
for l in f["doc"].splitlines():
if len(l) > 0 and l[0] == "#":
l = ("#" * depth) + l
lines.append(l)
f["doc"] = "\n".join(lines)
_, frg = urlparse.urldefrag(f["name"])
num = self.toc.add_entry(depth, frg)
doc = "## %s %s\n" % (num, frg)
else:
doc = ""
if self.title is None:
self.title = f["doc"][0:f["doc"].index("\n")][2:]
if f["type"] == "documentation":
f["doc"] = number_headings(self.toc, f["doc"])
#if "extends" in f:
# doc += "\n\nExtends "
# doc += ", ".join([" %s" % linkto(ex) for ex in aslist(f["extends"])])
#if f["name"] in self.subs:
# doc += "\n\nExtended by"
# doc += ", ".join([" %s" % linkto(s) for s in self.subs[f["name"]]])
#if f["name"] in self.uses:
# doc += "\n\nReferenced by"
# doc += ", ".join([" [%s.%s](#%s)" % (s[0], s[1], to_id(s[0])) for s in self.uses[f["name"]]])
doc = doc + "\n\n" + f["doc"]
doc = mistune.markdown(doc, renderer=MyRenderer())
if f["type"] == "record":
doc += "<h3>Fields</h3>"
doc += """<table class="table table-striped">"""
doc += "<tr><th>field</th><th>type</th><th>required</th><th>description</th></tr>"
required = []
optional = []
for i in f.get("fields", []):
tp = i["type"]
if isinstance(tp, list) and tp[0] == "https://w3id.org/cwl/salad#null":
opt = False
tp = tp[1:]
else:
opt = True
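# note: despite its name, opt is True when the field is *required*,
# i.e. when its type union does not start with salad#null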
desc = i["doc"]
#if "inherited_from" in i:
# desc = "%s _Inherited from %s_" % (desc, linkto(i["inherited_from"]))
frg = schema.avro_name(i["name"])
tr = "<td><code>%s</code></td><td>%s</td><td>%s</td><td>%s</td>" % (frg, self.typefmt(tp, self.redirects), opt, mistune.markdown(desc))
if opt:
required.append(tr)
else:
optional.append(tr)
for i in required+optional:
doc += "<tr>" + i + "</tr>"
doc += """</table>"""
elif f["type"] == "enum":
doc += "<h3>Symbols</h3>"
doc += """<table class="table table-striped">"""
doc += "<tr><th>symbol</th><th>description</th></tr>"
for e in ex:
for i in e.get("symbols", []):
doc += "<tr>"
frg = schema.avro_name(i)
doc += "<td><code>%s</code></td><td>%s</td>" % (frg, enumDesc.get(frg, ""))
doc += "</tr>"
doc += """</table>"""
f["doc"] = doc
self.typedoc.write(f["doc"])
subs = self.docParent.get(f["name"], []) + self.record_refs.get(f["name"], [])
if len(subs) == 1:
self.render_type(self.typemap[subs[0]], depth)
else:
for s in subs:
self.render_type(self.typemap[s], depth+1)
for s in self.docAfter.get(f["name"], []):
self.render_type(self.typemap[s], depth)
def avrold_doc(j, outdoc, renderlist, redirects, brand, brandlink):
toc = ToC()
toc.start_numbering = False
rt = RenderType(toc, j, renderlist, redirects)
content = rt.typedoc.getvalue()
outdoc.write("""
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css">
""")
outdoc.write("<title>%s</title>" % (rt.title))
outdoc.write("""
<style>
:target {
padding-top: 61px;
margin-top: -61px;
}
body {
padding-top: 61px;
}
.tocnav ol {
list-style: none
}
</style>
</head>
<body>
""")
outdoc.write("""
<nav class="navbar navbar-default navbar-fixed-top">
<div class="container">
<div class="navbar-header">
<a class="navbar-brand" href="%s">%s</a>
""" % (brandlink, brand))
if u"<!--ToC-->" in content:
content = content.replace(u"<!--ToC-->", toc.contents("toc"))
outdoc.write("""
<ul class="nav navbar-nav">
<li><a href="#toc">Table of contents</a></li>
</ul>
""")
outdoc.write("""
</div>
</div>
</nav>
""")
outdoc.write("""
<div class="container">
""")
outdoc.write("""
<div class="row">
""")
outdoc.write("""
<div class="col-md-12" role="main" id="main">""")
outdoc.write(content.encode("utf-8"))
outdoc.write("""</div>""")
outdoc.write("""
</div>
</div>
</body>
</html>""")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("schema")
parser.add_argument('--only', action='append')
parser.add_argument('--redirect', action='append')
parser.add_argument('--brand')
parser.add_argument('--brandlink')
parser.add_argument('--primtype', default="#PrimitiveType")
args = parser.parse_args()
s = []
a = args.schema
with open(a) as f:
if a.endswith("md"):
s.append({"name": os.path.splitext(os.path.basename(a))[0],
"type": "documentation",
"doc": f.read().decode("utf-8")
})
else:
uri = "file://" + os.path.abspath(a)
_, _, metaschema_loader = schema.get_metaschema()
j, schema_metadata = metaschema_loader.resolve_ref(uri, "")
if isinstance(j, list):
s.extend(j)
else:
s.append(j)
primitiveType = args.primtype
redirect = {r.split("=")[0]:r.split("=")[1] for r in args.redirect} if args.redirect else {}
renderlist = args.only if args.only else []
avrold_doc(s, sys.stdout, renderlist, redirect, args.brand, args.brandlink)
| apache-2.0 |
nwjs/chromium.src | mojo/public/tools/bindings/pylib/mojom/parse/lexer.py | 2 | 6258 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
import os.path
import sys
def _GetDirAbove(dirname):
"""Returns the directory "above" this file containing |dirname| (which must
also be "above" this file)."""
path = os.path.abspath(__file__)
while True:
path, tail = os.path.split(path)
assert tail
if tail == dirname:
return path
try:
imp.find_module("ply")
except ImportError:
sys.path.append(os.path.join(_GetDirAbove("mojo"), "third_party"))
from ply.lex import TOKEN
from ..error import Error
class LexError(Error):
"""Class for errors from the lexer."""
def __init__(self, filename, message, lineno):
Error.__init__(self, filename, message, lineno=lineno)
# We have methods which look like they could be functions:
# pylint: disable=R0201
class Lexer(object):
def __init__(self, filename):
self.filename = filename
######################-- PRIVATE --######################
##
## Internal auxiliary methods
##
def _error(self, msg, token):
raise LexError(self.filename, msg, token.lineno)
##
## Reserved keywords
##
keywords = (
'HANDLE',
'IMPORT',
'MODULE',
'STRUCT',
'UNION',
'INTERFACE',
'ENUM',
'CONST',
'TRUE',
'FALSE',
'DEFAULT',
'ARRAY',
'MAP',
'ASSOCIATED',
'PENDING_REMOTE',
'PENDING_RECEIVER',
'PENDING_ASSOCIATED_REMOTE',
'PENDING_ASSOCIATED_RECEIVER',
)
keyword_map = {}
for keyword in keywords:
keyword_map[keyword.lower()] = keyword
##
## All the tokens recognized by the lexer
##
tokens = keywords + (
# Identifiers
'NAME',
# Constants
'ORDINAL',
'INT_CONST_DEC', 'INT_CONST_HEX',
'FLOAT_CONST',
# String literals
'STRING_LITERAL',
# Operators
'MINUS',
'PLUS',
'AMP',
'QSTN',
# Assignment
'EQUALS',
# Request / response
'RESPONSE',
# Delimiters
'LPAREN', 'RPAREN', # ( )
'LBRACKET', 'RBRACKET', # [ ]
'LBRACE', 'RBRACE', # { }
'LANGLE', 'RANGLE', # < >
'SEMI', # ;
'COMMA', 'DOT' # , .
)
##
## Regexes for use in tokens
##
# valid C identifiers (K&R2: A.2.3)
identifier = r'[a-zA-Z_][0-9a-zA-Z_]*'
hex_prefix = '0[xX]'
hex_digits = '[0-9a-fA-F]+'
# integer constants (K&R2: A.2.5.1)
decimal_constant = '0|([1-9][0-9]*)'
hex_constant = hex_prefix+hex_digits
# Don't allow octal constants (even invalid octal).
octal_constant_disallowed = '0[0-9]+'
# character constants (K&R2: A.2.5.2)
# Note: a-zA-Z and '.-~^_!=&;,' are allowed as escape chars to support #line
# directives with Windows paths as filenames (..\..\dir\file)
# For the same reason, decimal_escape allows all digit sequences. We want to
# parse all correct code, even if it means to sometimes parse incorrect
# code.
#
simple_escape = r"""([a-zA-Z._~!=&\^\-\\?'"])"""
decimal_escape = r"""(\d+)"""
hex_escape = r"""(x[0-9a-fA-F]+)"""
bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])"""
escape_sequence = \
r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))'
# string literals (K&R2: A.2.6)
string_char = r"""([^"\\\n]|"""+escape_sequence+')'
string_literal = '"'+string_char+'*"'
bad_string_literal = '"'+string_char+'*'+bad_escape+string_char+'*"'
# floating constants (K&R2: A.2.5.3)
exponent_part = r"""([eE][-+]?[0-9]+)"""
fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)"""
floating_constant = \
'(((('+fractional_constant+')'+ \
exponent_part+'?)|([0-9]+'+exponent_part+')))'
# Ordinals
ordinal = r'@[0-9]+'
missing_ordinal_value = r'@'
# Don't allow ordinal values in octal (even invalid octal, like 09) or
# hexadecimal.
octal_or_hex_ordinal_disallowed = r'@((0[0-9]+)|('+hex_prefix+hex_digits+'))'
##
## Rules for the normal state
##
t_ignore = ' \t\r'
# Newlines
def t_NEWLINE(self, t):
r'\n+'
t.lexer.lineno += len(t.value)
# Operators
t_MINUS = r'-'
t_PLUS = r'\+'
t_AMP = r'&'
t_QSTN = r'\?'
# =
t_EQUALS = r'='
# =>
t_RESPONSE = r'=>'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_LANGLE = r'<'
t_RANGLE = r'>'
t_COMMA = r','
t_DOT = r'\.'
t_SEMI = r';'
t_STRING_LITERAL = string_literal
# The following floating and integer constants are defined as
# functions to impose a strict order (otherwise, decimal
# is placed before the others because its regex is longer,
# and this is bad)
#
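# For example (hypothetical inputs, not from the mojom test suite): with
# this ordering, "0x1f" is claimed by t_INT_CONST_HEX and "1.5e3" by
# t_FLOAT_CONST before t_INT_CONST_DEC gets a chance to match just the
# leading digits.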
@TOKEN(floating_constant)
def t_FLOAT_CONST(self, t):
return t
@TOKEN(hex_constant)
def t_INT_CONST_HEX(self, t):
return t
@TOKEN(octal_constant_disallowed)
def t_OCTAL_CONSTANT_DISALLOWED(self, t):
msg = "Octal values not allowed"
self._error(msg, t)
@TOKEN(decimal_constant)
def t_INT_CONST_DEC(self, t):
return t
# unmatched string literals are caught by the preprocessor
@TOKEN(bad_string_literal)
def t_BAD_STRING_LITERAL(self, t):
msg = "String contains invalid escape code"
self._error(msg, t)
# Handle ordinal-related tokens in the right order:
@TOKEN(octal_or_hex_ordinal_disallowed)
def t_OCTAL_OR_HEX_ORDINAL_DISALLOWED(self, t):
msg = "Octal and hexadecimal ordinal values not allowed"
self._error(msg, t)
@TOKEN(ordinal)
def t_ORDINAL(self, t):
return t
@TOKEN(missing_ordinal_value)
def t_BAD_ORDINAL(self, t):
msg = "Missing ordinal value"
self._error(msg, t)
@TOKEN(identifier)
def t_NAME(self, t):
t.type = self.keyword_map.get(t.value, "NAME")
return t
# Ignore C and C++ style comments
def t_COMMENT(self, t):
r'(/\*(.|\n)*?\*/)|(//.*(\n[ \t]*//.*)*)'
t.lexer.lineno += t.value.count("\n")
def t_error(self, t):
msg = "Illegal character %s" % repr(t.value[0])
self._error(msg, t)
| bsd-3-clause |
dlazz/ansible | lib/ansible/modules/network/ios/ios_linkagg.py | 57 | 9433 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_linkagg
version_added: "2.5"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage link aggregation groups on Cisco IOS network devices
description:
- This module provides declarative management of link aggregation groups
on Cisco IOS network devices.
notes:
- Tested against IOS 15.2
options:
group:
description:
- Channel-group number for the port-channel
Link aggregation group. Range 1-255.
mode:
description:
- Mode of the link aggregation group.
choices: ['active', 'on', 'passive', 'auto', 'desirable']
members:
description:
- List of members of the link aggregation group.
aggregate:
description: List of link aggregation definitions.
state:
description:
- State of the link aggregation group.
default: present
choices: ['present', 'absent']
purge:
description:
- Purge links not defined in the I(aggregate) parameter.
default: no
type: bool
extends_documentation_fragment: ios
"""
EXAMPLES = """
- name: create link aggregation group
ios_linkagg:
group: 10
state: present
- name: delete link aggregation group
ios_linkagg:
group: 10
state: absent
- name: set link aggregation group to members
ios_linkagg:
group: 200
mode: active
members:
- GigabitEthernet0/0
- GigabitEthernet0/1
- name: remove link aggregation group from GigabitEthernet0/0
ios_linkagg:
group: 200
mode: active
members:
- GigabitEthernet0/1
- name: Create aggregate of linkagg definitions
ios_linkagg:
aggregate:
- { group: 3, mode: on, members: [GigabitEthernet0/1] }
- { group: 100, mode: passive, members: [GigabitEthernet0/2] }
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- interface port-channel 30
- interface GigabitEthernet0/3
- channel-group 30 mode on
- no interface port-channel 30
"""
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.ios.ios import get_config, load_config
from ansible.module_utils.network.ios.ios import ios_argument_spec
def search_obj_in_list(group, lst):
for o in lst:
if o['group'] == group:
return o
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
purge = module.params['purge']
for w in want:
group = w['group']
mode = w['mode']
members = w.get('members') or []
state = w['state']
del w['state']
obj_in_have = search_obj_in_list(group, have)
if state == 'absent':
if obj_in_have:
commands.append('no interface port-channel {0}'.format(group))
elif state == 'present':
cmd = ['interface port-channel {0}'.format(group),
'end']
if not obj_in_have:
if not group:
module.fail_json(msg='group is a required option')
commands.extend(cmd)
if members:
for m in members:
commands.append('interface {0}'.format(m))
commands.append('channel-group {0} mode {1}'.format(group, mode))
else:
if members:
if 'members' not in obj_in_have.keys():
for m in members:
commands.extend(cmd)
commands.append('interface {0}'.format(m))
commands.append('channel-group {0} mode {1}'.format(group, mode))
elif set(members) != set(obj_in_have['members']):
missing_members = list(set(members) - set(obj_in_have['members']))
for m in missing_members:
commands.extend(cmd)
commands.append('interface {0}'.format(m))
commands.append('channel-group {0} mode {1}'.format(group, mode))
superfluous_members = list(set(obj_in_have['members']) - set(members))
for m in superfluous_members:
commands.extend(cmd)
commands.append('interface {0}'.format(m))
commands.append('no channel-group {0} mode {1}'.format(group, mode))
if purge:
for h in have:
obj_in_want = search_obj_in_list(h['group'], want)
if not obj_in_want:
commands.append('no interface port-channel {0}'.format(h['group']))
return commands
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
d = item.copy()
d['group'] = str(d['group'])
obj.append(d)
else:
obj.append({
'group': str(module.params['group']),
'mode': module.params['mode'],
'members': module.params['members'],
'state': module.params['state']
})
return obj
def parse_mode(module, config, group, member):
mode = None
netcfg = CustomNetworkConfig(indent=1, contents=config)
parents = ['interface {0}'.format(member)]
body = netcfg.get_section(parents)
match_int = re.findall(r'interface {0}\n'.format(member), body, re.M)
if match_int:
match = re.search(r'channel-group {0} mode (\S+)'.format(group), body, re.M)
if match:
mode = match.group(1)
return mode
def parse_members(module, config, group):
members = []
for line in config.strip().split('!'):
l = line.strip()
if l.startswith('interface'):
match_group = re.findall(r'channel-group {0} mode'.format(group), l, re.M)
if match_group:
match = re.search(r'interface (\S+)', l, re.M)
if match:
members.append(match.group(1))
return members
def get_channel(module, config, group):
match = re.findall(r'^interface (\S+)', config, re.M)
if not match:
return {}
channel = {}
for item in set(match):
member = item
channel['mode'] = parse_mode(module, config, group, member)
channel['members'] = parse_members(module, config, group)
return channel
def map_config_to_obj(module):
objs = list()
config = get_config(module)
for line in config.split('\n'):
l = line.strip()
match = re.search(r'interface Port-channel(\S+)', l, re.M)
if match:
obj = {}
group = match.group(1)
obj['group'] = group
obj.update(get_channel(module, config, group))
objs.append(obj)
return objs
def main():
""" main entry point for module execution
"""
element_spec = dict(
group=dict(type='int'),
mode=dict(choices=['active', 'on', 'passive', 'auto', 'desirable']),
members=dict(type='list'),
state=dict(default='present',
choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['group'] = dict(required=True)
required_one_of = [['group', 'aggregate']]
required_together = [['members', 'mode']]
mutually_exclusive = [['group', 'aggregate']]
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec,
required_together=required_together),
purge=dict(default=False, type='bool')
)
argument_spec.update(element_spec)
argument_spec.update(ios_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
required_together=required_together,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
adamklawonn/CityCircles | citycircles_iphone/build_back2/iphoneDistribution-iphoneos/CityCircles.app/pntsandrects.py | 17 | 6225 | """Point and Rectangle classes.
This code is in the public domain.
Point -- point with (x,y) coordinates
Rect -- two points, forming a rectangle
"""
import math
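# A minimal usage sketch of the two classes below (values chosen purely
# for illustration):
#
#   p = Point(3.0, 4.0)
#   p.length()                       # 5.0
#   p.distance_to(Point(0.0, 0.0))   # 5.0
#   r = Rect(Point(0, 0), Point(10, 10))
#   r.contains(p)                    # True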
class Point:
"""A point identified by (x,y) coordinates.
supports: +, -, *, /, str, repr
length -- calculate length of vector to point from origin
distance_to -- calculate distance between two points
as_tuple -- construct tuple (x,y)
clone -- construct a duplicate
integerize -- convert x & y to integers
floatize -- convert x & y to floats
move_to -- reset x & y
slide -- move (in place) +dx, +dy, as spec'd by point
slide_xy -- move (in place) +dx, +dy
rotate -- rotate around the origin
rotate_about -- rotate around another point
"""
def __init__(self, x=0.0, y=0.0):
self.x = x
self.y = y
def __add__(self, p):
"""Point(x1+x2, y1+y2)"""
return Point(self.x+p.x, self.y+p.y)
def __sub__(self, p):
"""Point(x1-x2, y1-y2)"""
return Point(self.x-p.x, self.y-p.y)
def __mul__( self, scalar ):
"""Point(x1*x2, y1*y2)"""
return Point(self.x*scalar, self.y*scalar)
def __div__(self, scalar):
"""Point(x1/x2, y1/y2)"""
return Point(self.x/scalar, self.y/scalar)
def __str__(self):
return "(%s, %s)" % (self.x, self.y)
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.x, self.y)
def length(self):
return math.sqrt(self.x**2 + self.y**2)
def distance_to(self, p):
"""Calculate the distance between two points."""
return (self - p).length()
def as_tuple(self):
"""(x, y)"""
return (self.x, self.y)
def clone(self):
"""Return a full copy of this point."""
return Point(self.x, self.y)
def integerize(self):
"""Convert co-ordinate values to integers."""
self.x = int(self.x)
self.y = int(self.y)
def floatize(self):
"""Convert co-ordinate values to floats."""
self.x = float(self.x)
self.y = float(self.y)
def move_to(self, x, y):
"""Reset x & y coordinates."""
self.x = x
self.y = y
def slide(self, p):
'''Move to new (x+dx,y+dy).
Can anyone think up a better name for this function?
slide? shift? delta? move_by?
'''
self.x = self.x + p.x
self.y = self.y + p.y
def slide_xy(self, dx, dy):
'''Move to new (x+dx,y+dy).
Can anyone think up a better name for this function?
slide? shift? delta? move_by?
'''
self.x = self.x + dx
self.y = self.y + dy
def rotate(self, rad):
"""Rotate counter-clockwise by rad radians.
Positive y goes *up,* as in traditional mathematics.
Interestingly, you can use this in y-down computer graphics, if
you just remember that it turns clockwise, rather than
counter-clockwise.
The new position is returned as a new Point.
"""
s, c = [f(rad) for f in (math.sin, math.cos)]
x, y = (c*self.x - s*self.y, s*self.x + c*self.y)
return Point(x,y)
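# Example: Point(1, 0).rotate(math.pi / 2) is approximately Point(0, 1),
# a quarter turn counter-clockwise (up to floating-point error).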
def rotate_about(self, p, theta):
"""Rotate counter-clockwise around a point, by theta degrees.
Positive y goes *up,* as in traditional mathematics.
The new position is returned as a new Point.
"""
result = self.clone()
result.slide_xy(-p.x, -p.y)
result = result.rotate(theta)
result.slide_xy(p.x, p.y)
return result
class Rect:
"""A rectangle identified by two points.
The rectangle stores left, top, right, and bottom values.
Coordinates are based on screen coordinates.
origin top
+-----> x increases |
| left -+- right
v |
y increases bottom
set_points -- reset rectangle coordinates
contains -- is a point inside?
overlaps -- does a rectangle overlap?
top_left -- get top-left corner
bottom_right -- get bottom-right corner
expanded_by -- grow (or shrink)
"""
def __init__(self, pt1, pt2):
"""Initialize a rectangle from two points."""
self.set_points(pt1, pt2)
def set_points(self, pt1, pt2):
"""Reset the rectangle coordinates."""
(x1, y1) = pt1.as_tuple()
(x2, y2) = pt2.as_tuple()
self.left = min(x1, x2)
self.top = min(y1, y2)
self.right = max(x1, x2)
self.bottom = max(y1, y2)
def contains(self, pt):
"""Return true if a point is inside the rectangle."""
x,y = pt.as_tuple()
return (self.left <= x <= self.right and
self.top <= y <= self.bottom)
def overlaps(self, other):
"""Return true if a rectangle overlaps this rectangle."""
return (self.right > other.left and self.left < other.right and
self.top < other.bottom and self.bottom > other.top)
def top_left(self):
"""Return the top-left corner as a Point."""
return Point(self.left, self.top)
def bottom_right(self):
"""Return the bottom-right corner as a Point."""
return Point(self.right, self.bottom)
def expanded_by(self, n):
"""Return a rectangle with extended borders.
Create a new rectangle that is wider and taller than the
immediate one. All sides are extended by "n" points.
"""
p1 = Point(self.left-n, self.top-n)
p2 = Point(self.right+n, self.bottom+n)
return Rect(p1, p2)
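# Example: Rect(Point(0, 0), Point(10, 10)).expanded_by(2) spans
# (-2, -2) to (12, 12).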
def __str__( self ):
return "<Rect (%s,%s)-(%s,%s)>" % (self.left,self.top,
self.right,self.bottom)
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__,
Point(self.left, self.top),
Point(self.right, self.bottom))
| gpl-3.0 |
rosjat/python-scsi | pyscsi/pyscsi/scsi_enum_command.py | 1 | 23775 | # coding: utf-8
# Copyright (C) 2014 by Ronnie Sahlberg<[email protected]>
# Copyright (C) 2015 by Markus Rosjat<[email protected]>
# SPDX-FileCopyrightText: 2014 The python-scsi Authors
#
# SPDX-License-Identifier: LGPL-2.1-or-later
from pyscsi.pyscsi.scsi_opcode import OpCode
from pyscsi.utils.enum import Enum
# Dictionaries to define service actions and their values
#
# We use a helper to connect the service actions to the corresponding opcode.
# The OpCode object holds an Enum object with the service actions and has a value and
# a name property to access the opcode name and value.
"""
------------------------------------------------------------------------------
Maintenance in Service Actions
------------------------------------------------------------------------------
"""
sa_maintenance_in = {'REPORT_ASSIGNED_UNASSIGNED_P_EXTENT': 0x00,
'REPORT_COMPONENT_DEVICE': 0x01,
'REPORT_COMPONENT_DEVICE_ATTACHMENTS': 0x02,
'REPORT_DEVICE_IDENTIFICATION': 0x07,
'REPORT_PERIPHERAL_DEVICE': 0x03,
'REPORT_PERIPHERAL_DEVICE_ASSOCIATIONS': 0x04,
'REPORT_PERIPHERAL_DEVICE_COMPONENT_DEVICE_IDENTIFIER': 0x05,
'REPORT_STATES': 0x06,
'REPORT_SUPPORTED_CONFIGURATION_METHOD': 0x09,
'REPORT_UNCONFIGURED_CAPACITY': 0x08, }
"""
------------------------------------------------------------------------------
Maintenance out Service Actions Dictionaries
------------------------------------------------------------------------------
"""
sa_maintenance_out = {'ADD_PERIPHERAL_DEVICE_COMPONENT_DEVICE': 0x00,
'ATTACH_TO_COMPONENT_DEVICE': 0x01,
'BREAK_PERIPHERAL_DEVICE_COMPONENT_DEVICE': 0x07,
'EXCHANGE_P_EXTENT': 0x02,
'EXCHANGE_PERIPHERAL_DEVICE_COMPONENT_DEVICE': 0x03,
'INSTRUCT_COMPONENT_DEVICE': 0x04,
'REMOVE_PERIPHERAL_DEVICE_COMPONENT_DEVICE': 0x05,
'SET_PERIPHERAL_DEVICE_COMPONENT_DEVICE_IDENTIFIER': 0x06, }
"""
------------------------------------------------------------------------------
Service Actions Dictionaries for the A3 opcode
------------------------------------------------------------------------------
"""
service_actions = {'REPORT_DEVICE_IDENTIFIER': 0x05,
'REPORT_ALIASES': 0x0b,
'REPORT_PRIORITY': 0x0e,
'REPORT_SUPPORTED_OPERATION_CODES': 0x0c,
'REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS': 0x0d,
'REPORT_TARGET_PORT_GROUPS': 0x0a,
'REPORT_TIMESTAMP': 0x0f,
'REPORT_IDENTIFYING_INFORMATION': 0x05,
'REQUEST_DATA_TRANSFER_ELEMENT_INQUIRY': 0x06,
'CHANGE_ALIASES': 0x0b,
'SET_DEVICE_IDENTIFIER': 0x06,
'SET_PRIORITY': 0x0e,
'SET_TARGET_PORT_GROUPS': 0x0a,
'SET_TIMESTAMP': 0x0f,
'SET_IDENTIFYING_INFORMATION': 0x06,
'ORWRITE_32': 0x000e,
'READ_32': 0x0009,
'VERIFY_32': 0x000a,
'WRITE_32': 0x000b,
'WRITE_AND_VERIFY_32': 0x000c,
'WRITE_SAME_32': 0x000d,
'XDREAD_32': 0x0003,
'XDWRITE_32': 0x0004,
'XDWRITEREAD_32': 0x0007,
'XPWRITE_32': 0x0006,
'GET_LBA_STATUS': 0x12,
'READ_CAPACITY_16': 0x10,
'REPORT_REFERRALS': 0x13,
'OPEN_IMPORTEXPORT_ELEMENT': 0x00,
'CLOSE_IMPORTEXPORT_ELEMENT': 0x01, }
"""
------------------------------------------------------------------------------
opcode Dictionaries
------------------------------------------------------------------------------
"""
spc_opcodes = {'SPC_OPCODE_A4': OpCode('SPC_OPCODE_A4', 0xa4, service_actions),
'SPC_OPCODE_A3': OpCode('SPC_OPCODE_A3', 0xa3, service_actions),
'ACCESS_CONTROL_IN': OpCode('ACCESS_CONTROL_IN', 0x86, {}),
'ACCESS_CONTROL_OUT': OpCode('ACCESS_CONTROL_OUT', 0x87, {}),
'EXTENDED_COPY': OpCode('EXTENDED_COPY', 0x83, {}),
'INQUIRY': OpCode('INQUIRY', 0x12, {}),
'LOG_SELECT': OpCode('LOG_SELECT', 0x4c, {}),
'LOG_SENSE': OpCode('LOG_SENSE', 0x4d, {}),
'MODE_SELECT_6': OpCode('MODE_SELECT_6', 0x15, {}),
'MODE_SELECT_10': OpCode('MODE_SELECT_10', 0x55, {}),
'MODE_SENSE_6': OpCode('MODE_SENSE_6', 0x1a, {}),
'MODE_SENSE_10': OpCode('MODE_SENSE_10', 0x5a, {}),
'PERSISTENT_RESERVE_IN': OpCode('PERSISTENT_RESERVE_IN', 0x5e, {}),
'PERSISTENT_RESERVE_OUT': OpCode('PERSISTENT_RESERVE_OUT', 0x5f, {}),
'PREVENT_ALLOW_MEDIUM_REMOVAL': OpCode('PREVENT_ALLOW_MEDIUM_REMOVAL', 0x1e, {}),
'READ_ATTRIBUTE': OpCode('READ_ATTRIBUTE', 0x8c, {}),
'READ_BUFFER_10': OpCode('READ_BUFFER_10', 0x3c, {}),
'READ_BUFFER_16': OpCode('READ_BUFFER_16', 0x9b, {}),
'READ_MEDIA_SERIAL_NUMBER': OpCode('READ_MEDIA_SERIAL_NUMBER', 0xab,
{'READ_MEDIA_SERIAL_NUMBER': 0x01, }),
'RECEIVE_COPY_RESULTS': OpCode('RECEIVE_COPY_RESULTS', 0x84, {}),
'RECEIVE_DIAGNOSTIC_RESULTS': OpCode('RECEIVE_DIAGNOSTIC_RESULTS', 0x1c, {}),
'REPORT_LUNS': OpCode('REPORT_LUNS', 0xa0, {}),
'REQUEST_SENSE': OpCode('REQUEST_SENSE', 0x03, {}),
'SEND_DIAGNOSTIC': OpCode('SEND_DIAGNOSTIC', 0x1d, {}),
'TEST_UNIT_READY': OpCode('TEST_UNIT_READY', 0x00, {}),
'WRITE_ATTRIBUTE': OpCode('WRITE_ATTRIBUTE', 0x8d, {}),
'WRITE_BUFFER': OpCode('WRITE_BUFFER', 0x3b, {}), }
sbc_opcodes = {'SBC_OPCODE_7F': OpCode('SBC_OPCODE_7F', 0x7f, service_actions),
'SBC_OPCODE_A4': OpCode('SBC_OPCODE_A4', 0xa4, service_actions),
'SBC_OPCODE_A3': OpCode('SBC_OPCODE_A3', 0xa3, service_actions),
'SBC_OPCODE_9E': OpCode('SBC_OPCODE_9E', 0x9e, service_actions),
'ACCESS_CONTROL_IN': OpCode('ACCESS_CONTROL_IN', 0x86, {}),
'ACCESS_CONTROL_OUT': OpCode('ACCESS_CONTROL_OUT', 0x87, {}),
'COMPARE_AND_WRITE': OpCode('COMPARE_AND_WRITE', 0x89, {}),
'EXTENDED_COPY': OpCode('EXTENDED_COPY', 0x83, {}),
'FORMAT_UNIT': OpCode('FORMAT_UNIT', 0x04, {}),
'INQUIRY': OpCode('INQUIRY', 0x12, {}),
'LOG_SELECT': OpCode('LOG_SELECT', 0x4c, {}),
'LOG_SENSE': OpCode('LOG_SENSE', 0x4d, {}),
'MAINTENANCE_IN': OpCode('MAINTENANCE_IN', 0xa3, sa_maintenance_in),
'MAINTENANCE_OUT': OpCode('MAINTENANCE_OUT', 0xa4, sa_maintenance_out),
'MODE_SELECT_6': OpCode('MODE_SELECT_6', 0x15, {}),
'MODE_SELECT_10': OpCode('MODE_SELECT_10', 0x55, {}),
'MODE_SENSE_6': OpCode('MODE_SENSE_6', 0x1a, {}),
'MODE_SENSE_10': OpCode('MODE_SENSE_10', 0x5a, {}),
'ORWRITE_16': OpCode('ORWRITE_16', 0x8b, {}),
'PERSISTENT_RESERVE_IN': OpCode('PERSISTENT_RESERVE_IN', 0x5e, {}),
'PERSISTENT_RESERVE_OUT': OpCode('PERSISTENT_RESERVE_OUT', 0x5f, {}),
'PRE_FETCH_10': OpCode('PRE_FETCH_10', 0x34, {}),
'PRE_FETCH_16': OpCode('PRE_FETCH_16', 0x90, {}),
'PREVENT_ALLOW_MEDIUM_REMOVAL': OpCode('PREVENT_ALLOW_MEDIUM_REMOVAL', 0x1e, {}),
'READ_6': OpCode('READ_6', 0x08, {}),
'READ_10': OpCode('READ_10', 0x28, {}),
'READ_12': OpCode('READ_12', 0xa8, {}),
'READ_16': OpCode('READ_16', 0x88, {}),
'READ_ATTRIBUTE': OpCode('READ_ATTRIBUTE', 0x8c, {}),
'READ_BUFFER_10': OpCode('READ_BUFFER_10', 0x3c, {}),
'READ_BUFFER_16': OpCode('READ_BUFFER_16', 0x9b, {}),
'READ_CAPACITY_10': OpCode('READ_CAPACITY_10', 0x25, {}),
'READ_DEFECT_DATA_10': OpCode('READ_DEFECT_DATA_10', 0x37, {}),
'READ_DEFECT_DATA_12': OpCode('READ_DEFECT_DATA_12', 0xb7, {}),
'READ_LONG_10': OpCode('READ_LONG_10', 0x3e, {}),
'READ_LONG_16': OpCode('READ_LONG_16', 0x9e, {'READ_LONG_16': 0x11, }),
'REASSIGN_BLOCKS': OpCode('REASSIGN_BLOCKS', 0x07, {}),
'RECEIVE_COPY_RESULTS': OpCode('RECEIVE_COPY_RESULTS', 0x84, {}),
'RECEIVE_DIAGNOSTIC_RESULTS': OpCode('RECEIVE_DIAGNOSTIC_RESULTS', 0x1c, {}),
'REDUNDANCY_GROUP_IN': OpCode('REDUNDANCY_GROUP_IN', 0xba, {}),
'REDUNDANCY_GROUP_OUT': OpCode('REDUNDANCY_GROUP_OUT', 0xbb, {}),
'REPORT_LUNS': OpCode('REPORT_LUNS', 0xa0, {}),
'REQUEST_SENSE': OpCode('REQUEST_SENSE', 0x03, {}),
'SECURITY_PROTOCOL_IN': OpCode('SECURITY_PROTOCOL_IN', 0xa2, {}),
'SECURITY_PROTOCOL_OUT': OpCode('SECURITY_PROTOCOL_OUT', 0xb5, {}),
'SEND_DIAGNOSTIC': OpCode('SEND_DIAGNOSTIC', 0x1d, {}),
'SPARE_IN': OpCode('SPARE_IN', 0xbc, {}),
'SPARE_OUT': OpCode('SPARE_OUT', 0xbd, {}),
'START_STOP_UNIT': OpCode('START_STOP_UNIT', 0x1b, {}),
'SYNCHRONIZE_CACHE_10': OpCode('SYNCHRONIZE_CACHE_10', 0x35, {}),
'SYNCHRONIZE_CACHE_16': OpCode('SYNCHRONIZE_CACHE_16', 0x91, {}),
'TEST_UNIT_READY': OpCode('TEST_UNIT_READY', 0x00, {}),
'UNMAP': OpCode('UNMAP', 0x42, {}),
'VERIFY_10': OpCode('VERIFY_10', 0x2f, {}),
'VERIFY_12': OpCode('VERIFY_12', 0xaf, {}),
'VERIFY_16': OpCode('VERIFY_16', 0x8f, {}),
'VOLUME_SET_IN': OpCode('VOLUME_SET_IN', 0xbe, {}),
'VOLUME_SET_OUT': OpCode('VOLUME_SET_OUT', 0xbf, {}),
'WRITE_6': OpCode('WRITE_6', 0x0a, {}),
'WRITE_10': OpCode('WRITE_10', 0x2a, {}),
'WRITE_12': OpCode('WRITE_12', 0xaa, {}),
'WRITE_16': OpCode('WRITE_16', 0x8a, {}),
'WRITE_AND_VERIFY_10': OpCode('WRITE_AND_VERIFY_10', 0x2e, {}),
'WRITE_AND_VERIFY_12': OpCode('WRITE_AND_VERIFY_12', 0xae, {}),
'WRITE_AND_VERIFY_16': OpCode('WRITE_AND_VERIFY_16', 0x8e, {}),
'WRITE_ATTRIBUTE': OpCode('WRITE_ATTRIBUTE', 0x8d, {}),
'WRITE_BUFFER': OpCode('WRITE_BUFFER', 0x3b, {}),
'WRITE_LONG_10': OpCode('WRITE_LONG_10', 0x3f, {}),
'WRITE_LONG_16': OpCode('WRITE_LONG_16', 0x9f, {'WRITE_LONG_16': 0x11, }),
'WRITE_SAME_10': OpCode('WRITE_SAME_10', 0x41, {}),
'WRITE_SAME_16': OpCode('WRITE_SAME_16', 0x93, {}),
'XDREAD_10': OpCode('XDREAD_10', 0x52, {}),
'XDWRITE_10': OpCode('XDWRITE_10', 0x50, {}),
'XDWRITEREAD_10': OpCode('XDWRITEREAD_10', 0x53, {}),
'XPWRITE_10': OpCode('XPWRITE_10', 0x51, {}), }
ssc_opcodes = {'SSC_OPCODE_A4': OpCode('SSC_OPCODE_A4', 0xa4, service_actions),
'SSC_OPCODE_A3': OpCode('SSC_OPCODE_A3', 0xa3, service_actions),
'ACCESS_CONTROL_IN': OpCode('ACCESS_CONTROL_IN', 0x86, {}),
'ACCESS_CONTROL_OUT': OpCode('ACCESS_CONTROL_OUT', 0x87, {}),
'ERASE_16': OpCode('ERASE_16', 0x93, {}),
'EXTENDED_COPY': OpCode('EXTENDED_COPY', 0x83, {}),
'FORMAT_MEDIUM': OpCode('FORMAT_MEDIUM', 0x04, {}),
'INQUIRY': OpCode('INQUIRY', 0x12, {}),
'LOAD_UNLOAD': OpCode('LOAD_UNLOAD', 0x1b, {}),
'LOCATE_16': OpCode('LOCATE_16', 0x92, {}),
'LOG_SELECT': OpCode('LOG_SELECT', 0x4c, {}),
'LOG_SENSE': OpCode('LOG_SENSE', 0x4d, {}),
'MODE_SELECT_6': OpCode('MODE_SELECT_6', 0x15, {}),
'MODE_SELECT_10': OpCode('MODE_SELECT_10', 0x55, {}),
'MODE_SENSE_6': OpCode('MODE_SENSE_6', 0x1a, {}),
'MODE_SENSE_10': OpCode('MODE_SENSE_10', 0x5a, {}),
'MOVE_MEDIUM_ATTACHED': OpCode('MOVE_MEDIUM_ATTACHED', 0xa7, {}),
'PERSISTENT_RESERVE_IN': OpCode('PERSISTENT_RESERVE_IN', 0x5e, {}),
'PERSISTENT_RESERVE_OUT': OpCode('PERSISTENT_RESERVE_OUT', 0x5f, {}),
'PREVENT_ALLOW_MEDIUM_REMOVAL': OpCode('PREVENT_ALLOW_MEDIUM_REMOVAL', 0x1e, {}),
'READ_6': OpCode('READ_6', 0x08, {}),
'READ_16': OpCode('READ_16', 0x88, {}),
'READ_ATTRIBUTE': OpCode('READ_ATTRIBUTE', 0x8c, {}),
'READ_BLOCK_LIMITS': OpCode('READ_BLOCK_LIMITS', 0x05, {}),
'READ_BUFFER_10': OpCode('READ_BUFFER_10', 0x3c, {}),
'READ_BUFFER_16': OpCode('READ_BUFFER_16', 0x9b, {}),
'READ_ELEMENT_STATUS_ATTACHED': OpCode('READ_ELEMENT_STATUS_ATTACHED', 0xb4, {}),
'READ_POSITION': OpCode('READ_POSITION', 0x34, {}),
'READ_REVERSE_6': OpCode('READ_REVERSE_6', 0x0f, {}),
'READ_REVERSE_16': OpCode('READ_REVERSE_16', 0x81, {}),
'RECEIVE_COPY_RESULTS': OpCode('RECEIVE_COPY_RESULTS', 0x84, {}),
'RECEIVE_DIAGNOSTIC_RESULTS': OpCode('RECEIVE_DIAGNOSTIC_RESULTS', 0x1c, {}),
'RECOVER_BUFFERED_DATA': OpCode('RECOVER_BUFFERED_DATA', 0x14, {}),
'REPORT_ALIAS': OpCode('REPORT_ALIAS', 0xa3, {'REPORT_ALIAS': 0x0b, }),
'REPORT_DENSITY_SUPPORT': OpCode('REPORT_DENSITY_SUPPORT', 0x44, {}),
'REPORT_LUNS': OpCode('REPORT_LUNS', 0xa0, {}),
'REQUEST_SENSE': OpCode('REQUEST_SENSE', 0x03, {}),
'REWIND': OpCode('REWIND', 0x01, {}),
'SEND_DIAGNOSTIC': OpCode('SEND_DIAGNOSTIC', 0x1d, {}),
'SET_CAPACITY': OpCode('SET_CAPACITY', 0x0b, {}),
'SPACE_6': OpCode('SPACE_6', 0x11, {}),
'SPACE_16': OpCode('SPACE_16', 0x91, {}),
'TEST_UNIT_READY': OpCode('TEST_UNIT_READY', 0x00, {}),
'VERIFY_6': OpCode('VERIFY_6', 0x13, {}),
'VERIFY_16': OpCode('VERIFY_16', 0x8f, {}),
'WRITE_6': OpCode('WRITE_6', 0x0a, {}),
'WRITE_16': OpCode('WRITE_16', 0x8a, {}),
'WRITE_ATTRIBUTE': OpCode('WRITE_ATTRIBUTE', 0x8d, {}),
'WRITE_BUFFER': OpCode('WRITE_BUFFER', 0x3b, {}),
'WRITE_FILEMARKS_6': OpCode('WRITE_FILEMARKS_6', 0x10, {}),
'WRITE_FILEMARKS_16': OpCode('WRITE_FILEMARKS_16', 0x80, {}), }
smc_opcodes = {'SMC_OPCODE_A4': OpCode('SMC_OPCODE_A4', 0xa4, service_actions),
'SMC_OPCODE_A3': OpCode('SMC_OPCODE_A3', 0xa3, service_actions),
'ACCESS_CONTROL_IN': OpCode('ACCESS_CONTROL_IN', 0x86, {}),
'ACCESS_CONTROL_OUT': OpCode('ACCESS_CONTROL_OUT', 0x87, {}),
'EXCHANGE_MEDIUM': OpCode('EXCHANGE_MEDIUM', 0xa6, {}),
'INITIALIZE_ELEMENT_STATUS': OpCode('INITIALIZE_ELEMENT_STATUS', 0x07, {}),
'INITIALIZE_ELEMENT_STATUS_WITH_RANGE': OpCode('INITIALIZE_ELEMENT_STATUS_WITH_RANGE', 0x37, {}),
'INQUIRY': OpCode('INQUIRY', 0x12, {}),
'LOG_SELECT': OpCode('LOG_SELECT', 0x4c, {}),
'LOG_SENSE': OpCode('LOG_SENSE', 0x4d, {}),
'MAINTENANCE_IN': OpCode('MAINTENANCE_IN', 0xa3, sa_maintenance_in),
'MAINTENANCE_OUT': OpCode('MAINTENANCE_OUT', 0xa4, sa_maintenance_out),
'MODE_SELECT_6': OpCode('MODE_SELECT_6', 0x15, {}),
'MODE_SELECT_10': OpCode('MODE_SELECT_10', 0x55, {}),
'MODE_SENSE_6': OpCode('MODE_SENSE_6', 0x1a, {}),
'MODE_SENSE_10': OpCode('MODE_SENSE_10', 0x5a, {}),
'MOVE_MEDIUM': OpCode('MOVE_MEDIUM', 0xa5, {}),
'OPEN_CLOSE_IMPORT_EXPORT_ELEMENT': OpCode('SMC_OPCODE_1B', 0x1b, service_actions),
'PERSISTENT_RESERVE_IN': OpCode('PERSISTENT_RESERVE_IN', 0x5e, {}),
'PERSISTENT_RESERVE_OUT': OpCode('PERSISTENT_RESERVE_OUT', 0x5f, {}),
'PREVENT_ALLOW_MEDIUM_REMOVAL': OpCode('PREVENT_ALLOW_MEDIUM_REMOVAL', 0x1e, {}),
'POSITION_TO_ELEMENT': OpCode('POSITION_TO_ELEMENT', 0x2b, {}),
'READ_ATTRIBUTE': OpCode('READ_ATTRIBUTE', 0x8c, {}),
'READ_BUFFER_10': OpCode('READ_BUFFER_10', 0x3c, {}),
'READ_BUFFER_16': OpCode('READ_BUFFER_16', 0x9b, {}),
'READ_ELEMENT_STATUS': OpCode('READ_ELEMENT_STATUS', 0xb8, {}),
'RECEIVE_DIAGNOSTIC_RESULTS': OpCode('RECEIVE_DIAGNOSTIC_RESULTS', 0x1c, {}),
'REDUNDANCY_GROUP_IN': OpCode('REDUNDANCY_GROUP_IN', 0xba, {}),
'REDUNDANCY_GROUP_OUT': OpCode('REDUNDANCY_GROUP_OUT', 0xbb, {}),
'RELEASE_6': OpCode('RELEASE_6', 0x17, {}),
'RELEASE_10': OpCode('RELEASE_10', 0x57, {}),
'REPORT_LUNS': OpCode('REPORT_LUNS', 0xa0, {}),
'REPORT_VOLUME_TYPES_SUPPORTED': OpCode('REPORT_VOLUME_TYPES_SUPPORTED', 0x44, {}),
'REQUEST_VOLUME_ELEMENT_ADDRESS': OpCode('REQUEST_VOLUME_ELEMENT_ADDRESS', 0xb5, {}),
'REQUEST_SENSE': OpCode('REQUEST_SENSE', 0x03, {}),
'RESERVE_6': OpCode('RESERVE_6', 0x16, {}),
'RESERVE_10': OpCode('RESERVE_10', 0x56, {}),
'SEND_DIAGNOSTIC': OpCode('SEND_DIAGNOSTIC', 0x1d, {}),
'SEND_VOLUME_TAG': OpCode('SEND_VOLUME_TAG', 0xb6, {}),
'SPARE_IN': OpCode('SPARE_IN', 0xbc, {}),
'SPARE_OUT': OpCode('SPARE_OUT', 0xbd, {}),
'TEST_UNIT_READY': OpCode('TEST_UNIT_READY', 0x00, {}),
'VOLUME_SET_IN': OpCode('VOLUME_SET_IN', 0xbe, {}),
'VOLUME_SET_OUT': OpCode('VOLUME_SET_OUT', 0xbf, {}),
'WRITE_ATTRIBUTE': OpCode('WRITE_ATTRIBUTE', 0x8d, {}),
'WRITE_BUFFER': OpCode('WRITE_BUFFER', 0x3b, {}), }
mmc_opcodes = {'BLANK': OpCode('BLANK', 0xa1, {}),
'CLOSE_TRACK_SESSION': OpCode('CLOSE_TRACK_SESSION', 0x5b, {}),
'FORMAT_UNIT': OpCode('FORMAT_UNIT', 0x04, {}),
'GET_CONFIGURATION': OpCode('GET_CONFIGURATION', 0x46, {}),
'GET_EVENT_STATUS_NOTIFICATION': OpCode('GET_EVENT_STATUS_NOTIFICATION', 0x4a, {}),
'GET_PERFORMANCE': OpCode('GET_PERFORMANCE', 0xac, {}),
'INQUIRY': OpCode('INQUIRY', 0x12, {}),
'LOAD_UNLOAD_MEDIUM': OpCode('LOAD_UNLOAD_MEDIUM', 0xa6, {}),
'MECHANISM_STATUS': OpCode('MECHANISM_STATUS', 0xbd, {}),
'MODE_SELECT_10': OpCode('MODE_SELECT_10', 0x55, {}),
'MODE_SENSE_10': OpCode('MODE_SENSE_10', 0x5a, {}),
'PREVENT_ALLOW_MEDIUM_REMOVAL': OpCode('PREVENT_ALLOW_MEDIUM_REMOVAL', 0x1e, {}),
'READ_10': OpCode('READ_10', 0x28, {}),
'READ_12': OpCode('READ_12', 0xa8, {}),
'READ_BUFFER_10': OpCode('READ_BUFFER_10', 0x3c, {}),
'READ_BUFFER_16': OpCode('READ_BUFFER_16', 0x9b, {}),
'READ_BUFFER_CAPACITY': OpCode('READ_BUFFER_CAPACITY', 0x5c, {}),
'READ_CAPACITY': OpCode('READ_CAPACITY', 0x25, {}),
'READ_CD': OpCode('READ_CD', 0xbe, {}),
'READ_CD_MSF': OpCode('READ_CD_MSF', 0xb9, {}),
'READ_DISC_INFORMATION': OpCode('READ_DISC_INFORMATION', 0x51, {}),
'READ_DISC_STRUCTURE': OpCode('READ_DISC_STRUCTURE', 0xad, {}),
'READ_FORMAT_CAPACITIES': OpCode('READ_FORMAT_CAPACITIES', 0x23, {}),
'READ_TOC_PMA_ATIP': OpCode('READ_TOC_PMA_ATIP', 0x43, {}),
'READ_TRACK_INFORMATION': OpCode('READ_TRACK_INFORMATION', 0x52, {}),
'REPAIR_TRACK': OpCode('REPAIR_TRACK', 0x58, {}),
'REPORT_KEY': OpCode('REPORT_KEY', 0xa4, {}),
'REPORT_LUNS': OpCode('REPORT_LUNS', 0xa0, {}),
'REQUEST_SENSE': OpCode('REQUEST_SENSE', 0x03, {}),
'RESERVE_TRACK': OpCode('RESERVE_TRACK', 0x53, {}),
'SECURITY_PROTOCOL_IN': OpCode('SECURITY_PROTOCOL_IN', 0xa2, {}),
'SECURITY_PROTOCOL_OUT': OpCode('SECURITY_PROTOCOL_OUT', 0xb5, {}),
'SEEK_10': OpCode('SEEK_10', 0x2b, {}),
'SEND_CUE_SHEET': OpCode('SEND_CUE_SHEET', 0x5d, {}),
'SEND_DISC_STRUCTURE': OpCode('SEND_DISC_STRUCTURE', 0xbf, {}),
'SEND_KEY': OpCode('SEND_KEY', 0xa3, {}),
'SEND_OPC_INFORMATION': OpCode('SEND_OPC_INFORMATION', 0x54, {}),
'SET_CD_SPEED': OpCode('SET_CD_SPEED', 0xbb, {}),
'SET_READ_AHEAD': OpCode('SET_READ_AHEAD', 0xa7, {}),
'SET_STREAMING': OpCode('SET_STREAMING', 0xb6, {}),
'START_STOP_UNIT': OpCode('START_STOP_UNIT', 0x1b, {}),
'SYNCHRONIZE_CACHE': OpCode('SYNCHRONIZE_CACHE', 0x35, {}),
'TEST_UNIT_READY': OpCode('TEST_UNIT_READY', 0x00, {}),
'VERIFY_10': OpCode('VERIFY_10', 0x2f, {}),
'WRITE_10': OpCode('WRITE_10', 0x2a, {}),
'WRITE_12': OpCode('WRITE_12', 0xaa, {}),
'WRITE_AND_VERIFY_10': OpCode('WRITE_AND_VERIFY_10', 0x2e, {}),
'WRITE_BUFFER': OpCode('WRITE_BUFFER', 0x3b, {}), }
"""
------------------------------------------------------------------------------
scsi status Dictionaries
------------------------------------------------------------------------------
"""
scsi_status = {'GOOD': 0x00,
'CHECK_CONDITION': 0x02,
'CONDITIONS_MET': 0x04,
'BUSY': 0x08,
'RESERVATION_CONFLICT': 0x18,
'TASK_SET_FULL': 0x28,
'ACA_ACTIVE': 0x30,
'TASK_ABORTED': 0x40,
'SGIO_ERROR': 0xff, }
"""
------------------------------------------------------------------------------
open/close
------------------------------------------------------------------------------
"""
action_codes = {''}
"""
------------------------------------------------------------------------------
Instantiate the Enum Objects
------------------------------------------------------------------------------
"""
SCSI_STATUS = Enum(scsi_status)
spc = Enum(spc_opcodes)
sbc = Enum(sbc_opcodes)
ssc = Enum(ssc_opcodes)
smc = Enum(smc_opcodes)
mmc = Enum(mmc_opcodes)
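# A usage sketch for the Enum objects above (assuming, per the module
# comment, that each OpCode exposes name/value properties and that the
# Enum helper makes its dictionary keys available as attributes):
#
#   sbc.READ_16.value             # 0x88
#   sbc.READ_16.name              # 'READ_16'
#   SCSI_STATUS.CHECK_CONDITION   # 0x02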
"""
------------------------------------------------------------------------------
Obsolete Dictionaries and Enums
------------------------------------------------------------------------------
NOTE: the dicts and Enums in this section are obsolete and will be removed in a future release
"""
opcodes = {'INQUIRY': 0x12,
'MODE_SENSE_6': 0x1a,
'MOVE_MEDIUM': 0xa5,
'READ_10': 0x28,
'READ_12': 0xa8,
'READ_16': 0x88,
'READ_CAPACITY_10': 0x25,
'READ_ELEMENT_STATUS': 0xb8,
'SERVICE_ACTION_IN': 0x9e,
'TEST_UNIT_READY': 0x00,
'WRITE_10': 0x2a,
'WRITE_12': 0xaa,
'WRITE_16': 0x8a,
'WRITE_SAME_10': 0x41,
'WRITE_SAME_16': 0x93,
}
OPCODE = Enum(opcodes)
service_action_ins = {'READ_CAPACITY_16': 0x10,
'GET_LBA_STATUS': 0x12, }
SERVICE_ACTION_IN = Enum(service_action_ins)
"""
------------------------------------------------------------------------------
"""
| lgpl-2.1 |
forrestv/myhdl | myhdl/test/conversion/toVerilog/test_inc.py | 3 | 5163 | import os
path = os.path
import unittest
from unittest import TestCase
import random
from random import randrange
random.seed(2)
from myhdl import *
from util import setupCosimulation
ACTIVE_LOW, INACTIVE_HIGH = 0, 1
def incRef(count, enable, clock, reset, n):
""" Incrementer with enable.
count -- output
enable -- control input, increment when 1
clock -- clock input
reset -- asynchronous reset input
n -- counter max value
"""
@instance
def logic():
while 1:
yield clock.posedge, reset.negedge
if reset == ACTIVE_LOW:
count.next = 0
else:
if enable:
count.next = (count + 1) % n
return logic
def inc(count, enable, clock, reset, n):
""" Incrementer with enable.
count -- output
enable -- control input, increment when 1
clock -- clock input
reset -- asynchronous reset input
n -- counter max value
"""
@always(clock.posedge, reset.negedge)
def incProcess():
if reset == ACTIVE_LOW:
count.next = 0
else:
if enable:
count.next = (count + 1) % n
return incProcess
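# A minimal elaboration sketch for the generator above (signal widths are
# illustrative; the real wiring lives in TestInc.bench below):
#
#   count = Signal(intbv(0)[8:])
#   enable = Signal(bool(0))
#   clock, reset = [Signal(bool()) for i in range(2)]
#   inc_inst = inc(count, enable, clock, reset, n=256)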
def inc2(count, enable, clock, reset, n):
@always(clock.posedge, reset.negedge)
def incProcess():
if reset == ACTIVE_LOW:
count.next = 0
else:
if enable:
if count == n-1:
count.next = 0
else:
count.next = count + 1
return incProcess
def incTask(count, enable, clock, reset, n):
def incTaskFunc(cnt, enable, reset, n):
if enable:
cnt[:] = (cnt + 1) % n
@instance
def incTaskGen():
cnt = intbv(0)[8:]
while 1:
yield clock.posedge, reset.negedge
if reset == ACTIVE_LOW:
cnt[:] = 0
count.next = 0
else:
# print count
incTaskFunc(cnt, enable, reset, n)
count.next = cnt
return incTaskGen
def incTaskFreeVar(count, enable, clock, reset, n):
def incTaskFunc():
if enable:
count.next = (count + 1) % n
@always(clock.posedge, reset.negedge)
def incTaskGen():
if reset == ACTIVE_LOW:
count.next = 0
else:
# print count
incTaskFunc()
return incTaskGen
def inc_v(name, count, enable, clock, reset):
return setupCosimulation(**locals())
class TestInc(TestCase):
def clockGen(self, clock):
while 1:
yield delay(10)
clock.next = not clock
def stimulus(self, enable, clock, reset):
reset.next = INACTIVE_HIGH
yield clock.negedge
reset.next = ACTIVE_LOW
yield clock.negedge
reset.next = INACTIVE_HIGH
for i in range(1000):
enable.next = 1
yield clock.negedge
for i in range(1000):
enable.next = min(1, randrange(5))
yield clock.negedge
raise StopSimulation
def check(self, count, count_v, enable, clock, reset, n):
expect = 0
yield reset.posedge
self.assertEqual(count, expect)
self.assertEqual(count, count_v)
while 1:
yield clock.posedge
if enable:
expect = (expect + 1) % n
yield delay(1)
# print "%d count %s expect %s count_v %s" % (now(), count, expect, count_v)
self.assertEqual(count, expect)
self.assertEqual(count, count_v)
def bench(self, inc):
m = 8
n = 2 ** m
count = Signal(intbv(0)[m:])
count_v = Signal(intbv(0)[m:])
enable = Signal(bool(0))
clock, reset = [Signal(bool()) for i in range(2)]
inc_inst_ref = incRef(count, enable, clock, reset, n=n)
inc_inst = toVerilog(inc, count, enable, clock, reset, n=n)
# inc_inst = inc(count, enable, clock, reset, n=n)
inc_inst_v = inc_v(inc.func_name, count_v, enable, clock, reset)
clk_1 = self.clockGen(clock)
st_1 = self.stimulus(enable, clock, reset)
ch_1 = self.check(count, count_v, enable, clock, reset, n=n)
sim = Simulation(inc_inst_ref, inc_inst_v, clk_1, st_1, ch_1)
return sim
def testIncRef(self):
""" Check increment operation """
sim = self.bench(incRef)
sim.run(quiet=1)
def testInc(self):
""" Check increment operation """
sim = self.bench(inc)
sim.run(quiet=1)
def testInc2(self):
""" Check increment operation """
sim = self.bench(inc2)
sim.run(quiet=1)
def testIncTask(self):
sim = self.bench(incTask)
sim.run(quiet=1)
def testIncTaskFreeVar(self):
sim = self.bench(incTaskFreeVar)
sim.run(quiet=1)
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |
grap/OpenUpgrade | setup/package.py | 180 | 22070 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import optparse
import os
import pexpect
import shutil
import signal
import subprocess
import tempfile
import time
import xmlrpclib
from contextlib import contextmanager
from glob import glob
from os.path import abspath, dirname, join
from sys import stdout
from tempfile import NamedTemporaryFile
#----------------------------------------------------------
# Utils
#----------------------------------------------------------
execfile(join(dirname(__file__), '..', 'openerp', 'release.py'))
version = version.split('-')[0]
timestamp = time.strftime("%Y%m%d", time.gmtime())
GPGPASSPHRASE = os.getenv('GPGPASSPHRASE')
GPGID = os.getenv('GPGID')
PUBLISH_DIRS = {
'debian': 'deb',
'redhat': 'rpm',
'tarball': 'src',
'windows': 'exe',
}
ADDONS_NOT_TO_PUBLISH = [
'web_analytics'
]
def mkdir(d):
if not os.path.isdir(d):
os.makedirs(d)
def system(l, chdir=None):
print l
if chdir:
cwd = os.getcwd()
os.chdir(chdir)
if isinstance(l, list):
rc = os.spawnvp(os.P_WAIT, l[0], l)
elif isinstance(l, str):
tmp = ['sh', '-c', l]
rc = os.spawnvp(os.P_WAIT, tmp[0], tmp)
if chdir:
os.chdir(cwd)
return rc
def _rpc_count_modules(addr='http://127.0.0.1', port=8069, dbname='mycompany'):
time.sleep(5)
modules = xmlrpclib.ServerProxy('%s:%s/xmlrpc/object' % (addr, port)).execute(
dbname, 1, 'admin', 'ir.module.module', 'search', [('state', '=', 'installed')]
)
if modules and len(modules) > 1:
time.sleep(1)
toinstallmodules = xmlrpclib.ServerProxy('%s:%s/xmlrpc/object' % (addr, port)).execute(
dbname, 1, 'admin', 'ir.module.module', 'search', [('state', '=', 'to install')]
)
if toinstallmodules:
print("Package test: FAILED. Not able to install dependencies of base.")
raise Exception("Installation of package failed")
else:
print("Package test: successfuly installed %s modules" % len(modules))
else:
print("Package test: FAILED. Not able to install base.")
raise Exception("Installation of package failed")
def publish(o, type, extensions):
def _publish(o, release):
arch = ''
filename = release.split(os.path.sep)[-1]
release_dir = PUBLISH_DIRS[type]
release_path = join(o.pub, release_dir, filename)
system('mkdir -p %s' % join(o.pub, release_dir))
shutil.move(join(o.build_dir, release), release_path)
# Latest/symlink handler
release_abspath = abspath(release_path)
latest_abspath = release_abspath.replace(timestamp, 'latest')
if os.path.islink(latest_abspath):
os.unlink(latest_abspath)
os.symlink(release_abspath, latest_abspath)
return release_path
published = []
for extension in extensions:
release = glob("%s/odoo_*.%s" % (o.build_dir, extension))[0]
published.append(_publish(o, release))
return published
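# Illustration of the latest/symlink handling above: a release published
# as, for instance, odoo_8.0.20141118.tar.gz also gets a sibling symlink
# odoo_8.0.latest.tar.gz pointing at it (the timestamp is replaced by
# 'latest').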
class OdooDocker(object):
def __init__(self):
self.log_file = NamedTemporaryFile(mode='w+b', prefix="bash", suffix=".txt", delete=False)
self.port = 8069 # TODO sle: reliable way to get a free port?
self.prompt_re = '[root@nightly-tests] # '
self.timeout = 600
def system(self, command):
self.docker.sendline(command)
self.docker.expect_exact(self.prompt_re)
def start(self, docker_image, build_dir, pub_dir):
self.build_dir = build_dir
self.pub_dir = pub_dir
self.docker = pexpect.spawn(
'docker run -v %s:/opt/release -p 127.0.0.1:%s:8069'
' -t -i %s /bin/bash --noediting' % (self.build_dir, self.port, docker_image),
timeout=self.timeout,
searchwindowsize=len(self.prompt_re) + 1,
)
time.sleep(2) # let the bash start
self.docker.logfile_read = self.log_file
self.id = subprocess.check_output('docker ps -l -q', shell=True)
def end(self):
try:
_rpc_count_modules(port=str(self.port))
except Exception, e:
print('Exception during docker execution: %s:' % str(e))
print('Error during docker execution: printing the bash output:')
with open(self.log_file.name) as f:
print '\n'.join(f.readlines())
raise
finally:
self.docker.close()
system('docker rm -f %s' % self.id)
self.log_file.close()
os.remove(self.log_file.name)
@contextmanager
def docker(docker_image, build_dir, pub_dir):
_docker = OdooDocker()
try:
_docker.start(docker_image, build_dir, pub_dir)
try:
yield _docker
except Exception, e:
raise
finally:
_docker.end()
class KVM(object):
def __init__(self, o, image, ssh_key='', login='openerp'):
self.o = o
self.image = image
self.ssh_key = ssh_key
self.login = login
def timeout(self,signum,frame):
print "vm timeout kill",self.pid
os.kill(self.pid,15)
def start(self):
l="kvm -net nic,model=rtl8139 -net user,hostfwd=tcp:127.0.0.1:10022-:22,hostfwd=tcp:127.0.0.1:18069-:8069,hostfwd=tcp:127.0.0.1:15432-:5432 -drive".split(" ")
#l.append('file=%s,if=virtio,index=0,boot=on,snapshot=on'%self.image)
l.append('file=%s,snapshot=on'%self.image)
#l.extend(['-vnc','127.0.0.1:1'])
l.append('-nographic')
print " ".join(l)
self.pid=os.spawnvp(os.P_NOWAIT, l[0], l)
time.sleep(10)
signal.alarm(2400)
signal.signal(signal.SIGALRM, self.timeout)
try:
self.run()
finally:
signal.signal(signal.SIGALRM, signal.SIG_DFL)
os.kill(self.pid,15)
time.sleep(10)
def ssh(self,cmd):
l=['ssh','-o','UserKnownHostsFile=/dev/null','-o','StrictHostKeyChecking=no','-p','10022','-i',self.ssh_key,'%[email protected]'%self.login,cmd]
system(l)
def rsync(self,args,options='--delete --exclude .bzrignore'):
cmd ='rsync -rt -e "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 10022 -i %s" %s %s' % (self.ssh_key, options, args)
system(cmd)
def run(self):
pass
class KVMWinBuildExe(KVM):
def run(self):
with open(join(self.o.build_dir, 'setup/win32/Makefile.version'), 'w') as f:
f.write("VERSION=%s\n" % self.o.version_full)
with open(join(self.o.build_dir, 'setup/win32/Makefile.python'), 'w') as f:
f.write("PYTHON_VERSION=%s\n" % self.o.vm_winxp_python_version.replace('.', ''))
self.ssh("mkdir -p build")
self.rsync('%s/ %[email protected]:build/server/' % (self.o.build_dir, self.login))
self.ssh("cd build/server/setup/win32;time make allinone;")
self.rsync('%[email protected]:build/server/setup/win32/release/ %s/' % (self.login, self.o.build_dir), '')
print "KVMWinBuildExe.run(): done"
class KVMWinTestExe(KVM):
def run(self):
# Cannot use o.version_full when the version is not correctly parsed
# (for instance, containing *rc* or *dev*)
setuppath = glob("%s/openerp-server-setup-*.exe" % self.o.build_dir)[0]
setupfile = setuppath.split('/')[-1]
setupversion = setupfile.split('openerp-server-setup-')[1].split('.exe')[0]
self.rsync('"%s" %[email protected]:' % (setuppath, self.login))
self.ssh("TEMP=/tmp ./%s /S" % setupfile)
self.ssh('PGPASSWORD=openpgpwd /cygdrive/c/"Program Files"/"Odoo %s"/PostgreSQL/bin/createdb.exe -e -U openpg mycompany' % setupversion)
self.ssh('/cygdrive/c/"Program Files"/"Odoo %s"/server/openerp-server.exe -d mycompany -i base --stop-after-init' % setupversion)
self.ssh('net start odoo-server-8.0')
_rpc_count_modules(port=18069)
#----------------------------------------------------------
# Stage: building
#----------------------------------------------------------
def _prepare_build_dir(o, win32=False):
cmd = ['rsync', '-a', '--exclude', '.git', '--exclude', '*.pyc', '--exclude', '*.pyo']
if not win32:
cmd += ['--exclude', 'setup/win32']
system(cmd + ['%s/' % o.odoo_dir, o.build_dir])
try:
for addon_path in glob(join(o.build_dir, 'addons/*')):
if addon_path.split(os.path.sep)[-1] not in ADDONS_NOT_TO_PUBLISH:
shutil.move(addon_path, join(o.build_dir, 'openerp/addons'))
except shutil.Error:
# Thrown when the add-on is already in openerp/addons (if _prepare_build_dir
# has already been called once)
pass
def build_tgz(o):
system(['python2', 'setup.py', 'sdist', '--quiet', '--formats=gztar,zip'], o.build_dir)
system(['mv', glob('%s/dist/odoo-*.tar.gz' % o.build_dir)[0], '%s/odoo_%s.%s.tar.gz' % (o.build_dir, version, timestamp)])
system(['mv', glob('%s/dist/odoo-*.zip' % o.build_dir)[0], '%s/odoo_%s.%s.zip' % (o.build_dir, version, timestamp)])
def build_deb(o):
# Append timestamp to version for the .dsc to refer the right .tar.gz
cmd=['sed', '-i', '1s/^.*$/odoo (%s.%s) stable; urgency=low/'%(version,timestamp), 'debian/changelog']
subprocess.call(cmd, cwd=o.build_dir)
deb = pexpect.spawn('dpkg-buildpackage -rfakeroot -k%s' % GPGID, cwd=o.build_dir)
deb.logfile = stdout
if GPGPASSPHRASE:
deb.expect_exact('Enter passphrase: ', timeout=1200)
deb.send(GPGPASSPHRASE + '\r\n')
deb.expect_exact('Enter passphrase: ')
deb.send(GPGPASSPHRASE + '\r\n')
deb.expect(pexpect.EOF, timeout=1200)
system(['mv', glob('%s/../odoo_*.deb' % o.build_dir)[0], '%s' % o.build_dir])
system(['mv', glob('%s/../odoo_*.dsc' % o.build_dir)[0], '%s' % o.build_dir])
system(['mv', glob('%s/../odoo_*_amd64.changes' % o.build_dir)[0], '%s' % o.build_dir])
system(['mv', glob('%s/../odoo_*.tar.gz' % o.build_dir)[0], '%s' % o.build_dir])
def build_rpm(o):
system(['python2', 'setup.py', '--quiet', 'bdist_rpm'], o.build_dir)
system(['mv', glob('%s/dist/odoo-*.noarch.rpm' % o.build_dir)[0], '%s/odoo_%s.%s.noarch.rpm' % (o.build_dir, version, timestamp)])
def build_exe(o):
KVMWinBuildExe(o, o.vm_winxp_image, o.vm_winxp_ssh_key, o.vm_winxp_login).start()
system(['cp', glob('%s/openerp*.exe' % o.build_dir)[0], '%s/odoo_%s.%s.exe' % (o.build_dir, version, timestamp)])
#----------------------------------------------------------
# Stage: testing
#----------------------------------------------------------
def _prepare_testing(o):
if not o.no_tarball:
subprocess.call(["mkdir", "docker_src"], cwd=o.build_dir)
subprocess.call(["cp", "package.dfsrc", os.path.join(o.build_dir, "docker_src", "Dockerfile")],
cwd=os.path.join(o.odoo_dir, "setup"))
# Use rsync to copy requirements.txt in order to keep original permissions
subprocess.call(["rsync", "-a", "requirements.txt", os.path.join(o.build_dir, "docker_src")],
cwd=os.path.join(o.odoo_dir))
subprocess.call(["docker", "build", "-t", "odoo-%s-src-nightly-tests" % version, "."],
cwd=os.path.join(o.build_dir, "docker_src"))
if not o.no_debian:
subprocess.call(["mkdir", "docker_debian"], cwd=o.build_dir)
subprocess.call(["cp", "package.dfdebian", os.path.join(o.build_dir, "docker_debian", "Dockerfile")],
cwd=os.path.join(o.odoo_dir, "setup"))
# Use rsync to copy requirements.txt in order to keep original permissions
subprocess.call(["rsync", "-a", "requirements.txt", os.path.join(o.build_dir, "docker_debian")],
cwd=os.path.join(o.odoo_dir))
subprocess.call(["docker", "build", "-t", "odoo-%s-debian-nightly-tests" % version, "."],
cwd=os.path.join(o.build_dir, "docker_debian"))
if not o.no_rpm:
subprocess.call(["mkdir", "docker_centos"], cwd=o.build_dir)
subprocess.call(["cp", "package.dfcentos", os.path.join(o.build_dir, "docker_centos", "Dockerfile")],
cwd=os.path.join(o.odoo_dir, "setup"))
subprocess.call(["docker", "build", "-t", "odoo-%s-centos-nightly-tests" % version, "."],
cwd=os.path.join(o.build_dir, "docker_centos"))
def test_tgz(o):
with docker('odoo-%s-src-nightly-tests' % version, o.build_dir, o.pub) as wheezy:
wheezy.release = '*.tar.gz'
wheezy.system("service postgresql start")
wheezy.system('pip install /opt/release/%s' % wheezy.release)
wheezy.system("useradd --system --no-create-home odoo")
wheezy.system('su postgres -s /bin/bash -c "createuser -s odoo"')
wheezy.system('su postgres -s /bin/bash -c "createdb mycompany"')
wheezy.system('mkdir /var/lib/odoo')
wheezy.system('chown odoo:odoo /var/lib/odoo')
wheezy.system('su odoo -s /bin/bash -c "odoo.py --addons-path=/usr/local/lib/python2.7/dist-packages/openerp/addons -d mycompany -i base --stop-after-init"')
wheezy.system('su odoo -s /bin/bash -c "odoo.py --addons-path=/usr/local/lib/python2.7/dist-packages/openerp/addons -d mycompany &"')
def test_deb(o):
with docker('odoo-%s-debian-nightly-tests' % version, o.build_dir, o.pub) as wheezy:
wheezy.release = '*.deb'
wheezy.system("service postgresql start")
wheezy.system('su postgres -s /bin/bash -c "createdb mycompany"')
wheezy.system('/usr/bin/dpkg -i /opt/release/%s' % wheezy.release)
wheezy.system('/usr/bin/apt-get install -f -y')
wheezy.system('su odoo -s /bin/bash -c "odoo.py -c /etc/odoo/openerp-server.conf -d mycompany -i base --stop-after-init"')
wheezy.system('su odoo -s /bin/bash -c "odoo.py -c /etc/odoo/openerp-server.conf -d mycompany &"')
def test_rpm(o):
with docker('odoo-%s-centos-nightly-tests' % version, o.build_dir, o.pub) as centos7:
centos7.release = '*.noarch.rpm'
# Start postgresql
centos7.system('su postgres -c "/usr/bin/pg_ctl -D /var/lib/postgres/data start"')
centos7.system('sleep 5')
centos7.system('su postgres -c "createdb mycompany"')
# Odoo install
centos7.system('yum install -d 0 -e 0 /opt/release/%s -y' % centos7.release)
centos7.system('su odoo -s /bin/bash -c "openerp-server -c /etc/odoo/openerp-server.conf -d mycompany -i base --stop-after-init"')
centos7.system('su odoo -s /bin/bash -c "openerp-server -c /etc/odoo/openerp-server.conf -d mycompany &"')
def test_exe(o):
KVMWinTestExe(o, o.vm_winxp_image, o.vm_winxp_ssh_key, o.vm_winxp_login).start()
#---------------------------------------------------------
# Generates Packages, Sources and Release files of debian package
#---------------------------------------------------------
def gen_deb_package(o, published_files):
# Executes command to produce file_name in path, and moves it to o.pub/deb
def _gen_file(o, (command, file_name), path):
cur_tmp_file_path = os.path.join(path, file_name)
with open(cur_tmp_file_path, 'w') as out:
subprocess.call(command, stdout=out, cwd=path)
system(['cp', cur_tmp_file_path, os.path.join(o.pub, 'deb', file_name)])
# Copy files to a temp directory (required because the working directory must contain only the
# files of the last release)
temp_path = tempfile.mkdtemp(suffix='debPackages')
for pub_file_path in published_files:
system(['cp', pub_file_path, temp_path])
commands = [
(['dpkg-scanpackages', '.'], "Packages"), # Generate Packages file
(['dpkg-scansources', '.'], "Sources"), # Generate Sources file
(['apt-ftparchive', 'release', '.'], "Release") # Generate Release file
]
# Generate files
for command in commands:
_gen_file(o, command, temp_path)
# Remove temp directory
shutil.rmtree(temp_path)
# Generate Release.gpg (= signed Release)
# Options -abs: -a (Create ASCII armored output), -b (Make a detach signature), -s (Make a signature)
subprocess.call(['gpg', '--default-key', GPGID, '--passphrase', GPGPASSPHRASE, '--yes', '-abs', '--no-tty', '-o', 'Release.gpg', 'Release'], cwd=os.path.join(o.pub, 'deb'))
#---------------------------------------------------------
# Generates an RPM repo
#---------------------------------------------------------
def gen_rpm_repo(o, file_name):
# Sign the RPM
rpmsign = pexpect.spawn('/bin/bash', ['-c', 'rpm --resign %s' % file_name], cwd=os.path.join(o.pub, 'rpm'))
rpmsign.expect_exact('Enter pass phrase: ')
rpmsign.send(GPGPASSPHRASE + '\r\n')
rpmsign.expect(pexpect.EOF)
# Removes the old repodata
subprocess.call(['rm', '-rf', os.path.join(o.pub, 'rpm', 'repodata')])
# Copy files to a temp directory (required because the working directory must contain only the
# files of the last release)
temp_path = tempfile.mkdtemp(suffix='rpmPackages')
subprocess.call(['cp', file_name, temp_path])
subprocess.call(['createrepo', temp_path]) # creates a repodata folder in temp_path
subprocess.call(['cp', '-r', os.path.join(temp_path, "repodata"), os.path.join(o.pub, 'rpm')])
# Remove temp directory
shutil.rmtree(temp_path)
#----------------------------------------------------------
# Options and Main
#----------------------------------------------------------
def options():
op = optparse.OptionParser()
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
build_dir = "%s-%s" % (root, timestamp)
op.add_option("-b", "--build-dir", default=build_dir, help="build directory (%default)", metavar="DIR")
op.add_option("-p", "--pub", default=None, help="pub directory (%default)", metavar="DIR")
op.add_option("", "--no-testing", action="store_true", help="don't test the builded packages")
op.add_option("-v", "--version", default='8.0', help="version (%default)")
op.add_option("", "--no-debian", action="store_true", help="don't build the debian package")
op.add_option("", "--no-rpm", action="store_true", help="don't build the rpm package")
op.add_option("", "--no-tarball", action="store_true", help="don't build the tarball")
op.add_option("", "--no-windows", action="store_true", help="don't build the windows package")
# Windows VM
op.add_option("", "--vm-winxp-image", default='/home/odoo/vm/winxp27/winxp27.vdi', help="%default")
op.add_option("", "--vm-winxp-ssh-key", default='/home/odoo/vm/winxp27/id_rsa', help="%default")
op.add_option("", "--vm-winxp-login", default='Naresh', help="Windows login (%default)")
op.add_option("", "--vm-winxp-python-version", default='2.7', help="Windows Python version installed in the VM (default: %default)")
(o, args) = op.parse_args()
# derive other options
o.odoo_dir = root
o.pkg = join(o.build_dir, 'pkg')
o.version_full = '%s-%s' % (o.version, timestamp)
o.work = join(o.build_dir, 'openerp-%s' % o.version_full)
o.work_addons = join(o.work, 'openerp', 'addons')
return o
def main():
o = options()
_prepare_build_dir(o)
if not o.no_testing:
_prepare_testing(o)
try:
if not o.no_tarball:
build_tgz(o)
try:
if not o.no_testing:
test_tgz(o)
published_files = publish(o, 'tarball', ['tar.gz', 'zip'])
except Exception, e:
print("Won't publish the tgz release.\n Exception: %s" % str(e))
if not o.no_debian:
build_deb(o)
try:
if not o.no_testing:
test_deb(o)
published_files = publish(o, 'debian', ['deb', 'dsc', 'changes', 'tar.gz'])
gen_deb_package(o, published_files)
except Exception, e:
print("Won't publish the deb release.\n Exception: %s" % str(e))
if not o.no_rpm:
build_rpm(o)
try:
if not o.no_testing:
test_rpm(o)
published_files = publish(o, 'redhat', ['noarch.rpm'])
gen_rpm_repo(o, published_files[0])
except Exception, e:
print("Won't publish the rpm release.\n Exception: %s" % str(e))
if not o.no_windows:
_prepare_build_dir(o, win32=True)
build_exe(o)
try:
if not o.no_testing:
test_exe(o)
published_files = publish(o, 'windows', ['exe'])
except Exception, e:
print("Won't publish the exe release.\n Exception: %s" % str(e))
except:
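        # swallow any remaining error so that the cleanup in the finally
        # clause below always runs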
pass
finally:
shutil.rmtree(o.build_dir)
print('Build dir %s removed' % o.build_dir)
if not o.no_testing:
system("docker rm -f `docker ps -a | awk '{print $1 }'` 2>>/dev/null")
print('Remaining dockers removed')
if __name__ == '__main__':
main()
| agpl-3.0 |
miptliot/edx-platform | openedx/core/djangoapps/ccxcon/tasks.py | 19 | 1672 | """
This file contains celery tasks for ccxcon
"""
from celery.task import task # pylint: disable=no-name-in-module, import-error
from celery.utils.log import get_task_logger # pylint: disable=no-name-in-module, import-error
from opaque_keys.edx.keys import CourseKey
from requests.exceptions import ConnectionError, HTTPError, RequestException, TooManyRedirects
from openedx.core.djangoapps.ccxcon import api
log = get_task_logger(__name__)
@task()
def update_ccxcon(course_id, cur_retry=0):
"""
    Pass-through function to update course information on CCXCon.
Takes care of retries in case of some specific exceptions.
Args:
course_id (str): string representing a course key
cur_retry (int): integer representing the current task retry
"""
course_key = CourseKey.from_string(course_id)
try:
api.course_info_to_ccxcon(course_key)
log.info('Course update to CCXCon returned no errors. Course key: %s', course_id)
except (ConnectionError, HTTPError, RequestException, TooManyRedirects, api.CCXConnServerError) as exp:
log.error('Course update to CCXCon failed for course_id %s with error: %s', course_id, exp)
        # in case the maximum number of retries has not been reached,
        # insert another task, delayed exponentially, up to 5 retries
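        # (the delay is 10 ** cur_retry seconds, i.e. 1s, 10s, 100s, 1000s
        # and 10000s for the successive retries)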
if cur_retry < 5:
update_ccxcon.apply_async(
kwargs={'course_id': course_id, 'cur_retry': cur_retry + 1},
countdown=10 ** cur_retry # number of seconds the task should be delayed
)
log.info('Requeued celery task for course key %s ; retry # %s', course_id, cur_retry + 1)
| agpl-3.0 |
neharejanjeva/techstitution | venv/lib/python2.7/site-packages/flask/testsuite/reqctx.py | 557 | 5960 | # -*- coding: utf-8 -*-
"""
flask.testsuite.reqctx
~~~~~~~~~~~~~~~~~~~~~~
Tests the request context.
:copyright: (c) 2012 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
try:
from greenlet import greenlet
except ImportError:
greenlet = None
from flask.testsuite import FlaskTestCase
class RequestContextTestCase(FlaskTestCase):
def test_teardown_on_pop(self):
buffer = []
app = flask.Flask(__name__)
@app.teardown_request
def end_of_request(exception):
buffer.append(exception)
ctx = app.test_request_context()
ctx.push()
self.assert_equal(buffer, [])
ctx.pop()
self.assert_equal(buffer, [None])
def test_proper_test_request_context(self):
app = flask.Flask(__name__)
app.config.update(
SERVER_NAME='localhost.localdomain:5000'
)
@app.route('/')
def index():
return None
@app.route('/', subdomain='foo')
def sub():
return None
with app.test_request_context('/'):
self.assert_equal(flask.url_for('index', _external=True), 'http://localhost.localdomain:5000/')
with app.test_request_context('/'):
self.assert_equal(flask.url_for('sub', _external=True), 'http://foo.localhost.localdomain:5000/')
try:
with app.test_request_context('/', environ_overrides={'HTTP_HOST': 'localhost'}):
pass
except Exception as e:
self.assert_true(isinstance(e, ValueError))
self.assert_equal(str(e), "the server name provided " +
"('localhost.localdomain:5000') does not match the " + \
"server name from the WSGI environment ('localhost')")
try:
app.config.update(SERVER_NAME='localhost')
with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost'}):
pass
except ValueError as e:
raise ValueError(
"No ValueError exception should have been raised \"%s\"" % e
)
try:
app.config.update(SERVER_NAME='localhost:80')
with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost:80'}):
pass
except ValueError as e:
raise ValueError(
"No ValueError exception should have been raised \"%s\"" % e
)
def test_context_binding(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return 'Hello %s!' % flask.request.args['name']
@app.route('/meh')
def meh():
return flask.request.url
with app.test_request_context('/?name=World'):
self.assert_equal(index(), 'Hello World!')
with app.test_request_context('/meh'):
self.assert_equal(meh(), 'http://localhost/meh')
self.assert_true(flask._request_ctx_stack.top is None)
def test_context_test(self):
app = flask.Flask(__name__)
self.assert_false(flask.request)
self.assert_false(flask.has_request_context())
ctx = app.test_request_context()
ctx.push()
try:
self.assert_true(flask.request)
self.assert_true(flask.has_request_context())
finally:
ctx.pop()
def test_manual_context_binding(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return 'Hello %s!' % flask.request.args['name']
ctx = app.test_request_context('/?name=World')
ctx.push()
self.assert_equal(index(), 'Hello World!')
ctx.pop()
try:
index()
except RuntimeError:
pass
else:
self.assert_true(0, 'expected runtime error')
def test_greenlet_context_copying(self):
app = flask.Flask(__name__)
greenlets = []
@app.route('/')
def index():
reqctx = flask._request_ctx_stack.top.copy()
def g():
self.assert_false(flask.request)
self.assert_false(flask.current_app)
with reqctx:
self.assert_true(flask.request)
self.assert_equal(flask.current_app, app)
self.assert_equal(flask.request.path, '/')
self.assert_equal(flask.request.args['foo'], 'bar')
self.assert_false(flask.request)
return 42
greenlets.append(greenlet(g))
return 'Hello World!'
rv = app.test_client().get('/?foo=bar')
self.assert_equal(rv.data, b'Hello World!')
result = greenlets[0].run()
self.assert_equal(result, 42)
def test_greenlet_context_copying_api(self):
app = flask.Flask(__name__)
greenlets = []
@app.route('/')
def index():
reqctx = flask._request_ctx_stack.top.copy()
@flask.copy_current_request_context
def g():
self.assert_true(flask.request)
self.assert_equal(flask.current_app, app)
self.assert_equal(flask.request.path, '/')
self.assert_equal(flask.request.args['foo'], 'bar')
return 42
greenlets.append(greenlet(g))
return 'Hello World!'
rv = app.test_client().get('/?foo=bar')
self.assert_equal(rv.data, b'Hello World!')
result = greenlets[0].run()
self.assert_equal(result, 42)
# Disable test if we don't have greenlets available
if greenlet is None:
test_greenlet_context_copying = None
test_greenlet_context_copying_api = None
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(RequestContextTestCase))
return suite
| cc0-1.0 |
lichia/luigi | luigi/contrib/hdfs/__init__.py | 12 | 3160 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Provides access to HDFS using the :py:class:`HdfsTarget`, a subclass of :py:class:`~luigi.target.Target`.
You can configure which client to use by setting the "client" option under the "hdfs" section of the configuration, or by using the ``--hdfs-client`` command line option.
"hadoopcli" is the slowest, but should work out of the box. "snakebite" is the fastest, but requires Snakebite to be installed.
Currently (4th May) the :py:mod:`luigi.contrib.hdfs` module is under
reorganization. We recommend importing the reexports from
:py:mod:`luigi.contrib.hdfs` instead of the sub-modules, as we're not yet sure
how the final structure of the sub-modules will be. Eventually this module
will be empty and you'll have to import directly from the sub modules like
:py:mod:`luigi.contrib.hdfs.config`.
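
As a quick illustration of the options described above, selecting the
snakebite client could look like this in the configuration file::

    [hdfs]
    client: snakebite

or, equivalently, ``--hdfs-client snakebite`` on the command line.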
"""
# config.py
from luigi.contrib.hdfs import config as hdfs_config
hdfs = hdfs_config.hdfs
load_hadoop_cmd = hdfs_config.load_hadoop_cmd
get_configured_hadoop_version = hdfs_config.get_configured_hadoop_version
get_configured_hdfs_client = hdfs_config.get_configured_hdfs_client
tmppath = hdfs_config.tmppath
# clients
from luigi.contrib.hdfs import clients as hdfs_clients
from luigi.contrib.hdfs import error as hdfs_error
from luigi.contrib.hdfs import snakebite_client as hdfs_snakebite_client
from luigi.contrib.hdfs import hadoopcli_clients as hdfs_hadoopcli_clients
HDFSCliError = hdfs_error.HDFSCliError
call_check = hdfs_hadoopcli_clients.HdfsClient.call_check
list_path = hdfs_snakebite_client.SnakebiteHdfsClient.list_path
HdfsClient = hdfs_hadoopcli_clients.HdfsClient
SnakebiteHdfsClient = hdfs_snakebite_client.SnakebiteHdfsClient
HdfsClientCdh3 = hdfs_hadoopcli_clients.HdfsClientCdh3
HdfsClientApache1 = hdfs_hadoopcli_clients.HdfsClientApache1
create_hadoopcli_client = hdfs_hadoopcli_clients.create_hadoopcli_client
get_autoconfig_client = hdfs_clients.get_autoconfig_client
exists = hdfs_clients.exists
rename = hdfs_clients.rename
remove = hdfs_clients.remove
mkdir = hdfs_clients.mkdir
listdir = hdfs_clients.listdir
# format.py
from luigi.contrib.hdfs import format as hdfs_format
HdfsReadPipe = hdfs_format.HdfsReadPipe
HdfsAtomicWritePipe = hdfs_format.HdfsAtomicWritePipe
HdfsAtomicWriteDirPipe = hdfs_format.HdfsAtomicWriteDirPipe
PlainFormat = hdfs_format.PlainFormat
PlainDirFormat = hdfs_format.PlainDirFormat
Plain = hdfs_format.Plain
PlainDir = hdfs_format.PlainDir
CompatibleHdfsFormat = hdfs_format.CompatibleHdfsFormat
# target.py
from luigi.contrib.hdfs import target as hdfs_target
HdfsTarget = hdfs_target.HdfsTarget
| apache-2.0 |
ubiar/odoo | addons/note/tests/__init__.py | 260 | 1076 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_note
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mathgl67/pymmr | tests/file.py | 1 | 4005 | #!/usr/bin/env python
# vi:ai:et:ts=4 sw=4
#
# -*- coding: utf8 -*-
#
# PyMmr My Music Renamer
# Copyright (C) 2007-2010 [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import unittest
import os
import mmr.file
from mmr.file import BaseFile, AudioFile
class TestFileFactory(unittest.TestCase):
def setUp(self):
# create cross os compatible path
self.file_name = "name.ext"
self.file_path = os.path.join("tests", "data", "file")
self.file_fullpath = os.path.join(
self.file_path,
self.file_name
)
# create a base file object with previous data
# this will be used for all test in this class.
self.file = mmr.file.factory(self.file_fullpath)
def testName(self):
self.assertEquals(
self.file.name, self.file_name,
"Factory must set the name to '%s' and it was '%s' !" % (
self.file_name,
self.file.name
)
)
def testExtension(self):
self.assertEquals(
self.file.extension, ".ext",
"Factory must set extension to '%s' and it was '%s' !" % (
".ext",
self.file.extension
)
)
def testPath(self):
self.assertEquals(
self.file.path, self.file_path,
"Factory must set path to '%s' and it was '%s' !" % (
self.file_path,
self.file.path
)
)
def testFullpath(self):
self.assertEquals(
self.file.get_fullpath(), self.file_fullpath,
"Factory must retrieve path to '%s' (!= '%s')." % (
self.file_fullpath,
self.file.get_fullpath()
)
)
class TestFileUnknown(unittest.TestCase):
def setUp(self):
self.file = mmr.file.factory("tests/data/file/unknown")
def testObjectType(self):
self.assertTrue(isinstance(self.file, BaseFile), "file should be a BaseFile object")
def testExtention(self):
self.assertEquals(self.file.extension, None, "file extension on unknown file should be None != %s" % self.file.extension)
def testBaseFileRepr(self):
self.assertEquals(repr(self.file), "<File name='unknown' extension='None' path='tests/data/file' />")
class TestFileAudio(unittest.TestCase):
def setUp(self):
self.file = {
".mp3": mmr.file.factory("tests/data/tags/silence.mp3"),
".ogg": mmr.file.factory("tests/data/tags/silence.ogg"),
".flac":mmr.file.factory("tests/data/tags/silence.flac"),
}
def testMp3FileIsFileAudio(self):
self.assertTrue(isinstance(self.file[".mp3"], AudioFile), "File with '.mp3' extension should be 'AudioFile'")
def testOggFileIsFileAudio(self):
self.assertTrue(isinstance(self.file[".ogg"], AudioFile), "File with '.ogg' extension should be 'AudioFile'")
def testFlacFileIsFileAudio(self):
self.assertTrue(isinstance(self.file[".flac"], AudioFile), "File with '.flac' extension should be 'AudioFile'")
def testHaveTag(self):
self.assertNotEquals(self.file[".mp3"].tags, None)
self.assertNotEquals(self.file[".ogg"].tags, None)
self.assertNotEquals(self.file[".flac"].tags, None)
| gpl-2.0 |
cloudfoundry/php-buildpack-legacy | builds/runtimes/python-2.7.6/lib/python2.7/test/test_int_literal.py | 138 | 9128 | """Test correct treatment of hex/oct constants.
This is complex because of changes due to PEP 237.
"""
import unittest
from test import test_support
class TestHexOctBin(unittest.TestCase):
def test_hex_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0x0, 0X0)
self.assertEqual(0x1, 0X1)
self.assertEqual(0x123456789abcdef, 0X123456789abcdef)
# Baseline tests
self.assertEqual(0x0, 0)
self.assertEqual(0x10, 16)
self.assertEqual(0x7fffffff, 2147483647)
self.assertEqual(0x7fffffffffffffff, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x0), 0)
self.assertEqual(-(0x10), -16)
self.assertEqual(-(0x7fffffff), -2147483647)
self.assertEqual(-(0x7fffffffffffffff), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0x0, 0)
self.assertEqual(-0x10, -16)
self.assertEqual(-0x7fffffff, -2147483647)
self.assertEqual(-0x7fffffffffffffff, -9223372036854775807)
def test_hex_unsigned(self):
# Positive constants
self.assertEqual(0x80000000, 2147483648L)
self.assertEqual(0xffffffff, 4294967295L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x80000000), -2147483648L)
self.assertEqual(-(0xffffffff), -4294967295L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x80000000, -2147483648L)
self.assertEqual(-0xffffffff, -4294967295L)
# Positive constants
self.assertEqual(0x8000000000000000, 9223372036854775808L)
self.assertEqual(0xffffffffffffffff, 18446744073709551615L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x8000000000000000), -9223372036854775808L)
self.assertEqual(-(0xffffffffffffffff), -18446744073709551615L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x8000000000000000, -9223372036854775808L)
self.assertEqual(-0xffffffffffffffff, -18446744073709551615L)
def test_oct_baseline(self):
# Baseline tests
self.assertEqual(00, 0)
self.assertEqual(020, 16)
self.assertEqual(017777777777, 2147483647)
self.assertEqual(0777777777777777777777, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(00), 0)
self.assertEqual(-(020), -16)
self.assertEqual(-(017777777777), -2147483647)
self.assertEqual(-(0777777777777777777777), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-00, 0)
self.assertEqual(-020, -16)
self.assertEqual(-017777777777, -2147483647)
self.assertEqual(-0777777777777777777777, -9223372036854775807)
def test_oct_baseline_new(self):
# A few upper/lowercase tests
self.assertEqual(0o0, 0O0)
self.assertEqual(0o1, 0O1)
self.assertEqual(0o1234567, 0O1234567)
# Baseline tests
self.assertEqual(0o0, 0)
self.assertEqual(0o20, 16)
self.assertEqual(0o17777777777, 2147483647)
self.assertEqual(0o777777777777777777777, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o0), 0)
self.assertEqual(-(0o20), -16)
self.assertEqual(-(0o17777777777), -2147483647)
self.assertEqual(-(0o777777777777777777777), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0o0, 0)
self.assertEqual(-0o20, -16)
self.assertEqual(-0o17777777777, -2147483647)
self.assertEqual(-0o777777777777777777777, -9223372036854775807)
def test_oct_unsigned(self):
# Positive constants
self.assertEqual(020000000000, 2147483648L)
self.assertEqual(037777777777, 4294967295L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(020000000000), -2147483648L)
self.assertEqual(-(037777777777), -4294967295L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-020000000000, -2147483648L)
self.assertEqual(-037777777777, -4294967295L)
# Positive constants
self.assertEqual(01000000000000000000000, 9223372036854775808L)
self.assertEqual(01777777777777777777777, 18446744073709551615L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(01000000000000000000000), -9223372036854775808L)
self.assertEqual(-(01777777777777777777777), -18446744073709551615L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-01000000000000000000000, -9223372036854775808L)
self.assertEqual(-01777777777777777777777, -18446744073709551615L)
def test_oct_unsigned_new(self):
# Positive constants
self.assertEqual(0o20000000000, 2147483648L)
self.assertEqual(0o37777777777, 4294967295L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o20000000000), -2147483648L)
self.assertEqual(-(0o37777777777), -4294967295L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0o20000000000, -2147483648L)
self.assertEqual(-0o37777777777, -4294967295L)
# Positive constants
self.assertEqual(0o1000000000000000000000, 9223372036854775808L)
self.assertEqual(0o1777777777777777777777, 18446744073709551615L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o1000000000000000000000), -9223372036854775808L)
self.assertEqual(-(0o1777777777777777777777), -18446744073709551615L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0o1000000000000000000000, -9223372036854775808L)
self.assertEqual(-0o1777777777777777777777, -18446744073709551615L)
def test_bin_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0b0, 0B0)
self.assertEqual(0b1, 0B1)
self.assertEqual(0b10101010101, 0B10101010101)
# Baseline tests
self.assertEqual(0b0, 0)
self.assertEqual(0b10000, 16)
self.assertEqual(0b1111111111111111111111111111111, 2147483647)
self.assertEqual(0b111111111111111111111111111111111111111111111111111111111111111, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b0), 0)
self.assertEqual(-(0b10000), -16)
self.assertEqual(-(0b1111111111111111111111111111111), -2147483647)
self.assertEqual(-(0b111111111111111111111111111111111111111111111111111111111111111), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0b0, 0)
self.assertEqual(-0b10000, -16)
self.assertEqual(-0b1111111111111111111111111111111, -2147483647)
self.assertEqual(-0b111111111111111111111111111111111111111111111111111111111111111, -9223372036854775807)
def test_bin_unsigned(self):
# Positive constants
self.assertEqual(0b10000000000000000000000000000000, 2147483648L)
self.assertEqual(0b11111111111111111111111111111111, 4294967295L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b10000000000000000000000000000000), -2147483648L)
self.assertEqual(-(0b11111111111111111111111111111111), -4294967295L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0b10000000000000000000000000000000, -2147483648L)
self.assertEqual(-0b11111111111111111111111111111111, -4294967295L)
# Positive constants
self.assertEqual(0b1000000000000000000000000000000000000000000000000000000000000000, 9223372036854775808L)
self.assertEqual(0b1111111111111111111111111111111111111111111111111111111111111111, 18446744073709551615L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b1000000000000000000000000000000000000000000000000000000000000000), -9223372036854775808L)
self.assertEqual(-(0b1111111111111111111111111111111111111111111111111111111111111111), -18446744073709551615L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0b1000000000000000000000000000000000000000000000000000000000000000, -9223372036854775808L)
self.assertEqual(-0b1111111111111111111111111111111111111111111111111111111111111111, -18446744073709551615L)
def test_main():
test_support.run_unittest(TestHexOctBin)
if __name__ == "__main__":
test_main()
| mit |
jkkummerfeld/1ec-graph-parser | properties/count_unique_dev_spines.py | 1 | 2799 | #!/usr/bin/env python3
from __future__ import print_function
import argparse
import string
import sys
def read(filename):
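    # Returns a list with one entry per sentence, each a list of
    # (word, pos, spine) tuples parsed from the SHP-format input.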
sent = []
spines = []
for line in open(filename):
line = line.strip()
if line.startswith("# Sentence"):
spines.append([])
sent = line.strip().split()[2:]
elif len(line) > 0 and line[0] != '#':
fields = line.split()
num = int(fields[0])
word = fields[1]
pos = fields[2]
spine = fields[3]
spines[-1].append((word, pos, spine))
return spines
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Calculate how many spines in the dev set are novel.')
parser.add_argument('train', help='Training data in SHP format.')
parser.add_argument('dev', help='Development data in SHP format.')
args = parser.parse_args()
train_spines = read(args.train)
word_set = set()
pos_set = set()
spine_set = set()
for spines in train_spines:
for spine in spines:
word_set.add(spine)
pos_set.add((spine[1], spine[2]))
spine_set.add(spine[2])
results = {
'Dev sentences with all seen: (word, POS, spine)': 0,
'Dev sentences with all seen: (POS, spine)': 0,
'Dev sentences with all seen: spine': 0,
'Train spines (word, POS, spine)': len(word_set),
'Train spines (POS, spine)': len(pos_set),
'Train spines spine': len(spine_set),
'Dev spines new (word, POS, spine)': 0,
'Dev spines new (POS, spine)': 0,
'Dev spines new spine': 0,
'Dev spines': 0
}
sentences = 0
for spines in read(args.dev):
sentences += 1
all_wpresent = True
all_ppresent = True
all_cpresent = True
for spine in spines:
results['Dev spines'] += 1
if spine not in word_set:
results['Dev spines new (word, POS, spine)'] += 1
all_wpresent = False
if (spine[1], spine[2]) not in pos_set:
results['Dev spines new (POS, spine)'] += 1
all_ppresent = False
if spine[2] not in spine_set:
results['Dev spines new spine'] += 1
all_cpresent = False
if all_wpresent:
results['Dev sentences with all seen: (word, POS, spine)'] += 1
if all_ppresent:
results['Dev sentences with all seen: (POS, spine)'] += 1
if all_cpresent:
results['Dev sentences with all seen: spine'] += 1
for key in results:
if key.startswith("Dev sentences"):
print("{} {} {:.1f}%".format(key, results[key], results[key] * 100 / sentences))
else:
print(key, results[key])
| isc |
ABaldwinHunter/django-clone-classic | tests/template_tests/test_logging.py | 117 | 4628 | from __future__ import unicode_literals
import logging
from django.template import Context, Engine, Variable, VariableDoesNotExist
from django.test import SimpleTestCase
class TestHandler(logging.Handler):
def __init__(self):
super(TestHandler, self).__init__()
self.log_record = None
def emit(self, record):
self.log_record = record
class BaseTemplateLoggingTestCase(SimpleTestCase):
def setUp(self):
self.test_handler = TestHandler()
self.logger = logging.getLogger('django.template')
self.original_level = self.logger.level
self.logger.addHandler(self.test_handler)
self.logger.setLevel(self.loglevel)
def tearDown(self):
self.logger.removeHandler(self.test_handler)
self.logger.level = self.original_level
class VariableResolveLoggingTests(BaseTemplateLoggingTestCase):
loglevel = logging.DEBUG
def test_log_on_variable_does_not_exist_silent(self):
class TestObject(object):
class SilentDoesNotExist(Exception):
silent_variable_failure = True
@property
def template_name(self):
return "template_name"
@property
def template(self):
return Engine().from_string('')
@property
def article(self):
raise TestObject.SilentDoesNotExist("Attribute does not exist.")
def __iter__(self):
return iter(attr for attr in dir(TestObject) if attr[:2] != "__")
def __getitem__(self, item):
return self.__dict__[item]
Variable('article').resolve(TestObject())
self.assertEqual(
self.test_handler.log_record.getMessage(),
"Exception while resolving variable 'article' in template 'template_name'."
)
self.assertIsNotNone(self.test_handler.log_record.exc_info)
raised_exception = self.test_handler.log_record.exc_info[1]
self.assertEqual(str(raised_exception), 'Attribute does not exist.')
def test_log_on_variable_does_not_exist_not_silent(self):
with self.assertRaises(VariableDoesNotExist):
Variable('article.author').resolve({'article': {'section': 'News'}})
self.assertEqual(
self.test_handler.log_record.getMessage(),
"Exception while resolving variable 'author' in template 'unknown'."
)
self.assertIsNotNone(self.test_handler.log_record.exc_info)
raised_exception = self.test_handler.log_record.exc_info[1]
self.assertEqual(
str(raised_exception),
'Failed lookup for key [author] in %r' % ("{%r: %r}" % ('section', 'News'))
)
def test_no_log_when_variable_exists(self):
Variable('article.section').resolve({'article': {'section': 'News'}})
self.assertIsNone(self.test_handler.log_record)
class IncludeNodeLoggingTests(BaseTemplateLoggingTestCase):
loglevel = logging.WARN
@classmethod
def setUpClass(cls):
super(IncludeNodeLoggingTests, cls).setUpClass()
cls.engine = Engine(loaders=[
('django.template.loaders.locmem.Loader', {
'child': '{{ raises_exception }}',
}),
], debug=False)
def error_method():
raise IndexError("some generic exception")
cls.ctx = Context({'raises_exception': error_method})
def test_logs_exceptions_during_rendering_with_debug_disabled(self):
template = self.engine.from_string('{% include "child" %}')
template.name = 'template_name'
self.assertEqual(template.render(self.ctx), '')
self.assertEqual(
self.test_handler.log_record.getMessage(),
"Exception raised while rendering {% include %} for template "
"'template_name'. Empty string rendered instead."
)
self.assertIsNotNone(self.test_handler.log_record.exc_info)
self.assertEqual(self.test_handler.log_record.levelno, logging.WARN)
def test_logs_exceptions_during_rendering_with_no_template_name(self):
template = self.engine.from_string('{% include "child" %}')
self.assertEqual(template.render(self.ctx), '')
self.assertEqual(
self.test_handler.log_record.getMessage(),
"Exception raised while rendering {% include %} for template "
"'unknown'. Empty string rendered instead."
)
self.assertIsNotNone(self.test_handler.log_record.exc_info)
self.assertEqual(self.test_handler.log_record.levelno, logging.WARN)
| bsd-3-clause |
BeyondTheClouds/nova | nova/api/openstack/compute/cells.py | 9 | 12036 | # Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cells extension."""
import oslo_messaging as messaging
from oslo_utils import strutils
import six
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import cells
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.cells import rpcapi as cells_rpcapi
import nova.conf
from nova import exception
from nova.i18n import _
from nova import rpc
CONF = nova.conf.CONF
ALIAS = "os-cells"
authorize = extensions.os_compute_authorizer(ALIAS)
def _filter_keys(item, keys):
"""Filters all model attributes except for keys
item is a dict
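    For example, _filter_keys({'name': 'c1', 'extra': 1}, ['name'])
    returns {'name': 'c1'}.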
"""
return {k: v for k, v in six.iteritems(item) if k in keys}
def _fixup_cell_info(cell_info, keys):
"""If the transport_url is present in the cell, derive username,
rpc_host, and rpc_port from it.
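    For example (illustrative values), a cell whose transport_url is
    'rabbit://bob:secret@rpc.example.com:5672/' yields username='bob',
    rpc_host='rpc.example.com' and rpc_port=5672.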
"""
if 'transport_url' not in cell_info:
return
# Disassemble the transport URL
transport_url = cell_info.pop('transport_url')
try:
transport_url = rpc.get_transport_url(transport_url)
except messaging.InvalidTransportURL:
# Just go with None's
for key in keys:
cell_info.setdefault(key, None)
return
if not transport_url.hosts:
return
transport_host = transport_url.hosts[0]
transport_field_map = {'rpc_host': 'hostname', 'rpc_port': 'port'}
for key in keys:
if key in cell_info:
continue
transport_field = transport_field_map.get(key, key)
cell_info[key] = getattr(transport_host, transport_field)
def _scrub_cell(cell, detail=False):
keys = ['name', 'username', 'rpc_host', 'rpc_port']
if detail:
keys.append('capabilities')
cell_info = _filter_keys(cell, keys + ['transport_url'])
_fixup_cell_info(cell_info, keys)
cell_info['type'] = 'parent' if cell['is_parent'] else 'child'
return cell_info
class CellsController(wsgi.Controller):
"""Controller for Cell resources."""
def __init__(self):
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def _get_cells(self, ctxt, req, detail=False):
"""Return all cells."""
# Ask the CellsManager for the most recent data
items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
items = common.limited(items, req)
items = [_scrub_cell(item, detail=detail) for item in items]
return dict(cells=items)
@extensions.expected_errors(501)
@common.check_cells_enabled
def index(self, req):
"""Return all cells in brief."""
ctxt = req.environ['nova.context']
authorize(ctxt)
return self._get_cells(ctxt, req)
@extensions.expected_errors(501)
@common.check_cells_enabled
def detail(self, req):
"""Return all cells in detail."""
ctxt = req.environ['nova.context']
authorize(ctxt)
return self._get_cells(ctxt, req, detail=True)
@extensions.expected_errors(501)
@common.check_cells_enabled
def info(self, req):
"""Return name and capabilities for this cell."""
context = req.environ['nova.context']
authorize(context)
cell_capabs = {}
my_caps = CONF.cells.capabilities
for cap in my_caps:
key, value = cap.split('=')
cell_capabs[key] = value
cell = {'name': CONF.cells.name,
'type': 'self',
'rpc_host': None,
'rpc_port': 0,
'username': None,
'capabilities': cell_capabs}
return dict(cell=cell)
@extensions.expected_errors((404, 501))
@common.check_cells_enabled
def capacities(self, req, id=None):
"""Return capacities for a given cell or all cells."""
# TODO(kaushikc): return capacities as a part of cell info and
# cells detail calls in v2.1, along with capabilities
context = req.environ['nova.context']
authorize(context)
try:
capacities = self.cells_rpcapi.get_capacities(context,
cell_name=id)
except exception.CellNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return dict(cell={"capacities": capacities})
@extensions.expected_errors((404, 501))
@common.check_cells_enabled
def show(self, req, id):
"""Return data about the given cell name. 'id' is a cell name."""
context = req.environ['nova.context']
authorize(context)
try:
cell = self.cells_rpcapi.cell_get(context, id)
except exception.CellNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return dict(cell=_scrub_cell(cell))
    # NOTE(gmann): Returns 200 for backwards compatibility but should be 204
    # as this operation completes the deletion of the cell resource and
    # returns no response body.
@extensions.expected_errors((403, 404, 501))
@common.check_cells_enabled
def delete(self, req, id):
"""Delete a child or parent cell entry. 'id' is a cell name."""
context = req.environ['nova.context']
authorize(context, action="delete")
try:
num_deleted = self.cells_rpcapi.cell_delete(context, id)
except exception.CellsUpdateUnsupported as e:
raise exc.HTTPForbidden(explanation=e.format_message())
if num_deleted == 0:
raise exc.HTTPNotFound(
explanation=_("Cell %s doesn't exist.") % id)
def _normalize_cell(self, cell, existing=None):
"""Normalize input cell data. Normalizations include:
* Converting cell['type'] to is_parent boolean.
* Merging existing transport URL with transport information.
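        For example, an input of {'type': 'parent', 'rpc_host': 'h'}
        (illustrative values) becomes is_parent=True, with 'h' merged into
        the cell's transport_url.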
"""
if 'name' in cell:
cell['name'] = common.normalize_name(cell['name'])
# Start with the cell type conversion
if 'type' in cell:
cell['is_parent'] = cell['type'] == 'parent'
del cell['type']
# Avoid cell type being overwritten to 'child'
elif existing:
cell['is_parent'] = existing['is_parent']
else:
cell['is_parent'] = False
# Now we disassemble the existing transport URL...
transport_url = existing.get('transport_url') if existing else None
transport_url = rpc.get_transport_url(transport_url)
if 'rpc_virtual_host' in cell:
transport_url.virtual_host = cell.pop('rpc_virtual_host')
if not transport_url.hosts:
transport_url.hosts.append(messaging.TransportHost())
transport_host = transport_url.hosts[0]
if 'rpc_port' in cell:
cell['rpc_port'] = int(cell['rpc_port'])
# Copy over the input fields
transport_field_map = {
'username': 'username',
'password': 'password',
'hostname': 'rpc_host',
'port': 'rpc_port',
}
for key, input_field in transport_field_map.items():
# Only override the value if we're given an override
if input_field in cell:
setattr(transport_host, key, cell.pop(input_field))
# Now set the transport URL
cell['transport_url'] = str(transport_url)
    # NOTE(gmann): Returns 200 for backwards compatibility but should be 201
    # as this operation completes the creation of the cell resource when
    # returning a response.
@extensions.expected_errors((400, 403, 501))
@common.check_cells_enabled
@validation.schema(cells.create_v20, '2.0', '2.0')
@validation.schema(cells.create, '2.1')
def create(self, req, body):
"""Create a child cell entry."""
context = req.environ['nova.context']
authorize(context, action="create")
cell = body['cell']
self._normalize_cell(cell)
try:
cell = self.cells_rpcapi.cell_create(context, cell)
except exception.CellsUpdateUnsupported as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(cell=_scrub_cell(cell))
@extensions.expected_errors((400, 403, 404, 501))
@common.check_cells_enabled
@validation.schema(cells.update_v20, '2.0', '2.0')
@validation.schema(cells.update, '2.1')
def update(self, req, id, body):
"""Update a child cell entry. 'id' is the cell name to update."""
context = req.environ['nova.context']
authorize(context, action="update")
cell = body['cell']
cell.pop('id', None)
try:
# NOTE(Vek): There is a race condition here if multiple
# callers are trying to update the cell
# information simultaneously. Since this
# operation is administrative in nature, and
# will be going away in the future, I don't see
# it as much of a problem...
existing = self.cells_rpcapi.cell_get(context, id)
except exception.CellNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
self._normalize_cell(cell, existing)
try:
cell = self.cells_rpcapi.cell_update(context, id, cell)
except exception.CellNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.CellsUpdateUnsupported as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(cell=_scrub_cell(cell))
    # NOTE(gmann): Returns 200 for backwards compatibility but should be 204
    # as this operation completes the instance info sync and returns
    # no response body.
@extensions.expected_errors((400, 501))
@common.check_cells_enabled
@validation.schema(cells.sync_instances)
def sync_instances(self, req, body):
"""Tell all cells to sync instance info."""
context = req.environ['nova.context']
authorize(context, action="sync_instances")
project_id = body.pop('project_id', None)
deleted = body.pop('deleted', False)
updated_since = body.pop('updated_since', None)
if isinstance(deleted, six.string_types):
deleted = strutils.bool_from_string(deleted, strict=True)
self.cells_rpcapi.sync_instances(context, project_id=project_id,
updated_since=updated_since, deleted=deleted)
class Cells(extensions.V21APIExtensionBase):
"""Enables cells-related functionality such as adding neighbor cells,
listing neighbor cells, and getting the capabilities of the local cell.
"""
name = "Cells"
alias = ALIAS
version = 1
def get_resources(self):
coll_actions = {
'detail': 'GET',
'info': 'GET',
'sync_instances': 'POST',
'capacities': 'GET',
}
memb_actions = {
'capacities': 'GET',
}
res = extensions.ResourceExtension(ALIAS, CellsController(),
collection_actions=coll_actions,
member_actions=memb_actions)
return [res]
def get_controller_extensions(self):
return []
| apache-2.0 |
tkremenek/swift | utils/build_swift/tests/build_swift/test_driver_arguments.py | 5 | 24206 | # This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import absolute_import, unicode_literals
import os
import platform
import sys
import unittest
from build_swift import argparse
from build_swift import constants
from build_swift import driver_arguments
from build_swift import migration
from build_swift.presets import PresetParser
import six
from .test_presets import PRESET_DEFAULTS
from .. import expected_options as eo
from .. import utils
PRESETS_FILES = [
os.path.join(constants.UTILS_PATH, 'build-presets.ini'),
]
class ParserError(Exception):
pass
def _load_all_presets(preset_files):
parser = PresetParser()
parser.read_files(preset_files)
# Hack to filter out mixins which are not expected to be valid presets
preset_names = [
name for name in parser.preset_names
if not name.startswith('mixin')
]
presets = dict()
for name in preset_names:
preset = parser.get_preset(name, vars=PRESET_DEFAULTS)
args = migration.migrate_swift_sdks(preset.args)
presets[name] = args
return presets
class TestDriverArgumentParserMeta(type):
"""Metaclass used to dynamically generate test methods for each of the
individual options accepted by the parser and methods to validate all of
the presets.
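
    For instance, a default registered under the dest 'build_dir'
    (hypothetical name) yields a generated 'test_default_value_build_dir'
    method, and a preset named 'buildbot' would yield 'test_preset_buildbot'.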
"""
def __new__(cls, name, bases, attrs):
# Generate tests for each default value
for dest, value in eo.EXPECTED_DEFAULTS.items():
test_name = 'test_default_value_{}'.format(dest)
attrs[test_name] = cls.generate_default_value_test(dest, value)
# Generate tests for each expected option
for option in eo.EXPECTED_OPTIONS:
test_name = 'test_option_{}'.format(option.sanitized_string())
attrs[test_name] = cls.generate_option_test(option)
# Generate tests for each preset
presets = _load_all_presets(PRESETS_FILES)
for name, args in presets.items():
test_name = 'test_preset_{}'.format(name)
attrs[test_name] = cls.generate_preset_test(name, args)
if six.PY2:
name = str(name)
return super(TestDriverArgumentParserMeta, cls).__new__(
cls, name, bases, attrs)
@classmethod
def generate_default_value_test(cls, dest, default_value):
def test(self):
parsed_values = self.parse_default_args([])
parsed_value = getattr(parsed_values, dest)
if default_value.__class__ in six.string_types:
parsed_value = six.text_type(parsed_value)
self.assertEqual(default_value, parsed_value,
'Invalid default value for "{}": {} != {}'
.format(dest, default_value, parsed_value))
return test
@classmethod
def _generate_help_option_test(cls, option):
def test(self):
with utils.redirect_stdout() as output:
with self.assertRaises(ParserError):
self.parse_args([option.option_string])
self.assertNotEmpty(output)
return test
@classmethod
def _generate_set_option_test(cls, option):
def test(self):
namespace = self.parse_args([option.option_string])
self.assertEqual(getattr(namespace, option.dest), option.value)
with self.assertRaises(ParserError):
self.parse_args([option.option_string, 'foo'])
return test
@classmethod
def _generate_set_true_option_test(cls, option):
def test(self):
# TODO: Move to unit-tests for the action class
namespace = self.parse_args([])
self.assertFalse(getattr(namespace, option.dest))
namespace = self.parse_args([option.option_string])
self.assertTrue(getattr(namespace, option.dest))
return test
@classmethod
def _generate_set_false_option_test(cls, option):
def test(self):
# TODO: Move to unit-tests for the action class
namespace = self.parse_args([])
self.assertTrue(getattr(namespace, option.dest))
namespace = self.parse_args([option.option_string])
self.assertFalse(getattr(namespace, option.dest))
return test
@classmethod
def _generate_enable_option_test(cls, option):
def test(self):
# TODO: Move to unit-tests for the action class
# Test parsing True values
self.parse_args([option.option_string, '1'])
self.parse_args([option.option_string, 'true'])
self.parse_args([option.option_string, 'True'])
self.parse_args([option.option_string, 'TRUE'])
# TODO: Move to unit-tests for the action class
# Test parsing False values
self.parse_args([option.option_string, '0'])
self.parse_args([option.option_string, 'false'])
self.parse_args([option.option_string, 'False'])
self.parse_args([option.option_string, 'FALSE'])
# TODO: Move to unit-tests for the action class
# Test default value
namespace = self.parse_args([option.option_string])
self.assertTrue(getattr(namespace, option.dest))
# Test setting value to True
namespace = self.parse_args([option.option_string, 'True'])
self.assertTrue(getattr(namespace, option.dest))
# Test setting value to False
namespace = self.parse_args([option.option_string, 'False'])
self.assertFalse(getattr(namespace, option.dest))
return test
@classmethod
def _generate_disable_option_test(cls, option):
def test(self):
# TODO: Move to unit-tests for the action class
# Test parsing True values
self.parse_args([option.option_string, '1'])
self.parse_args([option.option_string, 'true'])
self.parse_args([option.option_string, 'True'])
self.parse_args([option.option_string, 'TRUE'])
# TODO: Move to unit-tests for the action class
# Test parsing False values
self.parse_args([option.option_string, '0'])
self.parse_args([option.option_string, 'false'])
self.parse_args([option.option_string, 'False'])
self.parse_args([option.option_string, 'FALSE'])
# TODO: Move to unit-tests for the action class
# Test default value
namespace = self.parse_args([option.option_string])
self.assertFalse(getattr(namespace, option.dest))
# Test setting value to True resulting in False
namespace = self.parse_args([option.option_string, 'True'])
self.assertFalse(getattr(namespace, option.dest))
# Test setting value to False resulting in True
namespace = self.parse_args([option.option_string, 'False'])
self.assertTrue(getattr(namespace, option.dest))
return test
@classmethod
def _generate_choices_option_test(cls, option):
def test(self):
for choice in option.choices:
namespace = self.parse_args(
[option.option_string, six.text_type(choice)])
self.assertEqual(getattr(namespace, option.dest), choice)
with self.assertRaises(ParserError):
self.parse_args([option.option_string, 'INVALID'])
return test
@classmethod
def _generate_int_option_test(cls, option):
def test(self):
for i in [0, 1, 42]:
namespace = self.parse_args(
[option.option_string, six.text_type(i)])
self.assertEqual(int(getattr(namespace, option.dest)), i)
# FIXME: int-type options should not accept non-int strings
# self.parse_args([option.option_string, six.text_type(0.0)])
# self.parse_args([option.option_string, six.text_type(1.0)])
# self.parse_args([option.option_string, six.text_type(3.14)])
# self.parse_args([option.option_string, 'NaN'])
return test
@classmethod
def _generate_str_option_test(cls, option):
def test(self):
self.parse_args([option.option_string, 'foo'])
return test
@classmethod
def _generate_path_option_test(cls, option):
def test(self):
self.parse_args([option.option_string, sys.executable])
# FIXME: path-type options should not accept non-path inputs
# self.parse_args([option.option_string, 'foo'])
return test
@classmethod
def _generate_append_option_test(cls, option):
def test(self):
# Range size is arbitrary, just needs to be more than once
for i in range(1, 4):
namespace = self.parse_args([option.option_string, 'ARG'] * i)
self.assertEqual(getattr(namespace, option.dest), ['ARG'] * i)
return test
@classmethod
def _generate_unsupported_option_test(cls, option):
def test(self):
with self.assertRaises(ParserError):
self.parse_args([option.option_string])
return test
@classmethod
def _generate_build_script_impl_option_test(cls, option):
def test(self):
namespace, unknown_args = self.parse_args_and_unknown_args([])
self.assertFalse(hasattr(namespace, option.dest))
self.assertEqual(unknown_args, [])
namespace, unknown_args = self.parse_args_and_unknown_args(
[option.option_string])
# The argument should never show up in the namespace
self.assertFalse(hasattr(namespace, option.dest))
# It should instead be forwareded to unkown_args
self.assertEqual(unknown_args, [option.option_string])
return test
@classmethod
def generate_option_test(cls, option):
generate_test_funcs = {
eo.HelpOption: cls._generate_help_option_test,
eo.SetOption: cls._generate_set_option_test,
eo.SetTrueOption: cls._generate_set_true_option_test,
eo.SetFalseOption: cls._generate_set_false_option_test,
eo.EnableOption: cls._generate_enable_option_test,
eo.DisableOption: cls._generate_disable_option_test,
eo.ChoicesOption: cls._generate_choices_option_test,
eo.IntOption: cls._generate_int_option_test,
eo.StrOption: cls._generate_str_option_test,
eo.PathOption: cls._generate_path_option_test,
eo.AppendOption: cls._generate_append_option_test,
eo.UnsupportedOption: cls._generate_unsupported_option_test,
eo.BuildScriptImplOption:
cls._generate_build_script_impl_option_test,
# IgnoreOptions should be manually tested
eo.IgnoreOption: lambda self: None,
}
test_func = generate_test_funcs.get(option.__class__, None)
if test_func is not None:
return test_func(option)
# Catch-all meaningless test
return lambda self: \
self.fail('unexpected option "{}"'.format(option.option_string))
@classmethod
def generate_preset_test(cls, preset_name, preset_args):
def test(self):
try:
# Windows cannot run build-script-impl to check the impl args.
is_windows = platform.system() == 'Windows'
self.parse_default_args(preset_args,
check_impl_args=not is_windows)
except ParserError as e:
self.fail('failed to parse preset "{}": {}'.format(
preset_name, e))
return test
@six.add_metaclass(TestDriverArgumentParserMeta)
class TestDriverArgumentParser(unittest.TestCase):
def _parse_args(self, args):
try:
return migration.parse_args(self.parser, args)
except (SystemExit, ValueError) as e:
            raise ParserError('failed to parse arguments: {}: {}'.format(
                six.text_type(args), e))
def _check_impl_args(self, namespace):
assert hasattr(namespace, 'build_script_impl_args')
try:
migration.check_impl_args(
constants.BUILD_SCRIPT_IMPL_PATH,
namespace.build_script_impl_args)
except (SystemExit, ValueError) as e:
            raise ParserError('failed to parse impl arguments: {}: {}'.format(
                six.text_type(namespace.build_script_impl_args), e))
def parse_args_and_unknown_args(self, args, namespace=None):
if namespace is None:
namespace = argparse.Namespace()
with utils.quiet_output():
try:
namespace, unknown_args = (
super(self.parser.__class__, self.parser).parse_known_args(
args, namespace))
namespace, unknown_args = (
migration._process_disambiguation_arguments(
namespace, unknown_args))
except (SystemExit, argparse.ArgumentError) as e:
                raise ParserError('failed to parse arguments: {}: {}'.format(
                    six.text_type(args), e))
return namespace, unknown_args
def parse_args(self, args, namespace=None):
namespace, unknown_args = self.parse_args_and_unknown_args(
args, namespace)
if unknown_args:
raise ParserError('unknown arguments: {}'.format(
six.text_type(unknown_args)))
return namespace
def parse_default_args(self, args, check_impl_args=False):
with utils.quiet_output():
namespace = self._parse_args(args)
if check_impl_args:
self._check_impl_args(namespace)
return namespace
def setUp(self):
self.parser = driver_arguments.create_argument_parser()
# -------------------------------------------------------------------------
def test_expected_options_exhaustive(self):
"""Test that we are exhaustively testing all options accepted by the
parser. If this test if failing then the parser accepts more options
than currently being tested, meaning the EXPECTED_OPTIONS list in
build_swift/tests/expected_options.py should be updated to include
the missing options.
"""
expected_options = {o.option_string for o in eo.EXPECTED_OPTIONS}
# aggregate and flatten the options_strings accepted by the parser
actual_options = [a.option_strings for a in self.parser._actions]
actual_options = set(sum(actual_options, []))
diff = actual_options - expected_options
if len(diff) > 0:
self.fail('non-exhaustive expected options, missing: {}'
.format(diff))
def test_expected_options_have_default_values(self):
"""Test that all the options in EXPECTED_OPTIONS have an associated
default value.
"""
skip_option_classes = [
eo.HelpOption,
eo.IgnoreOption,
eo.UnsupportedOption,
eo.BuildScriptImplOption,
]
missing_defaults = set()
for option in eo.EXPECTED_OPTIONS:
if option.__class__ in skip_option_classes:
continue
if option.dest not in eo.EXPECTED_DEFAULTS:
missing_defaults.add(option.dest)
if len(missing_defaults) > 0:
self.fail('non-exhaustive default values for options, missing: {}'
.format(missing_defaults))
# -------------------------------------------------------------------------
# Manual option tests
def test_option_clang_compiler_version(self):
option_string = '--clang-compiler-version'
self.parse_default_args([option_string, '5.0.0'])
self.parse_default_args([option_string, '5.0.1'])
self.parse_default_args([option_string, '5.0.0.1'])
with self.assertRaises(ParserError):
self.parse_default_args([option_string, '1'])
self.parse_default_args([option_string, '1.2'])
self.parse_default_args([option_string, '0.0.0.0.1'])
def test_option_clang_user_visible_version(self):
option_string = '--clang-user-visible-version'
self.parse_default_args([option_string, '5.0.0'])
self.parse_default_args([option_string, '5.0.1'])
self.parse_default_args([option_string, '5.0.0.1'])
with self.assertRaises(ParserError):
self.parse_default_args([option_string, '1'])
self.parse_default_args([option_string, '1.2'])
self.parse_default_args([option_string, '0.0.0.0.1'])
def test_option_swift_compiler_version(self):
option_string = '--swift-compiler-version'
self.parse_default_args([option_string, '4.1'])
self.parse_default_args([option_string, '4.0.1'])
self.parse_default_args([option_string, '200.99.1'])
with self.assertRaises(ParserError):
self.parse_default_args([option_string, '1'])
self.parse_default_args([option_string, '0.0.0.1'])
def test_option_swift_user_visible_version(self):
option_string = '--swift-user-visible-version'
self.parse_default_args([option_string, '4.1'])
self.parse_default_args([option_string, '4.0.1'])
self.parse_default_args([option_string, '200.99.1'])
with self.assertRaises(ParserError):
self.parse_default_args([option_string, '1'])
self.parse_default_args([option_string, '0.0.0.1'])
def test_option_I(self):
with self.assertRaises(ValueError):
self.parse_default_args(['-I'])
def test_option_ios_all(self):
with self.assertRaises(ValueError):
self.parse_default_args(['--ios-all'])
def test_option_tvos_all(self):
with self.assertRaises(ValueError):
self.parse_default_args(['--tvos-all'])
def test_option_watchos_all(self):
with self.assertRaises(ValueError):
self.parse_default_args(['--watchos-all'])
# -------------------------------------------------------------------------
# Implied defaults tests
def test_implied_defaults_assertions(self):
namespace = self.parse_default_args(['--assertions'])
self.assertTrue(namespace.cmark_assertions)
self.assertTrue(namespace.llvm_assertions)
self.assertTrue(namespace.swift_assertions)
self.assertTrue(namespace.swift_stdlib_assertions)
def test_implied_defaults_cmark_build_variant(self):
namespace = self.parse_default_args(['--debug-cmark'])
self.assertTrue(namespace.build_cmark)
def test_implied_defaults_lldb_build_variant(self):
namespace = self.parse_default_args(['--debug-lldb'])
self.assertTrue(namespace.build_lldb)
namespace = self.parse_default_args(['--lldb-assertions'])
self.assertTrue(namespace.build_lldb)
def test_implied_defaults_build_variant(self):
namespace = self.parse_default_args(['--debug'])
self.assertEqual(namespace.cmark_build_variant, 'Debug')
self.assertEqual(namespace.foundation_build_variant, 'Debug')
self.assertEqual(namespace.libdispatch_build_variant, 'Debug')
self.assertEqual(namespace.libicu_build_variant, 'Debug')
self.assertEqual(namespace.lldb_build_variant, 'Debug')
self.assertEqual(namespace.llvm_build_variant, 'Debug')
self.assertEqual(namespace.swift_build_variant, 'Debug')
self.assertEqual(namespace.swift_stdlib_build_variant, 'Debug')
def test_implied_defaults_skip_build_ios(self):
namespace = self.parse_default_args(['--skip-build-ios'])
self.assertFalse(namespace.build_ios_device)
self.assertFalse(namespace.build_ios_simulator)
# Also implies that the tests should be skipped
self.assertFalse(namespace.test_ios_host)
self.assertFalse(namespace.test_ios_simulator)
def test_implied_defaults_skip_build_tvos(self):
namespace = self.parse_default_args(['--skip-build-tvos'])
self.assertFalse(namespace.build_tvos_device)
self.assertFalse(namespace.build_tvos_simulator)
# Also implies that the tests should be skipped
self.assertFalse(namespace.test_tvos_host)
self.assertFalse(namespace.test_tvos_simulator)
def test_implied_defaults_skip_build_watchos(self):
namespace = self.parse_default_args(['--skip-build-watchos'])
self.assertFalse(namespace.build_watchos_device)
self.assertFalse(namespace.build_watchos_simulator)
# Also implies that the tests should be skipped
self.assertFalse(namespace.test_watchos_host)
self.assertFalse(namespace.test_watchos_simulator)
def test_implied_defaults_validation_test(self):
namespace = self.parse_default_args(['--validation-test'])
self.assertTrue(namespace.test)
def test_implied_defaults_test_optimized(self):
namespace = self.parse_default_args(['--test-optimized'])
self.assertTrue(namespace.test)
def test_implied_defaults_test_optimize_for_size(self):
namespace = self.parse_default_args(['--test-optimize-for-size'])
self.assertTrue(namespace.test)
def test_implied_defaults_test_optimize_none_with_implicit_dynamic(self):
namespace = self.parse_default_args(
['--test-optimize-none-with-implicit-dynamic'])
self.assertTrue(namespace.test)
def test_implied_defaults_skip_all_tests(self):
namespace = self.parse_default_args([
'--test', '0',
'--validation-test', '0',
'--long-test', '0',
'--stress-test', '0',
])
self.assertFalse(namespace.test_linux)
self.assertFalse(namespace.test_freebsd)
self.assertFalse(namespace.test_cygwin)
self.assertFalse(namespace.test_osx)
self.assertFalse(namespace.test_ios)
self.assertFalse(namespace.test_tvos)
self.assertFalse(namespace.test_watchos)
def test_implied_defaults_skip_test_ios(self):
namespace = self.parse_default_args(['--skip-test-ios'])
self.assertFalse(namespace.test_ios_host)
self.assertFalse(namespace.test_ios_simulator)
def test_implied_defaults_skip_test_tvos(self):
namespace = self.parse_default_args(['--skip-test-tvos'])
self.assertFalse(namespace.test_tvos_host)
self.assertFalse(namespace.test_tvos_simulator)
def test_implied_defaults_skip_test_watchos(self):
namespace = self.parse_default_args(['--skip-test-watchos'])
self.assertFalse(namespace.test_watchos_host)
self.assertFalse(namespace.test_watchos_simulator)
def test_implied_defaults_skip_build_android(self):
namespace = self.parse_default_args(['--android', '0'])
self.assertFalse(namespace.test_android_host)
namespace = self.parse_default_args(['--skip-build-android'])
self.assertFalse(namespace.test_android_host)
def test_implied_defaults_host_test(self):
namespace = self.parse_default_args(['--host-test', '0'])
self.assertFalse(namespace.test_ios_host)
self.assertFalse(namespace.test_tvos_host)
self.assertFalse(namespace.test_watchos_host)
self.assertFalse(namespace.test_android_host)
self.assertFalse(namespace.build_libparser_only)
def test_build_lib_swiftsyntaxparser_only(self):
namespace = self.parse_default_args(['--build-libparser-only'])
self.assertTrue(namespace.build_libparser_only)
| apache-2.0 |
googlecodelabs/nest-tensorflow | wwn/access_token.py | 1 | 1354 | #!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
import urllib2
import json
import os
nest_auth_url = 'https://home.nest.com/login/oauth2'
nest_access_token_url = 'https://api.home.nest.com/oauth2/access_token'
# Set your OAuth client ID and secret as environment variables.
# See docker-compose.yml for an example of where they can be set
# if not publishing that file.
client_id = os.environ.get("CLIENT_ID", None)
client_secret = os.environ.get("CLIENT_SECRET", None)
def get_access_token(authorization_code):
    """Exchange the OAuth2 authorization code for an access token.

    Minimal sketch assuming the standard OAuth2 authorization-code
    grant; the 'access_token' response field name is assumed from the
    Works with Nest API documentation.
    """
    data = urllib.urlencode({'client_id': client_id,
                             'client_secret': client_secret,
                             'code': authorization_code,
                             'grant_type': 'authorization_code'})
    response = urllib2.urlopen(nest_access_token_url, data)
    return json.loads(response.read())['access_token']
def authorization_url():
query = urllib.urlencode({
'client_id': client_id,
'state': 'STATE'
})
return "{0}?{1}".format(nest_auth_url, query)
| apache-2.0 |
RomanKharin/lrmq | test/async_agent_socket.py | 1 | 3459 | # -*- coding: utf8 -*-
# Low-resource message queue framework
# Access hub with tcp socket
# Copyright (c) 2016 Roman Kharin <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import asyncio
from asyncio.streams import StreamWriter, FlowControlMixin
async def run(port):
loop = asyncio.get_event_loop()
# out command queue
ans_queue = asyncio.Queue()
stdreader = None
stdwriter = None
# stdio initiation
# NOTE: os.fdopen(0, "wb") will not works in pipe
# os.fdopen(sys.stdout, "wb") may crash print()
writer_transport, writer_protocol = await loop.connect_write_pipe(
FlowControlMixin, os.fdopen(sys.stdout.fileno(), "wb"))
stdwriter = StreamWriter(writer_transport, writer_protocol,
None, loop)
stdreader = asyncio.StreamReader()
reader_protocol = asyncio.StreamReaderProtocol(stdreader)
await loop.connect_read_pipe(lambda: reader_protocol, sys.stdin.buffer)
    sock_server = None
async def onclient(reader, writer):
# read from socket
async def coro_reader():
while True:
data = await stdreader.readline()
if not data:
                    # Close the listening server so run() can finish;
                    # a coroutine object has no cancel() method.
                    if sock_server:
                        sock_server.close()
break
writer.write(data)
await writer.drain()
task = asyncio.ensure_future(coro_reader())
while True:
data = await reader.readline()
if not data:
break
stdwriter.write(data)
await stdwriter.drain()
task.cancel()
    sock_server = await asyncio.start_server(onclient, port=port, backlog=1)
await sock_server.wait_closed()
def main():
port = 5550
if len(sys.argv) > 1:
if sys.argv[1] in ("-h", "--help"):
print("Start with:")
print("\tpython3 -m lrmq -a python3 async_agent_socket.py 5550")
print("Then connect with")
print("\ttelnet 127.0.0.1 5550")
return
port = int(sys.argv[1])
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
asyncio.set_event_loop(loop)
else:
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(run(port))
finally:
loop.close()
if __name__ == "__main__":
main()
| mit |
ChromiumWebApps/chromium | base/android/jni_generator/jni_generator.py | 1 | 48419 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extracts native methods from a Java file and generates the JNI bindings.
If you change this, please run and update the tests."""
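# Typical invocation (illustrative):
#   jni_generator.py --input_file path/to/Foo.java --output_dir gen_dir
# which writes gen_dir/Foo_jni.h.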
import collections
import errno
import optparse
import os
import re
import string
from string import Template
import subprocess
import sys
import textwrap
import zipfile
class ParseError(Exception):
"""Exception thrown when we can't parse the input file."""
def __init__(self, description, *context_lines):
Exception.__init__(self)
self.description = description
self.context_lines = context_lines
def __str__(self):
context = '\n'.join(self.context_lines)
return '***\nERROR: %s\n\n%s\n***' % (self.description, context)
class Param(object):
"""Describes a param for a method, either java or native."""
def __init__(self, **kwargs):
self.datatype = kwargs['datatype']
self.name = kwargs['name']
class NativeMethod(object):
"""Describes a C/C++ method that is called by Java code"""
def __init__(self, **kwargs):
self.static = kwargs['static']
self.java_class_name = kwargs['java_class_name']
self.return_type = kwargs['return_type']
self.name = kwargs['name']
self.params = kwargs['params']
if self.params:
assert type(self.params) is list
assert type(self.params[0]) is Param
if (self.params and
self.params[0].datatype == kwargs.get('ptr_type', 'int') and
self.params[0].name.startswith('native')):
self.type = 'method'
self.p0_type = self.params[0].name[len('native'):]
if kwargs.get('native_class_name'):
self.p0_type = kwargs['native_class_name']
else:
self.type = 'function'
self.method_id_var_name = kwargs.get('method_id_var_name', None)
class CalledByNative(object):
"""Describes a java method exported to c/c++"""
def __init__(self, **kwargs):
self.system_class = kwargs['system_class']
self.unchecked = kwargs['unchecked']
self.static = kwargs['static']
self.java_class_name = kwargs['java_class_name']
self.return_type = kwargs['return_type']
self.name = kwargs['name']
self.params = kwargs['params']
self.method_id_var_name = kwargs.get('method_id_var_name', None)
self.signature = kwargs.get('signature')
self.is_constructor = kwargs.get('is_constructor', False)
self.env_call = GetEnvCall(self.is_constructor, self.static,
self.return_type)
self.static_cast = GetStaticCastForReturnType(self.return_type)
def JavaDataTypeToC(java_type):
"""Returns a C datatype for the given java type."""
java_pod_type_map = {
'int': 'jint',
'byte': 'jbyte',
'char': 'jchar',
'short': 'jshort',
'boolean': 'jboolean',
'long': 'jlong',
'double': 'jdouble',
'float': 'jfloat',
}
java_type_map = {
'void': 'void',
'String': 'jstring',
'java/lang/String': 'jstring',
'java/lang/Class': 'jclass',
}
if java_type in java_pod_type_map:
return java_pod_type_map[java_type]
elif java_type in java_type_map:
return java_type_map[java_type]
elif java_type.endswith('[]'):
if java_type[:-2] in java_pod_type_map:
return java_pod_type_map[java_type[:-2]] + 'Array'
return 'jobjectArray'
elif java_type.startswith('Class'):
# Checking just the start of the name, rather than a direct comparison,
# in order to handle generics.
return 'jclass'
else:
return 'jobject'
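# Illustrative mappings: JavaDataTypeToC('int') == 'jint',
# JavaDataTypeToC('int[]') == 'jintArray', JavaDataTypeToC('String') ==
# 'jstring'; any other reference type falls back to 'jobject'.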
def JavaReturnValueToC(java_type):
"""Returns a valid C return value for the given java type."""
java_pod_type_map = {
'int': '0',
'byte': '0',
'char': '0',
'short': '0',
'boolean': 'false',
'long': '0',
'double': '0',
'float': '0',
'void': ''
}
return java_pod_type_map.get(java_type, 'NULL')
class JniParams(object):
_imports = []
_fully_qualified_class = ''
_package = ''
_inner_classes = []
_remappings = []
@staticmethod
def SetFullyQualifiedClass(fully_qualified_class):
JniParams._fully_qualified_class = 'L' + fully_qualified_class
JniParams._package = '/'.join(fully_qualified_class.split('/')[:-1])
@staticmethod
def ExtractImportsAndInnerClasses(contents):
contents = contents.replace('\n', '')
re_import = re.compile(r'import.*?(?P<class>\S*?);')
for match in re.finditer(re_import, contents):
JniParams._imports += ['L' + match.group('class').replace('.', '/')]
re_inner = re.compile(r'(class|interface)\s+?(?P<name>\w+?)\W')
for match in re.finditer(re_inner, contents):
inner = match.group('name')
if not JniParams._fully_qualified_class.endswith(inner):
JniParams._inner_classes += [JniParams._fully_qualified_class + '$' +
inner]
@staticmethod
def ParseJavaPSignature(signature_line):
prefix = 'Signature: '
return '"%s"' % signature_line[signature_line.index(prefix) + len(prefix):]
@staticmethod
def JavaToJni(param):
"""Converts a java param into a JNI signature type."""
pod_param_map = {
'int': 'I',
'boolean': 'Z',
'char': 'C',
'short': 'S',
'long': 'J',
'double': 'D',
'float': 'F',
'byte': 'B',
'void': 'V',
}
object_param_list = [
'Ljava/lang/Boolean',
'Ljava/lang/Integer',
'Ljava/lang/Long',
'Ljava/lang/Object',
'Ljava/lang/String',
'Ljava/lang/Class',
]
prefix = ''
# Array?
while param[-2:] == '[]':
prefix += '['
param = param[:-2]
# Generic?
if '<' in param:
param = param[:param.index('<')]
if param in pod_param_map:
return prefix + pod_param_map[param]
if '/' in param:
# Coming from javap, use the fully qualified param directly.
return prefix + 'L' + JniParams.RemapClassName(param) + ';'
for qualified_name in (object_param_list +
[JniParams._fully_qualified_class] +
JniParams._inner_classes):
if (qualified_name.endswith('/' + param) or
qualified_name.endswith('$' + param.replace('.', '$')) or
qualified_name == 'L' + param):
return prefix + JniParams.RemapClassName(qualified_name) + ';'
    # Is it from an import? (e.g. referencing Class from import pkg.Class;
# note that referencing an inner class Inner from import pkg.Class.Inner
# is not supported).
for qualified_name in JniParams._imports:
if qualified_name.endswith('/' + param):
# Ensure it's not an inner class.
components = qualified_name.split('/')
if len(components) > 2 and components[-2][0].isupper():
raise SyntaxError('Inner class (%s) can not be imported '
'and used by JNI (%s). Please import the outer '
'class and use Outer.Inner instead.' %
(qualified_name, param))
return prefix + JniParams.RemapClassName(qualified_name) + ';'
# Is it an inner class from an outer class import? (e.g. referencing
# Class.Inner from import pkg.Class).
if '.' in param:
components = param.split('.')
outer = '/'.join(components[:-1])
inner = components[-1]
for qualified_name in JniParams._imports:
if qualified_name.endswith('/' + outer):
return (prefix + JniParams.RemapClassName(qualified_name) +
'$' + inner + ';')
# Type not found, falling back to same package as this class.
return (prefix + 'L' +
JniParams.RemapClassName(JniParams._package + '/' + param) + ';')
@staticmethod
def Signature(params, returns, wrap):
"""Returns the JNI signature for the given datatypes."""
items = ['(']
items += [JniParams.JavaToJni(param.datatype) for param in params]
items += [')']
items += [JniParams.JavaToJni(returns)]
if wrap:
return '\n' + '\n'.join(['"' + item + '"' for item in items])
else:
return '"' + ''.join(items) + '"'
@staticmethod
def Parse(params):
"""Parses the params into a list of Param objects."""
if not params:
return []
ret = []
for p in [p.strip() for p in params.split(',')]:
items = p.split(' ')
if 'final' in items:
items.remove('final')
param = Param(
datatype=items[0],
name=(items[1] if len(items) > 1 else 'p%s' % len(ret)),
)
ret += [param]
return ret
@staticmethod
def RemapClassName(class_name):
"""Remaps class names using the jarjar mapping table."""
for old, new in JniParams._remappings:
if old in class_name:
return class_name.replace(old, new, 1)
return class_name
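  # Illustrative remapping (sketch): with the jarjar rule
  # 'rule org.example.** com.shaded.@1' loaded via SetJarJarMappings,
  # RemapClassName('org/example/Foo') returns 'com/shaded/Foo'.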
@staticmethod
def SetJarJarMappings(mappings):
"""Parse jarjar mappings from a string."""
JniParams._remappings = []
for line in mappings.splitlines():
keyword, src, dest = line.split()
if keyword != 'rule':
continue
assert src.endswith('.**')
src = src[:-2].replace('.', '/')
dest = dest.replace('.', '/')
if dest.endswith('@0'):
JniParams._remappings.append((src, dest[:-2] + src))
else:
assert dest.endswith('@1')
JniParams._remappings.append((src, dest[:-2]))
def ExtractJNINamespace(contents):
re_jni_namespace = re.compile('.*?@JNINamespace\("(.*?)"\)')
m = re.findall(re_jni_namespace, contents)
if not m:
return ''
return m[0]
def ExtractFullyQualifiedJavaClassName(java_file_name, contents):
re_package = re.compile('.*?package (.*?);')
matches = re.findall(re_package, contents)
if not matches:
raise SyntaxError('Unable to find "package" line in %s' % java_file_name)
return (matches[0].replace('.', '/') + '/' +
os.path.splitext(os.path.basename(java_file_name))[0])
def ExtractNatives(contents, ptr_type):
"""Returns a list of dict containing information about a native method."""
contents = contents.replace('\n', '')
natives = []
re_native = re.compile(r'(@NativeClassQualifiedName'
'\(\"(?P<native_class_name>.*?)\"\))?\s*'
'(@NativeCall(\(\"(?P<java_class_name>.*?)\"\)))?\s*'
'(?P<qualifiers>\w+\s\w+|\w+|\s+)\s*?native '
'(?P<return_type>\S*?) '
'(?P<name>native\w+?)\((?P<params>.*?)\);')
for match in re.finditer(re_native, contents):
native = NativeMethod(
static='static' in match.group('qualifiers'),
java_class_name=match.group('java_class_name'),
native_class_name=match.group('native_class_name'),
return_type=match.group('return_type'),
name=match.group('name').replace('native', ''),
params=JniParams.Parse(match.group('params')),
ptr_type=ptr_type)
natives += [native]
return natives
def GetStaticCastForReturnType(return_type):
type_map = { 'String' : 'jstring',
'java/lang/String' : 'jstring',
'boolean[]': 'jbooleanArray',
'byte[]': 'jbyteArray',
'char[]': 'jcharArray',
'short[]': 'jshortArray',
'int[]': 'jintArray',
'long[]': 'jlongArray',
'double[]': 'jdoubleArray' }
ret = type_map.get(return_type, None)
if ret:
return ret
if return_type.endswith('[]'):
return 'jobjectArray'
return None
def GetEnvCall(is_constructor, is_static, return_type):
"""Maps the types availabe via env->Call__Method."""
if is_constructor:
return 'NewObject'
env_call_map = {'boolean': 'Boolean',
'byte': 'Byte',
'char': 'Char',
'short': 'Short',
'int': 'Int',
'long': 'Long',
'float': 'Float',
'void': 'Void',
'double': 'Double',
'Object': 'Object',
}
call = env_call_map.get(return_type, 'Object')
if is_static:
call = 'Static' + call
return 'Call' + call + 'Method'
def GetMangledParam(datatype):
"""Returns a mangled identifier for the datatype."""
if len(datatype) <= 2:
return datatype.replace('[', 'A')
ret = ''
for i in range(1, len(datatype)):
c = datatype[i]
if c == '[':
ret += 'A'
elif c.isupper() or datatype[i - 1] in ['/', 'L']:
ret += c.upper()
return ret
def GetMangledMethodName(name, params, return_type):
"""Returns a mangled method name for the given signature.
The returned name can be used as a C identifier and will be unique for all
valid overloads of the same method.
Args:
name: string.
params: list of Param.
return_type: string.
Returns:
A mangled name.
"""
mangled_items = []
for datatype in [return_type] + [x.datatype for x in params]:
mangled_items += [GetMangledParam(JniParams.JavaToJni(datatype))]
mangled_name = name + '_'.join(mangled_items)
assert re.match(r'[0-9a-zA-Z_]+', mangled_name)
return mangled_name
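# Illustrative (sketch): GetMangledMethodName('getString',
# [Param(datatype='String', name='s')], 'void') returns 'getStringV_JLS':
# 'V' for the void return and 'JLS' mangled from 'Ljava/lang/String;'.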
def MangleCalledByNatives(called_by_natives):
"""Mangles all the overloads from the call_by_natives list."""
method_counts = collections.defaultdict(
lambda: collections.defaultdict(lambda: 0))
for called_by_native in called_by_natives:
java_class_name = called_by_native.java_class_name
name = called_by_native.name
method_counts[java_class_name][name] += 1
for called_by_native in called_by_natives:
java_class_name = called_by_native.java_class_name
method_name = called_by_native.name
method_id_var_name = method_name
if method_counts[java_class_name][method_name] > 1:
method_id_var_name = GetMangledMethodName(method_name,
called_by_native.params,
called_by_native.return_type)
called_by_native.method_id_var_name = method_id_var_name
return called_by_natives
# Regex to match the JNI return types that should be included in a
# ScopedJavaLocalRef.
RE_SCOPED_JNI_RETURN_TYPES = re.compile('jobject|jclass|jstring|.*Array')
# Regex to match a string like "@CalledByNative public void foo(int bar)".
RE_CALLED_BY_NATIVE = re.compile(
'@CalledByNative(?P<Unchecked>(Unchecked)*?)(?:\("(?P<annotation>.*)"\))?'
'\s+(?P<prefix>[\w ]*?)'
'\s*(?P<return_type>\S+?)'
'\s+(?P<name>\w+)'
'\s*\((?P<params>[^\)]*)\)')
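# Illustrative match: '@CalledByNative public void foo(int bar)' captures
# prefix='public', return_type='void', name='foo' and params='int bar'.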
def ExtractCalledByNatives(contents):
"""Parses all methods annotated with @CalledByNative.
Args:
contents: the contents of the java file.
Returns:
A list of dict with information about the annotated methods.
TODO(bulach): return a CalledByNative object.
Raises:
ParseError: if unable to parse.
"""
called_by_natives = []
for match in re.finditer(RE_CALLED_BY_NATIVE, contents):
called_by_natives += [CalledByNative(
system_class=False,
unchecked='Unchecked' in match.group('Unchecked'),
static='static' in match.group('prefix'),
java_class_name=match.group('annotation') or '',
return_type=match.group('return_type'),
name=match.group('name'),
params=JniParams.Parse(match.group('params')))]
# Check for any @CalledByNative occurrences that weren't matched.
unmatched_lines = re.sub(RE_CALLED_BY_NATIVE, '', contents).split('\n')
for line1, line2 in zip(unmatched_lines, unmatched_lines[1:]):
if '@CalledByNative' in line1:
raise ParseError('could not parse @CalledByNative method signature',
line1, line2)
return MangleCalledByNatives(called_by_natives)
class JNIFromJavaP(object):
"""Uses 'javap' to parse a .class file and generate the JNI header file."""
def __init__(self, contents, options):
self.contents = contents
self.namespace = options.namespace
self.fully_qualified_class = re.match(
'.*?(class|interface) (?P<class_name>.*?)( |{)',
contents[1]).group('class_name')
self.fully_qualified_class = self.fully_qualified_class.replace('.', '/')
# Java 7's javap includes type parameters in output, like HashSet<T>. Strip
# away the <...> and use the raw class name that Java 6 would've given us.
self.fully_qualified_class = self.fully_qualified_class.split('<', 1)[0]
JniParams.SetFullyQualifiedClass(self.fully_qualified_class)
self.java_class_name = self.fully_qualified_class.split('/')[-1]
if not self.namespace:
self.namespace = 'JNI_' + self.java_class_name
re_method = re.compile('(?P<prefix>.*?)(?P<return_type>\S+?) (?P<name>\w+?)'
'\((?P<params>.*?)\)')
self.called_by_natives = []
for lineno, content in enumerate(contents[2:], 2):
match = re.match(re_method, content)
if not match:
continue
self.called_by_natives += [CalledByNative(
system_class=True,
unchecked=False,
static='static' in match.group('prefix'),
java_class_name='',
return_type=match.group('return_type').replace('.', '/'),
name=match.group('name'),
params=JniParams.Parse(match.group('params').replace('.', '/')),
signature=JniParams.ParseJavaPSignature(contents[lineno + 1]))]
re_constructor = re.compile('(.*?)public ' +
self.fully_qualified_class.replace('/', '.') +
'\((?P<params>.*?)\)')
for lineno, content in enumerate(contents[2:], 2):
match = re.match(re_constructor, content)
if not match:
continue
self.called_by_natives += [CalledByNative(
system_class=True,
unchecked=False,
static=False,
java_class_name='',
return_type=self.fully_qualified_class,
name='Constructor',
params=JniParams.Parse(match.group('params').replace('.', '/')),
signature=JniParams.ParseJavaPSignature(contents[lineno + 1]),
is_constructor=True)]
self.called_by_natives = MangleCalledByNatives(self.called_by_natives)
self.inl_header_file_generator = InlHeaderFileGenerator(
self.namespace, self.fully_qualified_class, [],
self.called_by_natives, options)
def GetContent(self):
return self.inl_header_file_generator.GetContent()
@staticmethod
def CreateFromClass(class_file, options):
class_name = os.path.splitext(os.path.basename(class_file))[0]
p = subprocess.Popen(args=[options.javap, '-s', class_name],
cwd=os.path.dirname(class_file),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = p.communicate()
jni_from_javap = JNIFromJavaP(stdout.split('\n'), options)
return jni_from_javap
class JNIFromJavaSource(object):
"""Uses the given java source file to generate the JNI header file."""
def __init__(self, contents, fully_qualified_class, options):
contents = self._RemoveComments(contents, options)
JniParams.SetFullyQualifiedClass(fully_qualified_class)
JniParams.ExtractImportsAndInnerClasses(contents)
jni_namespace = ExtractJNINamespace(contents) or options.namespace
natives = ExtractNatives(contents, options.ptr_type)
called_by_natives = ExtractCalledByNatives(contents)
if len(natives) == 0 and len(called_by_natives) == 0:
raise SyntaxError('Unable to find any JNI methods for %s.' %
fully_qualified_class)
inl_header_file_generator = InlHeaderFileGenerator(
jni_namespace, fully_qualified_class, natives, called_by_natives,
options)
self.content = inl_header_file_generator.GetContent()
def _RemoveComments(self, contents, options):
# We need to support both inline and block comments, and we need to handle
# strings that contain '//' or '/*'. Rather than trying to do all that with
# regexps, we just pipe the contents through the C preprocessor. We tell cpp
# the file has already been preprocessed, so it just removes comments and
# doesn't try to parse #include, #pragma etc.
#
# TODO(husky): This is a bit hacky. It would be cleaner to use a real Java
# parser. Maybe we could ditch JNIFromJavaSource and just always use
# JNIFromJavaP; or maybe we could rewrite this script in Java and use APT.
# http://code.google.com/p/chromium/issues/detail?id=138941
p = subprocess.Popen(args=[options.cpp, '-fpreprocessed'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = p.communicate(contents)
return stdout
def GetContent(self):
return self.content
@staticmethod
def CreateFromFile(java_file_name, options):
contents = file(java_file_name).read()
fully_qualified_class = ExtractFullyQualifiedJavaClassName(java_file_name,
contents)
return JNIFromJavaSource(contents, fully_qualified_class, options)
class InlHeaderFileGenerator(object):
"""Generates an inline header file for JNI integration."""
def __init__(self, namespace, fully_qualified_class, natives,
called_by_natives, options):
self.namespace = namespace
self.fully_qualified_class = fully_qualified_class
self.class_name = self.fully_qualified_class.split('/')[-1]
self.natives = natives
self.called_by_natives = called_by_natives
self.header_guard = fully_qualified_class.replace('/', '_') + '_JNI'
self.options = options
self.init_native = self.ExtractInitNative(options)
def ExtractInitNative(self, options):
for native in self.natives:
if options.jni_init_native_name == 'native' + native.name:
self.natives.remove(native)
return native
return None
def GetContent(self):
"""Returns the content of the JNI binding file."""
template = Template("""\
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is autogenerated by
// ${SCRIPT_NAME}
// For
// ${FULLY_QUALIFIED_CLASS}
#ifndef ${HEADER_GUARD}
#define ${HEADER_GUARD}
#include <jni.h>
${INCLUDES}
// Step 1: forward declarations.
namespace {
$CLASS_PATH_DEFINITIONS
$METHOD_ID_DEFINITIONS
} // namespace
$OPEN_NAMESPACE
$FORWARD_DECLARATIONS
// Step 2: method stubs.
$METHOD_STUBS
// Step 3: RegisterNatives.
$JNI_NATIVE_METHODS
$REGISTER_NATIVES
$CLOSE_NAMESPACE
$JNI_REGISTER_NATIVES
#endif // ${HEADER_GUARD}
""")
values = {
'SCRIPT_NAME': self.options.script_name,
'FULLY_QUALIFIED_CLASS': self.fully_qualified_class,
'CLASS_PATH_DEFINITIONS': self.GetClassPathDefinitionsString(),
'METHOD_ID_DEFINITIONS': self.GetMethodIDDefinitionsString(),
'FORWARD_DECLARATIONS': self.GetForwardDeclarationsString(),
'METHOD_STUBS': self.GetMethodStubsString(),
'OPEN_NAMESPACE': self.GetOpenNamespaceString(),
'JNI_NATIVE_METHODS': self.GetJNINativeMethodsString(),
'REGISTER_NATIVES': self.GetRegisterNativesString(),
'CLOSE_NAMESPACE': self.GetCloseNamespaceString(),
'HEADER_GUARD': self.header_guard,
'INCLUDES': self.GetIncludesString(),
'JNI_REGISTER_NATIVES': self.GetJNIRegisterNativesString()
}
return WrapOutput(template.substitute(values))
def GetClassPathDefinitionsString(self):
ret = []
ret += [self.GetClassPathDefinitions()]
return '\n'.join(ret)
def GetMethodIDDefinitionsString(self):
"""Returns the definition of method ids for the called by native methods."""
if not self.options.eager_called_by_natives:
return ''
template = Template("""\
jmethodID g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME} = NULL;""")
ret = []
for called_by_native in self.called_by_natives:
values = {
'JAVA_CLASS': called_by_native.java_class_name or self.class_name,
'METHOD_ID_VAR_NAME': called_by_native.method_id_var_name,
}
ret += [template.substitute(values)]
return '\n'.join(ret)
def GetForwardDeclarationsString(self):
ret = []
for native in self.natives:
if native.type != 'method':
ret += [self.GetForwardDeclaration(native)]
return '\n'.join(ret)
def GetMethodStubsString(self):
"""Returns the code corresponding to method stubs."""
ret = []
for native in self.natives:
if native.type == 'method':
ret += [self.GetNativeMethodStubString(native)]
if self.options.eager_called_by_natives:
ret += self.GetEagerCalledByNativeMethodStubs()
else:
ret += self.GetLazyCalledByNativeMethodStubs()
return '\n'.join(ret)
def GetLazyCalledByNativeMethodStubs(self):
return [self.GetLazyCalledByNativeMethodStub(called_by_native)
for called_by_native in self.called_by_natives]
def GetEagerCalledByNativeMethodStubs(self):
ret = []
if self.called_by_natives:
ret += ['namespace {']
for called_by_native in self.called_by_natives:
ret += [self.GetEagerCalledByNativeMethodStub(called_by_native)]
ret += ['} // namespace']
return ret
def GetIncludesString(self):
if not self.options.includes:
return ''
includes = self.options.includes.split(',')
return '\n'.join('#include "%s"' % x for x in includes)
def GetKMethodsString(self, clazz):
ret = []
for native in self.natives:
if (native.java_class_name == clazz or
(not native.java_class_name and clazz == self.class_name)):
ret += [self.GetKMethodArrayEntry(native)]
return '\n'.join(ret)
def SubstituteNativeMethods(self, template):
"""Substitutes JAVA_CLASS and KMETHODS in the provided template."""
ret = []
all_classes = self.GetUniqueClasses(self.natives)
all_classes[self.class_name] = self.fully_qualified_class
for clazz in all_classes:
kmethods = self.GetKMethodsString(clazz)
if kmethods:
values = {'JAVA_CLASS': clazz,
'KMETHODS': kmethods}
ret += [template.substitute(values)]
if not ret: return ''
return '\n' + '\n'.join(ret)
def GetJNINativeMethodsString(self):
"""Returns the implementation of the array of native methods."""
template = Template("""\
static const JNINativeMethod kMethods${JAVA_CLASS}[] = {
${KMETHODS}
};
""")
return self.SubstituteNativeMethods(template)
def GetRegisterCalledByNativesImplString(self):
"""Returns the code for registering the called by native methods."""
if not self.options.eager_called_by_natives:
return ''
template = Template("""\
g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME} = ${GET_METHOD_ID_IMPL}
if (g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME} == NULL) {
return false;
}
""")
ret = []
for called_by_native in self.called_by_natives:
values = {
'JAVA_CLASS': called_by_native.java_class_name or self.class_name,
'METHOD_ID_VAR_NAME': called_by_native.method_id_var_name,
'GET_METHOD_ID_IMPL': self.GetMethodIDImpl(called_by_native),
}
ret += [template.substitute(values)]
return '\n'.join(ret)
def GetRegisterNativesString(self):
"""Returns the code for RegisterNatives."""
template = Template("""\
${REGISTER_NATIVES_SIGNATURE} {
${CLASSES}
${NATIVES}
${CALLED_BY_NATIVES}
return true;
}
""")
signature = 'static bool RegisterNativesImpl(JNIEnv* env'
if self.init_native:
signature += ', jclass clazz)'
else:
signature += ')'
natives = self.GetRegisterNativesImplString()
called_by_natives = self.GetRegisterCalledByNativesImplString()
values = {'REGISTER_NATIVES_SIGNATURE': signature,
'CLASSES': self.GetFindClasses(),
'NATIVES': natives,
'CALLED_BY_NATIVES': called_by_natives,
}
return template.substitute(values)
def GetRegisterNativesImplString(self):
"""Returns the shared implementation for RegisterNatives."""
template = Template("""\
const int kMethods${JAVA_CLASS}Size = arraysize(kMethods${JAVA_CLASS});
if (env->RegisterNatives(g_${JAVA_CLASS}_clazz,
kMethods${JAVA_CLASS},
kMethods${JAVA_CLASS}Size) < 0) {
jni_generator::HandleRegistrationError(
env, g_${JAVA_CLASS}_clazz, __FILE__);
return false;
}
""")
return self.SubstituteNativeMethods(template)
def GetJNIRegisterNativesString(self):
"""Returns the implementation for the JNI registration of native methods."""
if not self.init_native:
return ''
template = Template("""\
extern "C" JNIEXPORT bool JNICALL
Java_${FULLY_QUALIFIED_CLASS}_${INIT_NATIVE_NAME}(JNIEnv* env, jclass clazz) {
return ${NAMESPACE}RegisterNativesImpl(env, clazz);
}
""")
fully_qualified_class = self.fully_qualified_class.replace('/', '_')
namespace = ''
if self.namespace:
namespace = self.namespace + '::'
values = {'FULLY_QUALIFIED_CLASS': fully_qualified_class,
'INIT_NATIVE_NAME': 'native' + self.init_native.name,
'NAMESPACE': namespace,
'REGISTER_NATIVES_IMPL': self.GetRegisterNativesImplString()
}
return template.substitute(values)
def GetOpenNamespaceString(self):
if self.namespace:
all_namespaces = ['namespace %s {' % ns
for ns in self.namespace.split('::')]
return '\n'.join(all_namespaces)
return ''
def GetCloseNamespaceString(self):
if self.namespace:
all_namespaces = ['} // namespace %s' % ns
for ns in self.namespace.split('::')]
all_namespaces.reverse()
return '\n'.join(all_namespaces) + '\n'
return ''
def GetJNIFirstParam(self, native):
ret = []
if native.type == 'method':
ret = ['jobject jcaller']
elif native.type == 'function':
if native.static:
ret = ['jclass jcaller']
else:
ret = ['jobject jcaller']
return ret
def GetParamsInDeclaration(self, native):
"""Returns the params for the stub declaration.
Args:
native: the native dictionary describing the method.
Returns:
A string containing the params.
"""
return ',\n '.join(self.GetJNIFirstParam(native) +
[JavaDataTypeToC(param.datatype) + ' ' +
param.name
for param in native.params])
def GetCalledByNativeParamsInDeclaration(self, called_by_native):
return ',\n '.join([JavaDataTypeToC(param.datatype) + ' ' +
param.name
for param in called_by_native.params])
def GetForwardDeclaration(self, native):
template = Template("""
static ${RETURN} ${NAME}(JNIEnv* env, ${PARAMS});
""")
values = {'RETURN': JavaDataTypeToC(native.return_type),
'NAME': native.name,
'PARAMS': self.GetParamsInDeclaration(native)}
return template.substitute(values)
def GetNativeMethodStubString(self, native):
"""Returns stubs for native methods."""
template = Template("""\
static ${RETURN} ${NAME}(JNIEnv* env, ${PARAMS_IN_DECLARATION}) {
${P0_TYPE}* native = reinterpret_cast<${P0_TYPE}*>(${PARAM0_NAME});
CHECK_NATIVE_PTR(env, jcaller, native, "${NAME}"${OPTIONAL_ERROR_RETURN});
return native->${NAME}(${PARAMS_IN_CALL})${POST_CALL};
}
""")
params = []
if not self.options.pure_native_methods:
params = ['env', 'jcaller']
params_in_call = ', '.join(params + [p.name for p in native.params[1:]])
return_type = JavaDataTypeToC(native.return_type)
optional_error_return = JavaReturnValueToC(native.return_type)
if optional_error_return:
optional_error_return = ', ' + optional_error_return
post_call = ''
if re.match(RE_SCOPED_JNI_RETURN_TYPES, return_type):
post_call = '.Release()'
values = {
'RETURN': return_type,
'OPTIONAL_ERROR_RETURN': optional_error_return,
'NAME': native.name,
'PARAMS_IN_DECLARATION': self.GetParamsInDeclaration(native),
'PARAM0_NAME': native.params[0].name,
'P0_TYPE': native.p0_type,
'PARAMS_IN_CALL': params_in_call,
'POST_CALL': post_call
}
return template.substitute(values)
def GetCalledByNativeValues(self, called_by_native):
"""Fills in necessary values for the CalledByNative methods."""
if called_by_native.static or called_by_native.is_constructor:
first_param_in_declaration = ''
first_param_in_call = ('g_%s_clazz' %
(called_by_native.java_class_name or
self.class_name))
else:
first_param_in_declaration = ', jobject obj'
first_param_in_call = 'obj'
params_in_declaration = self.GetCalledByNativeParamsInDeclaration(
called_by_native)
if params_in_declaration:
params_in_declaration = ', ' + params_in_declaration
params_in_call = ', '.join(param.name for param in called_by_native.params)
if params_in_call:
params_in_call = ', ' + params_in_call
pre_call = ''
post_call = ''
if called_by_native.static_cast:
pre_call = 'static_cast<%s>(' % called_by_native.static_cast
post_call = ')'
check_exception = ''
if not called_by_native.unchecked:
check_exception = 'jni_generator::CheckException(env);'
return_type = JavaDataTypeToC(called_by_native.return_type)
optional_error_return = JavaReturnValueToC(called_by_native.return_type)
if optional_error_return:
optional_error_return = ', ' + optional_error_return
return_declaration = ''
return_clause = ''
if return_type != 'void':
pre_call = ' ' + pre_call
return_declaration = return_type + ' ret ='
if re.match(RE_SCOPED_JNI_RETURN_TYPES, return_type):
return_type = 'base::android::ScopedJavaLocalRef<' + return_type + '>'
return_clause = 'return ' + return_type + '(env, ret);'
else:
return_clause = 'return ret;'
return {
'JAVA_CLASS': called_by_native.java_class_name or self.class_name,
'RETURN_TYPE': return_type,
'OPTIONAL_ERROR_RETURN': optional_error_return,
'RETURN_DECLARATION': return_declaration,
'RETURN_CLAUSE': return_clause,
'FIRST_PARAM_IN_DECLARATION': first_param_in_declaration,
'PARAMS_IN_DECLARATION': params_in_declaration,
'PRE_CALL': pre_call,
'POST_CALL': post_call,
'ENV_CALL': called_by_native.env_call,
'FIRST_PARAM_IN_CALL': first_param_in_call,
'PARAMS_IN_CALL': params_in_call,
'METHOD_ID_VAR_NAME': called_by_native.method_id_var_name,
'CHECK_EXCEPTION': check_exception,
'GET_METHOD_ID_IMPL': self.GetMethodIDImpl(called_by_native)
}
def GetEagerCalledByNativeMethodStub(self, called_by_native):
"""Returns the implementation of the called by native method."""
template = Template("""
static ${RETURN_TYPE} ${METHOD_ID_VAR_NAME}(\
JNIEnv* env${FIRST_PARAM_IN_DECLARATION}${PARAMS_IN_DECLARATION}) {
${RETURN_DECLARATION}${PRE_CALL}env->${ENV_CALL}(${FIRST_PARAM_IN_CALL},
g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME}${PARAMS_IN_CALL})${POST_CALL};
${RETURN_CLAUSE}
}""")
values = self.GetCalledByNativeValues(called_by_native)
return template.substitute(values)
def GetLazyCalledByNativeMethodStub(self, called_by_native):
"""Returns a string."""
function_signature_template = Template("""\
static ${RETURN_TYPE} Java_${JAVA_CLASS}_${METHOD_ID_VAR_NAME}(\
JNIEnv* env${FIRST_PARAM_IN_DECLARATION}${PARAMS_IN_DECLARATION})""")
function_header_template = Template("""\
${FUNCTION_SIGNATURE} {""")
function_header_with_unused_template = Template("""\
${FUNCTION_SIGNATURE} __attribute__ ((unused));
${FUNCTION_SIGNATURE} {""")
template = Template("""
static base::subtle::AtomicWord g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME} = 0;
${FUNCTION_HEADER}
/* Must call RegisterNativesImpl() */
CHECK_CLAZZ(env, ${FIRST_PARAM_IN_CALL},
g_${JAVA_CLASS}_clazz${OPTIONAL_ERROR_RETURN});
jmethodID method_id =
${GET_METHOD_ID_IMPL}
${RETURN_DECLARATION}
${PRE_CALL}env->${ENV_CALL}(${FIRST_PARAM_IN_CALL},
method_id${PARAMS_IN_CALL})${POST_CALL};
${CHECK_EXCEPTION}
${RETURN_CLAUSE}
}""")
values = self.GetCalledByNativeValues(called_by_native)
values['FUNCTION_SIGNATURE'] = (
function_signature_template.substitute(values))
if called_by_native.system_class:
values['FUNCTION_HEADER'] = (
function_header_with_unused_template.substitute(values))
else:
values['FUNCTION_HEADER'] = function_header_template.substitute(values)
return template.substitute(values)
def GetKMethodArrayEntry(self, native):
template = Template("""\
{ "native${NAME}", ${JNI_SIGNATURE}, reinterpret_cast<void*>(${NAME}) },""")
values = {'NAME': native.name,
'JNI_SIGNATURE': JniParams.Signature(native.params,
native.return_type,
True)}
return template.substitute(values)
def GetUniqueClasses(self, origin):
ret = {self.class_name: self.fully_qualified_class}
for entry in origin:
class_name = self.class_name
jni_class_path = self.fully_qualified_class
if entry.java_class_name:
class_name = entry.java_class_name
jni_class_path = self.fully_qualified_class + '$' + class_name
ret[class_name] = jni_class_path
return ret
def GetClassPathDefinitions(self):
"""Returns the ClassPath constants."""
ret = []
template = Template("""\
const char k${JAVA_CLASS}ClassPath[] = "${JNI_CLASS_PATH}";""")
native_classes = self.GetUniqueClasses(self.natives)
called_by_native_classes = self.GetUniqueClasses(self.called_by_natives)
all_classes = native_classes
all_classes.update(called_by_native_classes)
for clazz in all_classes:
values = {
'JAVA_CLASS': clazz,
'JNI_CLASS_PATH': JniParams.RemapClassName(all_classes[clazz]),
}
ret += [template.substitute(values)]
    ret += ['']  # blank separator line before the jclass references
for clazz in called_by_native_classes:
template = Template("""\
// Leaking this jclass as we cannot use LazyInstance from some threads.
jclass g_${JAVA_CLASS}_clazz = NULL;""")
values = {
'JAVA_CLASS': clazz,
}
ret += [template.substitute(values)]
return '\n'.join(ret)
def GetFindClasses(self):
"""Returns the imlementation of FindClass for all known classes."""
if self.init_native:
template = Template("""\
g_${JAVA_CLASS}_clazz = static_cast<jclass>(env->NewWeakGlobalRef(clazz));""")
else:
template = Template("""\
g_${JAVA_CLASS}_clazz = reinterpret_cast<jclass>(env->NewGlobalRef(
base::android::GetClass(env, k${JAVA_CLASS}ClassPath).obj()));""")
ret = []
for clazz in self.GetUniqueClasses(self.called_by_natives):
values = {'JAVA_CLASS': clazz}
ret += [template.substitute(values)]
return '\n'.join(ret)
def GetMethodIDImpl(self, called_by_native):
"""Returns the implementation of GetMethodID."""
if self.options.eager_called_by_natives:
template = Template("""\
env->Get${STATIC_METHOD_PART}MethodID(
g_${JAVA_CLASS}_clazz,
"${JNI_NAME}", ${JNI_SIGNATURE});""")
else:
template = Template("""\
base::android::MethodID::LazyGet<
base::android::MethodID::TYPE_${STATIC}>(
env, g_${JAVA_CLASS}_clazz,
"${JNI_NAME}",
${JNI_SIGNATURE},
&g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME});
""")
jni_name = called_by_native.name
jni_return_type = called_by_native.return_type
if called_by_native.is_constructor:
jni_name = '<init>'
jni_return_type = 'void'
if called_by_native.signature:
signature = called_by_native.signature
else:
signature = JniParams.Signature(called_by_native.params,
jni_return_type,
True)
values = {
'JAVA_CLASS': called_by_native.java_class_name or self.class_name,
'JNI_NAME': jni_name,
'METHOD_ID_VAR_NAME': called_by_native.method_id_var_name,
'STATIC': 'STATIC' if called_by_native.static else 'INSTANCE',
'STATIC_METHOD_PART': 'Static' if called_by_native.static else '',
'JNI_SIGNATURE': signature,
}
return template.substitute(values)
def WrapOutput(output):
ret = []
for line in output.splitlines():
# Do not wrap lines under 80 characters or preprocessor directives.
if len(line) < 80 or line.lstrip()[:1] == '#':
stripped = line.rstrip()
if len(ret) == 0 or len(ret[-1]) or len(stripped):
ret.append(stripped)
else:
first_line_indent = ' ' * (len(line) - len(line.lstrip()))
subsequent_indent = first_line_indent + ' ' * 4
if line.startswith('//'):
subsequent_indent = '//' + subsequent_indent
wrapper = textwrap.TextWrapper(width=80,
subsequent_indent=subsequent_indent,
break_long_words=False)
ret += [wrapped.rstrip() for wrapped in wrapper.wrap(line)]
ret += ['']
return '\n'.join(ret)
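# Illustrative: WrapOutput folds a 100-character line at 80 columns, indenting
# continuation lines four extra spaces (and re-prefixing '//' for comments).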
def ExtractJarInputFile(jar_file, input_file, out_dir):
"""Extracts input file from jar and returns the filename.
The input file is extracted to the same directory that the generated jni
headers will be placed in. This is passed as an argument to script.
Args:
jar_file: the jar file containing the input files to extract.
    input_file: the file to extract from the jar file.
    out_dir: the directory to extract to.
Returns:
the name of extracted input file.
"""
jar_file = zipfile.ZipFile(jar_file)
out_dir = os.path.join(out_dir, os.path.dirname(input_file))
try:
os.makedirs(out_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
extracted_file_name = os.path.join(out_dir, os.path.basename(input_file))
with open(extracted_file_name, 'w') as outfile:
outfile.write(jar_file.read(input_file))
return extracted_file_name
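# Illustrative: ExtractJarInputFile('src.jar', 'org/example/Foo.class', 'out')
# extracts to, and returns, 'out/org/example/Foo.class'.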
def GenerateJNIHeader(input_file, output_file, options):
try:
if os.path.splitext(input_file)[1] == '.class':
jni_from_javap = JNIFromJavaP.CreateFromClass(input_file, options)
content = jni_from_javap.GetContent()
else:
jni_from_java_source = JNIFromJavaSource.CreateFromFile(
input_file, options)
content = jni_from_java_source.GetContent()
except ParseError, e:
print e
sys.exit(1)
if output_file:
if not os.path.exists(os.path.dirname(os.path.abspath(output_file))):
os.makedirs(os.path.dirname(os.path.abspath(output_file)))
if options.optimize_generation and os.path.exists(output_file):
with file(output_file, 'r') as f:
existing_content = f.read()
if existing_content == content:
return
with file(output_file, 'w') as f:
f.write(content)
else:
    print content
def GetScriptName():
script_components = os.path.abspath(sys.argv[0]).split(os.path.sep)
base_index = 0
for idx, value in enumerate(script_components):
if value == 'base' or value == 'third_party':
base_index = idx
break
return os.sep.join(script_components[base_index:])
def main(argv):
usage = """usage: %prog [OPTIONS]
This script will parse the given java source code extracting the native
declarations and print the header file to stdout (or a file).
See SampleForTests.java for more details.
"""
option_parser = optparse.OptionParser(usage=usage)
option_parser.add_option('-j', dest='jar_file',
help='Extract the list of input files from'
' a specified jar file.'
' Uses javap to extract the methods from a'
' pre-compiled class. --input should point'
' to pre-compiled Java .class files.')
option_parser.add_option('-n', dest='namespace',
help='Uses as a namespace in the generated header '
'instead of the javap class name, or when there is '
'no JNINamespace annotation in the java source.')
option_parser.add_option('--input_file',
help='Single input file name. The output file name '
'will be derived from it. Must be used with '
'--output_dir.')
  option_parser.add_option('--output_dir',
                           help='The output directory. Must be used with '
                           '--input_file')
option_parser.add_option('--optimize_generation', type="int",
default=0, help='Whether we should optimize JNI '
'generation by not regenerating files if they have '
'not changed.')
option_parser.add_option('--jarjar',
help='Path to optional jarjar rules file.')
option_parser.add_option('--script_name', default=GetScriptName(),
help='The name of this script in the generated '
'header.')
option_parser.add_option('--includes',
help='The comma-separated list of header files to '
'include in the generated header.')
option_parser.add_option('--pure_native_methods',
action='store_true', dest='pure_native_methods',
help='When true, the native methods will be called '
'without any JNI-specific arguments.')
option_parser.add_option('--ptr_type', default='int',
type='choice', choices=['int', 'long'],
help='The type used to represent native pointers in '
'Java code. For 32-bit, use int; '
'for 64-bit, use long.')
option_parser.add_option('--jni_init_native_name', default='',
help='The name of the JNI registration method that '
'is used to initialize all native methods. If a '
'method with this name is not present in the Java '
'source file, setting this option is a no-op. When '
'a method with this name is found however, the '
'naming convention Java_<packageName>_<className> '
'will limit the initialization to only the '
'top-level class.')
option_parser.add_option('--eager_called_by_natives',
action='store_true', dest='eager_called_by_natives',
help='When true, the called-by-native methods will '
'be initialized in a non-atomic way.')
option_parser.add_option('--cpp', default='cpp',
help='The path to cpp command.')
option_parser.add_option('--javap', default='javap',
help='The path to javap command.')
options, args = option_parser.parse_args(argv)
if options.jar_file:
input_file = ExtractJarInputFile(options.jar_file, options.input_file,
options.output_dir)
elif options.input_file:
input_file = options.input_file
else:
option_parser.print_help()
print '\nError: Must specify --jar_file or --input_file.'
return 1
output_file = None
if options.output_dir:
root_name = os.path.splitext(os.path.basename(input_file))[0]
output_file = os.path.join(options.output_dir, root_name) + '_jni.h'
if options.jarjar:
with open(options.jarjar) as f:
JniParams.SetJarJarMappings(f.read())
GenerateJNIHeader(input_file, output_file, options)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
yvaucher/purchase-workflow | __unported__/purchase_group_orders/purchase_group_orders.py | 4 | 9678 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Alexandre Fayolle
# Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp.osv.orm import Model, browse_record, browse_null
from openerp.osv import fields
from openerp import netsvc
class procurement_order(Model):
_inherit = 'procurement.order'
_columns = {'sale_id': fields.many2one('sale.order', 'Sale Order',
help='the sale order which generated the procurement'),
'origin': fields.char('Source Document', size=512,
help="Reference of the document that created this Procurement.\n"
"This is automatically completed by OpenERP."),
}
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
"""Create the purchase order from the procurement, using
the provided field values, after adding the given purchase
order line in the purchase order.
:params procurement: the procurement object generating the purchase order
:params dict po_vals: field values for the new purchase order (the
``order_line`` field will be overwritten with one
single line, as passed in ``line_vals``).
:params dict line_vals: field values of the single purchase order line that
the purchase order will contain.
:return: id of the newly created purchase order
:rtype: int
"""
po_vals.update({'order_line': [(0,0,line_vals)]})
if procurement.sale_id:
sale = procurement.sale_id
update = {'shop_id': sale.shop_id.id,
'carrier_id': sale.carrier_id.id}
po_vals.update(update)
return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
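    # Illustrative sketch (not part of the original module; ids are
    # hypothetical): for a procurement whose sale order has shop_id=1 and
    # carrier_id=2, the values passed to purchase.order create() end up as:
    #   po_vals = {'partner_id': 7,
    #              'order_line': [(0, 0, line_vals)],
    #              'shop_id': 1, 'carrier_id': 2}
    # Only 'order_line', 'shop_id' and 'carrier_id' are added here.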
class sale_order(Model):
_inherit = 'sale.order'
def _prepare_order_line_procurement(self, cr, uid, order, line, move_id, date_planned, context=None):
proc_data = super(sale_order, self)._prepare_order_line_procurement(cr, uid, order, line,
move_id, date_planned,
context)
proc_data['sale_id'] = order.id
return proc_data
class purchase_order(Model):
_inherit = 'purchase.order'
_columns = {
'shop_id': fields.many2one('sale.shop', 'Shop',
help='the shop which generated the sale which triggered the PO'),
'carrier_id': fields.many2one('delivery.carrier', 'Carrier',
help='the carrier in charge for delivering the related sale order'),
'carrier_partner_id': fields.related('carrier_id', 'partner_id',
type='many2one',
relation='res.partner',
string='Carrier Name',
readonly=True,
help="Name of the carrier partner in charge of delivering the related sale order"),
'origin': fields.char('Source Document', size=512,
help="Reference of the document that generated this purchase order request."),
}
def do_merge(self, cr, uid, ids, context=None):
"""
        To merge similar types of purchase orders.
Orders will only be merged if:
* Purchase Orders are in draft
* Purchase Orders belong to the same partner
* Purchase Orders have same stock location, same pricelist
* Purchase Orders have the same shop and the same carrier (NEW in this module)
Lines will only be merged if:
* Order lines are exactly the same except for the quantity and unit
"""
        #TOFIX: merged order lines should be unlinked
wf_service = netsvc.LocalService("workflow")
def make_key(br, fields):
list_key = []
for field in fields:
field_val = getattr(br, field)
if field in ('product_id', 'move_dest_id', 'account_analytic_id'):
if not field_val:
field_val = False
if isinstance(field_val, browse_record):
field_val = field_val.id
elif isinstance(field_val, browse_null):
field_val = False
elif isinstance(field_val, list):
field_val = ((6, 0, tuple([v.id for v in field_val])),)
list_key.append((field, field_val))
list_key.sort()
return tuple(list_key)
# compute what the new orders should contain
new_orders = {}
for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id',
'shop_id', 'carrier_id')) # added line
new_order = new_orders.setdefault(order_key, ({}, []))
new_order[1].append(porder.id)
order_infos = new_order[0]
if not order_infos:
order_infos.update({
'origin': porder.origin,
'date_order': porder.date_order,
'partner_id': porder.partner_id.id,
'partner_address_id': porder.partner_address_id.id,
'dest_address_id': porder.dest_address_id.id,
'warehouse_id': porder.warehouse_id.id,
'location_id': porder.location_id.id,
'pricelist_id': porder.pricelist_id.id,
'state': 'draft',
'order_line': {},
'notes': '%s' % (porder.notes or '',),
'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False,
'shop_id': porder.shop_id and porder.shop_id.id, # added line
'carrier_id': porder.carrier_id and porder.carrier_id.id, # added line
})
else:
if porder.date_order < order_infos['date_order']:
order_infos['date_order'] = porder.date_order
if porder.notes:
order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
if porder.origin:
order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
for order_line in porder.order_line:
line_key = make_key(order_line, ('name', 'date_planned', 'taxes_id', 'price_unit', 'notes', 'product_id', 'move_dest_id', 'account_analytic_id'))
o_line = order_infos['order_line'].setdefault(line_key, {})
if o_line:
# merge the line with an existing line
o_line['product_qty'] += order_line.product_qty * order_line.product_uom.factor / o_line['uom_factor']
else:
# append a new "standalone" line
for field in ('product_qty', 'product_uom'):
field_val = getattr(order_line, field)
if isinstance(field_val, browse_record):
field_val = field_val.id
o_line[field] = field_val
o_line['uom_factor'] = order_line.product_uom and order_line.product_uom.factor or 1.0
allorders = []
orders_info = {}
for order_key, (order_data, old_ids) in new_orders.iteritems():
# skip merges with only one order
if len(old_ids) < 2:
allorders += (old_ids or [])
continue
# cleanup order line data
for key, value in order_data['order_line'].iteritems():
del value['uom_factor']
value.update(dict(key))
order_data['order_line'] = [(0, 0, value) for value in order_data['order_line'].itervalues()]
# create the new order
neworder_id = self.create(cr, uid, order_data)
orders_info.update({neworder_id: old_ids})
allorders.append(neworder_id)
# make triggers pointing to the old orders point to the new order
for old_id in old_ids:
wf_service.trg_redirect(uid, 'purchase.order', old_id, neworder_id, cr)
wf_service.trg_validate(uid, 'purchase.order', old_id, 'purchase_cancel', cr)
return orders_info
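# Illustrative sketch (hypothetical ids): if draft orders 10 and 11 share
# the same partner, location, pricelist, shop and carrier, do_merge()
# creates one new order (say id 12) and returns {12: [10, 11]}; orders
# that matched nothing else stay untouched and are absent from the mapping.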
| agpl-3.0 |
glwu/python-for-android | python3-alpha/python3-src/Lib/distutils/tests/test_install_data.py | 147 | 2603 | """Tests for distutils.command.install_data."""
import sys
import os
import unittest
import getpass
from distutils.command.install_data import install_data
from distutils.tests import support
from test.support import run_unittest
class InstallDataTestCase(support.TempdirManager,
support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def test_simple_run(self):
pkg_dir, dist = self.create_dist()
cmd = install_data(dist)
cmd.install_dir = inst = os.path.join(pkg_dir, 'inst')
# data_files can contain
# - simple files
        #  - a tuple with a path, and a list of files
one = os.path.join(pkg_dir, 'one')
self.write_file(one, 'xxx')
inst2 = os.path.join(pkg_dir, 'inst2')
two = os.path.join(pkg_dir, 'two')
self.write_file(two, 'xxx')
cmd.data_files = [one, (inst2, [two])]
self.assertEqual(cmd.get_inputs(), [one, (inst2, [two])])
# let's run the command
cmd.ensure_finalized()
cmd.run()
# let's check the result
self.assertEqual(len(cmd.get_outputs()), 2)
rtwo = os.path.split(two)[-1]
self.assertTrue(os.path.exists(os.path.join(inst2, rtwo)))
rone = os.path.split(one)[-1]
self.assertTrue(os.path.exists(os.path.join(inst, rone)))
cmd.outfiles = []
# let's try with warn_dir one
cmd.warn_dir = 1
cmd.ensure_finalized()
cmd.run()
# let's check the result
self.assertEqual(len(cmd.get_outputs()), 2)
self.assertTrue(os.path.exists(os.path.join(inst2, rtwo)))
self.assertTrue(os.path.exists(os.path.join(inst, rone)))
cmd.outfiles = []
# now using root and empty dir
cmd.root = os.path.join(pkg_dir, 'root')
inst3 = os.path.join(cmd.install_dir, 'inst3')
inst4 = os.path.join(pkg_dir, 'inst4')
three = os.path.join(cmd.install_dir, 'three')
self.write_file(three, 'xx')
cmd.data_files = [one, (inst2, [two]),
('inst3', [three]),
(inst4, [])]
cmd.ensure_finalized()
cmd.run()
# let's check the result
self.assertEqual(len(cmd.get_outputs()), 4)
self.assertTrue(os.path.exists(os.path.join(inst2, rtwo)))
self.assertTrue(os.path.exists(os.path.join(inst, rone)))
def test_suite():
return unittest.makeSuite(InstallDataTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| apache-2.0 |
yugangw-msft/azure-cli | src/azure-cli-core/azure/cli/core/extension/tests/latest/test_extension_commands.py | 2 | 24452 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import tempfile
import unittest
import shutil
import hashlib
import mock
import sys
from azure.cli.core.util import CLIError
from azure.cli.core.extension import get_extension, build_extension_path
from azure.cli.core.extension.operations import (add_extension_to_path, list_extensions, add_extension,
show_extension, remove_extension, update_extension,
list_available_extensions, OUT_KEY_NAME, OUT_KEY_VERSION,
OUT_KEY_METADATA, OUT_KEY_PATH)
from azure.cli.core.extension._resolve import NoExtensionCandidatesError
from azure.cli.core.mock import DummyCli
from . import IndexPatch, mock_ext
def _get_test_data_file(filename):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', filename)
def _compute_file_hash(filename):
sha256 = hashlib.sha256()
with open(filename, 'rb') as f:
sha256.update(f.read())
return sha256.hexdigest()
MY_EXT_NAME = 'myfirstcliextension'
MY_EXT_SOURCE = _get_test_data_file('myfirstcliextension-0.0.3+dev-py2.py3-none-any.whl')
MY_BAD_EXT_SOURCE = _get_test_data_file('notanextension.txt')
MY_SECOND_EXT_NAME_DASHES = 'my-second-cli-extension'
MY_SECOND_EXT_SOURCE_DASHES = _get_test_data_file('my_second_cli_extension-0.0.1+dev-py2.py3-none-any.whl')
class TestExtensionCommands(unittest.TestCase):
def setUp(self):
self.ext_dir = tempfile.mkdtemp()
self.ext_sys_dir = tempfile.mkdtemp()
self.patchers = [mock.patch('azure.cli.core.extension.EXTENSIONS_DIR', self.ext_dir),
mock.patch('azure.cli.core.extension.EXTENSIONS_SYS_DIR', self.ext_sys_dir)]
for patcher in self.patchers:
patcher.start()
self.cmd = self._setup_cmd()
def tearDown(self):
for patcher in self.patchers:
patcher.stop()
shutil.rmtree(self.ext_dir, ignore_errors=True)
shutil.rmtree(self.ext_sys_dir, ignore_errors=True)
def test_no_extensions_dir(self):
shutil.rmtree(self.ext_dir)
actual = list_extensions()
self.assertEqual(len(actual), 0)
def test_no_extensions_in_dir(self):
actual = list_extensions()
self.assertEqual(len(actual), 0)
def test_add_list_show_remove_extension(self):
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)
actual = list_extensions()
self.assertEqual(len(actual), 1)
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME)
remove_extension(MY_EXT_NAME)
num_exts = len(list_extensions())
self.assertEqual(num_exts, 0)
def test_add_list_show_remove_system_extension(self):
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE, system=True)
actual = list_extensions()
self.assertEqual(len(actual), 1)
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME)
remove_extension(MY_EXT_NAME)
num_exts = len(list_extensions())
self.assertEqual(num_exts, 0)
def test_add_list_show_remove_user_system_extensions(self):
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)
add_extension(cmd=self.cmd, source=MY_SECOND_EXT_SOURCE_DASHES, system=True)
actual = list_extensions()
self.assertEqual(len(actual), 2)
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_PATH], build_extension_path(MY_EXT_NAME))
second_ext = show_extension(MY_SECOND_EXT_NAME_DASHES)
self.assertEqual(second_ext[OUT_KEY_NAME], MY_SECOND_EXT_NAME_DASHES)
self.assertEqual(second_ext[OUT_KEY_PATH], build_extension_path(MY_SECOND_EXT_NAME_DASHES, system=True))
remove_extension(MY_EXT_NAME)
num_exts = len(list_extensions())
self.assertEqual(num_exts, 1)
remove_extension(MY_SECOND_EXT_NAME_DASHES)
num_exts = len(list_extensions())
self.assertEqual(num_exts, 0)
def test_add_list_show_remove_extension_with_dashes(self):
add_extension(cmd=self.cmd, source=MY_SECOND_EXT_SOURCE_DASHES)
actual = list_extensions()
self.assertEqual(len(actual), 1)
ext = show_extension(MY_SECOND_EXT_NAME_DASHES)
self.assertEqual(ext[OUT_KEY_NAME], MY_SECOND_EXT_NAME_DASHES)
self.assertIn(OUT_KEY_NAME, ext[OUT_KEY_METADATA], "Unable to get full metadata")
self.assertEqual(ext[OUT_KEY_METADATA][OUT_KEY_NAME], MY_SECOND_EXT_NAME_DASHES)
remove_extension(MY_SECOND_EXT_NAME_DASHES)
num_exts = len(list_extensions())
self.assertEqual(num_exts, 0)
def test_add_extension_twice(self):
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)
num_exts = len(list_extensions())
self.assertEqual(num_exts, 1)
with self.assertRaises(CLIError):
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)
def test_add_same_extension_user_system(self):
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)
num_exts = len(list_extensions())
self.assertEqual(num_exts, 1)
with self.assertRaises(CLIError):
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE, system=True)
def test_add_extension_invalid(self):
with self.assertRaises(ValueError):
add_extension(cmd=self.cmd, source=MY_BAD_EXT_SOURCE)
actual = list_extensions()
self.assertEqual(len(actual), 0)
def test_add_extension_invalid_whl_name(self):
with self.assertRaises(CLIError):
add_extension(cmd=self.cmd, source=os.path.join('invalid', 'ext', 'path', 'file.whl'))
actual = list_extensions()
self.assertEqual(len(actual), 0)
def test_add_extension_valid_whl_name_filenotfound(self):
with self.assertRaises(CLIError):
add_extension(cmd=self.cmd, source=_get_test_data_file('mywheel-0.0.3+dev-py2.py3-none-any.whl'))
actual = list_extensions()
self.assertEqual(len(actual), 0)
def test_add_extension_with_pip_proxy(self):
extension_name = MY_EXT_NAME
proxy_param = '--proxy'
proxy_endpoint = "https://user:[email protected]"
computed_extension_sha256 = _compute_file_hash(MY_EXT_SOURCE)
with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)), \
mock.patch('azure.cli.core.extension.operations.shutil'), \
mock.patch('azure.cli.core.extension.operations.check_output') as check_output:
add_extension(cmd=self.cmd, extension_name=extension_name, pip_proxy=proxy_endpoint)
args = check_output.call_args
pip_cmd = args[0][0]
proxy_index = pip_cmd.index(proxy_param)
assert pip_cmd[proxy_index + 1] == proxy_endpoint
def test_add_extension_verify_no_pip_proxy(self):
extension_name = MY_EXT_NAME
computed_extension_sha256 = _compute_file_hash(MY_EXT_SOURCE)
with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)), \
mock.patch('azure.cli.core.extension.operations.shutil'), \
mock.patch('azure.cli.core.extension.operations.check_output') as check_output:
add_extension(cmd=self.cmd, extension_name=extension_name)
args = check_output.call_args
pip_cmd = args[0][0]
if '--proxy' in pip_cmd:
raise AssertionError("proxy parameter in check_output args although no proxy specified")
def test_add_extension_with_specific_version(self):
extension_name = MY_EXT_NAME
extension1 = 'myfirstcliextension-0.0.3+dev-py2.py3-none-any.whl'
extension2 = 'myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl'
mocked_index_data = {
extension_name: [
mock_ext(extension1, version='0.0.3+dev', download_url=_get_test_data_file(extension1)),
mock_ext(extension2, version='0.0.4+dev', download_url=_get_test_data_file(extension2))
]
}
with IndexPatch(mocked_index_data):
add_extension(self.cmd, extension_name=extension_name, version='0.0.3+dev')
ext = show_extension(extension_name)
self.assertEqual(ext['name'], extension_name)
self.assertEqual(ext['version'], '0.0.3+dev')
def test_add_extension_with_non_existing_version(self):
extension_name = MY_EXT_NAME
extension1 = 'myfirstcliextension-0.0.3+dev-py2.py3-none-any.whl'
extension2 = 'myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl'
mocked_index_data = {
extension_name: [
mock_ext(extension1, version='0.0.3+dev', download_url=_get_test_data_file(extension1)),
mock_ext(extension2, version='0.0.4+dev', download_url=_get_test_data_file(extension2))
]
}
non_existing_version = '0.0.5'
with IndexPatch(mocked_index_data):
with self.assertRaisesRegex(CLIError, non_existing_version):
add_extension(self.cmd, extension_name=extension_name, version=non_existing_version)
def test_add_extension_with_name_valid_checksum(self):
extension_name = MY_EXT_NAME
computed_extension_sha256 = _compute_file_hash(MY_EXT_SOURCE)
with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)):
add_extension(cmd=self.cmd, extension_name=extension_name)
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME)
def test_add_extension_with_name_invalid_checksum(self):
extension_name = MY_EXT_NAME
bad_sha256 = 'thishashisclearlywrong'
with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, bad_sha256)):
with self.assertRaises(CLIError) as err:
add_extension(cmd=self.cmd, extension_name=extension_name)
self.assertTrue('The checksum of the extension does not match the expected value.' in str(err.exception))
def test_add_extension_with_name_source_not_whl(self):
extension_name = 'myextension'
with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=('{}.notwhl'.format(extension_name), None)):
with self.assertRaises(ValueError) as err:
add_extension(cmd=self.cmd, extension_name=extension_name)
self.assertTrue('Unknown extension type. Only Python wheels are supported.' in str(err.exception))
def test_add_extension_with_name_but_it_already_exists(self):
# Add extension without name first
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME)
# Now add using name
computed_extension_sha256 = _compute_file_hash(MY_EXT_SOURCE)
with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)):
with mock.patch('azure.cli.core.extension.operations.logger') as mock_logger:
add_extension(cmd=self.cmd, extension_name=MY_EXT_NAME)
call_args = mock_logger.warning.call_args
self.assertEqual("Extension '%s' is already installed.", call_args[0][0])
self.assertEqual(MY_EXT_NAME, call_args[0][1])
self.assertEqual(mock_logger.warning.call_count, 1)
def test_update_extension(self):
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev')
newer_extension = _get_test_data_file('myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl')
computed_extension_sha256 = _compute_file_hash(newer_extension)
with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(newer_extension, computed_extension_sha256)):
update_extension(self.cmd, MY_EXT_NAME)
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_VERSION], '0.0.4+dev')
def test_update_extension_with_pip_proxy(self):
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev')
newer_extension = _get_test_data_file('myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl')
computed_extension_sha256 = _compute_file_hash(newer_extension)
proxy_param = '--proxy'
proxy_endpoint = "https://user:[email protected]"
with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)), \
mock.patch('azure.cli.core.extension.operations.shutil'), \
mock.patch('azure.cli.core.extension.operations.is_valid_sha256sum', return_value=(True, computed_extension_sha256)), \
mock.patch('azure.cli.core.extension.operations.extension_exists', return_value=None), \
mock.patch('azure.cli.core.extension.operations.check_output') as check_output:
update_extension(self.cmd, MY_EXT_NAME, pip_proxy=proxy_endpoint)
args = check_output.call_args
pip_cmd = args[0][0]
proxy_index = pip_cmd.index(proxy_param)
assert pip_cmd[proxy_index + 1] == proxy_endpoint
def test_update_extension_verify_no_pip_proxy(self):
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev')
newer_extension = _get_test_data_file('myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl')
computed_extension_sha256 = _compute_file_hash(newer_extension)
with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)), \
mock.patch('azure.cli.core.extension.operations.shutil'), \
mock.patch('azure.cli.core.extension.operations.is_valid_sha256sum', return_value=(True, computed_extension_sha256)), \
mock.patch('azure.cli.core.extension.operations.extension_exists', return_value=None), \
mock.patch('azure.cli.core.extension.operations.check_output') as check_output:
update_extension(self.cmd, MY_EXT_NAME)
args = check_output.call_args
pip_cmd = args[0][0]
if '--proxy' in pip_cmd:
raise AssertionError("proxy parameter in check_output args although no proxy specified")
def test_update_extension_not_found(self):
with self.assertRaises(CLIError) as err:
update_extension(self.cmd, MY_EXT_NAME)
self.assertEqual(str(err.exception), 'The extension {} is not installed.'.format(MY_EXT_NAME))
def test_update_extension_no_updates(self):
logger_msgs = []
def mock_log_warning(_, msg):
logger_msgs.append(msg)
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev')
with mock.patch('azure.cli.core.extension.operations.resolve_from_index', side_effect=NoExtensionCandidatesError()), \
mock.patch('logging.Logger.warning', mock_log_warning):
update_extension(self.cmd, MY_EXT_NAME)
self.assertTrue("No updates available for '{}'.".format(MY_EXT_NAME) in logger_msgs[0])
def test_update_extension_exception_in_update_and_rolled_back(self):
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev')
newer_extension = _get_test_data_file('myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl')
bad_sha256 = 'thishashisclearlywrong'
with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(newer_extension, bad_sha256)):
with self.assertRaises(CLIError) as err:
update_extension(self.cmd, MY_EXT_NAME)
self.assertTrue('Failed to update. Rolled {} back to {}.'.format(ext['name'], ext[OUT_KEY_VERSION]) in str(err.exception))
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev')
def test_list_available_extensions_default(self):
with mock.patch('azure.cli.core.extension.operations.get_index_extensions', autospec=True) as c:
list_available_extensions(cli_ctx=self.cmd.cli_ctx)
c.assert_called_once_with(None, self.cmd.cli_ctx)
def test_list_available_extensions_operations_index_url(self):
with mock.patch('azure.cli.core.extension.operations.get_index_extensions', autospec=True) as c:
index_url = 'http://contoso.com'
list_available_extensions(index_url=index_url, cli_ctx=self.cmd.cli_ctx)
c.assert_called_once_with(index_url, self.cmd.cli_ctx)
def test_list_available_extensions_show_details(self):
with mock.patch('azure.cli.core.extension.operations.get_index_extensions', autospec=True) as c:
list_available_extensions(show_details=True, cli_ctx=self.cmd.cli_ctx)
c.assert_called_once_with(None, self.cmd.cli_ctx)
def test_list_available_extensions_no_show_details(self):
sample_index_extensions = {
'test_sample_extension1': [{
'metadata': {
'name': 'test_sample_extension1',
'summary': 'my summary',
'version': '0.1.0'
}}],
'test_sample_extension2': [{
'metadata': {
'name': 'test_sample_extension2',
'summary': 'my summary',
'version': '0.1.0',
'azext.isPreview': True,
'azext.isExperimental': True
}}]
}
with mock.patch('azure.cli.core.extension.operations.get_index_extensions', return_value=sample_index_extensions):
res = list_available_extensions(cli_ctx=self.cmd.cli_ctx)
self.assertIsInstance(res, list)
self.assertEqual(len(res), len(sample_index_extensions))
self.assertEqual(res[0]['name'], 'test_sample_extension1')
self.assertEqual(res[0]['summary'], 'my summary')
self.assertEqual(res[0]['version'], '0.1.0')
self.assertEqual(res[0]['preview'], False)
self.assertEqual(res[0]['experimental'], False)
with mock.patch('azure.cli.core.extension.operations.get_index_extensions', return_value=sample_index_extensions):
res = list_available_extensions(cli_ctx=self.cmd.cli_ctx)
self.assertIsInstance(res, list)
self.assertEqual(len(res), len(sample_index_extensions))
self.assertEqual(res[1]['name'], 'test_sample_extension2')
self.assertEqual(res[1]['summary'], 'my summary')
self.assertEqual(res[1]['version'], '0.1.0')
self.assertEqual(res[1]['preview'], True)
self.assertEqual(res[1]['experimental'], True)
def test_list_available_extensions_incompatible_cli_version(self):
sample_index_extensions = {
'test_sample_extension1': [{
'metadata': {
"azext.maxCliCoreVersion": "0.0.0",
'name': 'test_sample_extension1',
'summary': 'my summary',
'version': '0.1.0'
}}]
}
with mock.patch('azure.cli.core.extension.operations.get_index_extensions', return_value=sample_index_extensions):
res = list_available_extensions(cli_ctx=self.cmd.cli_ctx)
self.assertIsInstance(res, list)
self.assertEqual(len(res), 0)
def test_add_list_show_remove_extension_extra_index_url(self):
"""
Tests extension addition while specifying --extra-index-url parameter.
:return:
"""
extra_index_urls = ['https://testpypi.python.org/simple', 'https://pypi.python.org/simple']
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE, pip_extra_index_urls=extra_index_urls)
actual = list_extensions()
self.assertEqual(len(actual), 1)
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME)
remove_extension(MY_EXT_NAME)
num_exts = len(list_extensions())
self.assertEqual(num_exts, 0)
def test_update_extension_extra_index_url(self):
"""
Tests extension update while specifying --extra-index-url parameter.
:return:
"""
extra_index_urls = ['https://testpypi.python.org/simple', 'https://pypi.python.org/simple']
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE, pip_extra_index_urls=extra_index_urls)
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev')
newer_extension = _get_test_data_file('myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl')
computed_extension_sha256 = _compute_file_hash(newer_extension)
with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(newer_extension, computed_extension_sha256)):
update_extension(self.cmd, MY_EXT_NAME, pip_extra_index_urls=extra_index_urls)
ext = show_extension(MY_EXT_NAME)
self.assertEqual(ext[OUT_KEY_VERSION], '0.0.4+dev')
def test_add_extension_to_path(self):
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)
num_exts = len(list_extensions())
self.assertEqual(num_exts, 1)
ext = get_extension('myfirstcliextension')
old_path = sys.path[:]
try:
add_extension_to_path(ext.name)
self.assertSequenceEqual(old_path, sys.path[:-1])
self.assertEqual(ext.path, sys.path[-1])
finally:
sys.path[:] = old_path
def test_add_extension_azure_to_path(self):
import azure
import azure.mgmt
old_path_0 = list(sys.path)
old_path_1 = list(azure.__path__)
old_path_2 = list(azure.mgmt.__path__)
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)
ext = get_extension('myfirstcliextension')
azure_dir = os.path.join(ext.path, "azure")
azure_mgmt_dir = os.path.join(azure_dir, "mgmt")
os.mkdir(azure_dir)
os.mkdir(azure_mgmt_dir)
try:
add_extension_to_path(ext.name)
new_path_1 = list(azure.__path__)
new_path_2 = list(azure.mgmt.__path__)
finally:
sys.path.remove(ext.path)
remove_extension(ext.name)
if isinstance(azure.__path__, list):
azure.__path__[:] = old_path_1
else:
list(azure.__path__)
if isinstance(azure.mgmt.__path__, list):
azure.mgmt.__path__[:] = old_path_2
else:
list(azure.mgmt.__path__)
self.assertSequenceEqual(old_path_1, new_path_1[:-1])
self.assertSequenceEqual(old_path_2, new_path_2[:-1])
self.assertEqual(azure_dir, new_path_1[-1])
self.assertEqual(azure_mgmt_dir, new_path_2[-1])
self.assertSequenceEqual(old_path_0, list(sys.path))
self.assertSequenceEqual(old_path_1, list(azure.__path__))
self.assertSequenceEqual(old_path_2, list(azure.mgmt.__path__))
def _setup_cmd(self):
cmd = mock.MagicMock()
cmd.cli_ctx = DummyCli()
return cmd
if __name__ == '__main__':
unittest.main()
| mit |
openstack/taskflow | taskflow/examples/resume_many_flows/run_flow.py | 7 | 1433 | # -*- coding: utf-8 -*-
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import sys
logging.basicConfig(level=logging.ERROR)
self_dir = os.path.abspath(os.path.dirname(__file__))
top_dir = os.path.abspath(
os.path.join(self_dir, os.pardir, os.pardir, os.pardir))
example_dir = os.path.abspath(os.path.join(self_dir, os.pardir))
sys.path.insert(0, top_dir)
sys.path.insert(0, self_dir)
sys.path.insert(0, example_dir)
import taskflow.engines
import example_utils # noqa
import my_flows # noqa
with example_utils.get_backend() as backend:
engine = taskflow.engines.load_from_factory(my_flows.flow_factory,
backend=backend)
print('Running flow %s %s' % (engine.storage.flow_name,
engine.storage.flow_uuid))
engine.run()
| apache-2.0 |
jehine-MSFT/azure-storage-python | tests/blob_performance.py | 4 | 5539 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import os
import datetime
import sys
from azure.storage.blob import (
BlockBlobService,
PageBlobService,
AppendBlobService,
)
import tests.settings_real as settings
# Warning:
# This script will take a while to run with everything enabled.
# Edit the lists below to enable only the blob sizes and connection
# counts that you are interested in.
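# For a quick smoke test you might shrink the matrix, e.g. (illustrative
# values only, not part of the original script):
#   LOCAL_BLOCK_BLOB_FILES = [('BLOC-0008M+000B', 8, 0)]
#   CONNECTION_COUNTS = [1, 2]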
# NAME, SIZE (MB), +ADD SIZE (B)
LOCAL_BLOCK_BLOB_FILES = [
('BLOC-0080M+000B', 80, 0),
('BLOC-0080M+013B', 80, 13),
('BLOC-0500M+000B', 500, 0),
('BLOC-2500M+000B', 2500, 0),
]
LOCAL_PAGE_BLOB_FILES = [
('PAGE-0072M+000B', 72, 0),
('PAGE-0072M+512B', 72, 512),
('PAGE-0500M+000B', 500, 0),
('PAGE-2500M+000B', 2500, 0),
]
LOCAL_APPEND_BLOB_FILES = [
    ('APPD-0080M+000B', 80, 0),
    ('APPD-0080M+013B', 80, 13),
('APPD-0500M+000B', 500, 0),
('APPD-2500M+000B', 2500, 0),
]
CONNECTION_COUNTS = [1, 2, 5, 10, 50]
CONTAINER_NAME = 'performance'
def input_file(name):
return 'input-' + name
def output_file(name):
return 'output-' + name
def create_random_content_file(name, size_in_megs, additional_byte_count=0):
file_name = input_file(name)
if not os.path.exists(file_name):
print('generating {0}'.format(name))
with open(file_name, 'wb') as stream:
for i in range(size_in_megs):
stream.write(os.urandom(1048576))
if additional_byte_count > 0:
stream.write(os.urandom(additional_byte_count))
def upload_blob(service, name, connections):
blob_name = name
file_name = input_file(name)
sys.stdout.write('\tUp:')
start_time = datetime.datetime.now()
if isinstance(service, BlockBlobService):
service.create_blob_from_path(
CONTAINER_NAME, blob_name, file_name, max_connections=connections)
elif isinstance(service, PageBlobService):
service.create_blob_from_path(
CONTAINER_NAME, blob_name, file_name, max_connections=connections)
elif isinstance(service, AppendBlobService):
service.append_blob_from_path(
CONTAINER_NAME, blob_name, file_name, max_connections=connections)
else:
service.create_blob_from_path(
CONTAINER_NAME, blob_name, file_name, max_connections=connections)
elapsed_time = datetime.datetime.now() - start_time
sys.stdout.write('{0}s'.format(elapsed_time.total_seconds()))
def download_blob(service, name, connections):
blob_name = name
target_file_name = output_file(name)
if os.path.exists(target_file_name):
os.remove(target_file_name)
sys.stdout.write('\tDn:')
start_time = datetime.datetime.now()
service.get_blob_to_path(
CONTAINER_NAME, blob_name, target_file_name, max_connections=connections)
elapsed_time = datetime.datetime.now() - start_time
sys.stdout.write('{0}s'.format(elapsed_time.total_seconds()))
def file_contents_equal(first_file_path, second_file_path):
    first_size = os.path.getsize(first_file_path)
second_size = os.path.getsize(second_file_path)
if first_size != second_size:
return False
with open(first_file_path, 'rb') as first_stream:
with open(second_file_path, 'rb') as second_stream:
while True:
first_data = first_stream.read(1048576)
second_data = second_stream.read(1048576)
if first_data != second_data:
return False
if not first_data:
return True
def compare_files(name):
first_file_path = input_file(name)
second_file_path = output_file(name)
sys.stdout.write('\tCmp:')
if file_contents_equal(first_file_path, second_file_path):
sys.stdout.write('ok')
else:
sys.stdout.write('ERR!')
def process(service, blobs, counts):
for name, size_in_megs, additional in blobs:
create_random_content_file(name, size_in_megs, additional)
for name, _, _ in blobs:
for max_conn in counts:
sys.stdout.write('{0}\tParallel:{1}'.format(name, max_conn))
upload_blob(service, name, max_conn)
download_blob(service, name, max_conn)
compare_files(name)
print('')
print('')
def main():
bbs = BlockBlobService(settings.STORAGE_ACCOUNT_NAME, settings.STORAGE_ACCOUNT_KEY)
pbs = PageBlobService(settings.STORAGE_ACCOUNT_NAME, settings.STORAGE_ACCOUNT_KEY)
abs = AppendBlobService(settings.STORAGE_ACCOUNT_NAME, settings.STORAGE_ACCOUNT_KEY)
    bbs.create_container(CONTAINER_NAME)
process(bbs, LOCAL_BLOCK_BLOB_FILES, CONNECTION_COUNTS)
process(pbs, LOCAL_PAGE_BLOB_FILES, CONNECTION_COUNTS)
process(abs, LOCAL_APPEND_BLOB_FILES, CONNECTION_COUNTS)
if __name__ == '__main__':
main()
| apache-2.0 |
MeirKriheli/Open-Knesset | mks/urls.py | 14 | 2520 | from django.conf import settings
from django.conf.urls import url, patterns
from . import views as mkv
from feeds import MemberActivityFeed
mksurlpatterns = patterns('mks.views',
url(r'^parties-members/$', mkv.PartiesMembersRedirctView.as_view(), name='parties-members-index'),
url(r'^parties-members/(?P<pk>\d+)/$', mkv.PartiesMembersView.as_view(), name='parties-members-list'),
url(r'^member/$', mkv.MemberRedirectView.as_view(), name='member-list'),
url(r'^member/csv$', mkv.MemberCsvView.as_view()),
url(r'^party/csv$', mkv.PartyCsvView.as_view()),
url(r'^member/(?P<pk>\d+)/$', 'mk_detail', name='member-detail'),
url(r'^member/(?P<pk>\d+)/embed/$', mkv.MemberEmbedView.as_view(), name='member-embed'),
# "more" actions
url(r'^member/(?P<pk>\d+)/more_actions/$', mkv.MemeberMoreActionsView.as_view(), name='member-more-actions'),
url(r'^member/(?P<pk>\d+)/more_legislation/$', mkv.MemeberMoreLegislationView.as_view(), name='member-more-legislation'),
url(r'^member/(?P<pk>\d+)/more_committee/$', mkv.MemeberMoreCommitteeView.as_view(), name='member-more-committees'),
url(r'^member/(?P<pk>\d+)/more_plenum/$', mkv.MemeberMorePlenumView.as_view(), name='member-more-plenums'),
url(r'^member/(?P<pk>\d+)/more_mmm/$', mkv.MemeberMoreMMMView.as_view(), name='member-more-mmm'),
url(r'^member/(?P<object_id>\d+)/rss/$', MemberActivityFeed(), name='member-activity-feed'),
url(r'^member/(?P<pk>\d+)/(?P<slug>[\w\-\"]+)/$', 'mk_detail', name='member-detail-with-slug'),
    # TODO: the next URL is hardcoded in a JS file
url(r'^member/auto_complete/$', mkv.member_auto_complete, name='member-auto-complete'),
url(r'^member/search/?$', mkv.member_by_name, name='member-by-name'),
url(r'^member/by/(?P<stat_type>' + '|'.join(x[0] for x in mkv.MemberListView.pages) + ')/$', mkv.MemberListView.as_view(), name='member-stats'),
# a JS view for adding mks tooltips on a page
url(r'^member/tooltip.js', mkv.members_tooltips, name='member-tooltip'),
url(r'^party/$', mkv.PartyRedirectView.as_view(), name='party-list'),
url(r'^party/(?P<pk>\d+)/$', mkv.PartyDetailView.as_view(), name='party-detail'),
url(r'^party/(?P<pk>\d+)/(?P<slug>[\w\-\"]+)/$', mkv.PartyDetailView.as_view(), name='party-detail-with-slug'),
url(r'^party/by/(?P<stat_type>' + '|'.join(x[0] for x in mkv.PartyListView.pages) + ')/$', mkv.PartyListView.as_view(), name='party-stats'),
url(r'^party/search/?$', mkv.party_by_name, name='party-by-name'),
)
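# Illustrative note (hypothetical page keys): if MemberListView.pages held
# entries keyed 'bills' and 'votes', the dynamic pattern above would expand
# to r'^member/by/(?P<stat_type>bills|votes)/$'; PartyListView.pages is
# joined the same way for the party stats URL.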
| bsd-3-clause |
Froggiewalker/geonode | geonode/base/enumerations.py | 15 | 13719 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.utils.translation import ugettext_lazy as _
LINK_TYPES = ['original', 'data', 'image', 'metadata', 'html',
'OGC:WMS', 'OGC:WFS', 'OGC:WCS']
HIERARCHY_LEVELS = (
('series', _('series')),
('software', _('computer program or routine')),
('featureType', _('feature type')),
('model', _('copy or imitation of an existing or hypothetical object')),
('collectionHardware', _('collection hardware')),
('collectionSession', _('collection session')),
('nonGeographicDataset', _('non-geographic data')),
('propertyType', _('property type')),
('fieldSession', _('field session')),
('dataset', _('dataset')),
('service', _('service interfaces')),
('attribute', _('attribute class')),
('attributeType', _('characteristic of a feature')),
('tile', _('tile or spatial subset of geographic data')),
('feature', _('feature')),
('dimensionGroup', _('dimension group')),
)
UPDATE_FREQUENCIES = (
('unknown', _('frequency of maintenance for the data is not known')),
('continual', _('data is repeatedly and frequently updated')),
('notPlanned', _('there are no plans to update the data')),
('daily', _('data is updated each day')),
('annually', _('data is updated every year')),
('asNeeded', _('data is updated as deemed necessary')),
('monthly', _('data is updated each month')),
('fortnightly', _('data is updated every two weeks')),
('irregular',
_('data is updated in intervals that are uneven in duration')),
('weekly', _('data is updated on a weekly basis')),
('biannually', _('data is updated twice each year')),
('quarterly', _('data is updated every three months')),
)
CONTACT_FIELDS = [
'name',
'organization',
'position',
'voice',
'facsimile',
'delivery_point',
'city',
'administrative_area',
'postal_code',
'country',
'email',
'role'
]
DEFAULT_SUPPLEMENTAL_INFORMATION = _(
    'No information provided'
)
COUNTRIES = (
('AFG', 'Afghanistan'),
('ALA', 'Aland Islands'),
('ALB', 'Albania'),
('DZA', 'Algeria'),
('ASM', 'American Samoa'),
('AND', 'Andorra'),
('AGO', 'Angola'),
('AIA', 'Anguilla'),
('ATG', 'Antigua and Barbuda'),
('ARG', 'Argentina'),
('ARM', 'Armenia'),
('ABW', 'Aruba'),
('AUS', 'Australia'),
('AUT', 'Austria'),
('AZE', 'Azerbaijan'),
('BHS', 'Bahamas'),
('BHR', 'Bahrain'),
('BGD', 'Bangladesh'),
('BRB', 'Barbados'),
('BLR', 'Belarus'),
('BEL', 'Belgium'),
('BLZ', 'Belize'),
('BEN', 'Benin'),
('BMU', 'Bermuda'),
('BTN', 'Bhutan'),
('BOL', 'Bolivia'),
('BIH', 'Bosnia and Herzegovina'),
('BWA', 'Botswana'),
('BRA', 'Brazil'),
('VGB', 'British Virgin Islands'),
('BRN', 'Brunei Darussalam'),
('BGR', 'Bulgaria'),
('BFA', 'Burkina Faso'),
('BDI', 'Burundi'),
('KHM', 'Cambodia'),
('CMR', 'Cameroon'),
('CAN', 'Canada'),
('CPV', 'Cape Verde'),
('CYM', 'Cayman Islands'),
('CAF', 'Central African Republic'),
('TCD', 'Chad'),
('CIL', 'Channel Islands'),
('CHL', 'Chile'),
('CHN', 'China'),
('HKG', 'China - Hong Kong'),
('MAC', 'China - Macao'),
('COL', 'Colombia'),
('COM', 'Comoros'),
('COG', 'Congo'),
('COK', 'Cook Islands'),
('CRI', 'Costa Rica'),
('CIV', 'Cote d\'Ivoire'),
('HRV', 'Croatia'),
('CUB', 'Cuba'),
('CYP', 'Cyprus'),
('CZE', 'Czech Republic'),
('PRK', 'Democratic People\'s Republic of Korea'),
('COD', 'Democratic Republic of the Congo'),
('DNK', 'Denmark'),
('DJI', 'Djibouti'),
('DMA', 'Dominica'),
('DOM', 'Dominican Republic'),
('ECU', 'Ecuador'),
('EGY', 'Egypt'),
('SLV', 'El Salvador'),
('GNQ', 'Equatorial Guinea'),
('ERI', 'Eritrea'),
('EST', 'Estonia'),
('ETH', 'Ethiopia'),
('FRO', 'Faeroe Islands'),
('FLK', 'Falkland Islands (Malvinas)'),
('FJI', 'Fiji'),
('FIN', 'Finland'),
('FRA', 'France'),
('GUF', 'French Guiana'),
('PYF', 'French Polynesia'),
('GAB', 'Gabon'),
('GMB', 'Gambia'),
('GEO', 'Georgia'),
('DEU', 'Germany'),
('GHA', 'Ghana'),
('GIB', 'Gibraltar'),
('GRC', 'Greece'),
('GRL', 'Greenland'),
('GRD', 'Grenada'),
('GLP', 'Guadeloupe'),
('GUM', 'Guam'),
('GTM', 'Guatemala'),
('GGY', 'Guernsey'),
('GIN', 'Guinea'),
('GNB', 'Guinea-Bissau'),
('GUY', 'Guyana'),
('HTI', 'Haiti'),
('VAT', 'Holy See (Vatican City)'),
('HND', 'Honduras'),
('HUN', 'Hungary'),
('ISL', 'Iceland'),
('IND', 'India'),
('IDN', 'Indonesia'),
('IRN', 'Iran'),
('IRQ', 'Iraq'),
('IRL', 'Ireland'),
('IMN', 'Isle of Man'),
('ISR', 'Israel'),
('ITA', 'Italy'),
('JAM', 'Jamaica'),
('JPN', 'Japan'),
('JEY', 'Jersey'),
('JOR', 'Jordan'),
('KAZ', 'Kazakhstan'),
('KEN', 'Kenya'),
('KIR', 'Kiribati'),
('KWT', 'Kuwait'),
('KGZ', 'Kyrgyzstan'),
('LAO', 'Lao People\'s Democratic Republic'),
('LVA', 'Latvia'),
('LBN', 'Lebanon'),
('LSO', 'Lesotho'),
('LBR', 'Liberia'),
('LBY', 'Libyan Arab Jamahiriya'),
('LIE', 'Liechtenstein'),
('LTU', 'Lithuania'),
('LUX', 'Luxembourg'),
('MKD', 'Macedonia'),
('MDG', 'Madagascar'),
('MWI', 'Malawi'),
('MYS', 'Malaysia'),
('MDV', 'Maldives'),
('MLI', 'Mali'),
('MLT', 'Malta'),
('MHL', 'Marshall Islands'),
('MTQ', 'Martinique'),
('MRT', 'Mauritania'),
('MUS', 'Mauritius'),
('MYT', 'Mayotte'),
('MEX', 'Mexico'),
('FSM', 'Micronesia, Federated States of'),
('MCO', 'Monaco'),
('MNG', 'Mongolia'),
('MNE', 'Montenegro'),
('MSR', 'Montserrat'),
('MAR', 'Morocco'),
('MOZ', 'Mozambique'),
('MMR', 'Myanmar'),
('NAM', 'Namibia'),
('NRU', 'Nauru'),
('NPL', 'Nepal'),
('NLD', 'Netherlands'),
('ANT', 'Netherlands Antilles'),
('NCL', 'New Caledonia'),
('NZL', 'New Zealand'),
('NIC', 'Nicaragua'),
('NER', 'Niger'),
('NGA', 'Nigeria'),
('NIU', 'Niue'),
('NFK', 'Norfolk Island'),
('MNP', 'Northern Mariana Islands'),
('NOR', 'Norway'),
('PSE', 'Occupied Palestinian Territory'),
('OMN', 'Oman'),
('PAK', 'Pakistan'),
('PLW', 'Palau'),
('PAN', 'Panama'),
('PNG', 'Papua New Guinea'),
('PRY', 'Paraguay'),
('PER', 'Peru'),
('PHL', 'Philippines'),
('PCN', 'Pitcairn'),
('POL', 'Poland'),
('PRT', 'Portugal'),
('PRI', 'Puerto Rico'),
('QAT', 'Qatar'),
('KOR', 'Republic of Korea'),
('MDA', 'Republic of Moldova'),
('REU', 'Reunion'),
('ROU', 'Romania'),
('RUS', 'Russian Federation'),
('RWA', 'Rwanda'),
('BLM', 'Saint-Barthelemy'),
('SHN', 'Saint Helena'),
('KNA', 'Saint Kitts and Nevis'),
('LCA', 'Saint Lucia'),
('MAF', 'Saint-Martin (French part)'),
('SPM', 'Saint Pierre and Miquelon'),
('VCT', 'Saint Vincent and the Grenadines'),
('WSM', 'Samoa'),
('SMR', 'San Marino'),
('STP', 'Sao Tome and Principe'),
('SAU', 'Saudi Arabia'),
('SEN', 'Senegal'),
('SRB', 'Serbia'),
('SYC', 'Seychelles'),
('SLE', 'Sierra Leone'),
('SGP', 'Singapore'),
('SVK', 'Slovakia'),
('SVN', 'Slovenia'),
('SLB', 'Solomon Islands'),
('SOM', 'Somalia'),
('ZAF', 'South Africa'),
('SSD', 'South Sudan'),
('ESP', 'Spain'),
('LKA', 'Sri Lanka'),
('SDN', 'Sudan'),
('SUR', 'Suriname'),
('SJM', 'Svalbard and Jan Mayen Islands'),
('SWZ', 'Swaziland'),
('SWE', 'Sweden'),
('CHE', 'Switzerland'),
('SYR', 'Syrian Arab Republic'),
('TJK', 'Tajikistan'),
('THA', 'Thailand'),
('TLS', 'Timor-Leste'),
('TGO', 'Togo'),
('TKL', 'Tokelau'),
('TON', 'Tonga'),
('TTO', 'Trinidad and Tobago'),
('TUN', 'Tunisia'),
('TUR', 'Turkey'),
('TKM', 'Turkmenistan'),
('TCA', 'Turks and Caicos Islands'),
('TUV', 'Tuvalu'),
('UGA', 'Uganda'),
('UKR', 'Ukraine'),
('ARE', 'United Arab Emirates'),
('GBR', 'United Kingdom'),
('TZA', 'United Republic of Tanzania'),
('USA', 'United States of America'),
('VIR', 'United States Virgin Islands'),
('URY', 'Uruguay'),
('UZB', 'Uzbekistan'),
('VUT', 'Vanuatu'),
('VEN', 'Venezuela (Bolivarian Republic of)'),
('VNM', 'Viet Nam'),
('WLF', 'Wallis and Futuna Islands'),
('ESH', 'Western Sahara'),
('YEM', 'Yemen'),
('ZMB', 'Zambia'),
('ZWE', 'Zimbabwe'),
)
# Taken from http://www.w3.org/WAI/ER/IG/ert/iso639.htm
ALL_LANGUAGES = (
('abk', 'Abkhazian'),
('aar', 'Afar'),
('afr', 'Afrikaans'),
('amh', 'Amharic'),
('ara', 'Arabic'),
('asm', 'Assamese'),
('aym', 'Aymara'),
('aze', 'Azerbaijani'),
('bak', 'Bashkir'),
('ben', 'Bengali'),
('bih', 'Bihari'),
('bis', 'Bislama'),
('bre', 'Breton'),
('bul', 'Bulgarian'),
('bel', 'Byelorussian'),
('cat', 'Catalan'),
('cos', 'Corsican'),
('dan', 'Danish'),
('dzo', 'Dzongkha'),
('eng', 'English'),
('fra', 'French'),
('epo', 'Esperanto'),
('est', 'Estonian'),
('fao', 'Faroese'),
('fij', 'Fijian'),
('fin', 'Finnish'),
('fry', 'Frisian'),
('glg', 'Gallegan'),
('kal', 'Greenlandic'),
('grn', 'Guarani'),
('guj', 'Gujarati'),
('hau', 'Hausa'),
('heb', 'Hebrew'),
('hin', 'Hindi'),
('hun', 'Hungarian'),
('ind', 'Indonesian'),
('ina', 'Interlingua (International Auxiliary language Association)'),
('iku', 'Inuktitut'),
('ipk', 'Inupiak'),
('ita', 'Italian'),
('jpn', 'Japanese'),
('kan', 'Kannada'),
('kas', 'Kashmiri'),
('kaz', 'Kazakh'),
('khm', 'Khmer'),
('kin', 'Kinyarwanda'),
('kir', 'Kirghiz'),
('kor', 'Korean'),
('kur', 'Kurdish'),
    ('oci', 'Langue d\'Oc (post 1500)'),
('lao', 'Lao'),
('lat', 'Latin'),
('lav', 'Latvian'),
('lin', 'Lingala'),
('lit', 'Lithuanian'),
('mlg', 'Malagasy'),
('mlt', 'Maltese'),
('mar', 'Marathi'),
('mol', 'Moldavian'),
('mon', 'Mongolian'),
('nau', 'Nauru'),
('nep', 'Nepali'),
('nor', 'Norwegian'),
('ori', 'Oriya'),
('orm', 'Oromo'),
('pan', 'Panjabi'),
('pol', 'Polish'),
('por', 'Portuguese'),
('pus', 'Pushto'),
('que', 'Quechua'),
('roh', 'Rhaeto-Romance'),
('run', 'Rundi'),
('rus', 'Russian'),
('smo', 'Samoan'),
('sag', 'Sango'),
('san', 'Sanskrit'),
('scr', 'Serbo-Croatian'),
('sna', 'Shona'),
('snd', 'Sindhi'),
('sin', 'Singhalese'),
('ssw', 'Siswant'),
('slv', 'Slovenian'),
('som', 'Somali'),
('sot', 'Sotho'),
('spa', 'Spanish'),
('sun', 'Sudanese'),
('swa', 'Swahili'),
('tgl', 'Tagalog'),
('tgk', 'Tajik'),
('tam', 'Tamil'),
('tat', 'Tatar'),
('tel', 'Telugu'),
('tha', 'Thai'),
('tir', 'Tigrinya'),
('tog', 'Tonga (Nyasa)'),
('tso', 'Tsonga'),
('tsn', 'Tswana'),
('tur', 'Turkish'),
('tuk', 'Turkmen'),
('twi', 'Twi'),
('uig', 'Uighur'),
('ukr', 'Ukrainian'),
('urd', 'Urdu'),
('uzb', 'Uzbek'),
('vie', 'Vietnamese'),
('vol', 'Volapük'),
('wol', 'Wolof'),
('xho', 'Xhosa'),
('yid', 'Yiddish'),
('yor', 'Yoruba'),
('zha', 'Zhuang'),
('zul', 'Zulu'),
)
CHARSETS = (('', 'None/Unknown'),
('UTF-8', 'UTF-8/Unicode'),
('ISO-8859-1', 'Latin1/ISO-8859-1'),
('ISO-8859-2', 'Latin2/ISO-8859-2'),
('ISO-8859-3', 'Latin3/ISO-8859-3'),
('ISO-8859-4', 'Latin4/ISO-8859-4'),
('ISO-8859-5', 'Latin5/ISO-8859-5'),
('ISO-8859-6', 'Latin6/ISO-8859-6'),
('ISO-8859-7', 'Latin7/ISO-8859-7'),
('ISO-8859-8', 'Latin8/ISO-8859-8'),
('ISO-8859-9', 'Latin9/ISO-8859-9'),
('ISO-8859-10', 'Latin10/ISO-8859-10'),
('ISO-8859-13', 'Latin13/ISO-8859-13'),
('ISO-8859-14', 'Latin14/ISO-8859-14'),
('ISO8859-15', 'Latin15/ISO-8859-15'),
('Big5', 'BIG5'),
('EUC-JP', 'EUC-JP'),
('EUC-KR', 'EUC-KR'),
('GBK', 'GBK'),
('GB18030', 'GB18030'),
('Shift_JIS', 'Shift_JIS'),
('KOI8-R', 'KOI8-R'),
('KOI8-U', 'KOI8-U'),
('windows-874', 'Windows CP874'),
('windows-1250', 'Windows CP1250'),
('windows-1251', 'Windows CP1251'),
('windows-1252', 'Windows CP1252'),
('windows-1253', 'Windows CP1253'),
('windows-1254', 'Windows CP1254'),
('windows-1255', 'Windows CP1255'),
('windows-1256', 'Windows CP1256'),
('windows-1257', 'Windows CP1257'),
('windows-1258', 'Windows CP1258'))
| gpl-3.0 |
moio/spacewalk | client/solaris/smartpm/smart/transaction.py | 3 | 61738 | #
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <[email protected]>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.const import INSTALL, REMOVE, UPGRADE, FIX, REINSTALL, KEEP
from smart.cache import PreRequires, Package
from smart import *
class ChangeSet(dict):
def __init__(self, cache, state=None):
self._cache = cache
if state:
self.update(state)
def getCache(self):
return self._cache
def getState(self):
return self.copy()
def setState(self, state):
if state is not self:
self.clear()
self.update(state)
def getPersistentState(self):
state = {}
for pkg in self:
state[(pkg.__class__, pkg.name, pkg.version)] = self[pkg]
return state
def setPersistentState(self, state):
self.clear()
for pkg in self._cache.getPackages():
op = state.get((pkg.__class__, pkg.name, pkg.version))
if op is not None:
self[pkg] = op
def copy(self):
return ChangeSet(self._cache, self)
def set(self, pkg, op, force=False):
if self.get(pkg) is op:
return
if op is INSTALL:
if force or not pkg.installed:
self[pkg] = INSTALL
else:
if pkg in self:
del self[pkg]
else:
if force or pkg.installed:
self[pkg] = REMOVE
else:
if pkg in self:
del self[pkg]
def installed(self, pkg):
op = self.get(pkg)
return op is INSTALL or pkg.installed and not op is REMOVE
def difference(self, other):
diff = ChangeSet(self._cache)
for pkg in self:
sop = self[pkg]
if sop is not other.get(pkg):
diff[pkg] = sop
return diff
def intersect(self, other):
isct = ChangeSet(self._cache)
for pkg in self:
sop = self[pkg]
if sop is other.get(pkg):
isct[pkg] = sop
return isct
def __str__(self):
l = []
for pkg in self:
l.append("%s %s\n" % (self[pkg] is INSTALL and "I" or "R", pkg))
return "".join(l)
class Policy(object):
def __init__(self, trans):
self._trans = trans
self._locked = {}
self._sysconflocked = []
self._priorities = {}
def runStarting(self):
self._priorities.clear()
cache = self._trans.getCache()
for pkg in pkgconf.filterByFlag("lock", cache.getPackages()):
if pkg not in self._locked:
self._sysconflocked.append(pkg)
self._locked[pkg] = True
def runFinished(self):
self._priorities.clear()
for pkg in self._sysconflocked:
del self._locked[pkg]
del self._sysconflocked[:]
def getLocked(self, pkg):
return pkg in self._locked
def setLocked(self, pkg, flag):
if flag:
self._locked[pkg] = True
else:
if pkg in self._locked:
del self._locked[pkg]
def getLockedSet(self):
return self._locked
def getWeight(self, changeset):
return 0
def getPriority(self, pkg):
priority = self._priorities.get(pkg)
if priority is None:
self._priorities[pkg] = priority = pkg.getPriority()
return priority
def getPriorityWeights(self, targetPkg, pkgs):
set = {}
lower = None
for pkg in pkgs:
priority = self.getPriority(pkg)
if lower is None or priority < lower:
lower = priority
set[pkg] = priority
for pkg in set:
set[pkg] = -(set[pkg] - lower)*10
return set
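    # Worked example for getPriorityWeights() (hypothetical priorities):
    # with priorities pkgx=0 and pkgy=5, lower is 0, so the returned map is
    # {pkgx: -(0-0)*10, pkgy: -(5-0)*10} == {pkgx: 0, pkgy: -50}; since
    # smaller weights are preferred, the higher-priority pkgy is favored.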
class PolicyInstall(Policy):
"""Give precedence for keeping functionality in the system."""
def runStarting(self):
Policy.runStarting(self)
self._upgrading = upgrading = {}
self._upgraded = upgraded = {}
self._downgraded = downgraded = {}
for pkg in self._trans.getCache().getPackages():
# Precompute upgrade relations.
for upg in pkg.upgrades:
for prv in upg.providedby:
for prvpkg in prv.packages:
if prvpkg.installed:
if (self.getPriority(pkg) >=
self.getPriority(prvpkg)):
upgrading[pkg] = True
if prvpkg in upgraded:
upgraded[prvpkg].append(pkg)
else:
upgraded[prvpkg] = [pkg]
else:
if prvpkg in downgraded:
downgraded[prvpkg].append(pkg)
else:
downgraded[prvpkg] = [pkg]
# Downgrades are upgrades if they have a higher priority.
for prv in pkg.provides:
for upg in prv.upgradedby:
for upgpkg in upg.packages:
if upgpkg.installed:
if (self.getPriority(pkg) >
self.getPriority(upgpkg)):
upgrading[pkg] = True
if upgpkg in upgraded:
upgraded[upgpkg].append(pkg)
else:
upgraded[upgpkg] = [pkg]
else:
if upgpkg in downgraded:
downgraded[upgpkg].append(pkg)
else:
downgraded[upgpkg] = [pkg]
def runFinished(self):
Policy.runFinished(self)
del self._upgrading
del self._upgraded
del self._downgraded
def getWeight(self, changeset):
weight = 0
upgrading = self._upgrading
upgraded = self._upgraded
downgraded = self._downgraded
for pkg in changeset:
if changeset[pkg] is REMOVE:
# Upgrading a package that will be removed
# is better than upgrading a package that will
# stay in the system.
for upgpkg in upgraded.get(pkg, ()):
if changeset.get(upgpkg) is INSTALL:
weight -= 1
break
else:
for dwnpkg in downgraded.get(pkg, ()):
if changeset.get(dwnpkg) is INSTALL:
weight += 15
break
else:
weight += 20
else:
if pkg in upgrading:
weight += 2
else:
weight += 3
return weight
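    # Worked example (hypothetical changeset): removing a package whose
    # upgrade is being installed contributes -1, removing one replaced only
    # by a downgrade contributes +15, removing one with no replacement +20;
    # each install adds +2 if it upgrades something, else +3. Changesets
    # that preserve functionality therefore get the smallest weight.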
class PolicyRemove(Policy):
"""Give precedence to the choice with less changes."""
def getWeight(self, changeset):
weight = 0
for pkg in changeset:
if changeset[pkg] is REMOVE:
weight += 1
else:
weight += 5
return weight
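    # Worked example (hypothetical changeset): a solution removing two
    # packages weighs 1+1 = 2, while one installing a single replacement
    # weighs 5, so plain removals win unless installs are truly needed.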
class PolicyUpgrade(Policy):
"""Give precedence to the choice with more upgrades and smaller impact."""
def runStarting(self):
Policy.runStarting(self)
self._upgrading = upgrading = {}
self._upgraded = upgraded = {}
self._sortbonus = sortbonus = {}
self._requiredbonus = requiredbonus = {}
queue = self._trans.getQueue()
for pkg in self._trans.getCache().getPackages():
# Precompute upgrade relations.
for upg in pkg.upgrades:
for prv in upg.providedby:
for prvpkg in prv.packages:
if (prvpkg.installed and
self.getPriority(pkg) >= self.getPriority(prvpkg)):
dct = upgrading.get(pkg)
if dct:
dct[prvpkg] = True
else:
upgrading[pkg] = {prvpkg: True}
lst = upgraded.get(prvpkg)
if lst:
lst.append(pkg)
else:
upgraded[prvpkg] = [pkg]
# Downgrades are upgrades if they have a higher priority.
for prv in pkg.provides:
for upg in prv.upgradedby:
for upgpkg in upg.packages:
if (upgpkg.installed and
self.getPriority(pkg) > self.getPriority(upgpkg)):
dct = upgrading.get(pkg)
if dct:
dct[upgpkg] = True
else:
upgrading[pkg] = {upgpkg: True}
lst = upgraded.get(upgpkg)
if lst:
lst.append(pkg)
else:
upgraded[upgpkg] = [pkg]
pkgs = self._trans._queue.keys()
sortUpgrades(pkgs, self)
for i, pkg in enumerate(pkgs):
self._sortbonus[pkg] = -1./(i+100)
def runFinished(self):
Policy.runFinished(self)
del self._upgrading
del self._upgraded
def getWeight(self, changeset):
weight = 0
upgrading = self._upgrading
upgraded = self._upgraded
sortbonus = self._sortbonus
requiredbonus = self._requiredbonus
installedcount = 0
upgradedmap = {}
for pkg in changeset:
if changeset[pkg] is REMOVE:
# Upgrading a package that will be removed
# is better than upgrading a package that will
# stay in the system.
lst = upgraded.get(pkg, ())
for lstpkg in lst:
if changeset.get(lstpkg) is INSTALL:
weight -= 1
break
else:
weight += 3
else:
installedcount += 1
upgpkgs = upgrading.get(pkg)
if upgpkgs:
weight += sortbonus.get(pkg, 0)
upgradedmap.update(upgpkgs)
upgradedcount = len(upgradedmap)
weight += -30*upgradedcount+(installedcount-upgradedcount)
return weight
class Failed(Error): pass
PENDING_REMOVE = 1
PENDING_INSTALL = 2
PENDING_UPDOWN = 3
class Transaction(object):
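    """Resolve a queue of per-package operations (INSTALL, REMOVE,
    UPGRADE, FIX, ...) into a consistent ChangeSet, using the given
    Policy to weight alternative solutions."""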
def __init__(self, cache, policy=None, changeset=None, queue=None):
self._cache = cache
self._policy = policy and policy(self) or Policy(self)
self._changeset = changeset or ChangeSet(cache)
self._queue = queue or {}
def clear(self):
self._changeset.clear()
self._queue.clear()
def getCache(self):
return self._cache
def getQueue(self):
return self._queue
def getPolicy(self):
return self._policy
def setPolicy(self, policy):
self._policy = policy(self)
def getWeight(self):
return self._policy.getWeight(self._changeset)
def getChangeSet(self):
return self._changeset
def setChangeSet(self, changeset):
self._changeset = changeset
def getState(self):
return self._changeset.getState()
def setState(self, state):
self._changeset.setState(state)
def __nonzero__(self):
return bool(self._changeset)
def __str__(self):
return str(self._changeset)
def _install(self, pkg, changeset, locked, pending, depth=0):
#print "[%03d] _install(%s)" % (depth, pkg)
#depth += 1
locked[pkg] = True
changeset.set(pkg, INSTALL)
isinst = changeset.installed
# Remove packages conflicted by this one.
for cnf in pkg.conflicts:
for prv in cnf.providedby:
for prvpkg in prv.packages:
if prvpkg is pkg:
continue
if not isinst(prvpkg):
locked[prvpkg] = True
continue
if prvpkg in locked:
raise Failed, _("Can't install %s: conflicted package "
"%s is locked") % (pkg, prvpkg)
self._remove(prvpkg, changeset, locked, pending, depth)
pending.append((PENDING_UPDOWN, prvpkg))
# Remove packages conflicting with this one.
for prv in pkg.provides:
for cnf in prv.conflictedby:
for cnfpkg in cnf.packages:
if cnfpkg is pkg:
continue
if not isinst(cnfpkg):
locked[cnfpkg] = True
continue
if cnfpkg in locked:
raise Failed, _("Can't install %s: it's conflicted by "
"the locked package %s") \
% (pkg, cnfpkg)
self._remove(cnfpkg, changeset, locked, pending, depth)
pending.append((PENDING_UPDOWN, cnfpkg))
# Remove packages with the same name that can't
# coexist with this one.
namepkgs = self._cache.getPackages(pkg.name)
for namepkg in namepkgs:
if namepkg is not pkg and not pkg.coexists(namepkg):
if not isinst(namepkg):
locked[namepkg] = True
continue
if namepkg in locked:
raise Failed, _("Can't install %s: it can't coexist "
"with %s") % (pkg, namepkg)
self._remove(namepkg, changeset, locked, pending, depth)
# Install packages required by this one.
for req in pkg.requires:
# Check if someone is already providing it.
prvpkgs = {}
found = False
for prv in req.providedby:
for prvpkg in prv.packages:
if isinst(prvpkg):
found = True
break
if prvpkg not in locked:
prvpkgs[prvpkg] = True
else:
continue
break
if found:
# Someone is already providing it. Good.
continue
# No one is currently providing it. Do something.
if not prvpkgs:
# No packages provide it at all. Give up.
raise Failed, _("Can't install %s: no package provides %s") % \
(pkg, req)
if len(prvpkgs) == 1:
# Don't check locked here. prvpkgs was
# already filtered above.
self._install(prvpkgs.popitem()[0], changeset, locked,
pending, depth)
else:
# More than one package provide it. This package
# must be post-processed.
pending.append((PENDING_INSTALL, pkg, req, prvpkgs.keys()))
def _remove(self, pkg, changeset, locked, pending, depth=0):
#print "[%03d] _remove(%s)" % (depth, pkg)
#depth += 1
if pkg.essential:
raise Failed, _("Can't remove %s: it's an essential package")
locked[pkg] = True
changeset.set(pkg, REMOVE)
isinst = changeset.installed
# Check packages requiring this one.
for prv in pkg.provides:
for req in prv.requiredby:
# Check if someone installed is requiring it.
for reqpkg in req.packages:
if isinst(reqpkg):
break
else:
# No one requires it, so it doesn't matter.
continue
# Check if someone installed is still providing it.
prvpkgs = {}
found = False
for prv in req.providedby:
for prvpkg in prv.packages:
if prvpkg is pkg:
continue
if isinst(prvpkg):
found = True
break
if prvpkg not in locked:
prvpkgs[prvpkg] = True
else:
continue
break
if found:
# Someone is still providing it. Good.
continue
# No one is providing it anymore. We'll have to do
# something about it.
if prvpkgs:
# There are other options, besides removing.
pending.append((PENDING_REMOVE, pkg, prv, req.packages,
prvpkgs.keys()))
else:
# Remove every requiring package, or
# upgrade/downgrade them to something which
# does not require this dependency.
for reqpkg in req.packages:
if not isinst(reqpkg):
continue
if reqpkg in locked:
raise Failed, _("Can't remove %s: %s is locked") \
% (pkg, reqpkg)
self._remove(reqpkg, changeset, locked, pending, depth)
pending.append((PENDING_UPDOWN, reqpkg))
def _updown(self, pkg, changeset, locked, depth=0):
#print "[%03d] _updown(%s)" % (depth, pkg)
#depth += 1
isinst = changeset.installed
getpriority = self._policy.getPriority
pkgpriority = getpriority(pkg)
# Check if any upgrading version of this package is installed.
# If so, we won't try to install any other version.
upgpkgs = {}
for prv in pkg.provides:
for upg in prv.upgradedby:
for upgpkg in upg.packages:
if isinst(upgpkg):
return
if getpriority(upgpkg) < pkgpriority:
continue
if upgpkg not in locked and upgpkg not in upgpkgs:
upgpkgs[upgpkg] = True
# Also check if any downgrading version with a higher
# priority is installed.
for upg in pkg.upgrades:
for prv in upg.providedby:
for prvpkg in prv.packages:
if getpriority(prvpkg) <= pkgpriority:
continue
if isinst(prvpkg):
return
if prvpkg not in locked and prvpkg not in upgpkgs:
upgpkgs[prvpkg] = True
        # No upgrading version is installed. Let's try to upgrade it.
getweight = self._policy.getWeight
alternatives = [(getweight(changeset), changeset)]
# Check if upgrading is possible.
for upgpkg in upgpkgs:
try:
cs = changeset.copy()
lk = locked.copy()
_pending = []
self._install(upgpkg, cs, lk, _pending, depth)
if _pending:
self._pending(cs, lk, _pending, depth)
except Failed:
pass
else:
alternatives.append((getweight(cs), cs))
# Is any downgrading version of this package installed?
try:
dwnpkgs = {}
for upg in pkg.upgrades:
for prv in upg.providedby:
for prvpkg in prv.packages:
if getpriority(prvpkg) > pkgpriority:
continue
if isinst(prvpkg):
raise StopIteration
if prvpkg not in locked:
dwnpkgs[prvpkg] = True
# Also check if any upgrading version with a lower
# priority is installed.
for prv in pkg.provides:
for upg in prv.upgradedby:
for upgpkg in upg.packages:
if getpriority(upgpkg) >= pkgpriority:
continue
if isinst(upgpkg):
raise StopIteration
if upgpkg not in locked:
dwnpkgs[upgpkg] = True
except StopIteration:
pass
else:
# Check if downgrading is possible.
for dwnpkg in dwnpkgs:
try:
cs = changeset.copy()
lk = locked.copy()
_pending = []
self._install(dwnpkg, cs, lk, _pending, depth)
if _pending:
self._pending(cs, lk, _pending, depth)
except Failed:
pass
else:
alternatives.append((getweight(cs), cs))
        # If there's only one alternative, it's the one currently in use.
if len(alternatives) > 1:
alternatives.sort()
changeset.setState(alternatives[0][1])
def _pending(self, changeset, locked, pending, depth=0):
#print "[%03d] _pending()" % depth
#depth += 1
isinst = changeset.installed
getweight = self._policy.getWeight
updown = []
while pending:
item = pending.pop(0)
kind = item[0]
if kind == PENDING_UPDOWN:
updown.append(item[1])
elif kind == PENDING_INSTALL:
kind, pkg, req, prvpkgs = item
# Check if any prvpkg was already selected for installation
# due to some other change.
found = False
for i in range(len(prvpkgs)-1,-1,-1):
prvpkg = prvpkgs[i]
if isinst(prvpkg):
found = True
break
if prvpkg in locked:
del prvpkgs[i]
if found:
continue
if not prvpkgs:
# No packages provide it at all. Give up.
raise Failed, _("Can't install %s: no package "
"provides %s") % (pkg, req)
if len(prvpkgs) > 1:
# More than one package provide it. We use _pending here,
# since any option must consider the whole change for
# weighting.
alternatives = []
failures = []
sortUpgrades(prvpkgs)
keeporder = 0.000001
pw = self._policy.getPriorityWeights(pkg, prvpkgs)
for prvpkg in prvpkgs:
try:
_pending = []
cs = changeset.copy()
lk = locked.copy()
self._install(prvpkg, cs, lk, _pending, depth)
if _pending:
self._pending(cs, lk, _pending, depth)
except Failed, e:
failures.append(unicode(e))
else:
alternatives.append((getweight(cs)+pw[prvpkg]+
keeporder, cs, lk))
keeporder += 0.000001
if not alternatives:
raise Failed, _("Can't install %s: all packages "
"providing %s failed to install:\n%s")\
% (pkg, req, "\n".join(failures))
alternatives.sort()
changeset.setState(alternatives[0][1])
if len(alternatives) == 1:
locked.update(alternatives[0][2])
else:
# This turned out to be the only way.
self._install(prvpkgs[0], changeset, locked,
pending, depth)
elif kind == PENDING_REMOVE:
kind, pkg, prv, reqpkgs, prvpkgs = item
# Check if someone installed is still requiring it.
reqpkgs = [x for x in reqpkgs if isinst(x)]
if not reqpkgs:
continue
# Check if someone installed is providing it.
found = False
for prvpkg in prvpkgs:
if isinst(prvpkg):
found = True
break
if found:
# Someone is still providing it. Good.
continue
prvpkgs = [x for x in prvpkgs if x not in locked]
# No one is providing it anymore. We'll have to do
# something about it.
# Try to install other providing packages.
if prvpkgs:
alternatives = []
failures = []
pw = self._policy.getPriorityWeights(pkg, prvpkgs)
for prvpkg in prvpkgs:
try:
_pending = []
cs = changeset.copy()
lk = locked.copy()
self._install(prvpkg, cs, lk, _pending, depth)
if _pending:
self._pending(cs, lk, _pending, depth)
except Failed, e:
failures.append(unicode(e))
else:
alternatives.append((getweight(cs)+pw[prvpkg],
cs, lk))
if not prvpkgs or not alternatives:
# There's no alternatives. We must remove
# every requiring package.
for reqpkg in reqpkgs:
if reqpkg in locked and isinst(reqpkg):
raise Failed, _("Can't remove %s: requiring "
"package %s is locked") % \
(pkg, reqpkg)
for reqpkg in reqpkgs:
# We check again, since other actions may have
# changed their state.
if not isinst(reqpkg):
continue
if reqpkg in locked:
raise Failed, _("Can't remove %s: requiring "
"package %s is locked") % \
(pkg, reqpkg)
self._remove(reqpkg, changeset, locked,
pending, depth)
continue
# Then, remove every requiring package, or
# upgrade/downgrade them to something which
# does not require this dependency.
cs = changeset.copy()
lk = locked.copy()
try:
for reqpkg in reqpkgs:
if reqpkg in locked and isinst(reqpkg):
raise Failed, _("%s is locked") % reqpkg
for reqpkg in reqpkgs:
if not cs.installed(reqpkg):
continue
if reqpkg in lk:
raise Failed, _("%s is locked") % reqpkg
_pending = []
self._remove(reqpkg, cs, lk, _pending, depth)
if _pending:
self._pending(cs, lk, _pending, depth)
except Failed, e:
failures.append(unicode(e))
else:
alternatives.append((getweight(cs), cs, lk))
if not alternatives:
raise Failed, _("Can't install %s: all packages providing "
"%s failed to install:\n%s") \
% (pkg, prv, "\n".join(failures))
alternatives.sort()
changeset.setState(alternatives[0][1])
if len(alternatives) == 1:
locked.update(alternatives[0][2])
for pkg in updown:
self._updown(pkg, changeset, locked, depth)
del pending[:]
def _upgrade(self, pkgs, changeset, locked, pending, depth=0):
#print "[%03d] _upgrade()" % depth
#depth += 1
isinst = changeset.installed
getweight = self._policy.getWeight
sortUpgrades(pkgs, self._policy)
pkgs.reverse()
lockedstate = {}
origchangeset = changeset.copy()
weight = getweight(changeset)
for pkg in pkgs:
if pkg in locked and not isinst(pkg):
continue
try:
cs = changeset.copy()
lk = locked.copy()
_pending = []
self._install(pkg, cs, lk, _pending, depth)
if _pending:
self._pending(cs, lk, _pending, depth)
except Failed, e:
pass
else:
lockedstate[pkg] = lk
csweight = getweight(cs)
if csweight < weight:
weight = csweight
changeset.setState(cs)
lockedstates = {}
for pkg in pkgs:
if changeset.get(pkg) is INSTALL:
state = lockedstate.get(pkg)
if state:
lockedstates.update(state)
for pkg in changeset.keys():
op = changeset.get(pkg)
if (op and op != origchangeset.get(pkg) and
pkg not in locked and pkg not in lockedstates):
try:
cs = changeset.copy()
lk = locked.copy()
_pending = []
if op is REMOVE:
self._install(pkg, cs, lk, _pending, depth)
elif op is INSTALL:
self._remove(pkg, cs, lk, _pending, depth)
if _pending:
self._pending(cs, lk, _pending, depth)
except Failed, e:
pass
else:
csweight = getweight(cs)
if csweight < weight:
weight = csweight
changeset.setState(cs)
def _fix(self, pkgs, changeset, locked, pending, depth=0):
#print "[%03d] _fix()" % depth
#depth += 1
getweight = self._policy.getWeight
isinst = changeset.installed
for pkg in pkgs:
if not isinst(pkg):
continue
# Is it broken at all?
try:
for req in pkg.requires:
for prv in req.providedby:
for prvpkg in prv.packages:
if isinst(prvpkg):
break
else:
continue
break
else:
iface.debug(_("Unsatisfied dependency: "
"%s requires %s") % (pkg, req))
raise StopIteration
for cnf in pkg.conflicts:
for prv in cnf.providedby:
for prvpkg in prv.packages:
if prvpkg is pkg:
continue
if isinst(prvpkg):
iface.debug(_("Unsatisfied dependency: "
"%s conflicts with %s")
% (pkg, prvpkg))
raise StopIteration
for prv in pkg.provides:
for cnf in prv.conflictedby:
for cnfpkg in cnf.packages:
if cnfpkg is pkg:
continue
if isinst(cnfpkg):
iface.debug(_("Unsatisfied dependency: "
"%s conflicts with %s")
% (cnfpkg, pkg))
raise StopIteration
# Check packages with the same name that can't
# coexist with this one.
namepkgs = self._cache.getPackages(pkg.name)
for namepkg in namepkgs:
if (isinst(namepkg) and namepkg is not pkg
and not pkg.coexists(namepkg)):
iface.debug(_("Package %s can't coexist with %s") %
(namepkg, pkg))
raise StopIteration
except StopIteration:
pass
else:
continue
# We have a broken package. Fix it.
alternatives = []
failures = []
# Try to fix by installing it.
try:
cs = changeset.copy()
lk = locked.copy()
_pending = []
self._install(pkg, cs, lk, _pending, depth)
if _pending:
self._pending(cs, lk, _pending, depth)
except Failed, e:
failures.append(unicode(e))
else:
            # If they weigh the same, it's better to keep the package.
alternatives.append((getweight(cs)-0.000001, cs))
# Try to fix by removing it.
try:
cs = changeset.copy()
lk = locked.copy()
_pending = []
self._remove(pkg, cs, lk, _pending, depth)
if _pending:
self._pending(cs, lk, _pending, depth)
self._updown(pkg, cs, lk, depth)
except Failed, e:
failures.append(unicode(e))
else:
alternatives.append((getweight(cs), cs))
if not alternatives:
raise Failed, _("Can't fix %s:\n%s") % \
(pkg, "\n".join(failures))
alternatives.sort()
changeset.setState(alternatives[0][1])
def enqueue(self, pkg, op):
if op is UPGRADE:
isinst = self._changeset.installed
_upgpkgs = {}
try:
pkgpriority = pkg.getPriority()
for prv in pkg.provides:
for upg in prv.upgradedby:
for upgpkg in upg.packages:
if upgpkg.getPriority() < pkgpriority:
continue
if isinst(upgpkg):
raise StopIteration
_upgpkgs[upgpkg] = True
for upg in pkg.upgrades:
for prv in upg.providedby:
for prvpkg in prv.packages:
if prvpkg.getPriority() <= pkgpriority:
continue
if isinst(prvpkg):
raise StopIteration
_upgpkgs[prvpkg] = True
except StopIteration:
pass
else:
for upgpkg in _upgpkgs:
self._queue[upgpkg] = op
else:
self._queue[pkg] = op
def run(self):
self._policy.runStarting()
try:
changeset = self._changeset.copy()
isinst = changeset.installed
locked = self._policy.getLockedSet().copy()
pending = []
for pkg in self._queue:
op = self._queue[pkg]
if op is KEEP:
if pkg in changeset:
del changeset[pkg]
elif op is INSTALL:
if not isinst(pkg) and pkg in locked:
raise Failed, _("Can't install %s: it's locked") % pkg
changeset.set(pkg, INSTALL)
elif op is REMOVE:
if isinst(pkg) and pkg in locked:
raise Failed, _("Can't remove %s: it's locked") % pkg
changeset.set(pkg, REMOVE)
elif op is REINSTALL:
if pkg in locked:
raise Failed, _("Can't reinstall %s: it's locked")%pkg
changeset.set(pkg, INSTALL, force=True)
upgpkgs = []
fixpkgs = []
for pkg in self._queue:
op = self._queue[pkg]
if op is KEEP:
if pkg.installed:
op = INSTALL
else:
op = REMOVE
if op is INSTALL or op is REINSTALL:
self._install(pkg, changeset, locked, pending)
elif op is REMOVE:
self._remove(pkg, changeset, locked, pending)
elif op is UPGRADE:
upgpkgs.append(pkg)
elif op is FIX:
fixpkgs.append(pkg)
if pending:
self._pending(changeset, locked, pending)
if upgpkgs:
self._upgrade(upgpkgs, changeset, locked, pending)
if fixpkgs:
self._fix(fixpkgs, changeset, locked, pending)
self._changeset.setState(changeset)
finally:
self._queue.clear()
self._policy.runFinished()
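# A minimal usage sketch for Transaction (hypothetical setup: `cache` is
# assumed to be an already loaded Cache and `pkg` a Package from it):
#
#   trans = Transaction(cache, PolicyUpgrade)
#   trans.enqueue(pkg, UPGRADE)
#   trans.run()                       # raises Failed if unresolvable
#   changeset = trans.getChangeSet()  # maps packages to INSTALL/REMOVE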
class ChangeSetSplitter(object):
# This class operates on *sane* changesets.
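    # A minimal usage sketch (hypothetical names: `changeset` is assumed
    # to be a previously computed ChangeSet and `cache` its Cache):
    #
    #   splitter = ChangeSetSplitter(changeset)
    #   subset = ChangeSet(cache)
    #   splitter.include(subset, pkg)  # pull one change plus whatever
    #                                  # consistency forces along with it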
DEBUG = 0
def __init__(self, changeset, forcerequires=True):
self._changeset = changeset
self._forcerequires = forcerequires
self._locked = {}
def getForceRequires(self):
        return self._forcerequires
def setForceRequires(self, flag):
self._forcerequires = flag
def getLocked(self, pkg):
return pkg in self._locked
def setLocked(self, pkg, flag):
if flag:
self._locked[pkg] = True
else:
if pkg in self._locked:
del self._locked[pkg]
def setLockedSet(self, set):
self._locked.clear()
self._locked.update(set)
def resetLocked(self):
self._locked.clear()
def _remove(self, subset, pkg, locked):
set = self._changeset
# Include requiring packages being removed, or exclude
# requiring packages being installed.
for prv in pkg.provides:
for req in prv.requiredby:
reqpkgs = [reqpkg for reqpkg in req.packages if
subset.get(reqpkg) is INSTALL or
subset.get(reqpkg) is not REMOVE and
reqpkg.installed]
if not reqpkgs:
continue
# Check if some package that will stay
# in the system or some package already
# selected for installation provide the
# needed dependency.
found = False
for prv in req.providedby:
for prvpkg in prv.packages:
if (subset.get(prvpkg) is INSTALL or
(prvpkg.installed and not
subset.get(prvpkg) is REMOVE)):
found = True
break
else:
continue
break
if found:
continue
# Try to include some providing package
# that is selected for installation.
found = False
for prv in req.providedby:
for prvpkg in prv.packages:
if (set.get(prvpkg) is INSTALL and
prvpkg not in locked):
try:
self.include(subset, prvpkg, locked)
except Error:
pass
else:
found = True
break
else:
continue
break
if found:
continue
# Now, try to keep in the system some
# providing package which is already installed.
found = False
wasbroken = True
for prv in req.providedby:
for prvpkg in prv.packages:
if set.get(prvpkg) is not REMOVE:
continue
wasbroken = False
# Package is necessarily in subset
# otherwise we wouldn't get here.
if prvpkg not in locked:
try:
self.exclude(subset, prvpkg, locked)
except Error:
pass
else:
found = True
break
else:
continue
break
if found:
continue
needed = (not wasbroken and
(self._forcerequires or
isinstance(req, PreRequires)))
for reqpkg in reqpkgs:
# Finally, try to exclude the requiring
# package if it is being installed, or
# include it if it's being removed.
reqpkgop = set.get(reqpkg)
if reqpkgop and reqpkg not in locked:
try:
if reqpkgop is INSTALL:
self.exclude(subset, reqpkg, locked)
else:
self.include(subset, reqpkg, locked)
except Error:
if needed: raise
else:
continue
# Should we care about this?
if needed:
raise Error, _("No providers for '%s', "
"required by '%s'") % (req, reqpkg)
# Check upgrading/downgrading packages.
relpkgs = [upgpkg for prv in pkg.provides
for upg in prv.upgradedby
for upgpkg in upg.packages]
relpkgs.extend([prvpkg for upg in pkg.upgrades
for prv in upg.providedby
for prvpkg in prv.packages])
if set[pkg] is INSTALL:
# Package is being installed, but excluded from the
# subset. Exclude every related package which is
# being removed.
for relpkg in relpkgs:
if subset.get(relpkg) is REMOVE:
if relpkg in locked:
raise Error, _("Package '%s' is locked") % relpkg
self.exclude(subset, relpkg, locked)
else:
# Package is being removed, and included in the
# subset. Include every related package which is
# being installed.
for relpkg in relpkgs:
if set.get(relpkg) is INSTALL and relpkg not in subset:
if relpkg in locked:
raise Error, _("Package '%s' is locked") % relpkg
self.include(subset, relpkg, locked)
def _install(self, subset, pkg, locked):
set = self._changeset
# Check all dependencies needed by this package.
for req in pkg.requires:
# Check if any already installed or to be installed
# package will solve the problem.
found = False
for prv in req.providedby:
for prvpkg in prv.packages:
if (subset.get(prvpkg) is INSTALL or
(prvpkg.installed and
subset.get(prvpkg) is not REMOVE)):
found = True
break
else:
continue
break
if found:
continue
# Check if any package that could be installed
# may solve the problem.
found = False
for prv in req.providedby:
for prvpkg in prv.packages:
if (set.get(prvpkg) is INSTALL
and prvpkg not in locked):
try:
self.include(subset, prvpkg, locked)
except Error:
pass
else:
found = True
break
else:
continue
break
if found:
continue
# Nope. Let's try to keep in the system some
# package providing the dependency.
found = False
wasbroken = True
for prv in req.providedby:
for prvpkg in prv.packages:
if set.get(prvpkg) is not REMOVE:
continue
wasbroken = False
# Package is necessarily in subset
# otherwise we wouldn't get here.
if prvpkg not in locked:
try:
self.exclude(subset, prvpkg, locked)
except Error:
pass
else:
found = True
break
else:
continue
break
if found or wasbroken:
continue
# There are no solutions for the problem.
# Should we really care about it?
if (self._forcerequires or
isinstance(req, PreRequires)):
raise Error, _("No providers for '%s', "
"required by '%s'") % (req, pkg)
cnfpkgs = [prvpkg for cnf in pkg.conflicts
for prv in cnf.providedby
for prvpkg in prv.packages
if prvpkg is not pkg]
cnfpkgs.extend([cnfpkg for prv in pkg.provides
for cnf in prv.conflictedby
for cnfpkg in cnf.packages
if cnfpkg is not pkg])
for cnfpkg in cnfpkgs:
if (subset.get(cnfpkg) is INSTALL or
cnfpkg.installed and subset.get(cnfpkg) is not REMOVE):
if cnfpkg not in set:
raise Error, _("Can't remove %s, which conflicts with %s")\
% (cnfpkg, pkg)
if set[cnfpkg] is INSTALL:
self.exclude(subset, cnfpkg, locked)
else:
self.include(subset, cnfpkg, locked)
# Check upgrading/downgrading packages.
relpkgs = [upgpkg for prv in pkg.provides
for upg in prv.upgradedby
for upgpkg in upg.packages]
relpkgs.extend([prvpkg for upg in pkg.upgrades
for prv in upg.providedby
for prvpkg in prv.packages])
if set[pkg] is INSTALL:
# Package is being installed, and included in the
# subset. Include every related package which is
# being removed.
for relpkg in relpkgs:
if set.get(relpkg) is REMOVE and relpkg not in subset:
if relpkg in locked:
raise Error, _("Package '%s' is locked") % relpkg
self.include(subset, relpkg, locked)
else:
# Package is being removed, but excluded from the
# subset. Exclude every related package which is
# being installed.
for relpkg in relpkgs:
if subset.get(relpkg) is INSTALL:
if relpkg in locked:
raise Error, _("Package '%s' is locked") % relpkg
self.exclude(subset, relpkg, locked)
def include(self, subset, pkg, locked=None):
set = self._changeset
if locked is None:
locked = self._locked
if self.DEBUG: print "-"*79
else:
locked = locked.copy()
if self.DEBUG:
strop = set.get(pkg) is INSTALL and "INSTALL" or "REMOVE"
print "Including %s of %s" % (strop, pkg)
if pkg not in set:
raise Error, _("Package '%s' is not in changeset") % pkg
if pkg in locked:
raise Error, _("Package '%s' is locked") % pkg
locked[pkg] = True
op = subset[pkg] = set[pkg]
try:
if op is INSTALL:
self._install(subset, pkg, locked)
else:
self._remove(subset, pkg, locked)
except Error, e:
if self.DEBUG:
print "FAILED: Including %s of %s: %s" % (strop, pkg, e)
del subset[pkg]
raise
def exclude(self, subset, pkg, locked=None):
set = self._changeset
if locked is None:
locked = self._locked
if self.DEBUG: print "-"*79
else:
locked = locked.copy()
if self.DEBUG:
strop = set.get(pkg) is INSTALL and "INSTALL" or "REMOVE"
print "Excluding %s of %s" % (strop, pkg)
if pkg not in set:
raise Error, _("Package '%s' is not in changeset") % pkg
if pkg in locked:
raise Error, _("Package '%s' is locked") % pkg
locked[pkg] = True
if pkg in subset:
del subset[pkg]
op = set[pkg]
try:
if op is INSTALL:
self._remove(subset, pkg, locked)
elif op is REMOVE:
self._install(subset, pkg, locked)
except Error, e:
if self.DEBUG:
print "FAILED: Excluding %s of %s: %s" % (strop, pkg, e)
subset[pkg] = op
raise
def includeAll(self, subset):
# Include everything that doesn't change locked packages
set = self._changeset.get()
for pkg in set.keys():
try:
self.include(subset, pkg)
except Error:
pass
def excludeAll(self, subset):
# Exclude everything that doesn't change locked packages
set = self._changeset.get()
for pkg in set.keys():
try:
self.exclude(subset, pkg)
except Error:
pass
def sortUpgrades(pkgs, policy=None):
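    """Sort pkgs in-place so that packages which upgrade other listed
    packages, or which have a higher priority, come first."""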
upgpkgs = {}
for pkg in pkgs:
dct = {}
        recursiveUpgrades(pkg, dct)
del dct[pkg]
upgpkgs[pkg] = dct
pkgs.sort()
pkgs.reverse()
newpkgs = []
priority = {}
if policy:
for pkg in pkgs:
priority[pkg] = policy.getPriority(pkg)
else:
for pkg in pkgs:
priority[pkg] = pkg.getPriority()
for pkg in pkgs:
pkgupgs = upgpkgs[pkg]
for i in range(len(newpkgs)):
newpkg = newpkgs[i]
if newpkg in pkgupgs or priority[pkg] > priority[newpkg]:
newpkgs.insert(i, pkg)
break
else:
newpkgs.append(pkg)
pkgs[:] = newpkgs
def recursiveUpgrades(pkg, set):
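    """Add pkg and every package it transitively upgrades to the given
    dict, which is used as a set."""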
set[pkg] = True
for upg in pkg.upgrades:
for prv in upg.providedby:
for prvpkg in prv.packages:
if prvpkg not in set:
recursiveUpgrades(prvpkg, set)
def sortInternalRequires(pkgs):
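    """Sort pkgs in-place so that packages required by more packages of
    the same list, directly or indirectly, come first."""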
rellst = []
numrel = {}
pkgmap = dict.fromkeys(pkgs, True)
for pkg in pkgs:
rellst.append((recursiveInternalRequires(pkgmap, pkg, numrel), pkg))
rellst.sort()
rellst.reverse()
pkgs[:] = [x[1] for x in rellst]
def recursiveInternalRequires(pkgmap, pkg, numrel, done=None):
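    """Count how many packages in pkgmap require pkg, directly or
    indirectly, memoizing results in numrel."""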
if done is None:
done = {}
done[pkg] = True
if pkg in numrel:
return numrel[pkg]
n = 0
for prv in pkg.provides:
for req in prv.requiredby:
for relpkg in req.packages:
if relpkg in pkgmap and relpkg not in done:
n += 1
if relpkg in numrel:
n += numrel[relpkg]
else:
n += recursiveInternalRequires(pkgmap, relpkg,
numrel, done)
numrel[pkg] = n
return n
def forwardRequires(pkg, map):
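    """Add to map every requires of pkg, the provides satisfying them,
    and the providing packages, transitively."""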
for req in pkg.requires:
if req not in map:
map[req] = True
for prv in req.providedby:
if prv not in map:
map[prv] = True
for prvpkg in prv.packages:
if prvpkg not in map:
map[prvpkg] = True
forwardRequires(prvpkg, map)
def backwardRequires(pkg, map):
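    """Add to map every provides of pkg, the requires depending on them,
    and the requiring packages, transitively."""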
for prv in pkg.provides:
if prv not in map:
map[prv] = True
for req in prv.requiredby:
if req not in map:
map[req] = True
for reqpkg in req.packages:
if reqpkg not in map:
map[reqpkg] = True
backwardRequires(reqpkg, map)
def forwardPkgRequires(pkg, map=None):
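    """Like forwardRequires(), but keep only Package entries in the
    returned map."""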
if map is None:
map = {}
forwardRequires(pkg, map)
for item in map.keys():
if not isinstance(item, Package):
del map[item]
return map
def backwardPkgRequires(pkg, map=None):
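    """Like backwardRequires(), but keep only Package entries in the
    returned map."""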
if map is None:
map = {}
backwardRequires(pkg, map)
for item in map.keys():
if not isinstance(item, Package):
del map[item]
return map
def getAlternates(pkg, cache):
"""
For a given package, return every package that *might* get
removed if the given package was installed. The alternate
packages are every package that conflicts with any of the
required packages, or require any package conflicting with
any of the required packages.
"""
conflicts = {}
# Direct conflicts.
for namepkg in cache.getPackages(pkg.name):
if namepkg is not pkg and not pkg.coexists(namepkg):
conflicts[(pkg, namepkg)] = True
for cnf in pkg.conflicts:
for prv in cnf.providedby:
for prvpkg in prv.packages:
if prvpkg is not pkg:
conflicts[(pkg, prvpkg)] = True
for prv in pkg.provides:
for cnf in prv.conflictedby:
for cnfpkg in cnf.packages:
if cnfpkg is not pkg:
conflicts[(pkg, cnfpkg)] = True
# Conflicts of requires.
queue = [pkg]
done = {}
while queue:
qpkg = queue.pop()
done[qpkg] = True
for req in qpkg.requires:
prvpkgs = {}
for prv in req.providedby:
for prvpkg in prv.packages:
if prvpkg is qpkg or prvpkg is pkg:
break
prvpkgs[prvpkg] = True
else:
continue
break
else:
for prvpkg in prvpkgs:
if prvpkg in done:
continue
done[prvpkg] = True
queue.append(prvpkg)
for namepkg in cache.getPackages(prvpkg.name):
if (namepkg not in prvpkgs and
namepkg is not pkg and
not prvpkg.coexists(namepkg)):
conflicts[(prvpkg, namepkg)] = True
for cnf in prvpkg.conflicts:
for prv in cnf.providedby:
for _prvpkg in prv.packages:
if (_prvpkg is not pkg and
_prvpkg not in prvpkgs):
conflicts[(prvpkg, _prvpkg)] = True
for prv in prvpkg.provides:
for cnf in prv.conflictedby:
for cnfpkg in cnf.packages:
if (cnfpkg is not pkg and
cnfpkg not in prvpkgs):
conflicts[(prvpkg, cnfpkg)] = True
alternates = {}
for reqpkg, cnfpkg in conflicts:
        #print reqpkg, cnfpkg
alternates[cnfpkg] = True
for prv in cnfpkg.provides:
for req in prv.requiredby:
# Do not ascend if reqpkg also provides
# what cnfpkg is offering.
for _prv in req.providedby:
if reqpkg in _prv.packages:
break
else:
for _reqpkg in req.packages:
alternates[_reqpkg] = True
alternates.update(backwardPkgRequires(_reqpkg))
return alternates
def checkPackages(cache, pkgs, report=False, all=False, uninstalled=False):
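    """Check the given packages for unsatisfied requires, conflicts and
    coexistence problems, optionally reporting them. Returns True when
    no problems were found."""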
pkgs.sort()
problems = False
coexistchecked = {}
for pkg in pkgs:
if not all:
if uninstalled:
for loader in pkg.loaders:
if not loader.getInstalled():
break
else:
continue
elif not pkg.installed:
continue
for req in pkg.requires:
for prv in req.providedby:
for prvpkg in prv.packages:
if all:
break
elif uninstalled:
for loader in prvpkg.loaders:
if not loader.getInstalled():
break
else:
continue
break
elif prvpkg.installed:
break
else:
continue
break
else:
if report:
iface.info(_("Unsatisfied dependency: %s requires %s") %
(pkg, req))
problems = True
if not pkg.installed:
continue
for cnf in pkg.conflicts:
for prv in cnf.providedby:
for prvpkg in prv.packages:
if prvpkg is pkg:
continue
if prvpkg.installed:
if report:
iface.info(_("Unsatisfied dependency: "
"%s conflicts with %s") %
(pkg, prvpkg))
problems = True
namepkgs = cache.getPackages(pkg.name)
for namepkg in namepkgs:
if (namepkg, pkg) in coexistchecked:
continue
coexistchecked[(pkg, namepkg)] = True
if (namepkg.installed and namepkg is not pkg and
not pkg.coexists(namepkg)):
if report:
iface.info(_("Package %s can't coexist with %s") %
(namepkg, pkg))
problems = True
return not problems
# vim:ts=4:sw=4:et
| gpl-2.0 |
yewang15215/django | tests/m2m_through_regress/models.py | 273 | 2771 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Forward declared intermediate model
@python_2_unicode_compatible
class Membership(models.Model):
person = models.ForeignKey('Person', models.CASCADE)
group = models.ForeignKey('Group', models.CASCADE)
price = models.IntegerField(default=100)
def __str__(self):
return "%s is a member of %s" % (self.person.name, self.group.name)
# using custom id column to test ticket #11107
@python_2_unicode_compatible
class UserMembership(models.Model):
id = models.AutoField(db_column='usermembership_id', primary_key=True)
user = models.ForeignKey(User, models.CASCADE)
group = models.ForeignKey('Group', models.CASCADE)
price = models.IntegerField(default=100)
def __str__(self):
return "%s is a user and member of %s" % (self.user.username, self.group.name)
@python_2_unicode_compatible
class Person(models.Model):
name = models.CharField(max_length=128)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Group(models.Model):
name = models.CharField(max_length=128)
# Membership object defined as a class
members = models.ManyToManyField(Person, through=Membership)
user_members = models.ManyToManyField(User, through='UserMembership')
def __str__(self):
return self.name
# A set of models that use a non-abstract inherited model as the 'through' model.
class A(models.Model):
a_text = models.CharField(max_length=20)
class ThroughBase(models.Model):
a = models.ForeignKey(A, models.CASCADE)
b = models.ForeignKey('B', models.CASCADE)
class Through(ThroughBase):
extra = models.CharField(max_length=20)
class B(models.Model):
b_text = models.CharField(max_length=20)
a_list = models.ManyToManyField(A, through=Through)
# Using to_field on the through model
@python_2_unicode_compatible
class Car(models.Model):
make = models.CharField(max_length=20, unique=True, null=True)
drivers = models.ManyToManyField('Driver', through='CarDriver')
def __str__(self):
return "%s" % self.make
@python_2_unicode_compatible
class Driver(models.Model):
name = models.CharField(max_length=20, unique=True, null=True)
def __str__(self):
return "%s" % self.name
class Meta:
ordering = ('name',)
@python_2_unicode_compatible
class CarDriver(models.Model):
car = models.ForeignKey('Car', models.CASCADE, to_field='make')
driver = models.ForeignKey('Driver', models.CASCADE, to_field='name')
def __str__(self):
return "pk=%s car=%s driver=%s" % (str(self.pk), self.car, self.driver)
| bsd-3-clause |
danstoner/python_experiments | pgu/pgu/gui/menus.py | 13 | 3333 | """
"""
from .const import *
from . import table
from . import basic, button
class _Menu_Options(table.Table):
def __init__(self,menu,**params):
table.Table.__init__(self,**params)
self.menu = menu
def event(self,e):
handled = False
arect = self.get_abs_rect()
if e.type == MOUSEMOTION:
abspos = e.pos[0]+arect.x,e.pos[1]+arect.y
for w in self.menu.container.widgets:
if not w is self.menu:
mrect = w.get_abs_rect()
if mrect.collidepoint(abspos):
self.menu._close(None)
w._open(None)
handled = True
if not handled: table.Table.event(self,e)
class _Menu(button.Button):
def __init__(self,parent,widget=None,**params): #TODO widget= could conflict with module widget
params.setdefault('cls','menu')
button.Button.__init__(self,widget,**params)
self.parent = parent
self._cls = self.cls
self.options = _Menu_Options(self, cls=self.cls+".options")
self.connect(CLICK,self._open,None)
self.pos = 0
def _open(self,value):
self.parent.value = self
self.pcls = 'down'
self.repaint()
self.container.open(self.options,self.rect.x,self.rect.bottom)
self.options.connect(BLUR,self._close,None)
self.options.focus()
self.repaint()
def _pass(self,value):
pass
def _close(self,value):
self.pcls = ''
self.parent.value = None
self.repaint()
self.options.close()
def _valuefunc(self,value):
self._close(None)
if value['fnc'] != None:
value['fnc'](value['value'])
def event(self,e):
button.Button.event(self,e)
if self.parent.value == self:
self.pcls = 'down'
def add(self,w,fnc=None,value=None):
w.style.align = -1
b = button.Button(w,cls=self.cls+".option")
b.connect(CLICK,self._valuefunc,{'fnc':fnc,'value':value})
self.options.tr()
self.options.add(b)
return b
class Menus(table.Table):
"""A drop down menu bar.
Example:
data = [
('File/Save', fnc_save, None),
('File/New', fnc_new, None),
('Edit/Copy', fnc_copy, None),
('Edit/Cut', fnc_cut, None),
('Help/About', fnc_help, help_about_content),
('Help/Reference', fnc_help, help_reference_content),
]
w = Menus(data)
"""
def __init__(self,data,menu_cls='menu',**params):
params.setdefault('cls','menus')
table.Table.__init__(self,**params)
self.value = None
n,m,mt = 0,None,None
for path,cmd,value in data:
parts = path.split("/")
if parts[0] != mt:
mt = parts[0]
m = _Menu(self,basic.Label(mt,cls=menu_cls+".label"),cls=menu_cls)
self.add(m,n,0)
n += 1
#print ("add", parts[1], cmd, value)
m.add(basic.Label(parts[1],cls=m.cls+".option.label"),cmd,value)
| gpl-2.0 |
jeffery9/mixprint_addons | account_check_writing/__openerp__.py | 58 | 1721 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Check Writing',
'version': '1.1',
'author': 'OpenERP SA, NovaPoint Group',
'category': 'Generic Modules/Accounting',
'description': """
Module for the Check Writing and Check Printing.
================================================
""",
'website': 'http://www.openerp.com',
'depends' : ['account_voucher'],
'data': [
'wizard/account_check_batch_printing_view.xml',
'account_check_writing_report.xml',
'account_view.xml',
'account_voucher_view.xml',
'account_check_writing_data.xml',
],
'demo': ['account_demo.xml'],
'test': [],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jolyonb/edx-platform | openedx/core/djangoapps/credentials/signals.py | 1 | 6466 | """
This file contains signal handlers for credentials-related functionality.
"""
from logging import getLogger
from course_modes.models import CourseMode
from django.contrib.sites.models import Site
from lms.djangoapps.certificates.models import CertificateStatuses, GeneratedCertificate
from lms.djangoapps.grades.api import CourseGradeFactory
from openedx.core.djangoapps.catalog.utils import get_programs
from openedx.core.djangoapps.credentials.models import CredentialsApiConfig
from openedx.core.djangoapps.site_configuration import helpers
from .tasks.v1.tasks import send_grade_to_credentials
log = getLogger(__name__)
# "interesting" here means "credentials will want to know about it"
INTERESTING_MODES = CourseMode.CERTIFICATE_RELEVANT_MODES
INTERESTING_STATUSES = [
CertificateStatuses.notpassing,
CertificateStatuses.downloadable,
]
# These handlers have Credentials business logic that has bled into the LMS. But we want to filter here in order to
# not flood our task queue with a bunch of signals. So we put up with it.
def is_course_run_in_a_program(course_run_key):
""" Returns true if the given course key is in any program at all. """
# We don't have an easy way to go from course_run_key to a specific site that owns it. So just search each site.
sites = Site.objects.all()
str_key = str(course_run_key)
for site in sites:
for program in get_programs(site):
for course in program['courses']:
for course_run in course['course_runs']:
if str_key == course_run['key']:
return True
return False
def send_grade_if_interesting(user, course_run_key, mode, status, letter_grade, percent_grade, verbose=False):
""" Checks if grade is interesting to Credentials and schedules a Celery task if so. """
if verbose:
msg = u"Starting send_grade_if_interesting with params: "\
u"user [{username}], "\
u"course_run_key [{key}], "\
u"mode [{mode}], "\
u"status [{status}], "\
u"letter_grade [{letter_grade}], "\
u"percent_grade [{percent_grade}], "\
u"verbose [{verbose}]"\
.format(
username=getattr(user, 'username', None),
key=str(course_run_key),
mode=mode,
status=status,
letter_grade=letter_grade,
percent_grade=percent_grade,
verbose=verbose
)
log.info(msg)
# Avoid scheduling new tasks if certification is disabled. (Grades are a part of the records/cert story)
if not CredentialsApiConfig.current().is_learner_issuance_enabled:
if verbose:
log.info("Skipping send grade: is_learner_issuance_enabled False")
return
# Avoid scheduling new tasks if learner records are disabled for this site.
if not helpers.get_value_for_org(course_run_key.org, 'ENABLE_LEARNER_RECORDS', True):
if verbose:
log.info(
u"Skipping send grade: ENABLE_LEARNER_RECORDS False for org [{org}]".format(
org=course_run_key.org
)
)
return
# Grab mode/status if we don't have them in hand
if mode is None or status is None:
try:
cert = GeneratedCertificate.objects.get(user=user, course_id=course_run_key) # pylint: disable=no-member
mode = cert.mode
status = cert.status
except GeneratedCertificate.DoesNotExist:
# We only care about grades for which there is a certificate.
if verbose:
log.info(
u"Skipping send grade: no cert for user [{username}] & course_id [{course_id}]".format(
username=getattr(user, 'username', None),
course_id=str(course_run_key)
)
)
return
# Don't worry about whether it's available as well as awarded. Just awarded is good enough to record a verified
# attempt at a course. We want even the grades that didn't pass the class because Credentials wants to know about
# those too.
if mode not in INTERESTING_MODES or status not in INTERESTING_STATUSES:
if verbose:
log.info(
u"Skipping send grade: mode/status uninteresting for mode [{mode}] & status [{status}]".format(
mode=mode,
status=status
)
)
return
# If the course isn't in any program, don't bother telling Credentials about it. When Credentials grows support
# for course records as well as program records, we'll need to open this up.
if not is_course_run_in_a_program(course_run_key):
if verbose:
log.info(
u"Skipping send grade: course run not in a program. [{course_id}]".format(course_id=str(course_run_key))
)
return
# Grab grades if we don't have them in hand
if letter_grade is None or percent_grade is None:
grade = CourseGradeFactory().read(user, course_key=course_run_key, create_if_needed=False)
if grade is None:
if verbose:
log.info(
u"Skipping send grade: No grade found for user [{username}] & course_id [{course_id}]".format(
username=getattr(user, 'username', None),
course_id=str(course_run_key)
)
)
return
letter_grade = grade.letter_grade
percent_grade = grade.percent
send_grade_to_credentials.delay(user.username, str(course_run_key), True, letter_grade, percent_grade)
def handle_grade_change(user, course_grade, course_key, **kwargs):
"""
Notifies the Credentials IDA about certain grades it needs for its records, when a grade changes.
"""
send_grade_if_interesting(
user,
course_key,
None,
None,
course_grade.letter_grade,
course_grade.percent,
verbose=kwargs.get('verbose', False)
)
def handle_cert_change(user, course_key, mode, status, **kwargs):
"""
Notifies the Credentials IDA about certain grades it needs for its records, when a cert changes.
"""
send_grade_if_interesting(user, course_key, mode, status, None, None, verbose=kwargs.get('verbose', False))
| agpl-3.0 |
mcalhoun/ansible-modules-core | cloud/openstack/quantum_router_interface.py | 99 | 8558 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_router_interface
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
short_description: Attach/Dettach a subnet's interface to a router
description:
- Attach/Dettach a subnet interface to a router, to provide a gateway for the subnet.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone URL for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
router_name:
description:
- Name of the router to which the subnet's interface should be attached.
required: true
default: None
subnet_name:
description:
- Name of the subnet to whose interface should be attached to the router.
required: true
default: None
tenant_name:
description:
- Name of the tenant whose subnet has to be attached.
required: false
default: None
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
# Attach tenant1's subnet to the external router
- quantum_router_interface: state=present login_username=admin
login_password=admin
login_tenant_name=admin
tenant_name=tenant1
router_name=external_route
subnet_name=t1subnet
'''
_os_keystone = None
_os_tenant_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
login_tenant_name = module.params['login_tenant_name']
else:
login_tenant_name = module.params['tenant_name']
for tenant in _os_keystone.tenants.list():
if tenant.name == login_tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
def _get_router_id(module, neutron):
kwargs = {
'name': module.params['router_name'],
}
try:
routers = neutron.list_routers(**kwargs)
except Exception, e:
module.fail_json(msg = "Error in getting the router list: %s " % e.message)
if not routers['routers']:
return None
return routers['routers'][0]['id']
def _get_subnet_id(module, neutron):
subnet_id = None
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['subnet_name'],
}
try:
subnets = neutron.list_subnets(**kwargs)
except Exception, e:
module.fail_json( msg = " Error in getting the subnet list:%s " % e.message)
if not subnets['subnets']:
return None
return subnets['subnets'][0]['id']
def _get_port_id(neutron, module, router_id, subnet_id):
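    """Return the id of the router port attached to the given subnet,
    or None if no such port exists."""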
kwargs = {
'tenant_id': _os_tenant_id,
'device_id': router_id,
}
try:
ports = neutron.list_ports(**kwargs)
except Exception, e:
module.fail_json( msg = "Error in listing ports: %s" % e.message)
if not ports['ports']:
return None
for port in ports['ports']:
for subnet in port['fixed_ips']:
if subnet['subnet_id'] == subnet_id:
return port['id']
return None
def _add_interface_router(neutron, module, router_id, subnet_id):
kwargs = {
'subnet_id': subnet_id
}
try:
neutron.add_interface_router(router_id, kwargs)
except Exception, e:
module.fail_json(msg = "Error in adding interface to router: %s" % e.message)
return True
def _remove_interface_router(neutron, module, router_id, subnet_id):
kwargs = {
'subnet_id': subnet_id
}
try:
neutron.remove_interface_router(router_id, kwargs)
except Exception, e:
module.fail_json(msg="Error in removing interface from router: %s" % e.message)
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
router_name = dict(required=True),
subnet_name = dict(required=True),
tenant_name = dict(default=None),
state = dict(default='present', choices=['absent', 'present']),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
router_id = _get_router_id(module, neutron)
if not router_id:
module.fail_json(msg="failed to get the router id, please check the router name")
subnet_id = _get_subnet_id(module, neutron)
if not subnet_id:
module.fail_json(msg="failed to get the subnet id, please check the subnet name")
if module.params['state'] == 'present':
port_id = _get_port_id(neutron, module, router_id, subnet_id)
if not port_id:
_add_interface_router(neutron, module, router_id, subnet_id)
module.exit_json(changed=True, result="created", id=port_id)
module.exit_json(changed=False, result="success", id=port_id)
if module.params['state'] == 'absent':
port_id = _get_port_id(neutron, module, router_id, subnet_id)
if not port_id:
module.exit_json(changed = False, result = "Success")
_remove_interface_router(neutron, module, router_id, subnet_id)
module.exit_json(changed=True, result="Deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
kangkot/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/pythonwin/pywin/framework/editor/ModuleBrowser.py | 17 | 7082 | # ModuleBrowser.py - A view that provides a module browser for an editor document.
import pywin.mfc.docview
import win32ui
import win32con
import commctrl
import win32api
from pywin.tools import hierlist, browser
import pywin.framework.scriptutils
import afxres
import pyclbr
class HierListCLBRModule(hierlist.HierListItem):
def __init__(self, modName, clbrdata):
self.modName = modName
self.clbrdata = clbrdata
def GetText(self):
return self.modName
def GetSubList(self):
ret = []
for item in self.clbrdata.values():
if item.__class__ != pyclbr.Class: # ie, it is a pyclbr Function instance (only introduced post 1.5.2)
ret.append(HierListCLBRFunction( item ) )
else:
ret.append(HierListCLBRClass( item) )
ret.sort()
return ret
def IsExpandable(self):
return 1
class HierListCLBRItem(hierlist.HierListItem):
def __init__(self, name, file, lineno, suffix = ""):
self.name = str(name)
self.file = file
self.lineno = lineno
self.suffix = suffix
def __cmp__(self, other):
return cmp(self.name, other.name)
def GetText(self):
return self.name + self.suffix
def TakeDefaultAction(self):
if self.file:
pywin.framework.scriptutils.JumpToDocument(self.file, self.lineno, bScrollToTop = 1)
else:
win32ui.SetStatusText("Can not locate the source code for this object.")
def PerformItemSelected(self):
if self.file is None:
msg = "%s - source can not be located." % (self.name, )
else:
msg = "%s defined at line %d of %s" % (self.name, self.lineno, self.file)
win32ui.SetStatusText(msg)
class HierListCLBRClass(HierListCLBRItem):
def __init__(self, clbrclass, suffix = ""):
try:
name = clbrclass.name
file = clbrclass.file
lineno = clbrclass.lineno
self.super = clbrclass.super
self.methods = clbrclass.methods
except AttributeError:
name = clbrclass
file = lineno = None
self.super = []; self.methods = {}
HierListCLBRItem.__init__(self, name, file, lineno, suffix)
def __cmp__(self,other):
ret = cmp(self.name,other.name)
if ret==0 and (self is not other) and self.file==other.file:
self.methods = other.methods
self.super = other.super
self.lineno = other.lineno
return ret
def GetSubList(self):
r1 = []
for c in self.super:
r1.append(HierListCLBRClass(c, " (Parent class)"))
r1.sort()
r2=[]
for meth, lineno in self.methods.items():
r2.append(HierListCLBRMethod(meth, self.file, lineno))
r2.sort()
return r1+r2
def IsExpandable(self):
return len(self.methods) + len(self.super)
def GetBitmapColumn(self):
return 21
class HierListCLBRFunction(HierListCLBRItem):
def __init__(self, clbrfunc, suffix = ""):
name = clbrfunc.name
file = clbrfunc.file
lineno = clbrfunc.lineno
HierListCLBRItem.__init__(self, name, file, lineno, suffix)
def GetBitmapColumn(self):
return 22
class HierListCLBRMethod(HierListCLBRItem):
def GetBitmapColumn(self):
return 22
class HierListCLBRErrorItem(hierlist.HierListItem):
def __init__(self, text):
self.text = text
def GetText(self):
return self.text
def GetSubList(self):
return [HierListCLBRErrorItem(self.text)]
def IsExpandable(self):
return 0
class HierListCLBRErrorRoot(HierListCLBRErrorItem):
def IsExpandable(self):
return 1
class BrowserView(pywin.mfc.docview.TreeView):
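    """A tree view showing the classes and functions of the module being
    edited, built lazily with pyclbr."""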
def OnInitialUpdate(self):
self.list = None
rc = self._obj_.OnInitialUpdate()
self.HookMessage(self.OnSize, win32con.WM_SIZE)
self.bDirty = 0
self.destroying = 0
return rc
def DestroyBrowser(self):
self.DestroyList()
def OnActivateView(self, activate, av, dv):
# print "AV", self.bDirty, activate
if activate:
self.CheckRefreshList()
return self._obj_.OnActivateView(activate, av, dv)
def _MakeRoot(self):
path = self.GetDocument().GetPathName()
if not path:
return HierListCLBRErrorRoot("Error: Can not browse a file until it is saved")
else:
mod, path = pywin.framework.scriptutils.GetPackageModuleName(path)
if self.bDirty:
what = "Refreshing"
# Hack for pyclbr being too smart
try:
del pyclbr._modules[mod]
except (KeyError, AttributeError):
pass
else:
what = "Building"
win32ui.SetStatusText("%s class list - please wait..." % (what,), 1)
win32ui.DoWaitCursor(1)
try:
reader = pyclbr.readmodule_ex # new version post 1.5.2
except AttributeError:
reader = pyclbr.readmodule
try:
data = reader(mod, [path])
if data:
return HierListCLBRModule(mod, data)
else:
return HierListCLBRErrorRoot("No Python classes in module.")
finally:
win32ui.DoWaitCursor(0)
win32ui.SetStatusText(win32ui.LoadString(afxres.AFX_IDS_IDLEMESSAGE))
def DestroyList(self):
self.destroying = 1
list = getattr(self, "list", None) # If the document was not successfully opened, we may not have a list.
self.list = None
if list is not None:
list.HierTerm()
self.destroying = 0
def CheckMadeList(self):
if self.list is not None or self.destroying: return
self.rootitem = root = self._MakeRoot()
self.list = list = hierlist.HierListWithItems( root, win32ui.IDB_BROWSER_HIER)
list.HierInit(self.GetParentFrame(), self)
list.SetStyle(commctrl.TVS_HASLINES | commctrl.TVS_LINESATROOT | commctrl.TVS_HASBUTTONS)
def CheckRefreshList(self):
if self.bDirty:
if self.list is None:
self.CheckMadeList()
else:
new_root = self._MakeRoot()
if self.rootitem.__class__==new_root.__class__==HierListCLBRModule:
self.rootitem.modName = new_root.modName
self.rootitem.clbrdata = new_root.clbrdata
self.list.Refresh()
else:
self.list.AcceptRoot(self._MakeRoot())
self.bDirty = 0
def OnSize(self, params):
lparam = params[3]
w = win32api.LOWORD(lparam)
h = win32api.HIWORD(lparam)
if w != 0:
self.CheckMadeList()
elif w == 0:
self.DestroyList()
return 1
def _UpdateUIForState(self):
self.bDirty = 1
| apache-2.0 |
utamaro/youtube-dl | youtube_dl/extractor/tv2.py | 113 | 4640 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
float_or_none,
parse_iso8601,
remove_end,
)
class TV2IE(InfoExtractor):
_VALID_URL = 'http://(?:www\.)?tv2\.no/v/(?P<id>\d+)'
_TEST = {
'url': 'http://www.tv2.no/v/916509/',
'md5': '9cb9e3410b18b515d71892f27856e9b1',
'info_dict': {
'id': '916509',
'ext': 'flv',
'title': 'Se Gryttens hyllest av Steven Gerrard',
'description': 'TV 2 Sportens huspoet tar avskjed med Liverpools kaptein Steven Gerrard.',
'timestamp': 1431715610,
'upload_date': '20150515',
'duration': 156.967,
'view_count': int,
'categories': list,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
formats = []
format_urls = []
for protocol in ('HDS', 'HLS'):
data = self._download_json(
'http://sumo.tv2.no/api/web/asset/%s/play.json?protocol=%s&videoFormat=SMIL+ISMUSP' % (video_id, protocol),
video_id, 'Downloading play JSON')['playback']
for item in data['items']['item']:
video_url = item.get('url')
if not video_url or video_url in format_urls:
continue
format_id = '%s-%s' % (protocol.lower(), item.get('mediaFormat'))
if not self._is_valid_url(video_url, video_id, format_id):
continue
format_urls.append(video_url)
ext = determine_ext(video_url)
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
video_url, video_id, f4m_id=format_id))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', m3u8_id=format_id))
elif ext == 'ism' or video_url.endswith('.ism/Manifest'):
pass
else:
formats.append({
'url': video_url,
'format_id': format_id,
'tbr': int_or_none(item.get('bitrate')),
'filesize': int_or_none(item.get('fileSize')),
})
self._sort_formats(formats)
asset = self._download_json(
'http://sumo.tv2.no/api/web/asset/%s.json' % video_id,
video_id, 'Downloading metadata JSON')['asset']
title = asset['title']
description = asset.get('description')
timestamp = parse_iso8601(asset.get('createTime'))
duration = float_or_none(asset.get('accurateDuration') or asset.get('duration'))
view_count = int_or_none(asset.get('views'))
categories = asset.get('keywords', '').split(',')
thumbnails = [{
'id': thumbnail.get('@type'),
'url': thumbnail.get('url'),
} for _, thumbnail in asset.get('imageVersions', {}).items()]
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnails': thumbnails,
            'timestamp': timestamp,
            'duration': duration,
            'view_count': view_count,
            'categories': categories,
            'formats': formats,
        }
class TV2ArticleIE(InfoExtractor):
    _VALID_URL = r'http://(?:www\.)?tv2\.no/(?:a|\d{4}/\d{2}/\d{2}(/[^/]+)+)/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.tv2.no/2015/05/16/nyheter/alesund/krim/pingvin/6930542',
'info_dict': {
'id': '6930542',
'title': 'Russen hetses etter pingvintyveri – innrømmer å ha åpnet luken på buret',
'description': 'md5:339573779d3eea3542ffe12006190954',
},
'playlist_count': 2,
}, {
'url': 'http://www.tv2.no/a/6930542',
'only_matching': True,
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = [
self.url_result('http://www.tv2.no/v/%s' % video_id, 'TV2')
for video_id in re.findall(r'data-assetid="(\d+)"', webpage)]
title = remove_end(self._og_search_title(webpage), ' - TV2.no')
description = remove_end(self._og_search_description(webpage), ' - TV2.no')
return self.playlist_result(entries, playlist_id, title, description)
| unlicense |
simsong/grr-insider | lib/objectfilter.py | 2 | 26902 | #!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Classes to perform filtering of objects based on their data members.
Given a list of objects and a textual filter expression, these classes allow
you to determine which objects match the filter. The system has two main
pieces: A parser for the supported grammar and a filter implementation.
Given any user-supplied query complying with the supported grammar, it is
parsed with a custom lexer
based on GRR's lexer and then compiled into an actual implementation by using
the filter implementation. A filter implementation simply provides actual
implementations for the primitives required to perform filtering. The compiled
result is always a class supporting the Filter interface.
If we define a class called Car such as:
class Car(object):
def __init__(self, code, color="white", doors=3):
self.code = code
self.color = color
      self.doors = doors
And we have two instances:
ford_ka = Car("FORDKA1", color="grey")
toyota_corolla = Car("COROLLA1", color="white", doors=5)
fleet = [ford_ka, toyota_corolla]
We want to find cars that are grey and have 3 or more doors. We could filter
our fleet like this:
criteria = "(color is grey) and (doors >= 3)"
  parser = Parser(criteria).Parse()
  compiled_filter = parser.Compile(LowercaseAttributeFilterImplementation)
for car in fleet:
if compiled_filter.Matches(car):
print "Car %s matches the supplied filter." % car.code
The filter expression contains two subexpressions joined by an AND operator:
"color is grey" and "doors >= 3"
This means we want to search for objects matching these two subexpressions.
Let's analyze the first one in depth "color is grey":
"color": the left operand specifies a search path to look for the data. This
tells our filtering system to look for the color property on passed objects.
"is": the operator. Values retrieved for the "color" property will be checked
against the right operand to see if they are equal.
"grey": the right operand. It specifies an explicit value to check for.
So each time an object is passed through the filter, it will expand the value
of the color data member, and compare its value against "grey".
Because data members of objects are often not simple datatypes but other
objects, the system allows you to reference data members within other data
members by separating each by a dot. Let's see an example:
Let's add a more complex Car class with default tyre data:
class CarWithTyres(Car):
def __init__(self, code, tyres=None, color="white", doors=3):
super(self, CarWithTyres).__init__(code, color, doors)
tyres = tyres or Tyre("Pirelli", "PZERO")
class Tyre(object):
def __init__(self, brand, code):
self.brand = brand
self.code = code
And two new instances:
ford_ka = CarWithTyres("FORDKA", color="grey", tyres=Tyre("AVON", "ZT5"))
toyota_corolla = Car("COROLLA1", color="white", doors=5)
fleet = [ford_ka, toyota_corolla]
To filter a car based on the tyre brand, we would use a search path of
"tyres.brand".
Because the filter implementation provides the actual classes that perform
handling of the search paths, operators, etc., customizing the behaviour of the
filter is easy. Three basic filter implementations are given:
BaseFilterImplementation: search path expansion is done on attribute names
as provided (case-sensitive).
LowercaseAttributeFilterImplementation: search path expansion is done on the
attribute name, so that it only accesses attributes, not methods.
DictFilterImplementation: search path expansion is done on dictionary access
to the given object. So "a.b" expands the object obj to obj["a"]["b"]
"""
import abc
import binascii
import logging
import re
from grr.lib import lexer
from grr.lib import utils
class Error(Exception):
"""Base module exception."""
class MalformedQueryError(Error):
"""The provided filter query is malformed."""
class ParseError(Error, lexer.ParseError):
"""The parser for textual queries returned invalid results."""
class InvalidNumberOfOperands(Error):
"""The number of operands provided to this operator is wrong."""
class Filter(object):
"""Base class for every filter."""
def __init__(self, arguments=None, value_expander=None):
"""Constructor.
Args:
arguments: Arguments to the filter.
value_expander: A callable that will be used to expand values for the
      objects passed to this filter. Expander implementations are provided by
subclassing ValueExpander.
Raises:
Error: If the given value_expander is not a subclass of ValueExpander
"""
self.value_expander = None
self.value_expander_cls = value_expander
if self.value_expander_cls:
if not issubclass(self.value_expander_cls, ValueExpander):
raise Error("%s is not a valid value expander" % (
self.value_expander_cls))
self.value_expander = self.value_expander_cls()
self.args = arguments or []
logging.debug("Adding %s", arguments)
@abc.abstractmethod
def Matches(self, obj):
"""Whether object obj matches this filter."""
def Filter(self, objects):
"""Returns a list of objects that pass the filter."""
return filter(self.Matches, objects)
def __str__(self):
return "%s(%s)" % (self.__class__.__name__,
", ".join([str(arg) for arg in self.args]))
class AndFilter(Filter):
"""Performs a boolean AND of the given Filter instances as arguments.
Note that if no conditions are passed, all objects will pass.
"""
def Matches(self, obj):
for child_filter in self.args:
if not child_filter.Matches(obj):
return False
return True
class OrFilter(Filter):
"""Performs a boolean OR of the given Filter instances as arguments.
Note that if no conditions are passed, all objects will pass.
"""
def Matches(self, obj):
if not self.args: return True
for child_filter in self.args:
if child_filter.Matches(obj):
return True
return False
class Operator(Filter):
"""Base class for all operators."""
class IdentityFilter(Operator):
def Matches(self, _):
return True
class UnaryOperator(Operator):
"""Base class for unary operators."""
def __init__(self, operand, **kwargs):
"""Constructor."""
super(UnaryOperator, self).__init__(arguments=[operand], **kwargs)
if len(self.args) != 1:
raise InvalidNumberOfOperands("Only one operand is accepted by %s. "
"Received %d." % (self.__class__.__name__,
len(self.args)))
class BinaryOperator(Operator):
"""Base class for binary operators.
The left operand is always a path into the object which will be expanded for
values. The right operand is a value defined at initialization and is stored
at self.right_operand.
"""
def __init__(self, arguments=None, **kwargs):
super(BinaryOperator, self).__init__(arguments=arguments, **kwargs)
if len(self.args) != 2:
raise InvalidNumberOfOperands("Only two operands are accepted by %s. "
"Received %d." % (self.__class__.__name__,
len(self.args)))
self.left_operand = self.args[0]
self.right_operand = self.args[1]
class GenericBinaryOperator(BinaryOperator):
"""Allows easy implementations of operators."""
def Operation(self, x, y):
"""Performs the operation between two values."""
def Operate(self, values):
"""Takes a list of values and if at least one matches, returns True."""
for val in values:
try:
logging.debug("Operating %s with x=%s and y=%s",
self.__class__.__name__, val, self.right_operand)
if self.Operation(val, self.right_operand):
return True
else:
continue
except (ValueError, TypeError):
continue
return False
def Matches(self, obj):
key = self.left_operand
values = self.value_expander.Expand(obj, key)
if values and self.Operate(values):
return True
return False
class Equals(GenericBinaryOperator):
"""Matches objects when the right operand equals the expanded value."""
def Operation(self, x, y):
return x == y
class NotEquals(GenericBinaryOperator):
"""Matches when the right operand isn't equal to the expanded value."""
def Operate(self, values):
return not Equals(arguments=self.args,
value_expander=self.value_expander_cls).Operate(values)
class Less(GenericBinaryOperator):
"""Whether the expanded value >= right_operand."""
def Operation(self, x, y):
return x < y
class LessEqual(GenericBinaryOperator):
"""Whether the expanded value <= right_operand."""
def Operation(self, x, y):
return x <= y
class Greater(GenericBinaryOperator):
"""Whether the expanded value > right_operand."""
def Operation(self, x, y):
return x > y
class GreaterEqual(GenericBinaryOperator):
"""Whether the expanded value >= right_operand."""
def Operation(self, x, y):
return x >= y
class Contains(GenericBinaryOperator):
"""Whether the right operand is contained in the value."""
def Operation(self, x, y):
return y in x
class NotContains(GenericBinaryOperator):
"""Whether the right operand is not contained in the values."""
def Operate(self, values):
return not Contains(arguments=self.args,
value_expander=self.value_expander_cls).Operate(values)
# TODO(user): Change to an N-ary Operator?
class InSet(GenericBinaryOperator):
"""Whether all values are contained within the right operand."""
def Operation(self, x, y):
"""Whether x is fully contained in y."""
if x in y:
return True
# x might be an iterable
# first we need to skip strings or we'll do silly things
if (isinstance(x, basestring)
or isinstance(x, bytes)):
return False
try:
for value in x:
if value not in y:
return False
return True
except TypeError:
return False
class NotInSet(GenericBinaryOperator):
"""Whether at least a value is not present in the right operand."""
def Operate(self, values):
return not InSet(arguments=self.args,
value_expander=self.value_expander_cls).Operate(values)
class Regexp(GenericBinaryOperator):
"""Whether the value matches the regexp in the right operand."""
def __init__(self, *children, **kwargs):
super(Regexp, self).__init__(*children, **kwargs)
logging.debug("Compiled: %s", self.right_operand)
try:
self.compiled_re = re.compile(utils.SmartUnicode(self.right_operand))
except re.error:
raise ValueError("Regular expression \"%s\" is malformed." %
self.right_operand)
def Operation(self, x, y):
try:
if self.compiled_re.search(utils.SmartUnicode(x)):
return True
except TypeError:
return False
class Context(Operator):
"""Restricts the child operators to a specific context within the object.
Solves the context problem. The context problem is the following:
Suppose you store a list of loaded DLLs within a process. Suppose that for
each of these DLLs you store the number of imported functions and each of the
imported functions name.
Imagine that a malicious DLL is injected into processes and its indicators are
that it only imports one function and that it is RegQueryValueEx. You'd write
your indicator like this:
AndOperator(
Equal("ImportedDLLs.ImpFunctions.Name", "RegQueryValueEx"),
Equal("ImportedDLLs.NumImpFunctions", "1")
)
Now imagine you have these two processes on a given system.
Process1
+[0]__ImportedDlls
+[0]__Name: "notevil.dll"
|[0]__ImpFunctions
| +[1]__Name: "CreateFileA"
|[0]__NumImpFunctions: 1
|
+[1]__Name: "alsonotevil.dll"
|[1]__ImpFunctions
| +[0]__Name: "RegQueryValueEx"
| +[1]__Name: "CreateFileA"
|[1]__NumImpFunctions: 2
Process2
+[0]__ImportedDlls
+[0]__Name: "evil.dll"
|[0]__ImpFunctions
| +[0]__Name: "RegQueryValueEx"
|[0]__NumImpFunctions: 1
Both Process1 and Process2 match your query, as each of the indicators are
evaluated separately. While you wanted to express "find me processes that
have a DLL that has both one imported function and ReqQueryValueEx is in the
list of imported functions", your indicator actually means "find processes
that have at least a DLL with 1 imported functions and at least one DLL that
imports the ReqQueryValueEx function".
To write such an indicator you need to specify a context of ImportedDLLs for
these two clauses. Such that you convert your indicator to:
Context("ImportedDLLs",
AndOperator(
Equal("ImpFunctions.Name", "RegQueryValueEx"),
Equal("NumImpFunctions", "1")
))
Context will execute the filter specified as the second parameter for each of
the objects under "ImportedDLLs", thus applying the condition per DLL, not per
object and returning the right result.
"""
def __init__(self, arguments=None, **kwargs):
    if not arguments or len(arguments) != 2:
      raise InvalidNumberOfOperands("Context accepts only 2 operands.")
super(Context, self).__init__(arguments=arguments, **kwargs)
self.context, self.condition = self.args
def Matches(self, obj):
for object_list in self.value_expander.Expand(obj, self.context):
for sub_object in object_list:
if self.condition.Matches(sub_object):
return True
return False
OP2FN = {"equals": Equals,
"is": Equals,
"==": Equals,
"notequals": NotEquals,
"isnot": NotEquals,
"!=": NotEquals,
"contains": Contains,
"notcontains": NotContains,
">": Greater,
">=": GreaterEqual,
"<": Less,
"<=": LessEqual,
"inset": InSet,
"notinset": NotInSet,
"regexp": Regexp,
}
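# For example (illustrative, hand-built instantiation): the textual operator
# "contains" resolves to the Contains filter above, so
#   OP2FN["contains"](arguments=["color", "re"],
#                     value_expander=AttributeValueExpander)
# matches any object whose expanded "color" value contains the substring "re".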
class ValueExpander(object):
"""Encapsulates the logic to expand values available in an object.
Once instantiated and called, this class returns all the values that follow a
given field path.
"""
FIELD_SEPARATOR = "."
def _GetAttributeName(self, path):
"""Returns the attribute name to fetch given a path."""
return path[0]
def _GetValue(self, obj, attr_name):
"""Returns the value of tha attribute attr_name."""
raise NotImplementedError()
def _AtLeaf(self, attr_value):
"""Called when at a leaf value. Should yield a value."""
yield attr_value
def _AtNonLeaf(self, attr_value, path):
"""Called when at a non-leaf value. Should recurse and yield values."""
try:
# Check first for iterables
# If it's a dictionary, we yield it
if isinstance(attr_value, dict):
yield attr_value
else:
# If it's an iterable, we recurse on each value.
for sub_obj in attr_value:
for value in self.Expand(sub_obj, path[1:]):
yield value
except TypeError: # This is then not iterable, we recurse with the value
for value in self.Expand(attr_value, path[1:]):
yield value
def Expand(self, obj, path):
"""Returns a list of all the values for the given path in the object obj.
Given a path such as ["sub1", "sub2"] it returns all the values available
in obj.sub1.sub2 as a list. sub1 and sub2 must be data attributes or
properties.
If sub1 returns a list of objects, or a generator, Expand aggregates the
values for the remaining path for each of the objects, thus returning a
list of all the values under the given path for the input object.
Args:
obj: An object that will be traversed for the given path
path: A list of strings
Yields:
The values once the object is traversed.
"""
if isinstance(path, basestring):
path = path.split(self.FIELD_SEPARATOR)
attr_name = self._GetAttributeName(path)
attr_value = self._GetValue(obj, attr_name)
if attr_value is None:
return
if len(path) == 1:
for value in self._AtLeaf(attr_value):
yield value
else:
for value in self._AtNonLeaf(attr_value, path):
yield value
class AttributeValueExpander(ValueExpander):
"""An expander that gives values based on object attribute names."""
def _GetValue(self, obj, attr_name):
return getattr(obj, attr_name, None)
class LowercaseAttributeValueExpander(AttributeValueExpander):
"""An expander that lowercases all attribute names before access."""
def _GetAttributeName(self, path):
return path[0].lower()
class DictValueExpander(ValueExpander):
"""An expander that gets values from dictionary access to the object."""
def _GetValue(self, obj, attr_name):
return obj.get(attr_name, None)
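# Illustrative behaviour (hypothetical data): a one-element path is a leaf
# lookup, so
#   list(DictValueExpander().Expand({"color": "grey"}, "color")) == ["grey"]
# while dotted paths such as "tyres.brand" recurse through iterable and
# object values as described in the module docstring.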
### PARSER DEFINITION
class BasicExpression(lexer.Expression):
def Compile(self, filter_implementation):
arguments = [self.attribute]
op_str = self.operator.lower()
operator = filter_implementation.OPS.get(op_str, None)
if not operator:
raise ParseError("Unknown operator %s provided." % self.operator)
arguments.extend(self.args)
expander = filter_implementation.FILTERS["ValueExpander"]
return operator(arguments=arguments, value_expander=expander)
class ContextExpression(lexer.Expression):
"""Represents the context operator."""
def __init__(self, attribute="", part=None):
self.attribute = attribute
self.args = []
if part: self.args.append(part)
super(ContextExpression, self).__init__()
def __str__(self):
return "Context(%s %s)" % (
self.attribute, [str(x) for x in self.args])
def SetExpression(self, expression):
if isinstance(expression, lexer.Expression):
self.args = [expression]
else:
raise ParseError("Expected expression, got %s" % expression)
def Compile(self, filter_implementation):
arguments = [self.attribute]
for arg in self.args:
arguments.append(arg.Compile(filter_implementation))
expander = filter_implementation.FILTERS["ValueExpander"]
context_cls = filter_implementation.FILTERS["Context"]
return context_cls(arguments=arguments,
value_expander=expander)
class BinaryExpression(lexer.BinaryExpression):
  def Compile(self, filter_implementation):
    """Compile the binary expression into a filter object."""
    operator = self.operator.lower()
    if operator == "and" or operator == "&&":
      method = "AndFilter"
    elif operator == "or" or operator == "||":
      method = "OrFilter"
    else:
      raise ParseError("Invalid binary operator %s" % operator)
    args = [x.Compile(filter_implementation) for x in self.args]
    return filter_implementation.FILTERS[method](arguments=args)
class IdentityExpression(lexer.Expression):
def Compile(self, filter_implementation):
return filter_implementation.FILTERS["IdentityFilter"]()
class Parser(lexer.SearchParser):
"""Parses and generates an AST for a query written in the described language.
Examples of valid syntax:
size is 40
(name contains "Program Files" AND hash.md5 is "123abc")
@imported_modules (num_symbols = 14 AND symbol.name is "FindWindow")
"""
expression_cls = BasicExpression
binary_expression_cls = BinaryExpression
context_cls = ContextExpression
identity_expression_cls = IdentityExpression
tokens = [
# Operators and related tokens
lexer.Token("INITIAL", r"\@[\w._0-9]+",
"ContextOperator,PushState", "CONTEXTOPEN"),
lexer.Token("INITIAL", r"[^\s\(\)]", "PushState,PushBack", "ATTRIBUTE"),
lexer.Token("INITIAL", r"\(", "PushState,BracketOpen", None),
lexer.Token("INITIAL", r"\)", "BracketClose", "BINARY"),
# Context
lexer.Token("CONTEXTOPEN", r"\(", "BracketOpen", "INITIAL"),
# Double quoted string
lexer.Token("STRING", "\"", "PopState,StringFinish", None),
lexer.Token("STRING", r"\\x(..)", "HexEscape", None),
lexer.Token("STRING", r"\\(.)", "StringEscape", None),
lexer.Token("STRING", r"[^\\\"]+", "StringInsert", None),
# Single quoted string
lexer.Token("SQ_STRING", "'", "PopState,StringFinish", None),
lexer.Token("SQ_STRING", r"\\x(..)", "HexEscape", None),
lexer.Token("SQ_STRING", r"\\(.)", "StringEscape", None),
lexer.Token("SQ_STRING", r"[^\\']+", "StringInsert", None),
# Basic expression
lexer.Token("ATTRIBUTE", r"[\w._0-9]+", "StoreAttribute", "OPERATOR"),
lexer.Token("OPERATOR", r"(\w+|[<>!=]=?)", "StoreOperator", "ARG"),
lexer.Token("ARG", r"(\d+\.\d+)", "InsertFloatArg", "ARG"),
lexer.Token("ARG", r"(0x\d+)", "InsertInt16Arg", "ARG"),
lexer.Token("ARG", r"(\d+)", "InsertIntArg", "ARG"),
lexer.Token("ARG", "\"", "PushState,StringStart", "STRING"),
lexer.Token("ARG", "'", "PushState,StringStart", "SQ_STRING"),
# When the last parameter from arg_list has been pushed
# State where binary operators are supported (AND, OR)
lexer.Token("BINARY", r"(?i)(and|or|\&\&|\|\|)",
"BinaryOperator", "INITIAL"),
# - We can also skip spaces
lexer.Token("BINARY", r"\s+", None, None),
# - But if it's not "and" or just spaces we have to go back
lexer.Token("BINARY", ".", "PushBack,PopState", None),
# Skip whitespace.
lexer.Token(".", r"\s+", None, None),
]
def InsertArg(self, string="", **_):
"""Insert an arg to the current expression."""
logging.debug("Storing Argument %s", string)
# This expression is complete
if self.current_expression.AddArg(string):
self.stack.append(self.current_expression)
self.current_expression = self.expression_cls()
# We go to the BINARY state, to find if there's an AND or OR operator
return "BINARY"
def InsertFloatArg(self, string="", **_):
"""Inserts a Float argument."""
try:
float_value = float(string)
return self.InsertArg(float_value)
except (TypeError, ValueError):
raise ParseError("%s is not a valid float." % string)
def InsertIntArg(self, string="", **_):
"""Inserts an Integer argument."""
try:
int_value = int(string)
return self.InsertArg(int_value)
except (TypeError, ValueError):
raise ParseError("%s is not a valid integer." % string)
def InsertInt16Arg(self, string="", **_):
"""Inserts an Integer in base16 argument."""
try:
int_value = int(string, 16)
return self.InsertArg(int_value)
except (TypeError, ValueError):
raise ParseError("%s is not a valid base16 integer." % string)
def StringFinish(self, **_):
if self.state == "ATTRIBUTE":
return self.StoreAttribute(string=self.string)
elif self.state == "ARG":
return self.InsertArg(string=self.string)
def StringEscape(self, string, match, **_):
"""Escape backslashes found inside a string quote.
Backslashes followed by anything other than [\'"rnbt] will raise an Error.
Args:
string: The string that matched.
match: The match object (m.group(1) is the escaped code)
Raises:
ParseError: When the escaped string is not one of [\'"rnbt]
"""
if match.group(1) in "\\'\"rnbt":
self.string += string.decode("string_escape")
else:
raise ParseError("Invalid escape character %s." % string)
def HexEscape(self, string, match, **_):
"""Converts a hex escaped string."""
logging.debug("HexEscape matched %s", string)
hex_string = match.group(1)
try:
self.string += binascii.unhexlify(hex_string)
except TypeError:
raise ParseError("Invalid hex escape %s" % string)
def ContextOperator(self, string="", **_):
self.stack.append(self.context_cls(string[1:]))
def Reduce(self):
"""Reduce the token stack into an AST."""
# Check for sanity
if self.state != "INITIAL" and self.state != "BINARY":
self.Error("Premature end of expression")
length = len(self.stack)
while length > 1:
      # Precedence order
self._CombineParenthesis()
self._CombineBinaryExpressions("and")
self._CombineBinaryExpressions("or")
self._CombineContext()
# No change
if len(self.stack) == length: break
length = len(self.stack)
if length != 1:
self.Error("Illegal query expression")
return self.stack[0]
def Error(self, message=None, _=None):
raise ParseError("%s in position %s: %s <----> %s )" % (
message, len(self.processed_buffer), self.processed_buffer,
self.buffer))
def _CombineBinaryExpressions(self, operator):
for i in range(1, len(self.stack)-1):
item = self.stack[i]
if (isinstance(item, lexer.BinaryExpression) and
item.operator.lower() == operator.lower() and
isinstance(self.stack[i-1], lexer.Expression) and
isinstance(self.stack[i+1], lexer.Expression)):
lhs = self.stack[i-1]
rhs = self.stack[i+1]
self.stack[i].AddOperands(lhs, rhs)
self.stack[i-1] = None
self.stack[i+1] = None
self.stack = filter(None, self.stack)
def _CombineContext(self):
# Context can merge from item 0
for i in range(len(self.stack)-1, 0, -1):
item = self.stack[i-1]
if (isinstance(item, ContextExpression) and
isinstance(self.stack[i], lexer.Expression)):
expression = self.stack[i]
self.stack[i-1].SetExpression(expression)
self.stack[i] = None
self.stack = filter(None, self.stack)
### FILTER IMPLEMENTATIONS
class BaseFilterImplementation(object):
"""Defines the base implementation of an object filter by its attributes.
Inherit from this class, switch any of the needed operators and pass it to
the Compile method of a parsed string to obtain an executable filter.
"""
OPS = OP2FN
FILTERS = {"ValueExpander": AttributeValueExpander,
"AndFilter": AndFilter,
"OrFilter": OrFilter,
"IdentityFilter": IdentityFilter,
"Context": Context}
class LowercaseAttributeFilterImplementation(BaseFilterImplementation):
"""Does field name access on the lowercase version of names.
Useful to only access attributes and properties with Google's python naming
style.
"""
FILTERS = {}
FILTERS.update(BaseFilterImplementation.FILTERS)
FILTERS.update({"ValueExpander": LowercaseAttributeValueExpander})
class DictFilterImplementation(BaseFilterImplementation):
"""Does value fetching by dictionary access on the object."""
FILTERS = {}
FILTERS.update(BaseFilterImplementation.FILTERS)
FILTERS.update({"ValueExpander": DictValueExpander})
| apache-2.0 |
synologix/enigma2 | lib/python/Tools/Notifications.py | 66 | 1963 | notifications = [ ]
notificationAdded = [ ]
# notifications which are currently on screen (and might be closed by similar notifications)
current_notifications = [ ]
def __AddNotification(fnc, screen, id, *args, **kwargs):
if ".MessageBox'>" in `screen`:
kwargs["simple"] = True
notifications.append((fnc, screen, args, kwargs, id))
for x in notificationAdded:
x()
def AddNotification(screen, *args, **kwargs):
AddNotificationWithCallback(None, screen, *args, **kwargs)
def AddNotificationWithCallback(fnc, screen, *args, **kwargs):
__AddNotification(fnc, screen, None, *args, **kwargs)
def AddNotificationParentalControl(fnc, screen, *args, **kwargs):
RemovePopup("Parental control")
__AddNotification(fnc, screen, "Parental control", *args, **kwargs)
def AddNotificationWithID(id, screen, *args, **kwargs):
__AddNotification(None, screen, id, *args, **kwargs)
def AddNotificationWithIDCallback(fnc, id, screen, *args, **kwargs):
__AddNotification(fnc, screen, id, *args, **kwargs)
# we don't support notifications with callback and ID as this
# would require manually calling the callback on cancelled popups.
def RemovePopup(id):
	# remove similar notifications
print "RemovePopup, id =", id
for x in notifications:
if x[4] and x[4] == id:
print "(found in notifications)"
notifications.remove(x)
for x in current_notifications:
if x[0] == id:
print "(found in current notifications)"
x[1].close()
from Screens.MessageBox import MessageBox
def AddPopup(text, type, timeout, id = None):
if id is not None:
RemovePopup(id)
print "AddPopup, id =", id
AddNotificationWithID(id, MessageBox, text = text, type = type, timeout = timeout, close_on_any_key = True)
def AddPopupWithCallback(fnc, text, type, timeout, id = None):
if id is not None:
RemovePopup(id)
print "AddPopup, id =", id
AddNotificationWithIDCallback(fnc, id, MessageBox, text = text, type = type, timeout = timeout, close_on_any_key = False)
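# Illustrative usage sketch (requires a running Enigma2 session, hence shown
# only as a comment; the id string is a made-up example):
#   AddPopup(text=_("Recording started"), type=MessageBox.TYPE_INFO,
#            timeout=5, id="myplugin_popup")
#   ...
#   RemovePopup("myplugin_popup")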
| gpl-2.0 |
BMan-L/shadowsocks | tests/nose_plugin.py | 1072 | 1164 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nose
from nose.plugins.base import Plugin
class ExtensionPlugin(Plugin):
name = "ExtensionPlugin"
def options(self, parser, env):
Plugin.options(self, parser, env)
def configure(self, options, config):
Plugin.configure(self, options, config)
self.enabled = True
def wantFile(self, file):
return file.endswith('.py')
def wantDirectory(self, directory):
return True
def wantModule(self, file):
return True
if __name__ == '__main__':
nose.main(addplugins=[ExtensionPlugin()])
| apache-2.0 |
slint/zenodo | zenodo/modules/exporter/__init__.py | 2 | 1189 | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Exporter programmatic API."""
from __future__ import absolute_import, print_function
from .api import Exporter
from .streams import BZip2ResultStream, ResultStream
from .writers import BucketWriter, filename_factory
| gpl-2.0 |
flos-club/eekk | libmat2/abstract.py | 1 | 1460 | import abc
import os
import re
from typing import Set, Dict, Union
assert Set # make pyflakes happy
class AbstractParser(abc.ABC):
""" This is the base class of every parser.
It might yield `ValueError` on instantiation on invalid files,
and `RuntimeError` when something went wrong in `remove_all`.
"""
meta_list = set() # type: Set[str]
mimetypes = set() # type: Set[str]
def __init__(self, filename: str) -> None:
"""
:raises ValueError: Raised upon an invalid file
"""
if re.search('^[a-z0-9./]', filename) is None:
# Some parsers are calling external binaries,
# this prevents shell command injections
filename = os.path.join('.', filename)
self.filename = filename
fname, extension = os.path.splitext(filename)
# Special case for tar.gz, tar.bz2, … files
if fname.endswith('.tar') and len(fname) > 4:
fname, extension = fname[:-4], '.tar' + extension
self.output_filename = fname + '.cleaned' + extension
self.lightweight_cleaning = False
@abc.abstractmethod
def get_meta(self) -> Dict[str, Union[str, dict]]:
"""Return all the metadata of the current file"""
@abc.abstractmethod
def remove_all(self) -> bool:
"""
Remove all the metadata of the current file
:raises RuntimeError: Raised if the cleaning process went wrong.
"""
| gpl-3.0 |
aragos/tichu-tournament | api/src/welcome_handler.py | 1 | 3909 | import webapp2
import json
from generic_handler import GenericHandler
from google.appengine.api import mail
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.api.app_identity import get_application_id
from handler_utils import CheckUserOwnsTournamentAndMaybeReturnStatus
from handler_utils import GetTourneyWithIdAndMaybeReturnStatus
from handler_utils import SetErrorStatus
from models import Tournament
from models import PlayerPair
class WelcomeHandler(GenericHandler):
  ''' Handles requests to /api/tournament/:id/welcome. Responsible for emailing
players with their player codes.
'''
@ndb.toplevel
def post(self, id):
''' Sends an email for all email addresses in the request.
Checks that emails belong to players in the tournament and sends the email
only to valid addresses.
Args:
id: tournament ID to look up. Tournament must already have been
created.
'''
user = users.get_current_user()
tourney = GetTourneyWithIdAndMaybeReturnStatus(self.response, id)
if not tourney:
return
if not CheckUserOwnsTournamentAndMaybeReturnStatus(self.response, user,
tourney):
return
request_dict = self._ParseRequestAndMaybeSetStatus()
if not request_dict:
return
self._SendEmails(request_dict, user, tourney)
self.response.headers['Content-Type'] = 'application/json'
self.response.set_status(201)
def _SendEmails(self, request_dict, user, tourney):
'''Sends a welcome email for all email addresses in the request_dict.
Args:
request_dict: Parsed JSON dict.
user: The ndb.User owning this tournament.
tourney: The tournament model object.
'''
player_pairs = PlayerPair._query(ancestor=tourney.key).fetch()
requested_emails = request_dict["emails"]
for player_pair in player_pairs:
for player in player_pair.player_list():
if player.get("email") not in requested_emails:
continue
player_name = player.get("name")
player_greeting = "Dear {},".format(player_name) if player_name else "Greetings!"
email_text = """{}
\nWelcome to Tichu tournament \"{}\". Your pair's ID is {}.
You can use it to view and enter your results on https://tichu-tournament.appspot.com/home/{}.
\nGood Luck!
Your friendly neighborhood tournament director""".format(
player_greeting, tourney.name, player_pair.id, player_pair.id)
email_html = """{}
<br/>
<br/>Welcome to Tichu tournament \"{}\". Your pair's ID is <b>{}</b>.
You can use it to view and enter your results on https://tichu-tournament.appspot.com/home/{}.
<br/>
<br/>Good Luck!
<br/>Your friendly neighborhood tournament director
""".format(player_greeting, tourney.name, player_pair.id, player_pair.id)
mail.send_mail(
sender="{} <welcome@{}.appspotmail.com>".format(tourney.name, get_application_id()),
to=player["email"],
subject="Your Tichu Tournament Pair Code",
body=email_text,
html=email_html,
reply_to=user.email())
def _ParseRequestAndMaybeSetStatus(self):
    ''' Parses the client request for emails and sets an error status if the
    request is unreadable or the email list is empty.
    Returns: dict corresponding to the parsed request.
'''
try:
request_dict = json.loads(self.request.body)
except ValueError:
SetErrorStatus(self.response, 500, "Invalid Input",
"Unable to parse request body as JSON object")
return None
request_dict["emails"] = [e for e in request_dict["emails"] if e and e != ""]
if len(request_dict["emails"]) == 0:
SetErrorStatus(self.response, 400, "Invalid Input",
"No emails specified.")
return None
return request_dict | mit |
PHOTOX/fuase | ase/ase/tasks/io.py | 9 | 1499 | import numpy as np
from ase.parallel import world
try:
import json
except ImportError:
json = None
if json is None:
def dumps(obj):
if isinstance(obj, str):
return '"' + obj + '"'
if isinstance(obj, (int, float)):
return repr(obj)
if isinstance(obj, dict):
return '{' + ', '.join(dumps(key) + ': ' + dumps(value)
for key, value in obj.items()) + '}'
return '[' + ','.join(dumps(value) for value in obj) + ']'
loads = eval
else:
class NDArrayEncoder(json.JSONEncoder):
def __init__(self):
json.JSONEncoder.__init__(self, sort_keys=True, indent=4)
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
dumps = NDArrayEncoder().encode
loads = json.loads
def numpyfy(obj):
if isinstance(obj, dict):
return dict((key, numpyfy(value)) for key, value in obj.items())
if isinstance(obj, list):
try:
obj = np.array(obj)
except ValueError:
obj = [numpyfy(value) for value in obj]
return obj
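# Illustrative round trip (hypothetical file name and data): ndarrays are
# serialised as nested lists by dumps/NDArrayEncoder and rebuilt by numpyfy:
#   write_json('results.json', {'energies': np.array([1.0, 2.0])})
#   read_json('results.json')['energies']   # -> array([ 1.,  2.])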
def write_json(name, results):
if world.rank == 0:
fd = open(name, 'w')
fd.write(dumps(results))
fd.close()
def read_json(name):
fd = open(name, 'r')
results = loads(fd.read())
fd.close()
world.barrier()
return numpyfy(results)
| gpl-2.0 |
luser/socorro | socorro/unittest/external/fs/test_fslegacydatedradixtreestorage.py | 3 | 17117 | import os
import shutil
from mock import Mock
from configman import ConfigurationManager
from nose.tools import eq_, ok_, assert_raises
from socorro.external.fs.crashstorage import (
FSLegacyDatedRadixTreeStorage,
FSTemporaryStorage
)
from socorro.external.crashstorage_base import (
CrashIDNotFound,
MemoryDumpsMapping,
)
from socorro.unittest.testbase import TestCase
class TestFSLegacyDatedRadixTreeStorage(TestCase):
CRASH_ID_1 = "0bba929f-8721-460c-dead-a43c20071025"
CRASH_ID_2 = "0bba929f-8721-460c-dead-a43c20071026"
CRASH_ID_3 = "0bba929f-8721-460c-dddd-a43c20071025"
def setUp(self):
with self._common_config_setup().context() as config:
self.fsrts = FSLegacyDatedRadixTreeStorage(config)
def tearDown(self):
shutil.rmtree(self.fsrts.config.fs_root)
def _common_config_setup(self):
mock_logging = Mock()
required_config = FSLegacyDatedRadixTreeStorage.get_required_config()
required_config.add_option('logger', default=mock_logging)
config_manager = ConfigurationManager(
[required_config],
app_name='testapp',
app_version='1.0',
app_description='app description',
values_source_list=[{
'logger': mock_logging,
'minute_slice_interval': 1
}],
argv_source=[]
)
return config_manager
def _make_test_crash(self):
self.fsrts.save_raw_crash({
"test": "TEST"
}, MemoryDumpsMapping({
'foo': 'bar',
self.fsrts.config.dump_field: 'baz'
}), self.CRASH_ID_1)
def _make_test_crash_3(self):
self.fsrts.save_raw_crash({
"test": "TEST"
}, MemoryDumpsMapping({
'foo': 'bar',
self.fsrts.config.dump_field: 'baz'
}), self.CRASH_ID_3)
def test_save_raw_crash(self):
self._make_test_crash()
ok_(os.path.islink(
os.path.join(
self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1),
self.fsrts._get_date_root_name(self.CRASH_ID_1))))
ok_(os.path.exists(
os.path.join(
self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1),
self.fsrts._get_date_root_name(self.CRASH_ID_1),
self.CRASH_ID_1)))
def test_get_raw_crash(self):
self._make_test_crash()
eq_(self.fsrts.get_raw_crash(self.CRASH_ID_1)['test'],
"TEST")
assert_raises(CrashIDNotFound, self.fsrts.get_raw_crash,
self.CRASH_ID_2)
def test_get_raw_dump(self):
self._make_test_crash()
eq_(self.fsrts.get_raw_dump(self.CRASH_ID_1, 'foo'),
"bar")
eq_(self.fsrts.get_raw_dump(self.CRASH_ID_1,
self.fsrts.config.dump_field),
"baz")
assert_raises(CrashIDNotFound, self.fsrts.get_raw_dump,
self.CRASH_ID_2, "foo")
assert_raises(IOError, self.fsrts.get_raw_dump, self.CRASH_ID_1,
"foor")
def test_get_raw_dumps(self):
self._make_test_crash()
eq_(self.fsrts.get_raw_dumps(self.CRASH_ID_1), MemoryDumpsMapping({
'foo': 'bar',
self.fsrts.config.dump_field: 'baz'
}))
assert_raises(CrashIDNotFound, self.fsrts.get_raw_dumps,
self.CRASH_ID_2)
def test_remove(self):
self._make_test_crash()
self.fsrts.remove(self.CRASH_ID_1)
parent = os.path.realpath(
os.path.join(
self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1),
self.fsrts._get_date_root_name(self.CRASH_ID_1)))
p = os.path.join(parent, self.CRASH_ID_1)
ok_(not os.path.exists(p))
assert_raises(CrashIDNotFound, self.fsrts.remove,
self.CRASH_ID_2)
def test_new_crashes(self):
self.fsrts._current_slot = lambda: ['00', '00_00']
self._make_test_crash()
self.fsrts._current_slot = lambda: ['00', '00_01']
eq_(list(self.fsrts.new_crashes()), [self.CRASH_ID_1])
eq_(list(self.fsrts.new_crashes()), [])
self.fsrts.remove(self.CRASH_ID_1)
del self.fsrts._current_slot
self.fsrts._current_slot = lambda: ['00', '00_00']
self._make_test_crash()
date_path = self.fsrts._get_dated_parent_directory(self.CRASH_ID_1,
['00', '00_00'])
new_date_path = self.fsrts._get_dated_parent_directory(self.CRASH_ID_1,
['00', '00_01'])
webhead_path = os.sep.join([new_date_path, 'webhead_0'])
os.mkdir(new_date_path)
os.rename(date_path, webhead_path)
os.unlink(os.sep.join([webhead_path, self.CRASH_ID_1]))
os.symlink('../../../../name/' + os.sep.join(self.fsrts._get_radix(
self.CRASH_ID_1)),
os.sep.join([webhead_path, self.CRASH_ID_1]))
self.fsrts._current_slot = lambda: ['00', '00_02']
eq_(list(self.fsrts.new_crashes()),
[self.CRASH_ID_1])
def test_orphaned_symlink_clean_up(self):
# Bug 971496 identified a problem where a second crash coming in with
# the same crash id would derail saving the second crash and leave
# an extra undeleted symbolic link in the file system. This link
        # would be cited as undeleted on every run of 'new_crashes'.
# this test shows that we can clean these extra symlinks if we
# encounter them.
self.fsrts._current_slot = lambda: ['00', '00_00']
self._make_test_crash()
self.fsrts._current_slot = lambda: ['00', '00_01']
# make sure we can't create the duplicate in a different slot
assert_raises(OSError, self._make_test_crash)
# make sure the second slot exists so we can make the bogus symlink
self._make_test_crash_3()
# create bogus orphan link
self.fsrts._create_name_to_date_symlink(
self.CRASH_ID_1,
self.fsrts._current_slot()
)
ok_(os.path.islink(
'./crashes/20071025/date/00/00_01/0bba929f-8721-460c-dead-'
'a43c20071025'
))
# run through the new_crashes iterator which will yield each of the
# crashes that has been submitted since the last run of new_crashes.
# this should cause all the symlinks to be removed.
# we don't bother saving the crashes, as we don't need them.
for x in self.fsrts.new_crashes():
pass
ok_(not os.path.exists(
'./crashes/20071025/date/00/00_01/0bba929f-8721-460c-dead-'
'a43c20071025'
))
class MyFSTemporaryStorage(FSTemporaryStorage):
def _get_current_date(self):
return "25"
class TestFSTemporaryStorage(TestCase):
CRASH_ID_1 = "0bba929f-8721-460c-dead-a43c20071025"
CRASH_ID_2 = "0bba929f-8721-460c-dead-a43c20071026"
CRASH_ID_3 = "0bba929f-8721-460c-dddd-a43c20071025"
CRASH_ID_4 = "0bba929f-8721-460c-dddd-a43c20071125"
def setUp(self):
with self._common_config_setup().context() as config:
self.fsrts = MyFSTemporaryStorage(config)
def tearDown(self):
shutil.rmtree(self.fsrts.config.fs_root)
def _common_config_setup(self):
mock_logging = Mock()
required_config = MyFSTemporaryStorage.get_required_config()
required_config.add_option('logger', default=mock_logging)
config_manager = ConfigurationManager(
[required_config],
app_name='testapp',
app_version='1.0',
app_description='app description',
values_source_list=[{
'logger': mock_logging,
'minute_slice_interval': 1
}],
argv_source=[]
)
return config_manager
def _make_test_crash(self):
self.fsrts.save_raw_crash(
{"test": "TEST"},
MemoryDumpsMapping({
'foo': 'bar',
self.fsrts.config.dump_field: 'baz'
}),
self.CRASH_ID_1
)
def _make_test_crash_3(self):
self.fsrts.save_raw_crash(
{"test": "TEST"},
MemoryDumpsMapping({
'foo': 'bar',
self.fsrts.config.dump_field: 'baz'
}),
self.CRASH_ID_3
)
def _make_test_crash_4(self):
self.fsrts.save_raw_crash(
{"test": "TEST"},
MemoryDumpsMapping({
'foo': 'bar',
self.fsrts.config.dump_field: 'baz'
}),
self.CRASH_ID_4
)
def test_save_raw_crash(self):
self._make_test_crash()
ok_(os.path.islink(
os.path.join(
self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1),
self.fsrts._get_date_root_name(self.CRASH_ID_1))))
ok_(os.path.exists(
os.path.join(
self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1),
self.fsrts._get_date_root_name(self.CRASH_ID_1),
self.CRASH_ID_1)))
def test_get_raw_crash(self):
self._make_test_crash()
eq_(self.fsrts.get_raw_crash(self.CRASH_ID_1)['test'],
"TEST")
assert_raises(CrashIDNotFound, self.fsrts.get_raw_crash,
self.CRASH_ID_2)
def test_get_raw_dump(self):
self._make_test_crash()
eq_(self.fsrts.get_raw_dump(self.CRASH_ID_1, 'foo'),
"bar")
eq_(self.fsrts.get_raw_dump(self.CRASH_ID_1,
self.fsrts.config.dump_field),
"baz")
assert_raises(CrashIDNotFound, self.fsrts.get_raw_dump,
self.CRASH_ID_2, "foo")
assert_raises(IOError, self.fsrts.get_raw_dump, self.CRASH_ID_1,
"foor")
def test_get_raw_dumps(self):
self._make_test_crash()
eq_(self.fsrts.get_raw_dumps(self.CRASH_ID_1), MemoryDumpsMapping({
'foo': 'bar',
self.fsrts.config.dump_field: 'baz'
}))
assert_raises(CrashIDNotFound, self.fsrts.get_raw_dumps,
self.CRASH_ID_2)
def test_remove(self):
self._make_test_crash()
self.fsrts.remove(self.CRASH_ID_1)
parent = os.path.realpath(
os.path.join(
self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1),
self.fsrts._get_date_root_name(self.CRASH_ID_1)))
p = os.path.join(parent, self.CRASH_ID_1)
ok_(not os.path.exists(p))
assert_raises(CrashIDNotFound, self.fsrts.remove,
self.CRASH_ID_2)
def test_new_crashes(self):
self.fsrts._current_slot = lambda: ['00', '00_00']
self._make_test_crash()
self.fsrts._current_slot = lambda: ['00', '00_01']
eq_(list(self.fsrts.new_crashes()), [self.CRASH_ID_1])
eq_(list(self.fsrts.new_crashes()), [])
self.fsrts.remove(self.CRASH_ID_1)
del self.fsrts._current_slot
self.fsrts._current_slot = lambda: ['00', '00_00']
self._make_test_crash()
date_path = self.fsrts._get_dated_parent_directory(self.CRASH_ID_1,
['00', '00_00'])
new_date_path = self.fsrts._get_dated_parent_directory(self.CRASH_ID_1,
['00', '00_01'])
webhead_path = os.sep.join([new_date_path, 'webhead_0'])
os.mkdir(new_date_path)
os.rename(date_path, webhead_path)
os.unlink(os.sep.join([webhead_path, self.CRASH_ID_1]))
os.symlink('../../../../name/' + os.sep.join(self.fsrts._get_radix(
self.CRASH_ID_1)),
os.sep.join([webhead_path, self.CRASH_ID_1]))
self.fsrts._current_slot = lambda: ['00', '00_02']
eq_(list(self.fsrts.new_crashes()),
[self.CRASH_ID_1])
def test_orphaned_symlink_clean_up(self):
# Bug 971496 identified a problem where a second crash coming in with
# the same crash id would derail saving the second crash and leave
# an extra undeleted symbolic link in the file system. This link
        # would be cited as undeleted on every run of 'new_crashes'.
# this test shows that we can clean these extra symlinks if we
# encounter them.
self.fsrts._current_slot = lambda: ['00', '00_00']
self._make_test_crash()
self.fsrts._current_slot = lambda: ['00', '00_01']
# make sure we can't create the duplicate in a different slot
assert_raises(OSError, self._make_test_crash)
# make sure the second slot exists so we can make the bogus symlink
self._make_test_crash_3()
# create bogus orphan link
self.fsrts._create_name_to_date_symlink(
self.CRASH_ID_1,
self.fsrts._current_slot()
)
ok_(os.path.islink(
'./crashes/25/date/00/00_01/0bba929f-8721-460c-dead-'
'a43c20071025'
))
ok_(os.path.islink(
'./crashes/25/date/00/00_01/0bba929f-8721-460c-dddd-'
'a43c20071025'
))
# make sure all slots in use are traversable
self.fsrts._current_slot = lambda: ['00', '00_02']
# run through the new_crashes iterator which will yield each of the
# crashes that has been submitted since the last run of new_crashes.
# this should cause all the symlinks to be removed.
# we don't bother saving the crashes, as we don't need them.
for x in self.fsrts.new_crashes():
pass
ok_(not os.path.exists(
'./crashes/25/date/00/00_01/0bba929f-8721-460c-dead-a43c20071025'
))
def test_make_sure_days_recycle(self):
self.fsrts._current_slot = lambda: ['00', '00_01']
self._make_test_crash()
self._make_test_crash_3()
self._make_test_crash_4()
ok_(os.path.exists(
'./crashes/25/date/00/00_01/0bba929f-8721-460c-dead-a43c20071025'
))
ok_(os.path.exists(
'./crashes/25/date/00/00_01/0bba929f-8721-460c-dddd-a43c20071025'
))
ok_(os.path.exists(
'./crashes/25/date/00/00_01/0bba929f-8721-460c-dddd-a43c20071125'
))
for x in self.fsrts.new_crashes():
pass
def _secondary_config_setup(self):
mock_logging = Mock()
required_config = FSLegacyDatedRadixTreeStorage.get_required_config()
required_config.add_option('logger', default=mock_logging)
config_manager = ConfigurationManager(
[required_config],
app_name='testapp',
app_version='1.0',
app_description='app description',
values_source_list=[{
'logger': mock_logging,
'minute_slice_interval': 1
}],
argv_source=[]
)
return config_manager
def test_make_sure_old_style_date_directories_are_traversed(self):
with self._secondary_config_setup().context() as config:
self.fsrts_old = FSLegacyDatedRadixTreeStorage(config)
self.fsrts_old._current_slot = lambda: ['00', '00_00']
# save crash 1 in old system
self.fsrts_old.save_raw_crash({
"test": "TEST"
}, MemoryDumpsMapping({
'foo': 'bar',
self.fsrts.config.dump_field: 'baz'
}), self.CRASH_ID_1)
ok_(os.path.exists(
'./crashes/20071025/date/00/00_00/0bba929f-8721-460c-dead-'
'a43c20071025'
))
self.fsrts._current_slot = lambda: ['00', '00_00']
#save crash 3 in new system
self._make_test_crash_3()
ok_(os.path.exists(
'./crashes/25/date/00/00_00/0bba929f-8721-460c-dddd-a43c20071025'
))
# consume crashes
for x in self.fsrts.new_crashes():
pass
# should be consumed because it isn't in our working tree or slot
ok_(not os.path.exists(
'./crashes/20071025/date/00/00_00/0bba929f-8721-460c-dead-'
'a43c20071025'
))
# should not be consumed, while in working tree, it is in active slot
ok_(os.path.exists(
'./crashes/25/date/00/00_00/0bba929f-8721-460c-dddd-a43c20071025'
))
# switch to next active slot
self.fsrts._current_slot = lambda: ['00', '00_01']
# consume crashes
for x in self.fsrts.new_crashes():
pass
# should be consumed because it is in working tree and inactive slot
        ok_(not os.path.exists(
            './crashes/25/date/00/00_00/0bba929f-8721-460c-dddd-a43c20071025'
        ))
| mpl-2.0 |
ianan/demreg | python/dem_reg_map.py | 1 | 1067 | import numpy as np
def dem_reg_map(sigmaa,sigmab,U,W,data,err,reg_tweak,nmu=500):
"""
dem_reg_map
computes the regularisation parameter
Inputs
    sigmaa:
        vector of generalised singular values (from the GSVD)
    sigmab:
        vector of generalised singular values (from the GSVD)
    U:
        gsvd matrix
    W:
        gsvd matrix
    data:
        dn data
    err:
        dn error
    reg_tweak:
        how much to adjust the chisq each iteration
    nmu:
        number of trial regularization parameters to scan (default 500)
    Outputs
    opt:
        regularization parameter
"""
nf=data.shape[0]
nreg=sigmaa.shape[0]
arg=np.zeros([nreg,nmu])
discr=np.zeros([nmu])
sigs=sigmaa[:nf]/sigmab[:nf]
maxx=max(sigs)
minx=min(sigs)**2.0*1E-2
step=(np.log(maxx)-np.log(minx))/(nmu-1.)
mu=np.exp(np.arange(nmu)*step)*minx
for kk in np.arange(nf):
coef=data@U[kk,:]-sigmaa[kk]
for ii in np.arange(nmu):
arg[kk,ii]=(mu[ii]*sigmab[kk]**2*coef/(sigmaa[kk]**2+mu[ii]*sigmab[kk]**2))**2
discr=np.sum(arg,axis=0)-np.sum(err**2)*reg_tweak
opt=mu[np.argmin(np.abs(discr))]
return opt | gpl-2.0 |
guorendong/iridium-browser-ubuntu | third_party/webpagereplay/third_party/dns/resolver.py | 215 | 28920 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS stub resolver.
@var default_resolver: The default resolver object
@type default_resolver: dns.resolver.Resolver object"""
import socket
import sys
import time
import dns.exception
import dns.message
import dns.name
import dns.query
import dns.rcode
import dns.rdataclass
import dns.rdatatype
if sys.platform == 'win32':
import _winreg
class NXDOMAIN(dns.exception.DNSException):
"""The query name does not exist."""
pass
# The definition of the Timeout exception has moved from here to the
# dns.exception module. We keep dns.resolver.Timeout defined for
# backwards compatibility.
Timeout = dns.exception.Timeout
class NoAnswer(dns.exception.DNSException):
"""The response did not contain an answer to the question."""
pass
class NoNameservers(dns.exception.DNSException):
"""No non-broken nameservers are available to answer the query."""
pass
class NotAbsolute(dns.exception.DNSException):
"""Raised if an absolute domain name is required but a relative name
was provided."""
pass
class NoRootSOA(dns.exception.DNSException):
"""Raised if for some reason there is no SOA at the root name.
This should never happen!"""
pass
class Answer(object):
"""DNS stub resolver answer
Instances of this class bundle up the result of a successful DNS
resolution.
For convenience, the answer object implements much of the sequence
protocol, forwarding to its rrset. E.g. "for a in answer" is
equivalent to "for a in answer.rrset", "answer[i]" is equivalent
to "answer.rrset[i]", and "answer[i:j]" is equivalent to
"answer.rrset[i:j]".
Note that CNAMEs or DNAMEs in the response may mean that answer
node's name might not be the query name.
@ivar qname: The query name
@type qname: dns.name.Name object
@ivar rdtype: The query type
@type rdtype: int
@ivar rdclass: The query class
@type rdclass: int
@ivar response: The response message
@type response: dns.message.Message object
@ivar rrset: The answer
@type rrset: dns.rrset.RRset object
@ivar expiration: The time when the answer expires
@type expiration: float (seconds since the epoch)
"""
def __init__(self, qname, rdtype, rdclass, response):
self.qname = qname
self.rdtype = rdtype
self.rdclass = rdclass
self.response = response
min_ttl = -1
rrset = None
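        # Walk at most 15 links of a CNAME chain looking for an rrset of the
        # requested type, tracking the smallest TTL seen so the answer
        # expires no later than its shortest-lived record.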
for count in xrange(0, 15):
try:
rrset = response.find_rrset(response.answer, qname,
rdclass, rdtype)
if min_ttl == -1 or rrset.ttl < min_ttl:
min_ttl = rrset.ttl
break
except KeyError:
if rdtype != dns.rdatatype.CNAME:
try:
crrset = response.find_rrset(response.answer,
qname,
rdclass,
dns.rdatatype.CNAME)
if min_ttl == -1 or crrset.ttl < min_ttl:
min_ttl = crrset.ttl
for rd in crrset:
qname = rd.target
break
continue
except KeyError:
raise NoAnswer
raise NoAnswer
if rrset is None:
raise NoAnswer
self.rrset = rrset
self.expiration = time.time() + min_ttl
def __getattr__(self, attr):
if attr == 'name':
return self.rrset.name
elif attr == 'ttl':
return self.rrset.ttl
elif attr == 'covers':
return self.rrset.covers
elif attr == 'rdclass':
return self.rrset.rdclass
elif attr == 'rdtype':
return self.rrset.rdtype
else:
raise AttributeError(attr)
def __len__(self):
return len(self.rrset)
def __iter__(self):
return iter(self.rrset)
def __getitem__(self, i):
return self.rrset[i]
def __delitem__(self, i):
del self.rrset[i]
def __getslice__(self, i, j):
return self.rrset[i:j]
def __delslice__(self, i, j):
del self.rrset[i:j]
class Cache(object):
"""Simple DNS answer cache.
@ivar data: A dictionary of cached data
@type data: dict
@ivar cleaning_interval: The number of seconds between cleanings. The
default is 300 (5 minutes).
@type cleaning_interval: float
@ivar next_cleaning: The time the cache should next be cleaned (in seconds
since the epoch.)
@type next_cleaning: float
"""
def __init__(self, cleaning_interval=300.0):
"""Initialize a DNS cache.
@param cleaning_interval: the number of seconds between periodic
cleanings. The default is 300.0
@type cleaning_interval: float.
"""
self.data = {}
self.cleaning_interval = cleaning_interval
self.next_cleaning = time.time() + self.cleaning_interval
def maybe_clean(self):
"""Clean the cache if it's time to do so."""
now = time.time()
if self.next_cleaning <= now:
keys_to_delete = []
for (k, v) in self.data.iteritems():
if v.expiration <= now:
keys_to_delete.append(k)
for k in keys_to_delete:
del self.data[k]
now = time.time()
self.next_cleaning = now + self.cleaning_interval
def get(self, key):
"""Get the answer associated with I{key}. Returns None if
no answer is cached for the key.
@param key: the key
@type key: (dns.name.Name, int, int) tuple whose values are the
query name, rdtype, and rdclass.
@rtype: dns.resolver.Answer object or None
"""
self.maybe_clean()
v = self.data.get(key)
if v is None or v.expiration <= time.time():
return None
return v
def put(self, key, value):
"""Associate key and value in the cache.
@param key: the key
@type key: (dns.name.Name, int, int) tuple whose values are the
query name, rdtype, and rdclass.
@param value: The answer being cached
@type value: dns.resolver.Answer object
"""
self.maybe_clean()
self.data[key] = value
def flush(self, key=None):
"""Flush the cache.
If I{key} is specified, only that item is flushed. Otherwise
the entire cache is flushed.
@param key: the key to flush
@type key: (dns.name.Name, int, int) tuple or None
"""
        if key is not None:
            if key in self.data:
                del self.data[key]
        else:
            self.data = {}
self.next_cleaning = time.time() + self.cleaning_interval
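# Illustrative usage sketch (hypothetical names): answers are cached under
# (qname, rdtype, rdclass) tuples, e.g.
#   cache = Cache()
#   key = (dns.name.from_text('example.com.'), dns.rdatatype.A,
#          dns.rdataclass.IN)
#   cache.put(key, answer)    # answer: a dns.resolver.Answer instance
#   cache.get(key)            # returns None once the answer has expired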
class Resolver(object):
"""DNS stub resolver
@ivar domain: The domain of this host
@type domain: dns.name.Name object
@ivar nameservers: A list of nameservers to query. Each nameserver is
a string which contains the IP address of a nameserver.
@type nameservers: list of strings
@ivar search: The search list. If the query name is a relative name,
the resolver will construct an absolute query name by appending the search
names one by one to the query name.
@type search: list of dns.name.Name objects
@ivar port: The port to which to send queries. The default is 53.
@type port: int
@ivar timeout: The number of seconds to wait for a response from a
server, before timing out.
@type timeout: float
@ivar lifetime: The total number of seconds to spend trying to get an
answer to the question. If the lifetime expires, a Timeout exception
will occur.
@type lifetime: float
@ivar keyring: The TSIG keyring to use. The default is None.
@type keyring: dict
@ivar keyname: The TSIG keyname to use. The default is None.
@type keyname: dns.name.Name object
@ivar keyalgorithm: The TSIG key algorithm to use. The default is
dns.tsig.default_algorithm.
@type keyalgorithm: string
@ivar edns: The EDNS level to use. The default is -1, no Edns.
@type edns: int
@ivar ednsflags: The EDNS flags
@type ednsflags: int
@ivar payload: The EDNS payload size. The default is 0.
@type payload: int
@ivar cache: The cache to use. The default is None.
@type cache: dns.resolver.Cache object
"""
def __init__(self, filename='/etc/resolv.conf', configure=True):
"""Initialize a resolver instance.
@param filename: The filename of a configuration file in
standard /etc/resolv.conf format. This parameter is meaningful
only when I{configure} is true and the platform is POSIX.
@type filename: string or file object
@param configure: If True (the default), the resolver instance
is configured in the normal fashion for the operating system
the resolver is running on. (I.e. a /etc/resolv.conf file on
POSIX systems and from the registry on Windows systems.)
@type configure: bool"""
self.reset()
if configure:
if sys.platform == 'win32':
self.read_registry()
elif filename:
self.read_resolv_conf(filename)
def reset(self):
"""Reset all resolver configuration to the defaults."""
self.domain = \
dns.name.Name(dns.name.from_text(socket.gethostname())[1:])
if len(self.domain) == 0:
self.domain = dns.name.root
self.nameservers = []
self.search = []
self.port = 53
self.timeout = 2.0
self.lifetime = 30.0
self.keyring = None
self.keyname = None
self.keyalgorithm = dns.tsig.default_algorithm
self.edns = -1
self.ednsflags = 0
self.payload = 0
self.cache = None
def read_resolv_conf(self, f):
"""Process f as a file in the /etc/resolv.conf format. If f is
a string, it is used as the name of the file to open; otherwise it
is treated as the file itself."""
if isinstance(f, str) or isinstance(f, unicode):
try:
f = open(f, 'r')
except IOError:
# /etc/resolv.conf doesn't exist, can't be read, etc.
# We'll just use the default resolver configuration.
self.nameservers = ['127.0.0.1']
return
want_close = True
else:
want_close = False
try:
for l in f:
if len(l) == 0 or l[0] == '#' or l[0] == ';':
continue
tokens = l.split()
if len(tokens) == 0:
continue
if tokens[0] == 'nameserver':
self.nameservers.append(tokens[1])
elif tokens[0] == 'domain':
self.domain = dns.name.from_text(tokens[1])
elif tokens[0] == 'search':
for suffix in tokens[1:]:
self.search.append(dns.name.from_text(suffix))
finally:
if want_close:
f.close()
if len(self.nameservers) == 0:
self.nameservers.append('127.0.0.1')
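    # Example of the input this parser accepts (a hypothetical
    # /etc/resolv.conf):
    #
    #   nameserver 192.0.2.1
    #   nameserver 192.0.2.2
    #   domain example.com
    #   search example.com corp.example.com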
def _determine_split_char(self, entry):
#
# The windows registry irritatingly changes the list element
# delimiter in between ' ' and ',' (and vice-versa) in various
# versions of windows.
#
if entry.find(' ') >= 0:
split_char = ' '
elif entry.find(',') >= 0:
split_char = ','
else:
# probably a singleton; treat as a space-separated list.
split_char = ' '
return split_char
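    # For example: '10.0.0.1,10.0.0.2' splits on ',' while
    # '10.0.0.1 10.0.0.2' splits on ' '.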
def _config_win32_nameservers(self, nameservers):
"""Configure a NameServer registry entry."""
# we call str() on nameservers to convert it from unicode to ascii
nameservers = str(nameservers)
split_char = self._determine_split_char(nameservers)
ns_list = nameservers.split(split_char)
for ns in ns_list:
if not ns in self.nameservers:
self.nameservers.append(ns)
def _config_win32_domain(self, domain):
"""Configure a Domain registry entry."""
# we call str() on domain to convert it from unicode to ascii
self.domain = dns.name.from_text(str(domain))
def _config_win32_search(self, search):
"""Configure a Search registry entry."""
# we call str() on search to convert it from unicode to ascii
search = str(search)
split_char = self._determine_split_char(search)
search_list = search.split(split_char)
for s in search_list:
if not s in self.search:
self.search.append(dns.name.from_text(s))
def _config_win32_fromkey(self, key):
"""Extract DNS info from a registry key."""
try:
servers, rtype = _winreg.QueryValueEx(key, 'NameServer')
except WindowsError:
servers = None
if servers:
self._config_win32_nameservers(servers)
try:
dom, rtype = _winreg.QueryValueEx(key, 'Domain')
if dom:
self._config_win32_domain(dom)
except WindowsError:
pass
else:
try:
servers, rtype = _winreg.QueryValueEx(key, 'DhcpNameServer')
except WindowsError:
servers = None
if servers:
self._config_win32_nameservers(servers)
try:
dom, rtype = _winreg.QueryValueEx(key, 'DhcpDomain')
if dom:
self._config_win32_domain(dom)
except WindowsError:
pass
try:
search, rtype = _winreg.QueryValueEx(key, 'SearchList')
except WindowsError:
search = None
if search:
self._config_win32_search(search)
def read_registry(self):
"""Extract resolver configuration from the Windows registry."""
lm = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
want_scan = False
try:
try:
# XP, 2000
tcp_params = _winreg.OpenKey(lm,
r'SYSTEM\CurrentControlSet'
r'\Services\Tcpip\Parameters')
want_scan = True
except EnvironmentError:
# ME
tcp_params = _winreg.OpenKey(lm,
r'SYSTEM\CurrentControlSet'
r'\Services\VxD\MSTCP')
try:
self._config_win32_fromkey(tcp_params)
finally:
tcp_params.Close()
if want_scan:
interfaces = _winreg.OpenKey(lm,
r'SYSTEM\CurrentControlSet'
r'\Services\Tcpip\Parameters'
r'\Interfaces')
try:
i = 0
while True:
try:
guid = _winreg.EnumKey(interfaces, i)
i += 1
key = _winreg.OpenKey(interfaces, guid)
if not self._win32_is_nic_enabled(lm, guid, key):
continue
try:
self._config_win32_fromkey(key)
finally:
key.Close()
except EnvironmentError:
break
finally:
interfaces.Close()
finally:
lm.Close()
def _win32_is_nic_enabled(self, lm, guid, interface_key):
# Look in the Windows Registry to determine whether the network
# interface corresponding to the given guid is enabled.
#
# (Code contributed by Paul Marks, thanks!)
#
try:
# This hard-coded location seems to be consistent, at least
# from Windows 2000 through Vista.
connection_key = _winreg.OpenKey(
lm,
r'SYSTEM\CurrentControlSet\Control\Network'
r'\{4D36E972-E325-11CE-BFC1-08002BE10318}'
r'\%s\Connection' % guid)
try:
# The PnpInstanceID points to a key inside Enum
(pnp_id, ttype) = _winreg.QueryValueEx(
connection_key, 'PnpInstanceID')
if ttype != _winreg.REG_SZ:
raise ValueError
device_key = _winreg.OpenKey(
lm, r'SYSTEM\CurrentControlSet\Enum\%s' % pnp_id)
try:
# Get ConfigFlags for this device
(flags, ttype) = _winreg.QueryValueEx(
device_key, 'ConfigFlags')
if ttype != _winreg.REG_DWORD:
raise ValueError
# Based on experimentation, bit 0x1 indicates that the
# device is disabled.
return not (flags & 0x1)
finally:
device_key.Close()
finally:
connection_key.Close()
except (EnvironmentError, ValueError):
# Pre-vista, enabled interfaces seem to have a non-empty
# NTEContextList; this was how dnspython detected enabled
# nics before the code above was contributed. We've retained
# the old method since we don't know if the code above works
# on Windows 95/98/ME.
try:
(nte, ttype) = _winreg.QueryValueEx(interface_key,
'NTEContextList')
return nte is not None
except WindowsError:
return False
def _compute_timeout(self, start):
now = time.time()
if now < start:
if start - now > 1:
# Time going backwards is bad. Just give up.
raise Timeout
else:
# Time went backwards, but only a little. This can
# happen, e.g. under vmware with older linux kernels.
# Pretend it didn't happen.
now = start
duration = now - start
if duration >= self.lifetime:
raise Timeout
return min(self.lifetime - duration, self.timeout)
def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
tcp=False, source=None):
"""Query nameservers to find the answer to the question.
The I{qname}, I{rdtype}, and I{rdclass} parameters may be objects
of the appropriate type, or strings that can be converted into objects
        of the appropriate type. E.g. For I{rdtype} the integer 2 and the
        string 'NS' both mean to query for records with DNS rdata type NS.
@param qname: the query name
@type qname: dns.name.Name object or string
@param rdtype: the query type
@type rdtype: int or string
@param rdclass: the query class
@type rdclass: int or string
@param tcp: use TCP to make the query (default is False).
@type tcp: bool
@param source: bind to this IP address (defaults to machine default IP).
@type source: IP address in dotted quad notation
@rtype: dns.resolver.Answer instance
@raises Timeout: no answers could be found in the specified lifetime
@raises NXDOMAIN: the query name does not exist
@raises NoAnswer: the response did not contain an answer
@raises NoNameservers: no non-broken nameservers are available to
answer the question."""
if isinstance(qname, (str, unicode)):
qname = dns.name.from_text(qname, None)
if isinstance(rdtype, str):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(rdclass, str):
rdclass = dns.rdataclass.from_text(rdclass)
qnames_to_try = []
if qname.is_absolute():
qnames_to_try.append(qname)
else:
if len(qname) > 1:
qnames_to_try.append(qname.concatenate(dns.name.root))
if self.search:
for suffix in self.search:
qnames_to_try.append(qname.concatenate(suffix))
else:
qnames_to_try.append(qname.concatenate(self.domain))
all_nxdomain = True
start = time.time()
for qname in qnames_to_try:
if self.cache:
answer = self.cache.get((qname, rdtype, rdclass))
if answer:
return answer
request = dns.message.make_query(qname, rdtype, rdclass)
            if self.keyname is not None:
request.use_tsig(self.keyring, self.keyname, self.keyalgorithm)
request.use_edns(self.edns, self.ednsflags, self.payload)
response = None
#
# make a copy of the servers list so we can alter it later.
#
nameservers = self.nameservers[:]
backoff = 0.10
while response is None:
if len(nameservers) == 0:
raise NoNameservers
for nameserver in nameservers[:]:
timeout = self._compute_timeout(start)
try:
if tcp:
response = dns.query.tcp(request, nameserver,
timeout, self.port,
source=source)
else:
response = dns.query.udp(request, nameserver,
timeout, self.port,
source=source)
except (socket.error, dns.exception.Timeout):
#
# Communication failure or timeout. Go to the
# next server
#
response = None
continue
except dns.query.UnexpectedSource:
#
# Who knows? Keep going.
#
response = None
continue
except dns.exception.FormError:
#
# We don't understand what this server is
# saying. Take it out of the mix and
# continue.
#
nameservers.remove(nameserver)
response = None
continue
rcode = response.rcode()
if rcode == dns.rcode.NOERROR or \
rcode == dns.rcode.NXDOMAIN:
break
#
# We got a response, but we're not happy with the
# rcode in it. Remove the server from the mix if
# the rcode isn't SERVFAIL.
#
if rcode != dns.rcode.SERVFAIL:
nameservers.remove(nameserver)
response = None
                if response is not None:
break
#
# All nameservers failed!
#
if len(nameservers) > 0:
#
# But we still have servers to try. Sleep a bit
# so we don't pound them!
#
timeout = self._compute_timeout(start)
sleep_time = min(timeout, backoff)
backoff *= 2
time.sleep(sleep_time)
if response.rcode() == dns.rcode.NXDOMAIN:
continue
all_nxdomain = False
break
if all_nxdomain:
raise NXDOMAIN
answer = Answer(qname, rdtype, rdclass, response)
if self.cache:
self.cache.put((qname, rdtype, rdclass), answer)
return answer
def use_tsig(self, keyring, keyname=None,
algorithm=dns.tsig.default_algorithm):
"""Add a TSIG signature to the query.
@param keyring: The TSIG keyring to use; defaults to None.
@type keyring: dict
@param keyname: The name of the TSIG key to use; defaults to None.
The key must be defined in the keyring. If a keyring is specified
but a keyname is not, then the key used will be the first key in the
keyring. Note that the order of keys in a dictionary is not defined,
so applications should supply a keyname when a keyring is used, unless
they know the keyring contains only one key.
@param algorithm: The TSIG key algorithm to use. The default
is dns.tsig.default_algorithm.
@type algorithm: string"""
self.keyring = keyring
if keyname is None:
self.keyname = self.keyring.keys()[0]
else:
self.keyname = keyname
self.keyalgorithm = algorithm
def use_edns(self, edns, ednsflags, payload):
"""Configure Edns.
@param edns: The EDNS level to use. The default is -1, no Edns.
@type edns: int
@param ednsflags: The EDNS flags
@type ednsflags: int
@param payload: The EDNS payload size. The default is 0.
@type payload: int"""
if edns is None:
edns = -1
self.edns = edns
self.ednsflags = ednsflags
self.payload = payload
default_resolver = None
def get_default_resolver():
"""Get the default resolver, initializing it if necessary."""
global default_resolver
if default_resolver is None:
default_resolver = Resolver()
return default_resolver
def query(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
tcp=False, source=None):
"""Query nameservers to find the answer to the question.
This is a convenience function that uses the default resolver
object to make the query.
@see: L{dns.resolver.Resolver.query} for more information on the
parameters."""
return get_default_resolver().query(qname, rdtype, rdclass, tcp, source)
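# Illustrative usage sketch (not part of the original module), assuming a
# working system resolver configuration:
#
#   for rdata in query('www.example.com', 'A'):
#       print rdata.address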
def zone_for_name(name, rdclass=dns.rdataclass.IN, tcp=False, resolver=None):
"""Find the name of the zone which contains the specified name.
@param name: the query name
@type name: absolute dns.name.Name object or string
@param rdclass: The query class
@type rdclass: int
@param tcp: use TCP to make the query (default is False).
@type tcp: bool
@param resolver: the resolver to use
@type resolver: dns.resolver.Resolver object or None
@rtype: dns.name.Name"""
if isinstance(name, (str, unicode)):
name = dns.name.from_text(name, dns.name.root)
if resolver is None:
resolver = get_default_resolver()
if not name.is_absolute():
raise NotAbsolute(name)
    while True:
try:
answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
return name
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
try:
name = name.parent()
except dns.name.NoParent:
raise NoRootSOA
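# Illustrative usage sketch (not part of the original module): zone_for_name
# walks up the name hierarchy until a SOA record answers, e.g.
#
#   print zone_for_name('www.example.com')   # typically prints 'example.com.'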
| bsd-3-clause |
ezequielpereira/Time-Line | autopilot/autopilotlib/instructions/selectmenu.py | 2 | 3493 | # Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import wx
from autopilotlib.instructions.instruction import Instruction
from autopilotlib.app.logger import Logger
from autopilotlib.app.exceptions import NotFoundException
from autopilotlib.app.decorators import Overrides
class SelectMenuInstruction(Instruction):
"""
0 1 2 3 4 5 6 7
command object ( arg1 , arg2 [ , arg]* )
command ::= Select
object ::= Menu | Mnu
arg ::= STRING | TEXT
    Select a menu in the menu hierarchy, given by the args.
At least 2 targets must be present.
Example 1: Select menu (Show, Sidebar)
Example 2: Select menu (Show, "Balloons on hover")
Example 3: Select Menu(File, New, "File Timeline...")
"""
@Overrides(Instruction)
def execute(self, manuscript, win):
manuscript.execute_next_instruction()
self._select_menu(win)
def _select_menu(self, win):
try:
item_id = self._find_menu_item_id(win)
win.click_menu_item(item_id)
except NotFoundException:
Logger.add_error("Menu not found")
def _find_menu_item_id(self, win):
labels = self.get_all_args()
menu_bar = self._get_menu_bar(win)
inx = menu_bar.FindMenu(labels[0])
menu = menu_bar.GetMenu(inx)
        labels = labels[1:]
while len(labels) > 0:
item_id = self._get_menu_item_id(menu, labels[0])
if len(labels) > 1:
menu_item = menu_bar.FindItemById(item_id)
menu = menu_item.GetSubMenu()
            labels = labels[1:]
return item_id
def _get_menu_bar(self, win):
menu_bar = win.GetMenuBar()
if menu_bar is None:
raise NotFoundException()
return menu_bar
def _get_menu_item_id(self, menu, label):
valid_labels = self._get_valid_labels(label)
for label in valid_labels:
item_id = menu.FindItem(label)
if item_id != wx.NOT_FOUND:
return item_id
return wx.NOT_FOUND
def _get_valid_labels(self, label):
valid_labels = [label]
        self._get_ellipsis_label(label, valid_labels)
self._get_accelerator_labels(label, valid_labels)
return valid_labels
    def _get_ellipsis_label(self, label, alternative_labels):
alternative_labels.append(label + "...")
def _get_accelerator_labels(self, label, alternative_labels):
for i in range(len(label)):
alternative_label = label[0:i] + "&" + label[i:]
alternative_labels.append(alternative_label)
return alternative_labels
| gpl-3.0 |
xbmc/atv2 | xbmc/lib/libPython/Python/Tools/scripts/combinerefs.py | 102 | 4381 | #! /usr/bin/env python
"""
combinerefs path
A helper for analyzing PYTHONDUMPREFS output.
When the PYTHONDUMPREFS envar is set in a debug build, at Python shutdown
time Py_Finalize() prints the list of all live objects twice: first it
prints the repr() of each object while the interpreter is still fully intact.
After cleaning up everything it can, it prints all remaining live objects
again, but the second time just prints their addresses, refcounts, and type
names (because the interpreter has been torn down, calling repr methods at
this point can get into infinite loops or blow up).
Save all this output into a file, then run this script passing the path to
that file. The script finds both output chunks, combines them, then prints
a line of output for each object still alive at the end:
address refcnt typename repr
address is the address of the object, in whatever format the platform C
produces for a %p format code.
refcnt is of the form
"[" ref "]"
when the object's refcount is the same in both PYTHONDUMPREFS output blocks,
or
"[" ref_before "->" ref_after "]"
if the refcount changed.
typename is object->ob_type->tp_name, extracted from the second PYTHONDUMPREFS
output block.
repr is repr(object), extracted from the first PYTHONDUMPREFS output block.
CAUTION: If object is a container type, it may not actually contain all the
objects shown in the repr: the repr was captured from the first output block,
and some of the containees may have been released since then. For example,
it's common for the line showing the dict of interned strings to display
strings that no longer exist at the end of Py_Finalize; this can be recognized
(albeit painfully) because such containees don't have a line of their own.
The objects are listed in allocation order, with most-recently allocated
printed first, and the first object allocated printed last.
Simple examples:
00857060 [14] str '__len__'
The str object '__len__' is alive at shutdown time, and both PYTHONDUMPREFS
output blocks said there were 14 references to it. This is probably due to
C modules that intern the string "__len__" and keep a reference to it in a
file static.
00857038 [46->5] tuple ()
46-5 = 41 references to the empty tuple were removed by the cleanup actions
between the times PYTHONDUMPREFS produced output.
00858028 [1025->1456] str '<dummy key>'
The string '<dummy key>', which is used in dictobject.c to overwrite a real
key that gets deleted, grew several hundred references during cleanup. It
suggests that stuff did get removed from dicts by cleanup, but that the dicts
themselves are staying alive for some reason. """
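# Example invocation (illustrative; the dump file name is an assumption):
#
#   PYTHONDUMPREFS=1 ./python -c 'pass' > dumprefs.txt 2>&1
#   python combinerefs.py dumprefs.txt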
import re
import sys
# Generate lines from fileiter. If whilematch is true, continue reading
# while the regexp object pat matches line. If whilematch is false, lines
# are read so long as pat doesn't match them. In any case, the first line
# that doesn't match pat (when whilematch is true), or that does match pat
# (when whilematch is false), is lost, and fileiter will resume at the line
# following it.
def read(fileiter, pat, whilematch):
for line in fileiter:
if bool(pat.match(line)) == whilematch:
yield line
else:
break
def combine(fname):
f = file(fname)
fi = iter(f)
for line in read(fi, re.compile(r'^Remaining objects:$'), False):
pass
crack = re.compile(r'([a-zA-Z\d]+) \[(\d+)\] (.*)')
addr2rc = {}
addr2guts = {}
before = 0
for line in read(fi, re.compile(r'^Remaining object addresses:$'), False):
m = crack.match(line)
if m:
addr, addr2rc[addr], addr2guts[addr] = m.groups()
before += 1
else:
print '??? skipped:', line
after = 0
for line in read(fi, crack, True):
after += 1
m = crack.match(line)
assert m
addr, rc, guts = m.groups() # guts is type name here
if addr not in addr2rc:
print '??? new object created while tearing down:', line.rstrip()
continue
print addr,
if rc == addr2rc[addr]:
print '[%s]' % rc,
else:
print '[%s->%s]' % (addr2rc[addr], rc),
print guts, addr2guts[addr]
f.close()
print "%d objects before, %d after" % (before, after)
if __name__ == '__main__':
combine(sys.argv[1])
| gpl-2.0 |
matthew-tucker/mne-python | examples/inverse/plot_read_inverse.py | 42 | 1384 | """
===========================
Reading an inverse operator
===========================
The inverse operator's source space is shown in 3D.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator
print(__doc__)
data_path = sample.data_path()
fname = data_path
fname += '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
inv = read_inverse_operator(fname)
print("Method: %s" % inv['methods'])
print("fMRI prior: %s" % inv['fmri_prior'])
print("Number of sources: %s" % inv['nsource'])
print("Number of channels: %s" % inv['nchan'])
###############################################################################
# Show result on 3D source space
lh_points = inv['src'][0]['rr']
lh_faces = inv['src'][0]['use_tris']
rh_points = inv['src'][1]['rr']
rh_faces = inv['src'][1]['use_tris']
from mayavi import mlab # noqa
mlab.figure(size=(600, 600), bgcolor=(0, 0, 0))
mesh = mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],
lh_faces, colormap='RdBu')
mesh.module_manager.scalar_lut_manager.reverse_lut = True
mesh = mlab.triangular_mesh(rh_points[:, 0], rh_points[:, 1], rh_points[:, 2],
rh_faces, colormap='RdBu')
mesh.module_manager.scalar_lut_manager.reverse_lut = True
| bsd-3-clause |
swapnakrishnan2k/tp-qemu | qemu/tests/nx.py | 9 | 2652 | import os
import logging
from autotest.client.shared import error
from virttest import data_dir
@error.context_aware
def run(test, params, env):
"""
try to exploit the guest to test whether nx(cpu) bit takes effect.
1) boot the guest
2) cp the exploit prog into the guest
3) run the exploit
:param test: QEMU test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
exploit_cmd = params.get("exploit_cmd", "")
if not exploit_cmd or session.cmd_status("test -x %s" % exploit_cmd):
exploit_file = os.path.join(data_dir.get_deps_dir(), 'nx', 'x64_sc_rdo.c')
dst_dir = '/tmp'
error.context("Copy the Exploit file to guest.", logging.info)
vm.copy_files_to(exploit_file, dst_dir)
error.context("Build exploit program in guest.", logging.info)
build_exploit = "gcc -o /tmp/nx_exploit /tmp/x64_sc_rdo.c"
if session.cmd_status(build_exploit):
raise error.TestError("Failed to build the exploit program")
exploit_cmd = "/tmp/nx_exploit"
error.context("Run exploit program in guest.", logging.info)
    # If NX is enabled (the default), the exploit fails with a
    # segmentation fault, so the shell's return value is non-zero.
exec_res = session.cmd_status(exploit_cmd)
nx_on = params.get('nx_on', 'yes')
if nx_on == 'yes':
if exec_res:
logging.info('NX works good.')
error.context("Using execstack to remove the protection.",
logging.info)
enable_exec = 'execstack -s %s' % exploit_cmd
if session.cmd_status(enable_exec):
if session.cmd_status("execstack --help"):
msg = "Please make sure guest have execstack command."
raise error.TestError(msg)
raise error.TestError('Failed to enable the execstack')
if session.cmd_status(exploit_cmd):
raise error.TestFail('NX is still protecting. Error.')
else:
logging.info('NX is disabled as desired. good')
else:
raise error.TestFail('Fatal Error: NX does not protect anything!')
else:
if exec_res:
msg = "qemu fail to disable 'nx' flag or the exploit is corrupted."
raise error.TestError(msg)
else:
logging.info('NX is disabled, and this Test Case passed.')
if session:
session.close()
| gpl-2.0 |
juanmont/one | .vscode/extensions/tht13.rst-vscode-2.0.0/src/python/docutils/transforms/frontmatter.py | 9 | 19456 | # $Id: frontmatter.py 7897 2015-05-29 11:48:20Z milde $
# Author: David Goodger, Ueli Schlaepfer <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Transforms related to the front matter of a document or a section
(information found before the main text):
- `DocTitle`: Used to transform a lone top level section's title to
the document title, promote a remaining lone top-level section's
title to the document subtitle, and determine the document's title
metadata (document['title']) based on the document title and/or the
"title" setting.
- `SectionSubTitle`: Used to transform a lone subsection into a
subtitle.
- `DocInfo`: Used to transform a bibliographic field list into docinfo
elements.
"""
__docformat__ = 'reStructuredText'
import re
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class TitlePromoter(Transform):
"""
Abstract base class for DocTitle and SectionSubTitle transforms.
"""
def promote_title(self, node):
"""
Transform the following tree::
<node>
<section>
<title>
...
into ::
<node>
<title>
...
`node` is normally a document.
"""
# Type check
if not isinstance(node, nodes.Element):
raise TypeError, 'node must be of Element-derived type.'
# `node` must not have a title yet.
assert not (len(node) and isinstance(node[0], nodes.title))
section, index = self.candidate_index(node)
if index is None:
return None
# Transfer the section's attributes to the node:
# NOTE: Change second parameter to False to NOT replace
# attributes that already exist in node with those in
# section
# NOTE: Remove third parameter to NOT copy the 'source'
# attribute from section
node.update_all_atts_concatenating(section, True, True)
# setup_child is called automatically for all nodes.
node[:] = (section[:1] # section title
+ node[:index] # everything that was in the
# node before the section
+ section[1:]) # everything that was in the section
assert isinstance(node[0], nodes.title)
return 1
def promote_subtitle(self, node):
"""
Transform the following node tree::
<node>
<title>
<section>
<title>
...
into ::
<node>
<title>
<subtitle>
...
"""
# Type check
if not isinstance(node, nodes.Element):
raise TypeError, 'node must be of Element-derived type.'
subsection, index = self.candidate_index(node)
if index is None:
return None
subtitle = nodes.subtitle()
# Transfer the subsection's attributes to the new subtitle
# NOTE: Change second parameter to False to NOT replace
# attributes that already exist in node with those in
# section
# NOTE: Remove third parameter to NOT copy the 'source'
# attribute from section
subtitle.update_all_atts_concatenating(subsection, True, True)
# Transfer the contents of the subsection's title to the
# subtitle:
subtitle[:] = subsection[0][:]
node[:] = (node[:1] # title
+ [subtitle]
# everything that was before the section:
+ node[1:index]
# everything that was in the subsection:
+ subsection[1:])
return 1
def candidate_index(self, node):
"""
Find and return the promotion candidate and its index.
Return (None, None) if no valid candidate was found.
"""
index = node.first_child_not_matching_class(
nodes.PreBibliographic)
if index is None or len(node) > (index + 1) or \
not isinstance(node[index], nodes.section):
return None, None
else:
return node[index], index
class DocTitle(TitlePromoter):
"""
In reStructuredText_, there is no way to specify a document title
and subtitle explicitly. Instead, we can supply the document title
(and possibly the subtitle as well) implicitly, and use this
two-step transform to "raise" or "promote" the title(s) (and their
corresponding section contents) to the document level.
1. If the document contains a single top-level section as its
first non-comment element, the top-level section's title
becomes the document's title, and the top-level section's
contents become the document's immediate contents. The lone
top-level section header must be the first non-comment element
in the document.
For example, take this input text::
=================
Top-Level Title
=================
A paragraph.
Once parsed, it looks like this::
<document>
<section names="top-level title">
<title>
Top-Level Title
<paragraph>
A paragraph.
After running the DocTitle transform, we have::
<document names="top-level title">
<title>
Top-Level Title
<paragraph>
A paragraph.
2. If step 1 successfully determines the document title, we
continue by checking for a subtitle.
If the lone top-level section itself contains a single
second-level section as its first non-comment element, that
section's title is promoted to the document's subtitle, and
that section's contents become the document's immediate
contents. Given this input text::
=================
Top-Level Title
=================
Second-Level Title
~~~~~~~~~~~~~~~~~~
A paragraph.
After parsing and running the Section Promotion transform, the
result is::
<document names="top-level title">
<title>
Top-Level Title
<subtitle names="second-level title">
Second-Level Title
<paragraph>
A paragraph.
(Note that the implicit hyperlink target generated by the
"Second-Level Title" is preserved on the "subtitle" element
itself.)
Any comment elements occurring before the document title or
subtitle are accumulated and inserted as the first body elements
after the title(s).
This transform also sets the document's metadata title
(document['title']).
.. _reStructuredText: http://docutils.sf.net/rst.html
"""
default_priority = 320
def set_metadata(self):
"""
Set document['title'] metadata title from the following
sources, listed in order of priority:
* Existing document['title'] attribute.
* "title" setting.
* Document title node (as promoted by promote_title).
"""
if not self.document.hasattr('title'):
if self.document.settings.title is not None:
self.document['title'] = self.document.settings.title
elif len(self.document) and isinstance(self.document[0], nodes.title):
self.document['title'] = self.document[0].astext()
def apply(self):
if getattr(self.document.settings, 'doctitle_xform', 1):
# promote_(sub)title defined in TitlePromoter base class.
if self.promote_title(self.document):
# If a title has been promoted, also try to promote a
# subtitle.
self.promote_subtitle(self.document)
# Set document['title'].
self.set_metadata()
class SectionSubTitle(TitlePromoter):
"""
This works like document subtitles, but for sections. For example, ::
<section>
<title>
Title
<section>
<title>
Subtitle
...
is transformed into ::
<section>
<title>
Title
<subtitle>
Subtitle
...
For details refer to the docstring of DocTitle.
"""
default_priority = 350
def apply(self):
if not getattr(self.document.settings, 'sectsubtitle_xform', 1):
return
for section in self.document.traverse(nodes.section):
# On our way through the node tree, we are deleting
# sections, but we call self.promote_subtitle for those
# sections nonetheless. To do: Write a test case which
# shows the problem and discuss on Docutils-develop.
self.promote_subtitle(section)
class DocInfo(Transform):
"""
This transform is specific to the reStructuredText_ markup syntax;
see "Bibliographic Fields" in the `reStructuredText Markup
Specification`_ for a high-level description. This transform
should be run *after* the `DocTitle` transform.
Given a field list as the first non-comment element after the
document title and subtitle (if present), registered bibliographic
field names are transformed to the corresponding DTD elements,
becoming child elements of the "docinfo" element (except for a
dedication and/or an abstract, which become "topic" elements after
"docinfo").
For example, given this document fragment after parsing::
<document>
<title>
Document Title
<field_list>
<field>
<field_name>
Author
<field_body>
<paragraph>
A. Name
<field>
<field_name>
Status
<field_body>
<paragraph>
$RCSfile$
...
After running the bibliographic field list transform, the
resulting document tree would look like this::
<document>
<title>
Document Title
<docinfo>
<author>
A. Name
<status>
frontmatter.py
...
The "Status" field contained an expanded RCS keyword, which is
normally (but optionally) cleaned up by the transform. The sole
contents of the field body must be a paragraph containing an
expanded RCS keyword of the form "$keyword: expansion text $". Any
RCS keyword can be processed in any bibliographic field. The
dollar signs and leading RCS keyword name are removed. Extra
processing is done for the following RCS keywords:
- "RCSfile" expands to the name of the file in the RCS or CVS
repository, which is the name of the source file with a ",v"
suffix appended. The transform will remove the ",v" suffix.
- "Date" expands to the format "YYYY/MM/DD hh:mm:ss" (in the UTC
time zone). The RCS Keywords transform will extract just the
date itself and transform it to an ISO 8601 format date, as in
"2000-12-31".
(Since the source file for this text is itself stored under CVS,
we can't show an example of the "Date" RCS keyword because we
can't prevent any RCS keywords used in this explanation from
being expanded. Only the "RCSfile" keyword is stable; its
expansion text changes only if the file name changes.)
.. _reStructuredText: http://docutils.sf.net/rst.html
.. _reStructuredText Markup Specification:
http://docutils.sf.net/docs/ref/rst/restructuredtext.html
"""
default_priority = 340
biblio_nodes = {
'author': nodes.author,
'authors': nodes.authors,
'organization': nodes.organization,
'address': nodes.address,
'contact': nodes.contact,
'version': nodes.version,
'revision': nodes.revision,
'status': nodes.status,
'date': nodes.date,
'copyright': nodes.copyright,
'dedication': nodes.topic,
'abstract': nodes.topic}
"""Canonical field name (lowcased) to node class name mapping for
bibliographic fields (field_list)."""
def apply(self):
if not getattr(self.document.settings, 'docinfo_xform', 1):
return
document = self.document
index = document.first_child_not_matching_class(
nodes.PreBibliographic)
if index is None:
return
candidate = document[index]
if isinstance(candidate, nodes.field_list):
biblioindex = document.first_child_not_matching_class(
(nodes.Titular, nodes.Decorative))
nodelist = self.extract_bibliographic(candidate)
del document[index] # untransformed field list (candidate)
document[biblioindex:biblioindex] = nodelist
def extract_bibliographic(self, field_list):
docinfo = nodes.docinfo()
bibliofields = self.language.bibliographic_fields
labels = self.language.labels
topics = {'dedication': None, 'abstract': None}
for field in field_list:
try:
name = field[0][0].astext()
normedname = nodes.make_id(name)
if not (len(field) == 2 and normedname in bibliofields
and self.check_empty_biblio_field(field, name)):
raise TransformError
canonical = bibliofields[normedname]
biblioclass = self.biblio_nodes[canonical]
if issubclass(biblioclass, nodes.TextElement):
if not self.check_compound_biblio_field(field, name):
raise TransformError
utils.clean_rcs_keywords(
field[1][0], self.rcs_keyword_substitutions)
docinfo.append(biblioclass('', '', *field[1][0]))
elif issubclass(biblioclass, nodes.authors):
self.extract_authors(field, name, docinfo)
elif issubclass(biblioclass, nodes.topic):
if topics[canonical]:
field[-1] += self.document.reporter.warning(
'There can only be one "%s" field.' % name,
base_node=field)
raise TransformError
title = nodes.title(name, labels[canonical])
topics[canonical] = biblioclass(
'', title, classes=[canonical], *field[1].children)
else:
docinfo.append(biblioclass('', *field[1].children))
except TransformError:
if len(field[-1]) == 1 \
and isinstance(field[-1][0], nodes.paragraph):
utils.clean_rcs_keywords(
field[-1][0], self.rcs_keyword_substitutions)
if normedname and normedname not in bibliofields:
field['classes'].append(normedname)
docinfo.append(field)
nodelist = []
if len(docinfo) != 0:
nodelist.append(docinfo)
for name in ('dedication', 'abstract'):
if topics[name]:
nodelist.append(topics[name])
return nodelist
def check_empty_biblio_field(self, field, name):
if len(field[-1]) < 1:
field[-1] += self.document.reporter.warning(
'Cannot extract empty bibliographic field "%s".' % name,
base_node=field)
return None
return 1
def check_compound_biblio_field(self, field, name):
if len(field[-1]) > 1:
field[-1] += self.document.reporter.warning(
'Cannot extract compound bibliographic field "%s".' % name,
base_node=field)
return None
if not isinstance(field[-1][0], nodes.paragraph):
field[-1] += self.document.reporter.warning(
'Cannot extract bibliographic field "%s" containing '
'anything other than a single paragraph.' % name,
base_node=field)
return None
return 1
rcs_keyword_substitutions = [
(re.compile(r'\$' r'Date: (\d\d\d\d)[-/](\d\d)[-/](\d\d)[ T][\d:]+'
r'[^$]* \$', re.IGNORECASE), r'\1-\2-\3'),
(re.compile(r'\$' r'RCSfile: (.+),v \$', re.IGNORECASE), r'\1'),
(re.compile(r'\$[a-zA-Z]+: (.+) \$'), r'\1'),]
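    # Illustrative examples of the substitutions above (the expansions are
    # hypothetical):
    #
    #   '$Date: 2001/12/31 23:59:59 $'  -> '2001-12-31'
    #   '$RCSfile: frontmatter.py,v $'  -> 'frontmatter.py'
    #   '$Revision: 1.5 $'              -> '1.5'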
def extract_authors(self, field, name, docinfo):
try:
if len(field[1]) == 1:
if isinstance(field[1][0], nodes.paragraph):
authors = self.authors_from_one_paragraph(field)
elif isinstance(field[1][0], nodes.bullet_list):
authors = self.authors_from_bullet_list(field)
else:
raise TransformError
else:
authors = self.authors_from_paragraphs(field)
authornodes = [nodes.author('', '', *author)
for author in authors if author]
if len(authornodes) >= 1:
docinfo.append(nodes.authors('', *authornodes))
else:
raise TransformError
except TransformError:
field[-1] += self.document.reporter.warning(
'Bibliographic field "%s" incompatible with extraction: '
'it must contain either a single paragraph (with authors '
'separated by one of "%s"), multiple paragraphs (one per '
'author), or a bullet list with one paragraph (one author) '
'per item.'
% (name, ''.join(self.language.author_separators)),
base_node=field)
raise
def authors_from_one_paragraph(self, field):
text = field[1][0].astext().strip()
if not text:
raise TransformError
for authorsep in self.language.author_separators:
authornames = text.split(authorsep)
if len(authornames) > 1:
break
authornames = [author.strip() for author in authornames]
authors = [[nodes.Text(author)] for author in authornames if author]
return authors
def authors_from_bullet_list(self, field):
authors = []
for item in field[1][0]:
if len(item) != 1 or not isinstance(item[0], nodes.paragraph):
raise TransformError
authors.append(item[0].children)
if not authors:
raise TransformError
return authors
def authors_from_paragraphs(self, field):
for item in field[1]:
if not isinstance(item, nodes.paragraph):
raise TransformError
authors = [item.children for item in field[1]]
return authors
| apache-2.0 |
fnp/pylucene | test/test_BooleanPrefixQuery.py | 1 | 2671 | # ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
from unittest import TestCase, main
from lucene import *
class BooleanPrefixQueryTestCase(TestCase):
"""
Unit tests ported from Java Lucene
"""
def getCount(self, r, q):
if BooleanQuery.instance_(q):
return len(BooleanQuery.cast_(q).getClauses())
elif ConstantScoreQuery.instance_(q):
iter = ConstantScoreQuery.cast_(q).getFilter().getDocIdSet(r).iterator()
count = 0
while iter.nextDoc() != DocIdSetIterator.NO_MORE_DOCS:
count += 1
return count
else:
self.fail("unexpected query " + q)
def testMethod(self):
directory = RAMDirectory()
categories = ["food", "foodanddrink", "foodanddrinkandgoodtimes",
"food and drink"]
try:
writer = IndexWriter(directory, WhitespaceAnalyzer(), True,
IndexWriter.MaxFieldLength.LIMITED)
for category in categories:
doc = Document()
doc.add(Field("category", category, Field.Store.YES,
Field.Index.NOT_ANALYZED))
writer.addDocument(doc)
writer.close()
reader = IndexReader.open(directory, True)
query = PrefixQuery(Term("category", "foo"))
rw1 = query.rewrite(reader)
bq = BooleanQuery()
bq.add(query, BooleanClause.Occur.MUST)
rw2 = bq.rewrite(reader)
except Exception, e:
self.fail(e)
self.assertEqual(self.getCount(reader, rw1), self.getCount(reader, rw2),
"Number of Clauses Mismatch")
if __name__ == "__main__":
import sys, lucene
lucene.initVM()
if '-loop' in sys.argv:
sys.argv.remove('-loop')
while True:
try:
main()
except:
pass
else:
main()
| apache-2.0 |
cython-testbed/pandas | pandas/tests/extension/base/dtype.py | 2 | 2874 | import warnings
import numpy as np
import pandas as pd
from .base import BaseExtensionTests
class BaseDtypeTests(BaseExtensionTests):
"""Base class for ExtensionDtype classes"""
def test_name(self, dtype):
assert isinstance(dtype.name, str)
def test_kind(self, dtype):
valid = set('biufcmMOSUV')
if dtype.kind is not None:
assert dtype.kind in valid
def test_construct_from_string_own_name(self, dtype):
result = dtype.construct_from_string(dtype.name)
assert type(result) is type(dtype)
# check OK as classmethod
result = type(dtype).construct_from_string(dtype.name)
assert type(result) is type(dtype)
def test_is_dtype_from_name(self, dtype):
result = type(dtype).is_dtype(dtype.name)
assert result is True
def test_is_dtype_unboxes_dtype(self, data, dtype):
assert dtype.is_dtype(data) is True
def test_is_dtype_from_self(self, dtype):
result = type(dtype).is_dtype(dtype)
assert result is True
def test_is_not_string_type(self, dtype):
        assert not pd.api.types.is_string_dtype(dtype)
def test_is_not_object_type(self, dtype):
        assert not pd.api.types.is_object_dtype(dtype)
def test_eq_with_str(self, dtype):
assert dtype == dtype.name
assert dtype != dtype.name + '-suffix'
def test_eq_with_numpy_object(self, dtype):
assert dtype != np.dtype('object')
def test_eq_with_self(self, dtype):
assert dtype == dtype
assert dtype != object()
def test_array_type(self, data, dtype):
assert dtype.construct_array_type() is type(data)
def test_check_dtype(self, data):
dtype = data.dtype
# check equivalency for using .dtypes
df = pd.DataFrame({'A': pd.Series(data, dtype=dtype),
'B': data,
'C': 'foo', 'D': 1})
# np.dtype('int64') == 'Int64' == 'int64'
# so can't distinguish
if dtype.name == 'Int64':
expected = pd.Series([True, True, False, True],
index=list('ABCD'))
else:
expected = pd.Series([True, True, False, False],
index=list('ABCD'))
# XXX: This should probably be *fixed* not ignored.
# See libops.scalar_compare
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
result = df.dtypes == str(dtype)
self.assert_series_equal(result, expected)
expected = pd.Series([True, True, False, False],
index=list('ABCD'))
result = df.dtypes.apply(str) == str(dtype)
self.assert_series_equal(result, expected)
def test_hashable(self, dtype):
hash(dtype) # no error
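# Illustrative sketch (not part of this module): a concrete test class
# inherits BaseDtypeTests and supplies `dtype`/`data` fixtures, e.g. for a
# hypothetical MyDtype/MyArray extension pair:
#
#   class TestMyDtype(BaseDtypeTests):
#       @pytest.fixture
#       def dtype(self):
#           return MyDtype()
#
#       @pytest.fixture
#       def data(self, dtype):
#           return MyArray._from_sequence(range(10), dtype=dtype)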
| bsd-3-clause |
andreparames/odoo | addons/website_membership/models/product.py | 338 | 1264 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class product_template(osv.Model):
_inherit = 'product.template'
_columns = {
        'website_published': fields.boolean('Available on the website', copy=False),
}
_defaults = {
'website_published': False,
}
| agpl-3.0 |
jylaxp/django | django/db/migrations/operations/models.py | 290 | 21735 | from __future__ import unicode_literals
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.state import ModelState
from django.db.models.options import normalize_together
from django.utils import six
from django.utils.functional import cached_property
class CreateModel(Operation):
"""
Create a model's table.
"""
serialization_expand_args = ['fields', 'options', 'managers']
def __init__(self, name, fields, options=None, bases=None, managers=None):
self.name = name
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model,)
self.managers = managers or []
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
kwargs = {
'name': self.name,
'fields': self.fields,
}
if self.options:
kwargs['options'] = self.options
if self.bases and self.bases != (models.Model,):
kwargs['bases'] = self.bases
if self.managers and self.managers != [('objects', models.Manager())]:
kwargs['managers'] = self.managers
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.add_model(ModelState(
app_label,
self.name,
list(self.fields),
dict(self.options),
tuple(self.bases),
list(self.managers),
))
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def describe(self):
return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name)
def references_model(self, name, app_label=None):
strings_to_check = [self.name]
# Check we didn't inherit from the model
for base in self.bases:
if isinstance(base, six.string_types):
strings_to_check.append(base.split(".")[-1])
# Check we have no FKs/M2Ms with it
for fname, field in self.fields:
if field.remote_field:
if isinstance(field.remote_field.model, six.string_types):
strings_to_check.append(field.remote_field.model.split(".")[-1])
# Now go over all the strings and compare them
for string in strings_to_check:
if string.lower() == name.lower():
return True
return False
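# Illustrative sketch (not part of this module): how CreateModel typically
# appears in a generated migration; the app, model, and field names are
# assumptions.
#
#   class Migration(migrations.Migration):
#       operations = [
#           migrations.CreateModel(
#               name='Author',
#               fields=[
#                   ('id', models.AutoField(primary_key=True)),
#                   ('name', models.CharField(max_length=100)),
#               ],
#           ),
#       ]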
class DeleteModel(Operation):
"""
Drops a model's table.
"""
def __init__(self, name):
self.name = name
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
kwargs = {
'name': self.name,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.remove_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def describe(self):
return "Delete model %s" % (self.name, )
class RenameModel(Operation):
"""
Renames a model.
"""
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
@cached_property
def old_name_lower(self):
return self.old_name.lower()
@cached_property
def new_name_lower(self):
return self.new_name.lower()
def deconstruct(self):
kwargs = {
'old_name': self.old_name,
'new_name': self.new_name,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
apps = state.apps
model = apps.get_model(app_label, self.old_name)
model._meta.apps = apps
# Get all of the related objects we need to repoint
all_related_objects = (
f for f in model._meta.get_fields(include_hidden=True)
if f.auto_created and not f.concrete and (not f.hidden or f.many_to_many)
)
# Rename the model
state.models[app_label, self.new_name_lower] = state.models[app_label, self.old_name_lower]
state.models[app_label, self.new_name_lower].name = self.new_name
state.remove_model(app_label, self.old_name_lower)
# Repoint the FKs and M2Ms pointing to us
for related_object in all_related_objects:
if related_object.model is not model:
# The model being renamed does not participate in this relation
# directly. Rather, a superclass does.
continue
# Use the new related key for self referential related objects.
if related_object.related_model == model:
related_key = (app_label, self.new_name_lower)
else:
related_key = (
related_object.related_model._meta.app_label,
related_object.related_model._meta.model_name,
)
new_fields = []
for name, field in state.models[related_key].fields:
if name == related_object.field.name:
field = field.clone()
field.remote_field.model = "%s.%s" % (app_label, self.new_name)
new_fields.append((name, field))
state.models[related_key].fields = new_fields
state.reload_model(*related_key)
state.reload_model(app_label, self.new_name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.new_name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.old_name)
# Move the main table
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
# Alter the fields pointing to us
for related_object in old_model._meta.related_objects:
if related_object.related_model == old_model:
model = new_model
related_key = (app_label, self.new_name_lower)
else:
model = related_object.related_model
related_key = (
related_object.related_model._meta.app_label,
related_object.related_model._meta.model_name,
)
to_field = to_state.apps.get_model(
*related_key
)._meta.get_field(related_object.field.name)
schema_editor.alter_field(
model,
related_object.field,
to_field,
)
# Rename M2M fields whose name is based on this model's name.
fields = zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many)
for (old_field, new_field) in fields:
# Skip self-referential fields as these are renamed above.
if new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created:
continue
# Rename the M2M table that's based on this model's name.
old_m2m_model = old_field.remote_field.through
new_m2m_model = new_field.remote_field.through
schema_editor.alter_db_table(
new_m2m_model,
old_m2m_model._meta.db_table,
new_m2m_model._meta.db_table,
)
# Rename the column in the M2M table that's based on this
# model's name.
schema_editor.alter_field(
new_m2m_model,
old_m2m_model._meta.get_field(old_model._meta.model_name),
new_m2m_model._meta.get_field(new_model._meta.model_name),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
self.new_name, self.old_name = self.old_name, self.new_name
self.database_forwards(app_label, schema_editor, from_state, to_state)
self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
self.new_name, self.old_name = self.old_name, self.new_name
def references_model(self, name, app_label=None):
return (
name.lower() == self.old_name_lower or
name.lower() == self.new_name_lower
)
def describe(self):
return "Rename model %s to %s" % (self.old_name, self.new_name)
class AlterModelTable(Operation):
"""
Renames a model's table
"""
def __init__(self, name, table):
self.name = name
self.table = table
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
kwargs = {
'name': self.name,
'table': self.table,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.models[app_label, self.name_lower].options["db_table"] = self.table
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
# Rename M2M fields whose name is based on this model's db_table
for (old_field, new_field) in zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many):
if new_field.remote_field.through._meta.auto_created:
schema_editor.alter_db_table(
new_field.remote_field.through,
old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def describe(self):
return "Rename table for %s to %s" % (self.name, self.table)
class AlterUniqueTogether(Operation):
"""
Changes the value of unique_together to the target one.
Input value of unique_together must be a set of tuples.
"""
option_name = "unique_together"
def __init__(self, name, unique_together):
self.name = name
unique_together = normalize_together(unique_together)
self.unique_together = set(tuple(cons) for cons in unique_together)
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
kwargs = {
'name': self.name,
'unique_together': self.unique_together,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options[self.option_name] = self.unique_together
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_unique_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def references_field(self, model_name, name, app_label=None):
return (
self.references_model(model_name, app_label) and
(
not self.unique_together or
any((name in together) for together in self.unique_together)
)
)
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together or ''))
class AlterIndexTogether(Operation):
"""
Changes the value of index_together to the target one.
Input value of index_together must be a set of tuples.
"""
option_name = "index_together"
def __init__(self, name, index_together):
self.name = name
index_together = normalize_together(index_together)
self.index_together = set(tuple(cons) for cons in index_together)
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
kwargs = {
'name': self.name,
'index_together': self.index_together,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options[self.option_name] = self.index_together
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_index_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def references_field(self, model_name, name, app_label=None):
return (
self.references_model(model_name, app_label) and
(
not self.index_together or
any((name in together) for together in self.index_together)
)
)
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.index_together or ''))
class AlterOrderWithRespectTo(Operation):
"""
Represents a change with the order_with_respect_to option.
"""
def __init__(self, name, order_with_respect_to):
self.name = name
self.order_with_respect_to = order_with_respect_to
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
kwargs = {
'name': self.name,
'order_with_respect_to': self.order_with_respect_to,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options['order_with_respect_to'] = self.order_with_respect_to
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.name)
# Remove a field if we need to
if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to:
schema_editor.remove_field(from_model, from_model._meta.get_field("_order"))
# Add a field if we need to (altering the column is untouched as
# it's likely a rename)
elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to:
field = to_model._meta.get_field("_order")
if not field.has_default():
field.default = 0
schema_editor.add_field(
from_model,
field,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def references_field(self, model_name, name, app_label=None):
return (
self.references_model(model_name, app_label) and
(
self.order_with_respect_to is None or
name == self.order_with_respect_to
)
)
def describe(self):
return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to)
class AlterModelOptions(Operation):
"""
Sets new model options that don't directly affect the database schema
(like verbose_name, permissions, ordering). Python code in migrations
may still need them.
"""
# Model options we want to compare and preserve in an AlterModelOptions op
ALTER_OPTION_KEYS = [
"get_latest_by",
"managed",
"ordering",
"permissions",
"default_permissions",
"select_on_save",
"verbose_name",
"verbose_name_plural",
]
def __init__(self, name, options):
self.name = name
self.options = options
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
kwargs = {
'name': self.name,
'options': self.options,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options = dict(model_state.options)
model_state.options.update(self.options)
for key in self.ALTER_OPTION_KEYS:
if key not in self.options and key in model_state.options:
del model_state.options[key]
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def describe(self):
return "Change Meta options on %s" % (self.name, )
class AlterModelManagers(Operation):
"""
Alters the model's managers
"""
serialization_expand_args = ['managers']
def __init__(self, name, managers):
self.name = name
self.managers = managers
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
return (
self.__class__.__name__,
[self.name, self.managers],
{}
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.managers = list(self.managers)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def describe(self):
return "Change managers on %s" % (self.name, )
| bsd-3-clause |
zanderle/django | tests/template_tests/syntax_tests/test_filter_tag.py | 521 | 1795 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class FilterTagTests(SimpleTestCase):
@setup({'filter01': '{% filter upper %}{% endfilter %}'})
def test_filter01(self):
output = self.engine.render_to_string('filter01')
self.assertEqual(output, '')
@setup({'filter02': '{% filter upper %}django{% endfilter %}'})
def test_filter02(self):
output = self.engine.render_to_string('filter02')
self.assertEqual(output, 'DJANGO')
@setup({'filter03': '{% filter upper|lower %}django{% endfilter %}'})
def test_filter03(self):
output = self.engine.render_to_string('filter03')
self.assertEqual(output, 'django')
@setup({'filter04': '{% filter cut:remove %}djangospam{% endfilter %}'})
def test_filter04(self):
output = self.engine.render_to_string('filter04', {'remove': 'spam'})
self.assertEqual(output, 'django')
@setup({'filter05': '{% filter safe %}fail{% endfilter %}'})
def test_filter05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('filter05')
@setup({'filter05bis': '{% filter upper|safe %}fail{% endfilter %}'})
def test_filter05bis(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('filter05bis')
@setup({'filter06': '{% filter escape %}fail{% endfilter %}'})
def test_filter06(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('filter06')
@setup({'filter06bis': '{% filter upper|escape %}fail{% endfilter %}'})
def test_filter06bis(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('filter06bis')
| bsd-3-clause |
jamesmarva/docker-py | docker/errors.py | 39 | 2469 | # Copyright 2014 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
class APIError(requests.exceptions.HTTPError):
def __init__(self, message, response, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
super(APIError, self).__init__(message)
self.response = response
self.explanation = explanation
if self.explanation is None and response.content:
self.explanation = response.content.strip()
def __str__(self):
message = super(APIError, self).__str__()
if self.is_client_error():
message = '{0} Client Error: {1}'.format(
self.response.status_code, self.response.reason)
elif self.is_server_error():
message = '{0} Server Error: {1}'.format(
self.response.status_code, self.response.reason)
if self.explanation:
message = '{0} ("{1}")'.format(message, self.explanation)
return message
def is_client_error(self):
return 400 <= self.response.status_code < 500
def is_server_error(self):
return 500 <= self.response.status_code < 600
class DockerException(Exception):
pass
class NotFound(APIError):
pass
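# Minimal usage sketch (assumes a docker.Client instance named `client`;
# the container name is hypothetical):
#
#     try:
#         client.inspect_container('no-such-container')
#     except NotFound as e:
#         print(e.explanation)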
class InvalidVersion(DockerException):
pass
class InvalidRepository(DockerException):
pass
class InvalidConfigFile(DockerException):
pass
class DeprecatedMethod(DockerException):
pass
class TLSParameterError(DockerException):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg + (". TLS configurations should map the Docker CLI "
"client configurations. See "
"http://docs.docker.com/examples/https/ for "
"API details.")
class NullResource(DockerException, ValueError):
pass
| apache-2.0 |
Zanzibar82/script.module.urlresolver | lib/urlresolver/plugins/vidstream.py | 4 | 2431 | """
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
class VidstreamResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "vidstream"
domains = ["vidstream.in"]
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
#e.g. http://vidstream.in/xdfaay6ccwqj
self.pattern = 'http://((?:www.)?vidstream.in)/(.*)'
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
resp = self.net.http_GET(web_url)
html = resp.content
post_url = resp.get_url()
form_values = {}
for i in re.finditer('<input.*?name="(.*?)".*?value="(.*?)">', html):
form_values[i.group(1)] = i.group(2)
html = self.net.http_POST(post_url, form_data=form_values).content
# get stream url
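        # The player page is expected to embed something like (hypothetical
        # example): file: "http://vidstream.in/dl/abc123/video.mp4",
        # which the regex below captures.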
pattern = 'file:\s*"([^"]+)",'
r = re.search(pattern, html)
if r:
return r.group(1)
else:
raise UrlResolver.ResolverError('File Not Found or removed')
def get_url(self, host, media_id):
return 'http://vidstream.in/%s' % (media_id)
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return re.match(self.pattern, url) or self.name in host
| gpl-2.0 |
varunnaganathan/django | django/utils/dateformat.py | 110 | 11592 | """
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print(df.format('jS F Y H:i'))
7th October 2003 11:39
>>>
"""
from __future__ import unicode_literals
import calendar
import datetime
import re
import time
from django.utils import six
from django.utils.dates import (
MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR,
)
from django.utils.encoding import force_text
from django.utils.timezone import get_default_timezone, is_aware, is_naive
from django.utils.translation import ugettext as _
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])')
re_escaped = re.compile(r'\\(.)')
class Formatter(object):
def format(self, formatstr):
pieces = []
for i, piece in enumerate(re_formatchars.split(force_text(formatstr))):
if i % 2:
pieces.append(force_text(getattr(self, piece)()))
elif piece:
pieces.append(re_escaped.sub(r'\1', piece))
return ''.join(pieces)
class TimeFormat(Formatter):
def __init__(self, obj):
self.data = obj
self.timezone = None
# We only support timezone when formatting datetime objects,
# not date objects (timezone information not appropriate),
# or time objects (against established django policy).
if isinstance(obj, datetime.datetime):
if is_naive(obj):
self.timezone = get_default_timezone()
else:
self.timezone = obj.tzinfo
def a(self):
"'a.m.' or 'p.m.'"
if self.data.hour > 11:
return _('p.m.')
return _('a.m.')
def A(self):
"'AM' or 'PM'"
if self.data.hour > 11:
return _('PM')
return _('AM')
def B(self):
"Swatch Internet time"
raise NotImplementedError('may be implemented in a future release')
def e(self):
"""
Timezone name.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
try:
if hasattr(self.data, 'tzinfo') and self.data.tzinfo:
# Have to use tzinfo.tzname and not datetime.tzname
                # because datetime.tzname does not expect Unicode
return self.data.tzinfo.tzname(self.data) or ""
except NotImplementedError:
pass
return ""
def f(self):
"""
Time, in 12-hour hours and minutes, with minutes left off if they're
zero.
Examples: '1', '1:30', '2:05', '2'
Proprietary extension.
"""
if self.data.minute == 0:
return self.g()
return '%s:%s' % (self.g(), self.i())
def g(self):
"Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
if self.data.hour == 0:
return 12
if self.data.hour > 12:
return self.data.hour - 12
return self.data.hour
def G(self):
"Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
return self.data.hour
def h(self):
"Hour, 12-hour format; i.e. '01' to '12'"
return '%02d' % self.g()
def H(self):
"Hour, 24-hour format; i.e. '00' to '23'"
return '%02d' % self.G()
def i(self):
"Minutes; i.e. '00' to '59'"
return '%02d' % self.data.minute
def O(self):
"""
Difference to Greenwich time in hours; e.g. '+0200', '-0430'.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
seconds = self.Z()
if seconds == "":
return ""
sign = '-' if seconds < 0 else '+'
seconds = abs(seconds)
return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)
def P(self):
"""
Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
if they're zero and the strings 'midnight' and 'noon' if appropriate.
Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
Proprietary extension.
"""
if self.data.minute == 0 and self.data.hour == 0:
return _('midnight')
if self.data.minute == 0 and self.data.hour == 12:
return _('noon')
return '%s %s' % (self.f(), self.a())
def s(self):
"Seconds; i.e. '00' to '59'"
return '%02d' % self.data.second
def T(self):
"""
Time zone of this machine; e.g. 'EST' or 'MDT'.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
name = None
try:
name = self.timezone.tzname(self.data)
except Exception:
# pytz raises AmbiguousTimeError during the autumn DST change.
# This happens mainly when __init__ receives a naive datetime
# and sets self.timezone = get_default_timezone().
pass
if name is None:
name = self.format('O')
return six.text_type(name)
def u(self):
"Microseconds; i.e. '000000' to '999999'"
return '%06d' % self.data.microsecond
def Z(self):
"""
Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
timezones west of UTC is always negative, and for those east of UTC is
always positive.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
try:
offset = self.timezone.utcoffset(self.data)
except Exception:
# pytz raises AmbiguousTimeError during the autumn DST change.
# This happens mainly when __init__ receives a naive datetime
# and sets self.timezone = get_default_timezone().
return ""
# `offset` is a datetime.timedelta. For negative values (to the west of
# UTC) only days can be negative (days=-1) and seconds are always
# positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
# Positive offsets have days=0
return offset.days * 86400 + offset.seconds
class DateFormat(TimeFormat):
year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
def b(self):
"Month, textual, 3 letters, lowercase; e.g. 'jan'"
return MONTHS_3[self.data.month]
def c(self):
"""
ISO 8601 Format
Example : '2008-01-02T10:30:00.000123'
"""
return self.data.isoformat()
def d(self):
"Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
return '%02d' % self.data.day
def D(self):
"Day of the week, textual, 3 letters; e.g. 'Fri'"
return WEEKDAYS_ABBR[self.data.weekday()]
def E(self):
"Alternative month names as required by some locales. Proprietary extension."
return MONTHS_ALT[self.data.month]
def F(self):
"Month, textual, long; e.g. 'January'"
return MONTHS[self.data.month]
def I(self):
"'1' if Daylight Savings Time, '0' otherwise."
try:
if self.timezone and self.timezone.dst(self.data):
return '1'
else:
return '0'
except Exception:
# pytz raises AmbiguousTimeError during the autumn DST change.
# This happens mainly when __init__ receives a naive datetime
# and sets self.timezone = get_default_timezone().
return ''
def j(self):
"Day of the month without leading zeros; i.e. '1' to '31'"
return self.data.day
def l(self):
"Day of the week, textual, long; e.g. 'Friday'"
return WEEKDAYS[self.data.weekday()]
def L(self):
"Boolean for whether it is a leap year; i.e. True or False"
return calendar.isleap(self.data.year)
def m(self):
"Month; i.e. '01' to '12'"
return '%02d' % self.data.month
def M(self):
"Month, textual, 3 letters; e.g. 'Jan'"
return MONTHS_3[self.data.month].title()
def n(self):
"Month without leading zeros; i.e. '1' to '12'"
return self.data.month
def N(self):
"Month abbreviation in Associated Press style. Proprietary extension."
return MONTHS_AP[self.data.month]
def o(self):
"ISO 8601 year number matching the ISO week number (W)"
return self.data.isocalendar()[0]
def r(self):
"RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
return self.format('D, j M Y H:i:s O')
def S(self):
"English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
if self.data.day in (11, 12, 13): # Special case
return 'th'
last = self.data.day % 10
if last == 1:
return 'st'
if last == 2:
return 'nd'
if last == 3:
return 'rd'
return 'th'
def t(self):
"Number of days in the given month; i.e. '28' to '31'"
return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1]
def U(self):
"Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
if isinstance(self.data, datetime.datetime) and is_aware(self.data):
return int(calendar.timegm(self.data.utctimetuple()))
else:
return int(time.mktime(self.data.timetuple()))
def w(self):
"Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
return (self.data.weekday() + 1) % 7
def W(self):
"ISO-8601 week number of year, weeks starting on Monday"
# Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
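        # For example, DateFormat(datetime.date(2003, 10, 7)).W() gives 41,
        # matching datetime.date(2003, 10, 7).isocalendar()[1].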
week_number = None
jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
weekday = self.data.weekday() + 1
day_of_year = self.z()
if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year - 1)):
week_number = 53
else:
week_number = 52
else:
if calendar.isleap(self.data.year):
i = 366
else:
i = 365
if (i - day_of_year) < (4 - weekday):
week_number = 1
else:
j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
week_number = j // 7
if jan1_weekday > 4:
week_number -= 1
return week_number
def y(self):
"Year, 2 digits; e.g. '99'"
return six.text_type(self.data.year)[2:]
def Y(self):
"Year, 4 digits; e.g. '1999'"
return self.data.year
def z(self):
"Day of the year; i.e. '0' to '365'"
doy = self.year_days[self.data.month] + self.data.day
if self.L() and self.data.month > 2:
doy += 1
return doy
def format(value, format_string):
"Convenience function"
df = DateFormat(value)
return df.format(format_string)
def time_format(value, format_string):
"Convenience function"
tf = TimeFormat(value)
return tf.format(format_string)
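# Usage sketch for the convenience functions (mirrors the module docstring):
#
#     >>> import datetime
#     >>> format(datetime.date(2003, 10, 7), 'jS F Y')
#     '7th October 2003'
#     >>> time_format(datetime.time(14, 5), 'H:i')
#     '14:05'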
| bsd-3-clause |
teamfx/openjfx-9-dev-rt | modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/common/find_files_unittest.py | 3 | 2675 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import unittest
from webkitpy.common.system.filesystem import FileSystem
import find_files
class MockWinFileSystem(object):
def join(self, *paths):
return '\\'.join(paths)
def normpath(self, path):
return path.replace('/', '\\')
class TestWinNormalize(unittest.TestCase):
def assert_filesystem_normalizes(self, filesystem):
self.assertEqual(find_files._normalize(filesystem, "c:\\foo",
['fast/html', 'fast/canvas/*', 'compositing/foo.html']),
['c:\\foo\\fast\html', 'c:\\foo\\fast\canvas\*', 'c:\\foo\compositing\\foo.html'])
def test_mocked_win(self):
# This tests test_files.normalize, using portable behavior emulating
# what we think Windows is supposed to do. This test will run on all
# platforms.
self.assert_filesystem_normalizes(MockWinFileSystem())
def test_win(self):
# This tests the actual windows platform, to ensure we get the same
# results that we get in test_mocked_win().
if not sys.platform.startswith('win'):
return
self.assert_filesystem_normalizes(FileSystem())
| gpl-2.0 |
coecms/CMIP5-utils | fetch_step2.py | 1 | 17182 | # Paola Petrelli - [email protected] 4th March 2014
# Last changed on 26th of March 2014
# Updates list:
# 26/03/2014 - output files and table csv file are created after
# collecting data; calling process_file with multiprocessing
# module to speed up hash checksum (md5/sha256)
# 01/04/2014 - exclude the ACCESS and CSIRO models from check
# 03/09/2014 trying to substitute the google file with a csv table
# 01/12/2014 - script has been divided into two steps: the first step, fetch_step1.py,
#              runs the search on an ESGF node and can be run interactively; the second step, fetch_step2.py, should be run in the queue
# 21/05/2015 comments updated, introduce argparse to manage inputs, added extra argument
# "node" to choose automatically between different nodes: only pcmdi and dkrz (default) are available at the moment
# 09/02/2016 pmcdi9.llnl.gov changed to pcmdi.llnl.gov, in step2 added extra file path checks to take into account that servers pcmdi3/7/9 are now aims3
#
# Retrieves a wget script (wget_<experiment>.out) listing all the CMIP5
# published files responding to the constraints passed as arguments.
# The search is run on one of the ESGF node but it searches through all the available
# nodes for the latest version. Multiple arguments can be passed to -e, -v, -m. At least one variable and experiment
# should be specified but models are optionals. The search is limited to the first 10000 matches,
# to change this you have to change the pcmdi_url variable in the code.
# The second step returns 3 files listing: the published files available on raijin (variables_replica.csv),
# the published files that need downloading and/or updating (variables_to_download.csv),
# the variable/model/experiment combination not yet published (variables_not_published).
# Uses md5/sha256 checksum to determine if a file already existing on raijin is exactly the same as the latest published version
# If you have to parse a large number of files, you can speed up the process by using the multiprocessing module's "Pool"
# if you're doing this you should run the second step in the queue, which is the reason why the script is split into 2 steps.
# To do that you can change the threads number from 1 (to run interactively) to the number of cpus you're requesting, in line 340
# async_results = Pool(16).map_async(process_file, result)
# The maximum number of threads depends on the number of cpus you're using, in above example 16 cpus.
#
# If the "table" option is selected it returns also a table csv file summarising the search results.
#
# The CMIP5 replica data is stored on raijin.nci.org.au under
# /g/data1/ua6/unofficial-ESG-replica/tmp/tree
#
# Example of how to run on raijin.nci.org.au
#
# module load python/2.7.3 (default on raijin)
# python fetch_step2.py -v ua_Amon tos_Omon -m CCSM4 -e rcp45 -o out -t
# NB needs python version 2.7 or more recent
#
# - the variable argument is passed as variable-name_cmip-table, this avoids confusion if looking for variables from different cmip tables
# - multiple arguments can be passed to "-v", "-m", "-e";
# - to pass multiple arguments, declare the option once followed by all the desired values (as above);
# - default output files root is "variables"
# - you need to pass at least one experiment and one variable, models are optional.
# - output file is optional, default is "variables"
# - table is optional, default is False
import sys, argparse
import subprocess, re, itertools
from multiprocessing import Pool
import os.path as opath # to manage files and dirs
# help functions
def VarCmipTable(v):
if "_" not in v:
raise TypeError("String '%s' does not match required format: var_cmip-table, ie tas_Amon"%(v,))
else:
return v
def parse_input():
''' Parse input arguments '''
parser = argparse.ArgumentParser(description='''Retrieves a wget script (wget_<experiment>.out) listing all the CMIP5
published files responding to the constraints passed as arguments.
The search is run on one of the ESGF node but it searches through all the available
nodes for the latest version. Multiple arguments can be passed to -e, -v, -m. At least one variable and experiment
should be specified but models are optionals. The search is limited to the first 1000 matches,
to change this you have to change the pcmdi_url variable in the code.''')
parser.add_argument('-e','--experiment', type=str, nargs="*", help='CMIP5 experiment', required=True)
parser.add_argument('-m','--model', type=str, nargs="*", help='', required=False)
parser.add_argument('-v','--variable', type=VarCmipTable, nargs="*", help='combination of CMIP5 variable & cmip_table Ex. tas_Amon', required=True)
parser.add_argument('-t','--table', action='store_true', default='store_false', help="csv table option, default is False",
required=False)
parser.add_argument('-o','--output', type=str, nargs="?", default="variables", help='''output files root,
default is variables''', required=False)
return vars(parser.parse_args())
def assign_constraint():
''' Assign default values and input to constraints '''
global var0, exp0, mod0, table, outfile
var0 = []
exp0 = []
mod0 = []
outfile = 'variables'
# assign constraints from arguments list
args = parse_input()
var0=args["variable"]
if args["model"]: mod0=args["model"]
exp0=args["experiment"]
table=args["table"]
outfile=args["output"]
return
def correct_model(model):
    ''' Correct the names of models that have two (directory name vs search name), so the search works '''
# list model as dict{dir name : search name}
models={"ACCESS1-0" : "ACCESS1.0", "ACCESS1-3" : "ACCESS1.3",
"CESM1-BGC" : "CESM1(BGC)", "CESM1-CAM5" : "CESM1(CAM5)",
"CESM1-CAM5-1-FV2" : "CESM1(CAM5.1,FV2)", "CESM1-WACCM" : "CESM1(WACCM)",
"CESM1-FASTCHEM" : "CESM1(FASTCHEM)", "bcc-csm1-1" : "BCC-CSM1.1",
"bcc-csm1-1-m" : "BCC-CSM1.1(m)", "inmcm4" : "INM-CM4"}
# if the current model is one of the dict keys, change name
if model in models.keys():
return models[model]
return model
def tree_exist(furl):
    ''' Return [exists, tree_path], where exists is True if the file is in tmp/tree '''
replica_dir = "/g/data1/ua6/unofficial-ESG-replica/tmp/tree/"
tree_path = replica_dir + furl
return [opath.exists(tree_path),tree_path]
def write_file():
''' Write info on file to download or replica output '''
global info
files = {"R" : orep, "D" : odown}
for item in info.values():
outfile = files[item[-1]]
outfile.write(",".join(item[0:-1])+"\n")
def file_details(fname):
''' Split the filename in variable, MIP code, model, experiment, ensemble (period is excluded) '''
namebits = fname.replace("'","").split('_')
if len(namebits) >= 5:
details = namebits[0:5]
else:
details = []
return details
def find_string(bits,string):
''' Returns matching string if found in directory structure '''
dummy = filter(lambda el: re.findall( string, el), bits)
if len(dummy) == 0:
return 'no_version'
else:
return dummy[0]
def get_info(fname,path):
''' Collect the info on a file form its path return it in a list '''
version = '[a-z]*201[0-9][0-1][0-9][0-3][0-9]'
bits = path.split('/')
finfo = file_details(fname)
finfo.append(find_string(bits[:-1],version))
finfo.append(path)
return finfo
def parse_file(wgetfile,varlist,modlist,exp):
''' extract file list from wget file '''
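    # Each file line in the wget script is expected to look roughly like
    # (hypothetical example; quoting and checksum type vary by node):
    # 'tas_Amon_CCSM4_rcp45_r1i1p1_200601-210012.nc' 'http://host/path/file.nc' 'SHA256' 'abc123...'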
# open wget file, read content saving to a list of lines and close again
infile = open(wgetfile,'r')
lines = infile.readlines()
infile.close
# if wget didn't return files print a warning and exit function
if lines[0] == "No files were found that matched the query":
print lines[0] + " for ", varlist, modlist, exp
return False
else:
# select only the files lines starting as var_cmortable_model_exp ...
result=[]
# if modlist empty add to it a regex string indicating model name
if len(modlist) > 0:
comb_constr = itertools.product(*[varlist,modlist])
filestrs = ["_".join(x) for x in comb_constr]
else:
filestrs = [var + '_[A-Za-z0-9-.()]*_' for var in varlist]
for line in lines:
match = [re.search(pat,line) for pat in filestrs]
if match.count(None) != len(match) and line.find(exp):
[fname,furl,hash_type,fhash] = line.replace("'","").split()
if hash_type in ["SHA256","sha256","md5","MD5"]:
result.append([fname, furl.replace("http://",""), fhash, hash_type])
else:
                    print "Error: parse_file() is selecting the wrong lines!"
print line
sys.exit()
return result
def check_hash(tree_path,fhash,hash_type):
    ''' Execute md5sum/sha256sum on the file in the tree and return True if it matches the hash in the wget file '''
hash_cmd="md5sum"
if hash_type in ["SHA256","sha256"]: hash_cmd="sha256sum"
tree_hash = subprocess.check_output([hash_cmd, tree_path]).split()[0]
return tree_hash == fhash
def process_file(result):
    ''' Check if the file exists on the tree and, if so, check its md5/sha256 hash '''
info = {}
[fname,furl,fhash,hash_type]=result
[bool,tree_path]=tree_exist(furl)
    # some servers have been renamed: e.g. pcmdi9.llnl.gov is now aims3.llnl.gov, so we substitute and check those too
print furl, bool
if not bool and furl[0:14]=='aims3.llnl.gov':
for num in [3,7,9]:
other_furl=furl.replace('aims3','pcmdi'+str(num))
print "other_furl ", other_furl
[bool,tree_path]=tree_exist(other_furl)
if bool:
print "bool after extra check for num ", bool, num
break
info[furl] = get_info(fname,tree_path)
# if file exists in tree compare md5/sha256 with values in wgetfile, else add to update
if "ACCESS" in fname or "CSIRO" in fname or (bool and check_hash(tree_path,fhash,hash_type)):
info[furl].append("R")
else:
info[furl][-1] = "http://" + furl
info[furl].append("D")
return info
def retrieve_info(query_item):
''' retrieve items of info related to input query combination '''
global info
# info order is: 0-var, 1-mip, 2-mod, 3-exp, 4-ens, 5-ver, 6-fname, 7-status
var, mip = query_item[0].split("_")
rows={}
# add the items in info with matching var,mip,exp to rows as dictionaries
for item in info.values():
if var == item[0] and mip == item[1] and query_item[-1] == item[3]:
key = (item[2], item[4], item[5])
try:
rows[key].append(item[7])
except:
rows[key] = [item[7]]
# loop through mod_ens_vers combination counting files to download/update
newrows=[]
for key in rows.keys():
ndown = rows[key].count("D")
status = key[2] + " " + str(len(rows[key])) + " files, " + str(ndown) + " to update"
newrows.append([tuple(key[0:2]), status])
return newrows
def result_matrix(querypub,exp0):
''' Build a matrix of the results to output to csv table '''
global gmatrix
# querypub contains only published combinations
# initialize dictionary of exp/matrices
gmatrix = {}
for exp in exp0:
# for each var_mip retrieve_info create a dict{var_mip:[[(mod1,ens1), details list][(mod1,ens2), details list],[..]]}
# they are added to exp_dict and each key will be column header, (mod1,ens1) will indicate row and details will be cell value
exp_dict={}
infoexp = [x for x in querypub if x[-1] == exp]
for item in infoexp:
exp_dict[item[0]]=retrieve_info(item)
gmatrix[exp]= exp_dict
return
def compare_query(var0,mod0,exp0):
''' compare the var_mod_exp combinations found with the requested ones '''
global info, opub
# for each el. of info: join var_mip, transform to tuple, finally convert modified info to set
info_set = set(map(tuple,[["_".join(x[0:2])] + x[2:-4] for x in info.values()]))
# create set with all possible combinations of var_mip,model,exp based on constraints
# if models not specified create a model list based on wget result
if len(mod0) < 1: mod0 = [x[2] for x in info.values()]
comb_query = set(itertools.product(*[var0,mod0,exp0]))
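    # e.g. itertools.product(['tas_Amon'], ['CCSM4'], ['rcp45']) yields the
    # tuple ('tas_Amon', 'CCSM4', 'rcp45'); any such tuple missing from
    # info_set is a combination that has not been published yet.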
# the difference between two sets gives combinations not published yet
nopub_set = comb_query.difference(info_set)
for item in nopub_set:
opub.write(",".join(item) + "\n")
# write a matrix to pass results to csv table in suitable format
if table: result_matrix(comb_query.difference(nopub_set),exp0)
return nopub_set
def write_table(nopub):
''' write a csv table to summarise search '''
global gmatrix
for exp in exp0:
# length of dictionary gmatrix[exp] is number of var_mip columns
# maximum length of list in each dict inside gmatrix[exp] is number of mod/ens rows
emat = gmatrix[exp]
klist = emat.keys()
# check if there are extra variables never published
evar = list(set( [np[0] for np in nopub if np[0] not in klist if np[-1]==exp ] ))
# calculate ncol,nrow keeping into account var never published
ncol = len(klist) +2 + len(evar)
nrow = max([len(emat[x]) for x in klist]) +1
# open/create a csv file for each experiment
try:
csv = open(exp+".csv","w")
except:
print "Can not open file " + exp + ".csv"
csv.write(" model_ensemble/variable," + ",".join(klist+evar) + "\n")
# pre-fill all values with "NP", leave 1 column and 1 row for headers
# write first two columns with all (mod,ens) pairs
col1= [emat[var][i][0] for var in klist for i in range(len(emat[var])) ]
col1 = list(set(col1))
col1_sort=sorted(col1)
# write first column with mod_ens combinations & save row indexes in dict where keys are (mod,ens) combination
# print col1_sort
for modens in col1_sort:
csv.write(modens[0] + "_" + modens[1])
for var in klist:
line = [item[1].replace(", " , " (") for item in emat[var] if item[0] == modens]
if len(line) > 0:
csv.write(", " + " ".join(line) + ")")
else:
csv.write(",NP")
if len(evar) > 0:
for var in evar:
csv.write(",NP")
csv.write("\n")
csv.close()
print "Data written in table "
return
def main():
''' Main program starts here '''
global opub, odown, orep, info
# somefile is false starting turns to true if at elast one file found
somefile=False
# read inputs and assign constraints
assign_constraint()
fdown = outfile + '_to_download.csv'
frep = outfile + '_replica.csv'
fpub = outfile + '_not_published.csv'
# test reading inputs
print var0
print exp0
print mod0
print fdown
print frep
print fpub
    # if one of the output files exists, issue a warning and exit
if opath.isfile(fdown) or opath.isfile(frep) or opath.isfile(fpub):
print "Warning: one of the output files exists, exit to not overwrite!"
sys.exit()
info={}
# loop through experiments, 1st create a wget request for exp, then parse_file
for exp in exp0:
wgetfile = "wget_" + exp + ".out"
result=parse_file(wgetfile,var0,mod0,exp)
# if found any files matching constraints, process them one by one
# using multiprocessing Pool to parallelise process_file
if result:
async_results = Pool(1).map_async(process_file, result)
for dinfo in async_results.get():
info.update(dinfo)
somefile=True
print "Finished checksum for existing files"
# if it couldn't find any file for any experiment then exit
if not somefile:
sys.exit("No files found for any of the experiments, exiting!")
# open not published file
opub=open(fpub, "w")
opub.write("var_mip-table, model, experiment\n")
# build all requested combinations and compare to files found
nopub_set = compare_query(var0,mod0,exp0)
# write replica and download output files
# open output files and write header
odown=open(fdown, "w")
odown.write("var, mip_table, model, experiment, ensemble, version, file url\n")
orep=open(frep, "w")
orep.write("var, mip_table, model, experiment, ensemble, version, filepath\n")
write_file()
# close all the output files
odown.close()
orep.close()
opub.close()
    print "Finished writing output files"
# if table option create/open spreadsheet
# if table option write summary table in csv file
if table:
write_table(nopub_set)
# check python version and then call main()
if sys.version_info < ( 2, 7):
# python too old, kill the script
sys.exit("This script requires Python 2.7 or newer!")
main()
| apache-2.0 |
Dexhub/MTX | src/mem/slicc/ast/OutPortDeclAST.py | 92 | 2802 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.DeclAST import DeclAST
from slicc.ast.TypeAST import TypeAST
from slicc.symbols import Var
from slicc.symbols import Type
class OutPortDeclAST(DeclAST):
def __init__(self, slicc, ident, msg_type, var_expr, pairs):
super(OutPortDeclAST, self).__init__(slicc, pairs)
self.ident = ident
self.msg_type = msg_type
self.var_expr = var_expr
self.queue_type = TypeAST(slicc, "OutPort")
def __repr__(self):
return "[OutPortDecl: %r]" % self.ident
def generate(self):
code = self.slicc.codeFormatter(newlines=False)
queue_type = self.var_expr.generate(code)
if not queue_type.isOutPort:
self.error("The outport queue's type must have the 'outport' " +
"attribute. Type '%s' does not have this attribute.",
(queue_type))
if not self.symtab.find(self.msg_type.ident, Type):
self.error("The message type '%s' does not exist.",
self.msg_type.ident)
var = Var(self.symtab, self.ident, self.location, self.queue_type.type,
str(code), self.pairs)
self.symtab.newSymbol(var)
| bsd-3-clause |
geomapdev/idea-box | src/idea/tests/editidea_tests.py | 5 | 6582 | from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import TestCase
from idea import models, views
from idea.forms import IdeaForm, PrivateIdeaForm
from idea.tests.utils import mock_req, random_user, login, create_superuser
from datetime import date
from mock import patch
def create_idea(user=None):
if not user:
user = random_user()
state = models.State.objects.get(name='Active')
idea = models.Idea(creator=user, title='Transit subsidy to Mars',
text='Aliens need assistance.', state=state)
banner = models.Banner(id=1, title="AAAA", text="text1",
start_date=date.today())
banner.save()
idea.banner = banner
idea.save()
idea.tags.add("test tag")
return idea
class EditIdeaTest(TestCase):
fixtures = ['state']
def setUp(self):
create_superuser()
def test_edit_good_idea(self):
""" Test an normal POST submission to edit an idea. """
user = login(self)
idea = create_idea(user=user)
self.assertEquals(models.Idea.objects.all().count(), 1)
new_title = "new title"
new_summary = "new summary"
new_text = "new text"
new_banner = models.Banner(id=2, title="BBB", text="text2",
start_date=date.today())
new_banner.save()
resp = self.client.post(reverse('idea:edit_idea', args=(idea.id,)),
{'title':new_title,
'summary':new_summary,
'text':new_text,
'banner': new_banner.id})
self.assertEqual(resp.status_code, 302)
self.assertIn('detail', resp['Location'])
self.assertEquals(models.Idea.objects.all().count(), 1)
# ensure editing an idea does not up the vote count
# vote count is 0 because votes are added in views.add_idea, which is not used in this test
num_voters = get_user_model().objects.filter(vote__idea__pk=idea.id, vote__vote=1).count()
self.assertEqual(num_voters, 0)
refresh_idea = models.Idea.objects.get(id=idea.id)
self.assertEqual(refresh_idea.title, new_title)
self.assertEqual(refresh_idea.summary, new_summary)
self.assertEqual(refresh_idea.text, new_text)
self.assertEqual(refresh_idea.banner, new_banner)
# verify the expected fields remain the same
self.assertEqual(refresh_idea.tags.count(), 1)
self.assertEqual(refresh_idea.tags.all()[0].name, "test tag")
self.assertEqual(refresh_idea.creator, idea.creator)
def test_bad_edit_idea(self):
""" Test an incomplete POST submission to edit an idea. """
user = login(self)
idea = create_idea(user=user)
resp = self.client.post(reverse('idea:edit_idea', args=(idea.id,)), {'text':'new title'})
self.assertEqual(resp.status_code, 200)
self.assertIn('This field is required.', resp.content)
self.assertEquals(models.Idea.objects.all().count(), 1)
refresh_idea = models.Idea.objects.get(id=idea.id)
self.assertEqual(refresh_idea.title, idea.title)
self.assertEqual(refresh_idea.banner, idea.banner)
def test_must_be_logged_in(self):
""" A user must be logged in to edit an idea. """
user = login(self)
idea = create_idea(user=user)
self.client.logout()
resp = self.client.post(reverse('idea:edit_idea', args=(idea.id,)), {'title':'test title', 'summary':'test summary', 'text':'test text'})
self.assertEqual(resp.status_code, 302)
self.assertIn('login', resp['Location'])
def test_edit_ignores_tags(self):
""" A user must be logged in to edit an idea. """
user = login(self)
idea = create_idea(user=user)
resp = self.client.post(reverse('idea:edit_idea', args=(idea.id,)), {'title':'test title', 'summary':'test summary', 'text':'test text', 'tags':'sample, newtag'})
self.assertEqual(resp.status_code, 302)
self.assertIn('detail', resp['Location'])
refresh_idea = models.Idea.objects.get(id=idea.id)
self.assertEqual(refresh_idea.tags.count(), 1)
self.assertEqual(refresh_idea.tags.all()[0].name, "test tag")
@patch('idea.views.render')
def test_edit_idea_with_private_banner(self, render):
"""
Verify that the private banner field auto-populates properly
"""
user = login(self)
state = models.State.objects.get(name='Active')
idea1 = models.Idea(creator=user, title='Transit subsidy to Venus',
text='Aliens need assistance.', state=state)
banner1 = models.Banner(id=1, title="AAAA", text="text1",
start_date=date.today(), is_private=True)
banner1.save()
idea1.banner = banner1
idea1.save()
idea2 = models.Idea(creator=user, title='Transit subsidy to Venus',
text='Aliens need assistance.', state=state)
banner2 = models.Banner(id=2, title="BBBB", text="text2",
start_date=date.today())
banner2.save()
idea2.banner = banner2
idea2.save()
views.edit_idea(mock_req(user=user), idea1.id)
context = render.call_args[0][2]
self.assertTrue('form' in context)
self.assertTrue(isinstance(context['form'], PrivateIdeaForm))
banner_field = context['form'].fields['banner']
selected = context['form'].initial['banner']
self.assertEqual(banner1.id, selected)
self.assertEqual(context['form'].fields['banner'].widget.choices.field.empty_label, None)
self.assertIn(banner1, banner_field._queryset)
self.assertNotIn(banner2, banner_field._queryset)
views.edit_idea(mock_req(user=user), idea2.id)
context = render.call_args[0][2]
self.assertTrue('form' in context)
self.assertTrue(isinstance(context['form'], IdeaForm))
self.assertFalse(isinstance(context['form'], PrivateIdeaForm))
banner_field = context['form'].fields['banner']
selected = context['form'].initial['banner']
self.assertEqual(banner2.id, selected)
self.assertEqual(context['form'].fields['banner'].widget.choices.field.empty_label, 'Select')
self.assertNotIn(banner1, banner_field._queryset)
self.assertIn(banner2, banner_field._queryset)
| cc0-1.0 |
boyuegame/kbengine | kbe/res/scripts/common/Lib/test/test_bz2.py | 72 | 32972 | from test import support
from test.support import bigmemtest, _4G
import unittest
from io import BytesIO
import os
import pickle
import random
import subprocess
import sys
from test.support import unlink
try:
import threading
except ImportError:
threading = None
# Skip tests if the bz2 module doesn't exist.
bz2 = support.import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
class BaseTest(unittest.TestCase):
"Base for other testcases."
TEXT_LINES = [
b'root:x:0:0:root:/root:/bin/bash\n',
b'bin:x:1:1:bin:/bin:\n',
b'daemon:x:2:2:daemon:/sbin:\n',
b'adm:x:3:4:adm:/var/adm:\n',
b'lp:x:4:7:lp:/var/spool/lpd:\n',
b'sync:x:5:0:sync:/sbin:/bin/sync\n',
b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n',
b'halt:x:7:0:halt:/sbin:/sbin/halt\n',
b'mail:x:8:12:mail:/var/spool/mail:\n',
b'news:x:9:13:news:/var/spool/news:\n',
b'uucp:x:10:14:uucp:/var/spool/uucp:\n',
b'operator:x:11:0:operator:/root:\n',
b'games:x:12:100:games:/usr/games:\n',
b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n',
b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n',
b'nobody:x:65534:65534:Nobody:/home:\n',
b'postfix:x:100:101:postfix:/var/spool/postfix:\n',
b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n',
b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n',
b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n',
b'www:x:103:104::/var/www:/bin/false\n',
]
TEXT = b''.join(TEXT_LINES)
DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
EMPTY_DATA = b'BZh9\x17rE8P\x90\x00\x00\x00\x00'
BAD_DATA = b'this is not a valid bzip2 file'
def setUp(self):
self.filename = support.TESTFN
def tearDown(self):
if os.path.isfile(self.filename):
os.unlink(self.filename)
if sys.platform == "win32":
# bunzip2 isn't available to run on Windows.
def decompress(self, data):
return bz2.decompress(data)
else:
def decompress(self, data):
pop = subprocess.Popen("bunzip2", shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pop.stdin.write(data)
pop.stdin.close()
ret = pop.stdout.read()
pop.stdout.close()
if pop.wait() != 0:
ret = bz2.decompress(data)
return ret
class BZ2FileTest(BaseTest):
"Test the BZ2File class."
def createTempFile(self, streams=1, suffix=b""):
with open(self.filename, "wb") as f:
f.write(self.DATA * streams)
f.write(suffix)
def testBadArgs(self):
self.assertRaises(TypeError, BZ2File, 123.456)
self.assertRaises(ValueError, BZ2File, "/dev/null", "z")
self.assertRaises(ValueError, BZ2File, "/dev/null", "rx")
self.assertRaises(ValueError, BZ2File, "/dev/null", "rbt")
self.assertRaises(ValueError, BZ2File, "/dev/null", compresslevel=0)
self.assertRaises(ValueError, BZ2File, "/dev/null", compresslevel=10)
def testRead(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(), self.TEXT)
def testReadBadFile(self):
self.createTempFile(streams=0, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertRaises(OSError, bz2f.read)
def testReadMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testReadMonkeyMultiStream(self):
# Test BZ2File.read() on a multi-stream archive where a stream
# boundary coincides with the end of the raw read buffer.
buffer_size = bz2._BUFFER_SIZE
bz2._BUFFER_SIZE = len(self.DATA)
try:
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(), self.TEXT * 5)
finally:
bz2._BUFFER_SIZE = buffer_size
def testReadTrailingJunk(self):
self.createTempFile(suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT)
def testReadMultiStreamTrailingJunk(self):
self.createTempFile(streams=5, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testRead0(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(0), b"")
def testReadChunk10(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT)
def testReadChunk10MultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT * 5)
def testRead100(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(100), self.TEXT[:100])
def testPeek(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testReadInto(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
n = 128
b = bytearray(n)
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b, self.TEXT[:n])
n = len(self.TEXT) - n
b = bytearray(len(self.TEXT))
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b[:n], self.TEXT[-n:])
def testReadLine(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES:
self.assertEqual(bz2f.readline(), line)
def testReadLineMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES * 5:
self.assertEqual(bz2f.readline(), line)
def testReadLines(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES)
def testReadLinesMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5)
def testIterator(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES)
def testIteratorMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5)
def testClosedIteratorDeadlock(self):
# Issue #3309: Iteration on a closed BZ2File should release the lock.
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.close()
self.assertRaises(ValueError, next, bz2f)
# This call will deadlock if the above call failed to release the lock.
self.assertRaises(ValueError, bz2f.readlines)
def testWrite(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT)
def testWriteChunks10(self):
with BZ2File(self.filename, "w") as bz2f:
n = 0
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
bz2f.write(str)
n += 1
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT)
def testWriteNonDefaultCompressLevel(self):
expected = bz2.compress(self.TEXT, compresslevel=5)
with BZ2File(self.filename, "w", compresslevel=5) as bz2f:
bz2f.write(self.TEXT)
with open(self.filename, "rb") as f:
self.assertEqual(f.read(), expected)
def testWriteLines(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.writelines)
bz2f.writelines(self.TEXT_LINES)
# Issue #1535500: Calling writelines() on a closed BZ2File
# should raise an exception.
self.assertRaises(ValueError, bz2f.writelines, ["a"])
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT)
def testWriteMethodsOnReadOnlyFile(self):
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(b"abc")
with BZ2File(self.filename, "r") as bz2f:
self.assertRaises(OSError, bz2f.write, b"a")
self.assertRaises(OSError, bz2f.writelines, [b"a"])
def testAppend(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with BZ2File(self.filename, "a") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT * 2)
def testSeekForward(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekForwardAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(len(self.TEXT) + 150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwards(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def testSeekBackwardsAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
readto = len(self.TEXT) + 100
while readto > 0:
readto -= len(bz2f.read(readto))
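            # The loop leaves us 100 bytes into the second stream; seeking
            # back 150 bytes crosses the stream boundary into the last 50
            # bytes of the first stream.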
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT)
def testSeekBackwardsFromEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150, 2)
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
def testSeekBackwardsFromEndAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-1000, 2)
self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:])
def testSeekPostEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwice(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwiceMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPreStart(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT)
def testSeekPreStartMultiStream(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT * 2)
def testFileno(self):
self.createTempFile()
with open(self.filename, 'rb') as rawf:
bz2f = BZ2File(rawf)
try:
self.assertEqual(bz2f.fileno(), rawf.fileno())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.fileno)
def testSeekable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.seekable())
bz2f.read()
self.assertTrue(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
src = BytesIO(self.DATA)
src.seekable = lambda: False
bz2f = BZ2File(src)
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
def testReadable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.readable())
bz2f.read()
self.assertTrue(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
def testWritable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertFalse(bz2f.writable())
bz2f.read()
self.assertFalse(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertTrue(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
def testOpenDel(self):
self.createTempFile()
for i in range(10000):
o = BZ2File(self.filename)
del o
def testOpenNonexistent(self):
self.assertRaises(OSError, BZ2File, "/non/existent")
def testReadlinesNoNewline(self):
# Issue #1191043: readlines() fails on a file containing no newline.
data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
with open(self.filename, "wb") as f:
f.write(data)
with BZ2File(self.filename) as bz2f:
lines = bz2f.readlines()
self.assertEqual(lines, [b'Test'])
with BZ2File(self.filename) as bz2f:
xlines = list(bz2f.readlines())
self.assertEqual(xlines, [b'Test'])
def testContextProtocol(self):
f = None
with BZ2File(self.filename, "wb") as f:
f.write(b"xxx")
f = BZ2File(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with BZ2File(self.filename, "wb") as f:
1/0
except ZeroDivisionError:
pass
else:
self.fail("1/0 didn't raise an exception")
@unittest.skipUnless(threading, 'Threading required for this test.')
def testThreading(self):
# Issue #7205: Using a BZ2File from several threads shouldn't deadlock.
data = b"1" * 2**20
nthreads = 10
with BZ2File(self.filename, 'wb') as f:
def comp():
for i in range(5):
f.write(data)
threads = [threading.Thread(target=comp) for i in range(nthreads)]
for t in threads:
t.start()
for t in threads:
t.join()
def testWithoutThreading(self):
module = support.import_fresh_module("bz2", blocked=("threading",))
with module.BZ2File(self.filename, "wb") as f:
f.write(b"abc")
with module.BZ2File(self.filename, "rb") as f:
self.assertEqual(f.read(), b"abc")
def testMixedIterationAndReads(self):
self.createTempFile()
linelen = len(self.TEXT_LINES[0])
halflen = linelen // 2
with BZ2File(self.filename) as bz2f:
bz2f.read(halflen)
self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
self.assertEqual(bz2f.read(), self.TEXT[linelen:])
with BZ2File(self.filename) as bz2f:
bz2f.readline()
self.assertEqual(next(bz2f), self.TEXT_LINES[1])
self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
with BZ2File(self.filename) as bz2f:
bz2f.readlines()
self.assertRaises(StopIteration, next, bz2f)
self.assertEqual(bz2f.readlines(), [])
def testMultiStreamOrdering(self):
# Test the ordering of streams when reading a multi-stream archive.
data1 = b"foo" * 1000
data2 = b"bar" * 1000
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(data1)
with BZ2File(self.filename, "a") as bz2f:
bz2f.write(data2)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), data1 + data2)
def testOpenBytesFilename(self):
str_filename = self.filename
try:
bytes_filename = str_filename.encode("ascii")
except UnicodeEncodeError:
self.skipTest("Temporary file name needs to be ASCII")
with BZ2File(bytes_filename, "wb") as f:
f.write(self.DATA)
with BZ2File(bytes_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
# Sanity check that we are actually operating on the right file.
with BZ2File(str_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
# Tests for a BZ2File wrapping another file object:
def testReadBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(), self.TEXT)
self.assertFalse(bio.closed)
def testPeekBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testWriteBytesIO(self):
with BytesIO() as bio:
with BZ2File(bio, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
self.assertEqual(self.decompress(bio.getvalue()), self.TEXT)
self.assertFalse(bio.closed)
def testSeekForwardBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwardsBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def test_read_truncated(self):
# Drop the eos_magic field (6 bytes) and CRC (4 bytes).
truncated = self.DATA[:-10]
with BZ2File(BytesIO(truncated)) as f:
self.assertRaises(EOFError, f.read)
with BZ2File(BytesIO(truncated)) as f:
self.assertEqual(f.read(len(self.TEXT)), self.TEXT)
self.assertRaises(EOFError, f.read, 1)
# Incomplete 4-byte file header, and block header of at least 146 bits.
for i in range(22):
with BZ2File(BytesIO(truncated[:i])) as f:
self.assertRaises(EOFError, f.read, 1)
class BZ2CompressorTest(BaseTest):
def testCompress(self):
bz2c = BZ2Compressor()
self.assertRaises(TypeError, bz2c.compress)
data = bz2c.compress(self.TEXT)
data += bz2c.flush()
self.assertEqual(self.decompress(data), self.TEXT)
def testCompressEmptyString(self):
bz2c = BZ2Compressor()
data = bz2c.compress(b'')
data += bz2c.flush()
self.assertEqual(data, self.EMPTY_DATA)
def testCompressChunks10(self):
bz2c = BZ2Compressor()
n = 0
data = b''
while True:
            chunk = self.TEXT[n*10:(n+1)*10]
            if not chunk:
                break
            data += bz2c.compress(chunk)
n += 1
data += bz2c.flush()
self.assertEqual(self.decompress(data), self.TEXT)
@bigmemtest(size=_4G + 100, memuse=2)
def testCompress4G(self, size):
# "Test BZ2Compressor.compress()/flush() with >4GiB input"
bz2c = BZ2Compressor()
data = b"x" * size
try:
compressed = bz2c.compress(data)
compressed += bz2c.flush()
finally:
data = None # Release memory
data = bz2.decompress(compressed)
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip(b"x")), 0)
finally:
data = None
def testPickle(self):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Compressor())
class BZ2DecompressorTest(BaseTest):
def test_Constructor(self):
self.assertRaises(TypeError, BZ2Decompressor, 42)
def testDecompress(self):
bz2d = BZ2Decompressor()
self.assertRaises(TypeError, bz2d.decompress)
text = bz2d.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressChunks10(self):
bz2d = BZ2Decompressor()
text = b''
n = 0
while True:
            chunk = self.DATA[n*10:(n+1)*10]
            if not chunk:
                break
            text += bz2d.decompress(chunk)
n += 1
self.assertEqual(text, self.TEXT)
def testDecompressUnusedData(self):
bz2d = BZ2Decompressor()
unused_data = b"this is unused data"
text = bz2d.decompress(self.DATA+unused_data)
self.assertEqual(text, self.TEXT)
self.assertEqual(bz2d.unused_data, unused_data)
def testEOFError(self):
bz2d = BZ2Decompressor()
text = bz2d.decompress(self.DATA)
self.assertRaises(EOFError, bz2d.decompress, b"anything")
self.assertRaises(EOFError, bz2d.decompress, b"")
@bigmemtest(size=_4G + 100, memuse=3.3)
def testDecompress4G(self, size):
# "Test BZ2Decompressor.decompress() with >4GiB input"
blocksize = 10 * 1024 * 1024
block = random.getrandbits(blocksize * 8).to_bytes(blocksize, 'little')
try:
data = block * (size // blocksize + 1)
compressed = bz2.compress(data)
bz2d = BZ2Decompressor()
decompressed = bz2d.decompress(compressed)
self.assertTrue(decompressed == data)
finally:
data = None
compressed = None
decompressed = None
def testPickle(self):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Decompressor())
class CompressDecompressTest(BaseTest):
def testCompress(self):
data = bz2.compress(self.TEXT)
self.assertEqual(self.decompress(data), self.TEXT)
def testCompressEmptyString(self):
text = bz2.compress(b'')
self.assertEqual(text, self.EMPTY_DATA)
def testDecompress(self):
text = bz2.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressEmpty(self):
text = bz2.decompress(b"")
self.assertEqual(text, b"")
def testDecompressToEmptyString(self):
text = bz2.decompress(self.EMPTY_DATA)
self.assertEqual(text, b'')
def testDecompressIncomplete(self):
self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])
def testDecompressBadData(self):
self.assertRaises(OSError, bz2.decompress, self.BAD_DATA)
def testDecompressMultiStream(self):
text = bz2.decompress(self.DATA * 5)
self.assertEqual(text, self.TEXT * 5)
def testDecompressTrailingJunk(self):
text = bz2.decompress(self.DATA + self.BAD_DATA)
self.assertEqual(text, self.TEXT)
def testDecompressMultiStreamTrailingJunk(self):
text = bz2.decompress(self.DATA * 5 + self.BAD_DATA)
self.assertEqual(text, self.TEXT * 5)
class OpenTest(BaseTest):
"Test the open function."
def open(self, *args, **kwargs):
return bz2.open(*args, **kwargs)
def test_binary_modes(self):
for mode in ("wb", "xb"):
if mode == "xb":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = self.decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "rb") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "ab") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = self.decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_implicit_binary_modes(self):
# Test implicit binary modes (no "b" or "t" in mode string).
for mode in ("w", "x"):
if mode == "x":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = self.decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "a") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = self.decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_text_modes(self):
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
for mode in ("wt", "xt"):
if mode == "xt":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = self.decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt") as f:
self.assertEqual(f.read(), text)
with self.open(self.filename, "at") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = self.decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol * 2)
def test_x_mode(self):
for mode in ("x", "xb", "xt"):
unlink(self.filename)
with self.open(self.filename, mode) as f:
pass
with self.assertRaises(FileExistsError):
with self.open(self.filename, mode) as f:
pass
def test_fileobj(self):
with self.open(BytesIO(self.DATA), "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(BytesIO(self.DATA), "rb") as f:
self.assertEqual(f.read(), self.TEXT)
text = self.TEXT.decode("ascii")
with self.open(BytesIO(self.DATA), "rt") as f:
self.assertEqual(f.read(), text)
def test_bad_params(self):
# Test invalid parameter combinations.
self.assertRaises(ValueError,
self.open, self.filename, "wbt")
self.assertRaises(ValueError,
self.open, self.filename, "xbt")
self.assertRaises(ValueError,
self.open, self.filename, "rb", encoding="utf-8")
self.assertRaises(ValueError,
self.open, self.filename, "rb", errors="ignore")
self.assertRaises(ValueError,
self.open, self.filename, "rb", newline="\n")
def test_encoding(self):
# Test non-default encoding.
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
with self.open(self.filename, "wt", encoding="utf-16-le") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = self.decompress(f.read()).decode("utf-16-le")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt", encoding="utf-16-le") as f:
self.assertEqual(f.read(), text)
def test_encoding_error_handler(self):
# Test with non-default encoding error handler.
with self.open(self.filename, "wb") as f:
f.write(b"foo\xffbar")
with self.open(self.filename, "rt", encoding="ascii", errors="ignore") \
as f:
self.assertEqual(f.read(), "foobar")
def test_newline(self):
# Test with explicit newline (universal newline mode disabled).
text = self.TEXT.decode("ascii")
with self.open(self.filename, "wt", newline="\n") as f:
f.write(text)
with self.open(self.filename, "rt", newline="\r") as f:
self.assertEqual(f.readlines(), [text])
def test_main():
support.run_unittest(
BZ2FileTest,
BZ2CompressorTest,
BZ2DecompressorTest,
CompressDecompressTest,
OpenTest,
)
support.reap_children()
if __name__ == '__main__':
test_main()
| lgpl-3.0 |
chylli/phantomjs | src/qt/qtwebkit/Source/WebCore/inspector/compile-front-end.py | 116 | 15388 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import os.path
import generate_protocol_externs
import shutil
import sys
import tempfile
inspector_path = "Source/WebCore/inspector"
inspector_frontend_path = inspector_path + "/front-end"
protocol_externs_path = inspector_frontend_path + "/protocol-externs.js"
generate_protocol_externs.generate_protocol_externs(protocol_externs_path, inspector_path + "/Inspector.json")
jsmodule_name_prefix = "jsmodule_"
modules = [
{
"name": "common",
"dependencies": [],
"sources": [
"Color.js",
"DOMExtension.js",
"Object.js",
"ParsedURL.js",
"Progress.js",
"Settings.js",
"UIString.js",
"UserMetrics.js",
"utilities.js",
]
},
{
"name": "sdk",
"dependencies": ["common"],
"sources": [
"ApplicationCacheModel.js",
"CompilerScriptMapping.js",
"ConsoleModel.js",
"ContentProvider.js",
"ContentProviderBasedProjectDelegate.js",
"ContentProviders.js",
"CookieParser.js",
"CSSMetadata.js",
"CSSStyleModel.js",
"BreakpointManager.js",
"Database.js",
"DOMAgent.js",
"DOMStorage.js",
"DebuggerModel.js",
"DebuggerScriptMapping.js",
"FileManager.js",
"FileMapping.js",
"FileSystemMapping.js",
"FileSystemModel.js",
"FileSystemProjectDelegate.js",
"FileUtils.js",
"HAREntry.js",
"IndexedDBModel.js",
"InspectorBackend.js",
"IsolatedFileSystemManager.js",
"IsolatedFileSystem.js",
"Linkifier.js",
"NetworkLog.js",
"NetworkUISourceCodeProvider.js",
"PresentationConsoleMessageHelper.js",
"RuntimeModel.js",
"SASSSourceMapping.js",
"Script.js",
"ScriptFormatter.js",
"ScriptSnippetModel.js",
"SimpleWorkspaceProvider.js",
"SnippetStorage.js",
"SourceMapping.js",
"StylesSourceMapping.js",
"TimelineManager.js",
"RemoteObject.js",
"Resource.js",
"DefaultScriptMapping.js",
"ResourceScriptMapping.js",
"LiveEditSupport.js",
"ResourceTreeModel.js",
"ResourceType.js",
"ResourceUtils.js",
"SourceMap.js",
"NetworkManager.js",
"NetworkRequest.js",
"UISourceCode.js",
"UserAgentSupport.js",
"Workspace.js",
"protocol-externs.js",
]
},
{
"name": "ui",
"dependencies": ["common"],
"sources": [
"Checkbox.js",
"ContextMenu.js",
"DOMSyntaxHighlighter.js",
"DataGrid.js",
"DefaultTextEditor.js",
"Dialog.js",
"DockController.js",
"Drawer.js",
"EmptyView.js",
"GoToLineDialog.js",
"HelpScreen.js",
"InspectorView.js",
"KeyboardShortcut.js",
"OverviewGrid.js",
"Panel.js",
"PanelEnablerView.js",
"Placard.js",
"Popover.js",
"ProgressIndicator.js",
"PropertiesSection.js",
"SearchController.js",
"Section.js",
"SidebarPane.js",
"SidebarTreeElement.js",
"ShortcutsScreen.js",
"ShowMoreDataGridNode.js",
"SidebarOverlay.js",
"SoftContextMenu.js",
"SourceTokenizer.js",
"Spectrum.js",
"SplitView.js",
"SidebarView.js",
"StatusBarButton.js",
"SuggestBox.js",
"TabbedPane.js",
"TextEditor.js",
"TextEditorHighlighter.js",
"TextEditorModel.js",
"TextPrompt.js",
"TextUtils.js",
"TimelineGrid.js",
"Toolbar.js",
"UIUtils.js",
"View.js",
"ViewportControl.js",
"treeoutline.js",
]
},
{
"name": "components",
"dependencies": ["sdk", "ui"],
"sources": [
"AdvancedSearchController.js",
"HandlerRegistry.js",
"ConsoleMessage.js",
"CookiesTable.js",
"DOMBreakpointsSidebarPane.js",
"DOMPresentationUtils.js",
"ElementsTreeOutline.js",
"FontView.js",
"ImageView.js",
"NativeBreakpointsSidebarPane.js",
"InspectElementModeController.js",
"ObjectPopoverHelper.js",
"ObjectPropertiesSection.js",
"SourceFrame.js",
"ResourceView.js",
]
},
{
"name": "elements",
"dependencies": ["components"],
"sources": [
"CSSNamedFlowCollectionsView.js",
"CSSNamedFlowView.js",
"ElementsPanel.js",
"ElementsPanelDescriptor.js",
"EventListenersSidebarPane.js",
"MetricsSidebarPane.js",
"PropertiesSidebarPane.js",
"StylesSidebarPane.js",
]
},
{
"name": "network",
"dependencies": ["components"],
"sources": [
"NetworkItemView.js",
"RequestCookiesView.js",
"RequestHeadersView.js",
"RequestHTMLView.js",
"RequestJSONView.js",
"RequestPreviewView.js",
"RequestResponseView.js",
"RequestTimingView.js",
"RequestView.js",
"ResourceWebSocketFrameView.js",
"NetworkPanel.js",
"NetworkPanelDescriptor.js",
]
},
{
"name": "resources",
"dependencies": ["components"],
"sources": [
"ApplicationCacheItemsView.js",
"CookieItemsView.js",
"DatabaseQueryView.js",
"DatabaseTableView.js",
"DirectoryContentView.js",
"DOMStorageItemsView.js",
"FileContentView.js",
"FileSystemView.js",
"IndexedDBViews.js",
"ResourcesPanel.js",
]
},
{
"name": "workers",
"dependencies": ["components"],
"sources": [
"WorkerManager.js",
]
},
{
"name": "scripts",
"dependencies": ["components", "workers"],
"sources": [
"BreakpointsSidebarPane.js",
"CallStackSidebarPane.js",
"FilteredItemSelectionDialog.js",
"JavaScriptSourceFrame.js",
"NavigatorOverlayController.js",
"NavigatorView.js",
"RevisionHistoryView.js",
"ScopeChainSidebarPane.js",
"ScriptsNavigator.js",
"ScriptsPanel.js",
"ScriptsPanelDescriptor.js",
"ScriptsSearchScope.js",
"SnippetJavaScriptSourceFrame.js",
"StyleSheetOutlineDialog.js",
"TabbedEditorContainer.js",
"UISourceCodeFrame.js",
"WatchExpressionsSidebarPane.js",
"WorkersSidebarPane.js",
]
},
{
"name": "console",
"dependencies": ["components"],
"sources": [
"ConsoleView.js",
"ConsolePanel.js",
]
},
{
"name": "timeline",
"dependencies": ["components"],
"sources": [
"DOMCountersGraph.js",
"MemoryStatistics.js",
"NativeMemoryGraph.js",
"TimelineModel.js",
"TimelineOverviewPane.js",
"TimelinePanel.js",
"TimelinePanelDescriptor.js",
"TimelinePresentationModel.js",
"TimelineFrameController.js"
]
},
{
"name": "audits",
"dependencies": ["components"],
"sources": [
"AuditCategories.js",
"AuditController.js",
"AuditFormatters.js",
"AuditLauncherView.js",
"AuditResultView.js",
"AuditRules.js",
"AuditsPanel.js",
]
},
{
"name": "extensions",
"dependencies": ["components"],
"sources": [
"ExtensionAPI.js",
"ExtensionAuditCategory.js",
"ExtensionPanel.js",
"ExtensionRegistryStub.js",
"ExtensionServer.js",
"ExtensionView.js",
]
},
{
"name": "settings",
"dependencies": ["components", "extensions"],
"sources": [
"SettingsScreen.js",
"OverridesView.js",
]
},
{
"name": "tests",
"dependencies": ["components"],
"sources": [
"TestController.js",
]
},
{
"name": "profiler",
"dependencies": ["components", "workers"],
"sources": [
"BottomUpProfileDataGridTree.js",
"CPUProfileView.js",
"CSSSelectorProfileView.js",
"FlameChart.js",
"HeapSnapshot.js",
"HeapSnapshotDataGrids.js",
"HeapSnapshotGridNodes.js",
"HeapSnapshotLoader.js",
"HeapSnapshotProxy.js",
"HeapSnapshotView.js",
"HeapSnapshotWorker.js",
"HeapSnapshotWorkerDispatcher.js",
"JSHeapSnapshot.js",
"NativeHeapSnapshot.js",
"ProfileDataGridTree.js",
"ProfilesPanel.js",
"ProfilesPanelDescriptor.js",
"ProfileLauncherView.js",
"TopDownProfileDataGridTree.js",
"CanvasProfileView.js",
]
},
{
"name": "host_stub",
"dependencies": ["components", "profiler", "timeline"],
"sources": [
"InspectorFrontendAPI.js",
"InspectorFrontendHostStub.js",
]
}
]
modules_by_name = {}
for module in modules:
modules_by_name[module["name"]] = module
def dump_module(name, recursively, processed_modules):
if name in processed_modules:
return ""
processed_modules[name] = True
module = modules_by_name[name]
command = ""
if recursively:
for dependency in module["dependencies"]:
command += dump_module(dependency, recursively, processed_modules)
command += " \\\n --module " + jsmodule_name_prefix + module["name"] + ":"
command += str(len(module["sources"]))
firstDependency = True
for dependency in module["dependencies"]:
if firstDependency:
command += ":"
else:
command += ","
firstDependency = False
command += jsmodule_name_prefix + dependency
for script in module["sources"]:
command += " \\\n --js " + inspector_frontend_path + "/" + script
return command
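# For illustration, dumping a module yields flags shaped like the following
# (module name, count and file are examples, not output from a real run):
#   --module jsmodule_sdk:58:jsmodule_common \
#   --js Source/WebCore/inspector/front-end/InspectorBackend.js ...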
modules_dir = tempfile.mkdtemp()
compiler_command = "java -jar ~/closure/compiler.jar --summary_detail_level 3 --compilation_level SIMPLE_OPTIMIZATIONS --warning_level VERBOSE --language_in ECMASCRIPT5 --accept_const_keyword --module_output_path_prefix %s/ \\\n" % modules_dir
process_recursively = len(sys.argv) > 1
if process_recursively:
module_name = sys.argv[1]
if module_name != "all":
modules = []
for i in range(1, len(sys.argv)):
modules.append(modules_by_name[sys.argv[i]])
for module in modules:
command = compiler_command
command += " --externs " + inspector_frontend_path + "/externs.js"
command += dump_module(module["name"], True, {})
print "Compiling \"" + module["name"] + "\""
os.system(command)
else:
command = compiler_command
command += " --externs " + inspector_frontend_path + "/externs.js"
for module in modules:
command += dump_module(module["name"], False, {})
os.system(command)
if not process_recursively:
print "Compiling InjectedScriptSource.js..."
os.system("echo \"var injectedScriptValue = \" > " + inspector_path + "/" + "InjectedScriptSourceTmp.js")
os.system("cat " + inspector_path + "/" + "InjectedScriptSource.js" + " >> " + inspector_path + "/" + "InjectedScriptSourceTmp.js")
command = compiler_command
command += " --externs " + inspector_path + "/" + "InjectedScriptExterns.js" + " \\\n"
command += " --externs " + protocol_externs_path + " \\\n"
command += " --module " + jsmodule_name_prefix + "injected_script" + ":" + "1" + " \\\n"
command += " --js " + inspector_path + "/" + "InjectedScriptSourceTmp.js" + " \\\n"
command += "\n"
os.system(command)
os.system("rm " + inspector_path + "/" + "InjectedScriptSourceTmp.js")
print "Compiling InjectedScriptCanvasModuleSource.js..."
os.system("echo \"var injectedScriptCanvasModuleValue = \" > " + inspector_path + "/" + "InjectedScriptCanvasModuleSourceTmp.js")
os.system("cat " + inspector_path + "/" + "InjectedScriptCanvasModuleSource.js" + " >> " + inspector_path + "/" + "InjectedScriptCanvasModuleSourceTmp.js")
command = compiler_command
command += " --externs " + inspector_path + "/" + "InjectedScriptExterns.js" + " \\\n"
command += " --externs " + protocol_externs_path + " \\\n"
command += " --module " + jsmodule_name_prefix + "injected_script" + ":" + "1" + " \\\n"
command += " --js " + inspector_path + "/" + "InjectedScriptCanvasModuleSourceTmp.js" + " \\\n"
command += "\n"
os.system(command)
os.system("rm " + inspector_path + "/" + "InjectedScriptCanvasModuleSourceTmp.js")
shutil.rmtree(modules_dir)
#os.system("rm " + protocol_externs_path)
| bsd-3-clause |
lxml/lxml | src/lxml/tests/test_relaxng.py | 1 | 8434 | # -*- coding: utf-8 -*-
"""
Test cases related to RelaxNG parsing and validation
"""
from __future__ import absolute_import
import unittest
from .common_imports import (
etree, BytesIO, _bytes, HelperTestCase, fileInTestDir, make_doctest, skipif
)
try:
import rnc2rng
except ImportError:
rnc2rng = None
class ETreeRelaxNGTestCase(HelperTestCase):
def test_relaxng(self):
tree_valid = self.parse('<a><b></b></a>')
tree_invalid = self.parse('<a><c></c></a>')
schema = self.parse('''\
<element name="a" xmlns="http://relaxng.org/ns/structure/1.0">
<zeroOrMore>
<element name="b">
<text />
</element>
</zeroOrMore>
</element>
''')
schema = etree.RelaxNG(schema)
self.assertTrue(schema.validate(tree_valid))
self.assertFalse(schema.error_log.filter_from_errors())
self.assertFalse(schema.validate(tree_invalid))
self.assertTrue(schema.error_log.filter_from_errors())
self.assertTrue(schema.validate(tree_valid)) # repeat valid
self.assertFalse(schema.error_log.filter_from_errors()) # repeat valid
def test_relaxng_stringio(self):
tree_valid = self.parse('<a><b></b></a>')
tree_invalid = self.parse('<a><c></c></a>')
schema_file = BytesIO('''\
<element name="a" xmlns="http://relaxng.org/ns/structure/1.0">
<zeroOrMore>
<element name="b">
<text />
</element>
</zeroOrMore>
</element>
''')
schema = etree.RelaxNG(file=schema_file)
self.assertTrue(schema.validate(tree_valid))
self.assertFalse(schema.validate(tree_invalid))
def test_relaxng_elementtree_error(self):
self.assertRaises(ValueError, etree.RelaxNG, etree.ElementTree())
def test_relaxng_error(self):
tree_invalid = self.parse('<a><c></c></a>')
schema = self.parse('''\
<element name="a" xmlns="http://relaxng.org/ns/structure/1.0">
<zeroOrMore>
<element name="b">
<text />
</element>
</zeroOrMore>
</element>
''')
schema = etree.RelaxNG(schema)
self.assertFalse(schema.validate(tree_invalid))
errors = schema.error_log
self.assertTrue([log for log in errors
if log.level_name == "ERROR"])
self.assertTrue([log for log in errors
if "not expect" in log.message])
def test_relaxng_generic_error(self):
tree_invalid = self.parse('''\
<test>
<reference id="my-ref">This is my unique ref.</reference>
<data ref="my-ref">Valid data</data>
<data ref="myref">Invalid data</data>
</test>
''')
schema = self.parse('''\
<grammar datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes"
xmlns="http://relaxng.org/ns/structure/1.0">
<define name="by-ref">
<data type="IDREF"/>
</define>
<start>
<element name="test">
<zeroOrMore>
<element name="reference">
<attribute name="id">
<data type="ID"/>
</attribute>
<text/>
</element>
</zeroOrMore>
<zeroOrMore>
<element name="data">
<attribute name="ref">
<data type="IDREF"/>
</attribute>
<text/>
</element>
</zeroOrMore>
</element>
</start>
</grammar>
''')
schema = etree.RelaxNG(schema)
self.assertFalse(schema.validate(tree_invalid))
errors = schema.error_log
self.assertTrue(errors)
self.assertTrue([log for log in errors if "IDREF" in log.message])
self.assertTrue([log for log in errors if "myref" in log.message])
def test_relaxng_invalid_schema(self):
schema = self.parse('''\
<element name="a" xmlns="http://relaxng.org/ns/structure/1.0">
<zeroOrMore>
<element name="b" />
</zeroOrMore>
</element>
''')
self.assertRaises(etree.RelaxNGParseError,
etree.RelaxNG, schema)
def test_relaxng_invalid_schema2(self):
schema = self.parse('''\
<grammar xmlns="http://relaxng.org/ns/structure/1.0" />
''')
self.assertRaises(etree.RelaxNGParseError,
etree.RelaxNG, schema)
def test_relaxng_invalid_schema3(self):
schema = self.parse('''\
<grammar xmlns="http://relaxng.org/ns/structure/1.0">
<define name="test">
<element name="test"/>
</define>
</grammar>
''')
self.assertRaises(etree.RelaxNGParseError,
etree.RelaxNG, schema)
def test_relaxng_invalid_schema4(self):
# segfault
schema = self.parse('''\
<element name="a" xmlns="mynamespace" />
''')
self.assertRaises(etree.RelaxNGParseError,
etree.RelaxNG, schema)
def test_relaxng_include(self):
        # This will only work if we access the file through a path or a
        # file object.
f = open(fileInTestDir('test1.rng'), 'rb')
try:
schema = etree.RelaxNG(file=f)
finally:
f.close()
def test_relaxng_shortcut(self):
tree_valid = self.parse('<a><b></b></a>')
tree_invalid = self.parse('<a><c></c></a>')
schema = self.parse('''\
<element name="a" xmlns="http://relaxng.org/ns/structure/1.0">
<zeroOrMore>
<element name="b">
<text />
</element>
</zeroOrMore>
</element>
''')
self.assertTrue(tree_valid.relaxng(schema))
self.assertFalse(tree_invalid.relaxng(schema))
def test_multiple_elementrees(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
schema = etree.RelaxNG( self.parse('''\
<element name="a" xmlns="http://relaxng.org/ns/structure/1.0">
<element name="b">
<text />
</element>
<element name="c">
<text />
</element>
</element>
''') )
self.assertTrue(schema.validate(tree))
self.assertFalse(schema.error_log.filter_from_errors())
self.assertTrue(schema.validate(tree)) # repeat valid
self.assertFalse(schema.error_log.filter_from_errors()) # repeat valid
schema = etree.RelaxNG( self.parse('''\
<element name="b" xmlns="http://relaxng.org/ns/structure/1.0">
<text />
</element>
''') )
c_tree = etree.ElementTree(tree.getroot()[1])
self.assertEqual(self._rootstring(c_tree), _bytes('<c>C</c>'))
self.assertFalse(schema.validate(c_tree))
self.assertTrue(schema.error_log.filter_from_errors())
b_tree = etree.ElementTree(tree.getroot()[0])
self.assertEqual(self._rootstring(b_tree), _bytes('<b>B</b>'))
self.assertTrue(schema.validate(b_tree))
self.assertFalse(schema.error_log.filter_from_errors())
class RelaxNGCompactTestCase(HelperTestCase):
pytestmark = skipif('rnc2rng is None')
def test_relaxng_compact(self):
tree_valid = self.parse('<a><b>B</b><c>C</c></a>')
tree_invalid = self.parse('<a><b></b></a>')
schema = etree.RelaxNG(file=fileInTestDir('test.rnc'))
self.assertTrue(schema.validate(tree_valid))
self.assertFalse(schema.validate(tree_invalid))
def test_relaxng_compact_file_obj(self):
with open(fileInTestDir('test.rnc'), 'r') as f:
schema = etree.RelaxNG(file=f)
tree_valid = self.parse('<a><b>B</b><c>C</c></a>')
tree_invalid = self.parse('<a><b></b></a>')
self.assertTrue(schema.validate(tree_valid))
self.assertFalse(schema.validate(tree_invalid))
def test_relaxng_compact_str(self):
tree_valid = self.parse('<a><b>B</b></a>')
tree_invalid = self.parse('<a><b>X</b></a>')
rnc_str = 'element a { element b { "B" } }'
schema = etree.RelaxNG.from_rnc_string(rnc_str)
self.assertTrue(schema.validate(tree_valid))
self.assertFalse(schema.validate(tree_invalid))
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ETreeRelaxNGTestCase)])
suite.addTests(
[make_doctest('../../../doc/validation.txt')])
if rnc2rng is not None:
suite.addTests([unittest.makeSuite(RelaxNGCompactTestCase)])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
| bsd-3-clause |
robertwb/incubator-beam | sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query1.py | 5 | 1608 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Nexmark Query 1: Convert bid prices from dollars to euros.
The Nexmark suite is a series of queries (streaming pipelines) performed
on a simulation of auction events.
This query converts bid prices from dollars to euros.
It illustrates a simple map.
"""
# pytype: skip-file
import apache_beam as beam
from apache_beam.testing.benchmarks.nexmark.models import nexmark_model
from apache_beam.testing.benchmarks.nexmark.queries import nexmark_query_util
USD_TO_EURO = 0.89
def load(events, metadata=None, pipeline_options=None):
return (
events
| nexmark_query_util.JustBids()
| 'ConvertToEuro' >> beam.Map(
lambda bid: nexmark_model.Bid(
bid.auction,
bid.bidder,
bid.price * USD_TO_EURO,
bid.date_time,
bid.extra)))
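# Minimal usage sketch (illustrative only; the Create() payload below is an
# assumption, not part of the Nexmark suite):
#
#   with beam.Pipeline() as p:
#       events = p | beam.Create([...])  # nexmark_model auction/bid events
#       euro_bids = load(events)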
| apache-2.0 |
jiaweizhou/kubernetes | cluster/juju/charms/trusty/kubernetes-master/hooks/kubernetes_installer.py | 213 | 4138 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shlex
import subprocess
from path import path
def run(command, shell=False):
""" A convience method for executing all the commands. """
print(command)
if shell is False:
command = shlex.split(command)
output = subprocess.check_output(command, shell=shell)
print(output)
return output
class KubernetesInstaller():
"""
    This class contains the logic needed to install kubernetes binary files.
"""
def __init__(self, arch, version, output_dir):
""" Gather the required variables for the install. """
# The kubernetes-master charm needs certain commands to be aliased.
self.aliases = {'kube-apiserver': 'apiserver',
'kube-controller-manager': 'controller-manager',
'kube-proxy': 'kube-proxy',
'kube-scheduler': 'scheduler',
'kubectl': 'kubectl',
'kubelet': 'kubelet'}
self.arch = arch
self.version = version
self.output_dir = path(output_dir)
def build(self, branch):
""" Build kubernetes from a github repository using the Makefile. """
# Remove any old build artifacts.
make_clean = 'make clean'
run(make_clean)
# Always checkout the master to get the latest repository information.
git_checkout_cmd = 'git checkout master'
run(git_checkout_cmd)
# When checking out a tag, delete the old branch (not master).
if branch != 'master':
git_drop_branch = 'git branch -D {0}'.format(self.version)
print(git_drop_branch)
rc = subprocess.call(git_drop_branch.split())
if rc != 0:
print('returned: %d' % rc)
# Make sure the git repository is up-to-date.
git_fetch = 'git fetch origin {0}'.format(branch)
run(git_fetch)
if branch == 'master':
git_reset = 'git reset --hard origin/master'
run(git_reset)
else:
# Checkout a branch of kubernetes so the repo is correct.
checkout = 'git checkout -b {0} {1}'.format(self.version, branch)
run(checkout)
# Create an environment with the path to the GO binaries included.
go_path = ('/usr/local/go/bin', os.environ.get('PATH', ''))
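        # Prepend the Go toolchain directory so the make invocation below can
        # find the go binary.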
go_env = os.environ.copy()
go_env['PATH'] = ':'.join(go_path)
print(go_env['PATH'])
# Compile the binaries with the make command using the WHAT variable.
make_what = "make all WHAT='cmd/kube-apiserver cmd/kubectl "\
"cmd/kube-controller-manager plugin/cmd/kube-scheduler "\
"cmd/kubelet cmd/kube-proxy'"
print(make_what)
rc = subprocess.call(shlex.split(make_what), env=go_env)
def install(self, install_dir=path('/usr/local/bin')):
""" Install kubernetes binary files from the output directory. """
if not install_dir.isdir():
install_dir.makedirs_p()
# Create the symbolic links to the real kubernetes binaries.
for key, value in self.aliases.iteritems():
target = self.output_dir / key
if target.exists():
link = install_dir / value
if link.exists():
link.remove()
target.symlink(link)
else:
                print('Error: target file {0} does not exist.'.format(target))
exit(1)
| apache-2.0 |
loopCM/chromium | media/tools/constrained_network_server/cn.py | 186 | 4311 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A script for configuring constraint networks.
Sets up a constrained network configuration on a specific port. Traffic on this
port will be redirected to another local server port.
The configuration includes bandwidth, latency, and packet loss.
"""
import collections
import logging
import optparse
import traffic_control
# Default logging is ERROR. Use --verbose to enable DEBUG logging.
_DEFAULT_LOG_LEVEL = logging.ERROR
Dispatcher = collections.namedtuple('Dispatcher', ['dispatch', 'requires_ports',
'desc'])
# Map of command names to traffic_control functions.
COMMANDS = {
# Adds a new constrained network configuration.
'add': Dispatcher(traffic_control.CreateConstrainedPort,
requires_ports=True, desc='Add a new constrained port.'),
# Deletes an existing constrained network configuration.
'del': Dispatcher(traffic_control.DeleteConstrainedPort,
requires_ports=True, desc='Delete a constrained port.'),
# Deletes all constrained network configurations.
'teardown': Dispatcher(traffic_control.TearDown,
requires_ports=False,
desc='Teardown all constrained ports.')
}
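# Illustrative invocations (port numbers and shaping values are examples):
#   sudo ./cn.py add --port 9000 --server-port 8080 --bandwidth 256 \
#       --latency 100 --loss 2 --interface lo
#   sudo ./cn.py teardown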
def _ParseArgs():
"""Define and parse command-line arguments.
Returns:
tuple as (command, configuration):
command: one of the possible commands to setup, delete or teardown the
constrained network.
configuration: a map of constrained network properties to their values.
"""
parser = optparse.OptionParser()
indent_first = parser.formatter.indent_increment
opt_width = parser.formatter.help_position - indent_first
cmd_usage = []
for s in COMMANDS:
cmd_usage.append('%*s%-*s%s' %
(indent_first, '', opt_width, s, COMMANDS[s].desc))
parser.usage = ('usage: %%prog {%s} [options]\n\n%s' %
('|'.join(COMMANDS.keys()), '\n'.join(cmd_usage)))
parser.add_option('--port', type='int',
help='The port to apply traffic control constraints to.')
parser.add_option('--server-port', type='int',
help='Port to forward traffic on --port to.')
parser.add_option('--bandwidth', type='int',
help='Bandwidth of the network in kbit/s.')
parser.add_option('--latency', type='int',
help=('Latency (delay) added to each outgoing packet in '
'ms.'))
parser.add_option('--loss', type='int',
                    help='Packet-loss percentage on outgoing packets.')
parser.add_option('--interface', type='string',
help=('Interface to setup constraints on. Use "lo" for a '
'local client.'))
parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
default=False, help='Turn on verbose output.')
options, args = parser.parse_args()
_SetLogger(options.verbose)
# Check a valid command was entered
if not args or args[0].lower() not in COMMANDS:
parser.error('Please specify a command {%s}.' % '|'.join(COMMANDS.keys()))
user_cmd = args[0].lower()
# Check if required options are available
if COMMANDS[user_cmd].requires_ports:
if not (options.port and options.server_port):
parser.error('Please provide port and server-port values.')
config = {
'port': options.port,
'server_port': options.server_port,
'interface': options.interface,
'latency': options.latency,
'bandwidth': options.bandwidth,
'loss': options.loss
}
return user_cmd, config
def _SetLogger(verbose):
log_level = _DEFAULT_LOG_LEVEL
if verbose:
log_level = logging.DEBUG
logging.basicConfig(level=log_level, format='%(message)s')
def Main():
"""Get the command and configuration of the network to set up."""
user_cmd, config = _ParseArgs()
try:
COMMANDS[user_cmd].dispatch(config)
except traffic_control.TrafficControlError as e:
logging.error('Error: %s\n\nOutput: %s', e.msg, e.error)
if __name__ == '__main__':
Main()
| bsd-3-clause |
muntasirsyed/intellij-community | plugins/hg4idea/testData/bin/hgext/largefiles/wirestore.py | 97 | 1336 | # Copyright 2010-2011 Fog Creek Software
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''largefile store working over Mercurial's wire protocol'''
import lfutil
import remotestore
class wirestore(remotestore.remotestore):
def __init__(self, ui, repo, remote):
cap = remote.capable('largefiles')
if not cap:
raise lfutil.storeprotonotcapable([])
storetypes = cap.split(',')
if 'serve' not in storetypes:
raise lfutil.storeprotonotcapable(storetypes)
self.remote = remote
super(wirestore, self).__init__(ui, repo, remote.url())
def _put(self, hash, fd):
return self.remote.putlfile(hash, fd)
def _get(self, hash):
return self.remote.getlfile(hash)
def _stat(self, hashes):
        '''For each hash, return 0 if it is available, non-zero if not.
        It is usually 2 if the largefile is missing, but might be 1 if the
        server has a corrupted copy.'''
batch = self.remote.batch()
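        # Queue one statlfile() call per hash; the whole batch goes to the
        # server in a single round trip when submit() is called.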
futures = {}
for hash in hashes:
futures[hash] = batch.statlfile(hash)
batch.submit()
retval = {}
for hash in hashes:
retval[hash] = futures[hash].value
return retval
| apache-2.0 |
komsas/OpenUpgrade | addons/crm/wizard/crm_phonecall_to_phonecall.py | 40 | 4562 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import time
class crm_phonecall2phonecall(osv.osv_memory):
_name = 'crm.phonecall2phonecall'
_description = 'Phonecall To Phonecall'
_columns = {
'name' : fields.char('Call summary', size=64, required=True, select=1),
'user_id' : fields.many2one('res.users',"Assign To"),
'contact_name':fields.char('Contact', size=64),
'phone':fields.char('Phone', size=64),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="['|',('section_id','=',False),('section_id','=',section_id),\
('object_id.model', '=', 'crm.phonecall')]"),
'date': fields.datetime('Date'),
'section_id':fields.many2one('crm.case.section','Sales Team'),
'action': fields.selection([('schedule','Schedule a call'), ('log','Log a call')], 'Action', required=True),
'partner_id' : fields.many2one('res.partner', "Partner"),
'note':fields.text('Note')
}
def action_cancel(self, cr, uid, ids, context=None):
"""
Closes Phonecall to Phonecall form
"""
return {'type':'ir.actions.act_window_close'}
def action_schedule(self, cr, uid, ids, context=None):
value = {}
if context is None:
context = {}
phonecall = self.pool.get('crm.phonecall')
phonecall_ids = context and context.get('active_ids') or []
for this in self.browse(cr, uid, ids, context=context):
            new_call_ids = phonecall.schedule_another_phonecall(cr, uid, phonecall_ids, this.date, this.name, \
this.user_id and this.user_id.id or False, \
this.section_id and this.section_id.id or False, \
this.categ_id and this.categ_id.id or False, \
action=this.action, context=context)
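        # schedule_another_phonecall() returns a mapping of original call id
        # to newly created call id; open the call created for the first
        # selected record.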
        return phonecall.redirect_phonecall_view(cr, uid, new_call_ids[phonecall_ids[0]], context=context)
def default_get(self, cr, uid, fields, context=None):
"""
This function gets default values
"""
res = super(crm_phonecall2phonecall, self).default_get(cr, uid, fields, context=context)
record_id = context and context.get('active_id', False) or False
res.update({'action': 'schedule', 'date': time.strftime('%Y-%m-%d %H:%M:%S')})
if record_id:
phonecall = self.pool.get('crm.phonecall').browse(cr, uid, record_id, context=context)
categ_id = False
data_obj = self.pool.get('ir.model.data')
try:
res_id = data_obj._get_id(cr, uid, 'crm', 'categ_phone2')
categ_id = data_obj.browse(cr, uid, res_id, context=context).res_id
except ValueError:
pass
if 'name' in fields:
res.update({'name': phonecall.name})
if 'user_id' in fields:
res.update({'user_id': phonecall.user_id and phonecall.user_id.id or False})
if 'date' in fields:
res.update({'date': False})
if 'section_id' in fields:
res.update({'section_id': phonecall.section_id and phonecall.section_id.id or False})
if 'categ_id' in fields:
res.update({'categ_id': categ_id})
if 'partner_id' in fields:
res.update({'partner_id': phonecall.partner_id and phonecall.partner_id.id or False})
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fernandoacorreia/DjangoWAWSLogging | DjangoWAWSLogging/env/Lib/site-packages/django/contrib/flatpages/tests/views.py | 77 | 6226 | import os
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.test import TestCase
class FlatpageViewTests(TestCase):
fixtures = ['sample_flatpages']
urls = 'django.contrib.flatpages.tests.urls'
def setUp(self):
self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES
flatpage_middleware_class = 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
if flatpage_middleware_class in settings.MIDDLEWARE_CLASSES:
settings.MIDDLEWARE_CLASSES = tuple(m for m in settings.MIDDLEWARE_CLASSES if m != flatpage_middleware_class)
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (
os.path.join(
os.path.dirname(__file__),
'templates'
),
)
self.old_LOGIN_URL = settings.LOGIN_URL
settings.LOGIN_URL = '/accounts/login/'
def tearDown(self):
settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
settings.LOGIN_URL = self.old_LOGIN_URL
def test_view_flatpage(self):
"A flatpage can be served through a view"
response = self.client.get('/flatpage_root/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view"
response = self.client.get('/flatpage_root/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get('/flatpage_root/sekrit/')
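        # Anonymous access should redirect to LOGIN_URL (set in setUp above).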
self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
User.objects.create_user('testuser', '[email protected]', 's3krit')
self.client.login(username='testuser',password='s3krit')
response = self.client.get('/flatpage_root/sekrit/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A fallback flatpage won't be served if the middleware is disabled"
response = self.client.get('/flatpage/')
self.assertEqual(response.status_code, 404)
def test_fallback_non_existent_flatpage(self):
"A non-existent flatpage won't be served if the fallback middlware is disabled"
response = self.client.get('/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served through a view"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/flatpage_root/some.very_special~chars-here/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it special!</p>")
class FlatpageViewAppendSlashTests(TestCase):
fixtures = ['sample_flatpages']
urls = 'django.contrib.flatpages.tests.urls'
def setUp(self):
self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES
flatpage_middleware_class = 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
if flatpage_middleware_class in settings.MIDDLEWARE_CLASSES:
settings.MIDDLEWARE_CLASSES = tuple(m for m in settings.MIDDLEWARE_CLASSES if m != flatpage_middleware_class)
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (
os.path.join(
os.path.dirname(__file__),
'templates'
),
)
self.old_LOGIN_URL = settings.LOGIN_URL
settings.LOGIN_URL = '/accounts/login/'
self.old_APPEND_SLASH = settings.APPEND_SLASH
settings.APPEND_SLASH = True
def tearDown(self):
settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
settings.LOGIN_URL = self.old_LOGIN_URL
settings.APPEND_SLASH = self.old_APPEND_SLASH
def test_redirect_view_flatpage(self):
"A flatpage can be served through a view and should add a slash"
response = self.client.get('/flatpage_root/flatpage')
self.assertRedirects(response, '/flatpage_root/flatpage/', status_code=301)
def test_redirect_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view and should not add a slash"
response = self.client.get('/flatpage_root/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage(self):
"A fallback flatpage won't be served if the middleware is disabled and should not add a slash"
response = self.client.get('/flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_non_existent_flatpage(self):
"A non-existent flatpage won't be served if the fallback middlware is disabled and should not add a slash"
response = self.client.get('/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_view_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served through a view and should add a slash"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(1)
response = self.client.get('/flatpage_root/some.very_special~chars-here')
self.assertRedirects(response, '/flatpage_root/some.very_special~chars-here/', status_code=301)
| mit |
turbokongen/home-assistant | tests/components/nx584/test_binary_sensor.py | 6 | 7175 | """The tests for the nx584 sensor platform."""
from unittest import mock
from nx584 import client as nx584_client
import pytest
import requests
from homeassistant.components.nx584 import binary_sensor as nx584
from homeassistant.setup import async_setup_component
class StopMe(Exception):
"""Stop helper."""
@pytest.fixture
def fake_zones():
"""Fixture for fake zones.
Returns:
list: List of fake zones
"""
return [
{"name": "front", "number": 1},
{"name": "back", "number": 2},
{"name": "inside", "number": 3},
]
@pytest.fixture
def client(fake_zones):
"""Fixture for client.
Args:
fake_zones (list): Fixture of fake zones
Yields:
MagicMock: Client Mock
"""
with mock.patch.object(nx584_client, "Client") as _mock_client:
client = nx584_client.Client.return_value
client.list_zones.return_value = fake_zones
client.get_version.return_value = "1.1"
yield _mock_client
@pytest.mark.usefixtures("client")
@mock.patch("homeassistant.components.nx584.binary_sensor.NX584Watcher")
@mock.patch("homeassistant.components.nx584.binary_sensor.NX584ZoneSensor")
def test_nx584_sensor_setup_defaults(mock_nx, mock_watcher, hass, fake_zones):
"""Test the setup with no configuration."""
add_entities = mock.MagicMock()
config = {
"host": nx584.DEFAULT_HOST,
"port": nx584.DEFAULT_PORT,
"exclude_zones": [],
"zone_types": {},
}
assert nx584.setup_platform(hass, config, add_entities)
mock_nx.assert_has_calls([mock.call(zone, "opening") for zone in fake_zones])
assert add_entities.called
assert nx584_client.Client.call_count == 1
assert nx584_client.Client.call_args == mock.call("http://localhost:5007")
@pytest.mark.usefixtures("client")
@mock.patch("homeassistant.components.nx584.binary_sensor.NX584Watcher")
@mock.patch("homeassistant.components.nx584.binary_sensor.NX584ZoneSensor")
def test_nx584_sensor_setup_full_config(mock_nx, mock_watcher, hass, fake_zones):
"""Test the setup with full configuration."""
config = {
"host": "foo",
"port": 123,
"exclude_zones": [2],
"zone_types": {3: "motion"},
}
add_entities = mock.MagicMock()
assert nx584.setup_platform(hass, config, add_entities)
mock_nx.assert_has_calls(
[
mock.call(fake_zones[0], "opening"),
mock.call(fake_zones[2], "motion"),
]
)
assert add_entities.called
assert nx584_client.Client.call_count == 1
assert nx584_client.Client.call_args == mock.call("http://foo:123")
assert mock_watcher.called
async def _test_assert_graceful_fail(hass, config):
"""Test the failing."""
assert not await async_setup_component(hass, "nx584", config)
@pytest.mark.usefixtures("client")
@pytest.mark.parametrize(
"config",
[
({"exclude_zones": ["a"]}),
({"zone_types": {"a": "b"}}),
({"zone_types": {1: "notatype"}}),
({"zone_types": {"notazone": "motion"}}),
],
)
async def test_nx584_sensor_setup_bad_config(hass, config):
"""Test the setup with bad configuration."""
await _test_assert_graceful_fail(hass, config)
@pytest.mark.usefixtures("client")
@pytest.mark.parametrize(
"exception_type",
[
pytest.param(requests.exceptions.ConnectionError, id="connect_failed"),
pytest.param(IndexError, id="no_partitions"),
],
)
async def test_nx584_sensor_setup_with_exceptions(hass, exception_type):
"""Test the setup handles exceptions."""
nx584_client.Client.return_value.list_zones.side_effect = exception_type
await _test_assert_graceful_fail(hass, {})
@pytest.mark.usefixtures("client")
async def test_nx584_sensor_setup_version_too_old(hass):
"""Test if version is too old."""
nx584_client.Client.return_value.get_version.return_value = "1.0"
await _test_assert_graceful_fail(hass, {})
@pytest.mark.usefixtures("client")
def test_nx584_sensor_setup_no_zones(hass):
"""Test the setup with no zones."""
nx584_client.Client.return_value.list_zones.return_value = []
add_entities = mock.MagicMock()
assert nx584.setup_platform(hass, {}, add_entities)
assert not add_entities.called
def test_nx584_zone_sensor_normal():
"""Test for the NX584 zone sensor."""
zone = {"number": 1, "name": "foo", "state": True}
sensor = nx584.NX584ZoneSensor(zone, "motion")
assert "foo" == sensor.name
assert not sensor.should_poll
assert sensor.is_on
assert sensor.device_state_attributes["zone_number"] == 1
zone["state"] = False
assert not sensor.is_on
@mock.patch.object(nx584.NX584ZoneSensor, "schedule_update_ha_state")
def test_nx584_watcher_process_zone_event(mock_update):
"""Test the processing of zone events."""
zone1 = {"number": 1, "name": "foo", "state": True}
zone2 = {"number": 2, "name": "bar", "state": True}
zones = {
1: nx584.NX584ZoneSensor(zone1, "motion"),
2: nx584.NX584ZoneSensor(zone2, "motion"),
}
watcher = nx584.NX584Watcher(None, zones)
watcher._process_zone_event({"zone": 1, "zone_state": False})
assert not zone1["state"]
assert mock_update.call_count == 1
@mock.patch.object(nx584.NX584ZoneSensor, "schedule_update_ha_state")
def test_nx584_watcher_process_zone_event_missing_zone(mock_update):
"""Test the processing of zone events with missing zones."""
watcher = nx584.NX584Watcher(None, {})
watcher._process_zone_event({"zone": 1, "zone_state": False})
assert not mock_update.called
def test_nx584_watcher_run_with_zone_events():
"""Test the zone events."""
empty_me = [1, 2]
def fake_get_events():
"""Return nothing twice, then some events."""
if empty_me:
empty_me.pop()
else:
return fake_events
client = mock.MagicMock()
fake_events = [
{"zone": 1, "zone_state": True, "type": "zone_status"},
{"zone": 2, "foo": False},
]
client.get_events.side_effect = fake_get_events
watcher = nx584.NX584Watcher(client, {})
@mock.patch.object(watcher, "_process_zone_event")
def run(fake_process):
"""Run a fake process."""
fake_process.side_effect = StopMe
with pytest.raises(StopMe):
watcher._run()
assert fake_process.call_count == 1
assert fake_process.call_args == mock.call(fake_events[0])
run()
assert 3 == client.get_events.call_count
@mock.patch("time.sleep")
def test_nx584_watcher_run_retries_failures(mock_sleep):
"""Test the retries with failures."""
empty_me = [1, 2]
def fake_run():
"""Fake runner."""
if empty_me:
empty_me.pop()
raise requests.exceptions.ConnectionError()
raise StopMe()
watcher = nx584.NX584Watcher(None, {})
with mock.patch.object(watcher, "_run") as mock_inner:
mock_inner.side_effect = fake_run
with pytest.raises(StopMe):
watcher.run()
assert 3 == mock_inner.call_count
mock_sleep.assert_has_calls([mock.call(10), mock.call(10)])
| apache-2.0 |
GabrielNicolasAvellaneda/chemlab | chemlab/db/toxnetdb.py | 6 | 2107 | '''Database for toxnet'''
from .base import AbstractDB, EntryNotFound
# Python 2-3 compatibility
try:
from urllib.parse import quote_plus
from urllib.request import urlopen
except ImportError:
from urllib import quote_plus
from urllib2 import urlopen
import re
class ToxNetDB(AbstractDB):
def __init__(self):
self.baseurl = 'http://toxgate.nlm.nih.gov'
def get(self, feature, query):
searchurl = self.baseurl + '/cgi-bin/sis/search/x?dbs+hsdb:%s'%quote_plus(query)
result = urlopen(searchurl).read()
try:
            result = str(result, 'utf-8')
except TypeError:
pass
if not result:
raise EntryNotFound()
#print result
firstresult = re.findall(r'\<Id>(.*?)\</Id>', result)[0].split()[0]
retrieveurl = self.baseurl + '/cgi-bin/sis/search/r?dbs+hsdb:@term+@DOCNO+%s'%firstresult
result = urlopen(retrieveurl).read()
try:
result = str(result, 'utf-8')
except TypeError:
pass
tocregex = 'SRC="(.*?)"'
basesearch = re.findall(tocregex, result)[0]
basesearch = ':'.join(basesearch.split(':')[:-1])
if feature == 'boiling point':
bprequest = urlopen(self.baseurl + basesearch + ':bp').read()
# Massaging this request is not easy
try: # python3
bprequest = str(bprequest, 'utf-8')
except TypeError:
pass
res = re.findall(r">\s*(.*?)\s*deg C", bprequest)
#print res
return float(res[0])
if feature == 'melting point':
bprequest = urlopen(self.baseurl + basesearch + ':mp').read()
try: # python3
bprequest = str(bprequest, 'utf-8')
except TypeError:
pass
# Massaging this request is not easy
res = re.findall(r">\s*(.*?)\s*deg C", bprequest)
return float(res[0])
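# Illustrative usage (a sketch added for clarity, not part of the original
# module): only 'boiling point' and 'melting point' are implemented above;
# any other feature falls through and implicitly returns None. This needs
# network access to the TOXNET service, whose endpoint may no longer be
# reachable.
if __name__ == '__main__':
    db = ToxNetDB()
    # 'water' is an arbitrary example compound; EntryNotFound is raised
    # when the search comes back empty.
    print(db.get('boiling point', 'water'))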
| gpl-3.0 |
akozumpl/dnf | tests/test_commands.py | 1 | 39804 | # Copyright (C) 2012-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from tests import support
from tests.support import mock
import dnf.cli.commands
import dnf.cli.commands.group
import dnf.cli.commands.install
import dnf.cli.commands.reinstall
import dnf.cli.commands.upgrade
import dnf.repo
import itertools
import logging
import tests.support
import unittest
logger = logging.getLogger('dnf')
class CommandsCliTest(support.TestCase):
def setUp(self):
self.base = support.MockBase()
self.cli = self.base.mock_cli()
def test_erase_configure(self):
erase_cmd = dnf.cli.commands.EraseCommand(self.cli)
erase_cmd.configure([])
self.assertTrue(self.cli.demands.allow_erasing)
@mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)
def test_history_get_error_output_rollback_transactioncheckerror(self):
"""Test get_error_output with the history rollback and a TransactionCheckError."""
cmd = dnf.cli.commands.HistoryCommand(self.cli)
self.base.basecmd = 'history'
self.base.extcmds = ('rollback', '1')
lines = cmd.get_error_output(dnf.exceptions.TransactionCheckError())
self.assertEqual(
lines,
('Cannot rollback transaction 1, doing so would result in an '
'inconsistent package database.',))
@mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)
def test_history_get_error_output_undo_transactioncheckerror(self):
"""Test get_error_output with the history undo and a TransactionCheckError."""
cmd = dnf.cli.commands.HistoryCommand(self.cli)
self.base.basecmd = 'history'
self.base.extcmds = ('undo', '1')
lines = cmd.get_error_output(dnf.exceptions.TransactionCheckError())
self.assertEqual(
lines,
('Cannot undo transaction 1, doing so would result in an '
'inconsistent package database.',))
@staticmethod
@mock.patch('dnf.Base.fill_sack')
def _do_makecache(cmd, fill_sack):
return cmd.run(['timer'])
def assertLastInfo(self, logger, msg):
self.assertEqual(logger.info.mock_calls[-1],
mock.call(msg))
@mock.patch('dnf.cli.commands.logger', new_callable=tests.support.mock_logger)
@mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)
@mock.patch('dnf.util.on_ac_power', return_value=True)
def test_makecache_timer(self, _on_ac_power, logger):
cmd = dnf.cli.commands.MakeCacheCommand(self.cli)
self.base.conf.metadata_timer_sync = 0
self.assertFalse(self._do_makecache(cmd))
self.assertLastInfo(logger, u'Metadata timer caching disabled.')
self.base.conf.metadata_timer_sync = 5 # resync after 5 seconds
self.base._persistor.since_last_makecache = mock.Mock(return_value=3)
self.assertFalse(self._do_makecache(cmd))
self.assertLastInfo(logger, u'Metadata cache refreshed recently.')
self.base._persistor.since_last_makecache = mock.Mock(return_value=10)
self.base._sack = 'nonempty'
r = support.MockRepo("glimpse", None)
self.base.repos.add(r)
# regular case 1: metadata is already expired:
r.metadata_expire_in = mock.Mock(return_value=(False, 0))
r.sync_strategy = dnf.repo.SYNC_TRY_CACHE
self.assertTrue(self._do_makecache(cmd))
self.assertLastInfo(logger, u'Metadata cache created.')
self.assertEqual(r.sync_strategy, dnf.repo.SYNC_EXPIRED)
# regular case 2: metadata is cached and will expire later than
# metadata_timer_sync:
r.metadata_expire_in = mock.Mock(return_value=(True, 100))
r.sync_strategy = dnf.repo.SYNC_TRY_CACHE
self.assertTrue(self._do_makecache(cmd))
self.assertLastInfo(logger, u'Metadata cache created.')
self.assertEqual(r.sync_strategy, dnf.repo.SYNC_TRY_CACHE)
        # regular case 3: metadata is cached but will expire before
# metadata_timer_sync:
r.metadata_expire_in = mock.Mock(return_value=(True, 4))
r.sync_strategy = dnf.repo.SYNC_TRY_CACHE
self.assertTrue(self._do_makecache(cmd))
self.assertLastInfo(logger, u'Metadata cache created.')
self.assertEqual(r.sync_strategy, dnf.repo.SYNC_EXPIRED)
@mock.patch('dnf.cli.commands.logger', new_callable=tests.support.mock_logger)
@mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)
@mock.patch('dnf.util.on_ac_power', return_value=False)
def test_makecache_timer_battery(self, _on_ac_power, logger):
cmd = dnf.cli.commands.MakeCacheCommand(self.cli)
self.base.conf.metadata_timer_sync = 5
self.assertFalse(self._do_makecache(cmd))
msg = u'Metadata timer caching disabled when running on a battery.'
self.assertLastInfo(logger, msg)
@mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)
@mock.patch('dnf.util.on_ac_power', return_value=None)
def test_makecache_timer_battery2(self, _on_ac_power):
cmd = dnf.cli.commands.MakeCacheCommand(self.cli)
self.base.conf.metadata_timer_sync = 5
self.assertTrue(self._do_makecache(cmd))
class CommandTest(support.TestCase):
def test_canonical(self):
cmd = dnf.cli.commands.upgrade.UpgradeCommand(None)
(base, ext) = cmd.canonical(['update', 'cracker', 'filling'])
self.assertEqual(base, 'upgrade')
self.assertEqual(ext, ['cracker', 'filling'])
class EraseCommandTest(support.ResultTestCase):
"""Tests of ``dnf.cli.commands.EraseCommand`` class."""
def setUp(self):
"""Prepare the test fixture."""
super(EraseCommandTest, self).setUp()
base = support.BaseCliStub()
base.init_sack()
self.cmd = dnf.cli.commands.EraseCommand(base.mock_cli())
def test_run(self):
"""Test whether the package is installed."""
self.cmd.run(['pepper'])
self.assertResult(
self.cmd.base,
self.cmd.base.sack.query().installed().filter(name__neq='pepper'))
@mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)
def test_run_notfound(self):
"""Test whether it fails if the package cannot be found."""
stdout = dnf.pycomp.StringIO()
with support.wiretap_logs('dnf', logging.INFO, stdout):
self.assertRaises(dnf.exceptions.Error, self.cmd.run, ['non-existent'])
self.assertEqual(stdout.getvalue(),
'No match for argument: non-existent\n')
self.assertResult(self.cmd.base, self.cmd.base.sack.query().installed())
class InstallCommandTest(support.ResultTestCase):
"""Tests of ``dnf.cli.commands.install.InstallCommand`` class."""
def setUp(self):
"""Prepare the test fixture."""
super(InstallCommandTest, self).setUp()
base = support.BaseCliStub('main')
base.repos['main'].metadata = mock.Mock(comps_fn=support.COMPS_PATH)
base.init_sack()
self._cmd = dnf.cli.commands.install.InstallCommand(base.mock_cli())
def test_configure(self):
cli = self._cmd.cli
self._cmd.configure([])
self.assertFalse(cli.demands.allow_erasing)
self.assertTrue(cli.demands.sack_activation)
def test_run_group(self):
"""Test whether a group is installed."""
self._cmd.run(['@Solid Ground'])
base = self._cmd.cli.base
self.assertResult(base, itertools.chain(
base.sack.query().installed(),
dnf.subject.Subject('trampoline').get_best_query(base.sack)))
@mock.patch('dnf.cli.commands.install._',
dnf.pycomp.NullTranslations().ugettext)
def test_run_group_notfound(self):
"""Test whether it fails if the group cannot be found."""
stdout = dnf.pycomp.StringIO()
with support.wiretap_logs('dnf', logging.INFO, stdout):
self.assertRaises(dnf.exceptions.Error,
self._cmd.run, ['@non-existent'])
self.assertEqual(stdout.getvalue(),
"Warning: Group 'non-existent' does not exist.\n")
self.assertResult(self._cmd.cli.base,
self._cmd.cli.base.sack.query().installed())
def test_run_package(self):
"""Test whether a package is installed."""
self._cmd.run(['lotus'])
base = self._cmd.cli.base
self.assertResult(base, itertools.chain(
base.sack.query().installed(),
dnf.subject.Subject('lotus.x86_64').get_best_query(base.sack)))
@mock.patch('dnf.cli.commands.install._',
dnf.pycomp.NullTranslations().ugettext)
def test_run_package_notfound(self):
"""Test whether it fails if the package cannot be found."""
stdout = dnf.pycomp.StringIO()
with support.wiretap_logs('dnf', logging.INFO, stdout):
self.assertRaises(dnf.exceptions.Error,
self._cmd.run, ['non-existent'])
self.assertEqual(stdout.getvalue(),
'No package non-existent available.\n')
self.assertResult(self._cmd.cli.base,
self._cmd.cli.base.sack.query().installed())
class ReinstallCommandTest(support.ResultTestCase):
"""Tests of ``dnf.cli.commands.ReinstallCommand`` class."""
def setUp(self):
"""Prepare the test fixture."""
super(ReinstallCommandTest, self).setUp()
base = support.BaseCliStub('main')
base.init_sack()
self._cmd = dnf.cli.commands.reinstall.ReinstallCommand(base.mock_cli())
def test_run(self):
"""Test whether the package is installed."""
self._cmd.run(['pepper'])
base = self._cmd.cli.base
self.assertResult(base, itertools.chain(
base.sack.query().installed().filter(name__neq='pepper'),
dnf.subject.Subject('pepper.x86_64').get_best_query(base.sack)
.available()))
@mock.patch('dnf.cli.commands.reinstall._',
dnf.pycomp.NullTranslations().ugettext)
def test_run_notinstalled(self):
"""Test whether it fails if the package is not installed."""
stdout = dnf.pycomp.StringIO()
with support.wiretap_logs('dnf', logging.INFO, stdout):
self.assertRaises(dnf.exceptions.Error, self._cmd.run, ['lotus'])
self.assertEqual(stdout.getvalue(), 'No match for argument: lotus\n')
self.assertResult(self._cmd.cli.base,
self._cmd.cli.base.sack.query().installed())
@mock.patch('dnf.cli.commands.reinstall._',
dnf.pycomp.NullTranslations().ugettext)
def test_run_notavailable(self):
"""Test whether it fails if the package is not available."""
base = self._cmd.cli.base
holes_query = dnf.subject.Subject('hole').get_best_query(base.sack)
for pkg in holes_query.installed():
self._cmd.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()
self._cmd.base.yumdb.get_package(pkg).from_repo = 'unknown'
stdout = dnf.pycomp.StringIO()
with support.wiretap_logs('dnf', logging.INFO, stdout):
self.assertRaises(dnf.exceptions.Error, self._cmd.run, ['hole'])
self.assertEqual(
stdout.getvalue(),
'Installed package hole-1-1.x86_64 (from unknown) not available.\n')
self.assertResult(base, base.sack.query().installed())
class RepoPkgsCommandTest(unittest.TestCase):
"""Tests of ``dnf.cli.commands.RepoPkgsCommand`` class."""
def setUp(self):
"""Prepare the test fixture."""
super(RepoPkgsCommandTest, self).setUp()
cli = support.BaseCliStub().mock_cli()
self.cmd = dnf.cli.commands.RepoPkgsCommand(cli)
def test_configure_badargs(self):
"""Test whether the method does not fail even in case of wrong args."""
self.cmd.configure([])
class RepoPkgsCheckUpdateSubCommandTest(unittest.TestCase):
"""Tests of ``dnf.cli.commands.RepoPkgsCommand.CheckUpdateSubCommand`` class."""
def setUp(self):
"""Prepare the test fixture."""
super(RepoPkgsCheckUpdateSubCommandTest, self).setUp()
base = support.BaseCliStub('main', 'updates', 'third_party')
self.cli = base.mock_cli()
@mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)
def test(self):
"""Test whether only upgrades in the repository are listed."""
cmd = dnf.cli.commands.RepoPkgsCommand.CheckUpdateSubCommand(self.cli)
with support.patch_std_streams() as (stdout, _):
cmd.run_on_repo('updates', [])
self.assertEqual(
stdout.getvalue(),
u'\n'
u'hole.x86_64 1-2'
u' updates\n'
u'hole.x86_64 2-1'
u' updates\n'
u'pepper.x86_64 20-1'
u' updates\n'
u'Obsoleting Packages\n'
u'hole.i686 2-1'
u' updates\n'
u' tour.noarch 5-0'
u' @System\n'
u'hole.x86_64 2-1'
u' updates\n'
u' tour.noarch 5-0'
u' @System\n')
self.assertEqual(self.cli.demands.success_exit_status, 100)
def test_not_found(self):
"""Test whether exit code differs if updates are not found."""
cmd = dnf.cli.commands.RepoPkgsCommand.CheckUpdateSubCommand(self.cli)
cmd.run_on_repo('main', [])
self.assertNotEqual(self.cli.demands.success_exit_status, 100)
class RepoPkgsInfoSubCommandTest(unittest.TestCase):
"""Tests of ``dnf.cli.commands.RepoPkgsCommand.InfoSubCommand`` class."""
AVAILABLE_TITLE = u'Available Packages\n'
HOLE_I686_INFO = (u'Name : hole\n'
u'Arch : i686\n'
u'Epoch : 0\n'
u'Version : 2\n'
u'Release : 1\n'
u'Size : 0.0 \n'
u'Repo : updates\n'
u'Summary : \n'
u'License : \n'
u'Description : \n'
u'\n')
HOLE_X86_64_INFO = (u'Name : hole\n'
u'Arch : x86_64\n'
u'Epoch : 0\n'
u'Version : 2\n'
u'Release : 1\n'
u'Size : 0.0 \n'
u'Repo : updates\n'
u'Summary : \n'
u'License : \n'
u'Description : \n\n')
INSTALLED_TITLE = u'Installed Packages\n'
PEPPER_SYSTEM_INFO = (u'Name : pepper\n'
u'Arch : x86_64\n'
u'Epoch : 0\n'
u'Version : 20\n'
u'Release : 0\n'
u'Size : 0.0 \n'
u'Repo : @System\n'
u'From repo : main\n'
u'Summary : \n'
u'License : \n'
u'Description : \n\n')
PEPPER_UPDATES_INFO = (u'Name : pepper\n'
u'Arch : x86_64\n'
u'Epoch : 0\n'
u'Version : 20\n'
u'Release : 1\n'
u'Size : 0.0 \n'
u'Repo : updates\n'
u'Summary : \n'
u'License : \n'
u'Description : \n\n')
def setUp(self):
"""Prepare the test fixture."""
super(RepoPkgsInfoSubCommandTest, self).setUp()
base = support.BaseCliStub('main', 'updates', 'third_party')
base.conf.recent = 7
self.cli = base.mock_cli()
@mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)
@mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext)
def test_info_all(self):
"""Test whether only packages related to the repository are listed."""
for pkg in self.cli.base.sack.query().installed().filter(name='pepper'):
self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()
self.cli.base.yumdb.get_package(pkg).from_repo = 'main'
cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli)
with support.patch_std_streams() as (stdout, _):
cmd.run_on_repo('main', ['all', '*p*'])
self.assertEqual(
stdout.getvalue(),
''.join((
self.INSTALLED_TITLE,
self.PEPPER_SYSTEM_INFO,
self.AVAILABLE_TITLE,
u'Name : pepper\n'
u'Arch : src\n'
u'Epoch : 0\n'
u'Version : 20\n'
u'Release : 0\n'
u'Size : 0.0 \n'
u'Repo : main\n'
u'Summary : \n'
u'License : \n'
u'Description : \n'
u'\n',
u'Name : trampoline\n'
u'Arch : noarch\n'
u'Epoch : 0\n'
u'Version : 2.1\n'
u'Release : 1\n'
u'Size : 0.0 \n'
u'Repo : main\n'
u'Summary : \n'
u'License : \n'
u'Description : \n'
u'\n')))
@mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)
@mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext)
def test_info_available(self):
"""Test whether only packages in the repository are listed."""
cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli)
with support.patch_std_streams() as (stdout, _):
cmd.run_on_repo('updates', ['available'])
self.assertEqual(
stdout.getvalue(),
''.join((
self.AVAILABLE_TITLE,
self.HOLE_I686_INFO,
self.HOLE_X86_64_INFO,
self.PEPPER_UPDATES_INFO)))
@mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)
@mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext)
def test_info_extras(self):
"""Test whether only extras installed from the repository are listed."""
for pkg in self.cli.base.sack.query().installed().filter(name='tour'):
self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()
self.cli.base.yumdb.get_package(pkg).from_repo = 'unknown'
cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli)
with support.patch_std_streams() as (stdout, _):
cmd.run_on_repo('unknown', ['extras'])
self.assertEqual(
stdout.getvalue(),
u'Extra Packages\n'
u'Name : tour\n'
u'Arch : noarch\n'
u'Epoch : 0\n'
u'Version : 5\n'
u'Release : 0\n'
u'Size : 0.0 \n'
u'Repo : @System\n'
u'From repo : unknown\n'
u'Summary : \n'
u'License : \n'
u'Description : \n\n')
@mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)
@mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext)
def test_info_installed(self):
"""Test whether only packages installed from the repository are listed."""
for pkg in self.cli.base.sack.query().installed().filter(name='pepper'):
self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()
self.cli.base.yumdb.get_package(pkg).from_repo = 'main'
cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli)
with support.patch_std_streams() as (stdout, _):
cmd.run_on_repo('main', ['installed'])
self.assertEqual(
stdout.getvalue(),
''.join((self.INSTALLED_TITLE, self.PEPPER_SYSTEM_INFO)))
@mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)
@mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext)
def test_info_obsoletes(self):
"""Test whether only obsoletes in the repository are listed."""
cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli)
with support.patch_std_streams() as (stdout, _):
cmd.run_on_repo('updates', ['obsoletes'])
self.assertEqual(
stdout.getvalue(),
''.join((
u'Obsoleting Packages\n',
self.HOLE_I686_INFO,
self.HOLE_X86_64_INFO)))
@mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)
@mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext)
def test_info_recent(self):
"""Test whether only packages in the repository are listed."""
cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli)
with mock.patch('time.time', return_value=0), \
support.patch_std_streams() as (stdout, _):
cmd.run_on_repo('updates', ['recent'])
self.assertEqual(
stdout.getvalue(),
''.join((
u'Recently Added Packages\n',
self.HOLE_I686_INFO,
self.HOLE_X86_64_INFO,
self.PEPPER_UPDATES_INFO)))
@mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)
@mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext)
def test_info_upgrades(self):
"""Test whether only upgrades in the repository are listed."""
cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli)
with support.patch_std_streams() as (stdout, _):
cmd.run_on_repo('updates', ['upgrades'])
self.assertEqual(
stdout.getvalue(),
''.join((
u'Upgraded Packages\n'
u'Name : hole\n'
u'Arch : x86_64\n'
u'Epoch : 0\n'
u'Version : 1\n'
u'Release : 2\n'
u'Size : 0.0 \n'
u'Repo : updates\n'
u'Summary : \n'
u'License : \n'
u'Description : \n'
u'\n',
self.HOLE_X86_64_INFO,
self.PEPPER_UPDATES_INFO)))
class RepoPkgsInstallSubCommandTest(support.ResultTestCase):
"""Tests of ``dnf.cli.commands.RepoPkgsCommand.InstallSubCommand`` class."""
def setUp(self):
"""Prepare the test fixture."""
super(RepoPkgsInstallSubCommandTest, self).setUp()
base = support.BaseCliStub('main', 'third_party')
base.repos['main'].metadata = mock.Mock(comps_fn=support.COMPS_PATH)
base.repos['third_party'].enablegroups = False
base.init_sack()
self.cli = base.mock_cli()
def test_all(self):
"""Test whether all packages from the repository are installed."""
cmd = dnf.cli.commands.RepoPkgsCommand.InstallSubCommand(self.cli)
cmd.run_on_repo('third_party', [])
self.assertResult(self.cli.base, itertools.chain(
self.cli.base.sack.query().installed().filter(name__neq='hole'),
self.cli.base.sack.query().available().filter(reponame='third_party',
arch='x86_64')))
class RepoPkgsMoveToSubCommandTest(support.ResultTestCase):
"""Tests of ``dnf.cli.commands.RepoPkgsCommand.MoveToSubCommand`` class."""
def setUp(self):
"""Prepare the test fixture."""
super(RepoPkgsMoveToSubCommandTest, self).setUp()
base = support.BaseCliStub('distro', 'main')
base.init_sack()
self.cli = base.mock_cli()
def test_all(self):
"""Test whether only packages in the repository are installed."""
cmd = dnf.cli.commands.RepoPkgsCommand.MoveToSubCommand(self.cli)
cmd.run_on_repo('distro', [])
self.assertResult(self.cli.base, itertools.chain(
self.cli.base.sack.query().installed().filter(name__neq='tour'),
dnf.subject.Subject('tour-5-0').get_best_query(self.cli.base.sack)
.available()))
class RepoPkgsReinstallOldSubCommandTest(support.ResultTestCase):
"""Tests of ``dnf.cli.commands.RepoPkgsCommand.ReinstallOldSubCommand`` class."""
def setUp(self):
"""Prepare the test fixture."""
super(RepoPkgsReinstallOldSubCommandTest, self).setUp()
base = support.BaseCliStub('main')
base.init_sack()
self.cli = base.mock_cli()
def test_all(self):
"""Test whether all packages from the repository are reinstalled."""
for pkg in self.cli.base.sack.query().installed():
reponame = 'main' if pkg.name != 'pepper' else 'non-main'
self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()
self.cli.base.yumdb.get_package(pkg).from_repo = reponame
cmd = dnf.cli.commands.RepoPkgsCommand.ReinstallOldSubCommand(self.cli)
cmd.run_on_repo('main', [])
self.assertResult(self.cli.base, itertools.chain(
self.cli.base.sack.query().installed().filter(name__neq='librita'),
dnf.subject.Subject('librita.i686').get_best_query(self.cli.base.sack)
.installed(),
dnf.subject.Subject('librita').get_best_query(self.cli.base.sack)
.available()))
class RepoPkgsReinstallSubCommandTest(unittest.TestCase):
"""Tests of ``dnf.cli.commands.RepoPkgsCommand.ReinstallSubCommand`` class."""
def setUp(self):
"""Prepare the test fixture."""
super(RepoPkgsReinstallSubCommandTest, self).setUp()
self.cli = support.BaseCliStub('main').mock_cli()
self.mock = mock.Mock()
old_run_patcher = mock.patch(
'dnf.cli.commands.RepoPkgsCommand.ReinstallOldSubCommand.run_on_repo',
self.mock.reinstall_old_run)
move_run_patcher = mock.patch(
'dnf.cli.commands.RepoPkgsCommand.MoveToSubCommand.run_on_repo',
self.mock.move_to_run)
old_run_patcher.start()
self.addCleanup(old_run_patcher.stop)
move_run_patcher.start()
self.addCleanup(move_run_patcher.stop)
def test_all_fails(self):
"""Test whether it fails if everything fails."""
self.mock.reinstall_old_run.side_effect = dnf.exceptions.Error('test')
self.mock.move_to_run.side_effect = dnf.exceptions.Error('test')
cmd = dnf.cli.commands.RepoPkgsCommand.ReinstallSubCommand(self.cli)
self.assertRaises(dnf.exceptions.Error, cmd.run_on_repo, 'main', [])
self.assertEqual(self.mock.mock_calls,
[mock.call.reinstall_old_run('main', []),
mock.call.move_to_run('main', [])])
def test_all_moveto(self):
"""Test whether reinstall-old is called first and move-to next."""
self.mock.reinstall_old_run.side_effect = dnf.exceptions.Error('test')
cmd = dnf.cli.commands.RepoPkgsCommand.ReinstallSubCommand(self.cli)
cmd.run_on_repo('main', [])
self.assertEqual(self.mock.mock_calls,
[mock.call.reinstall_old_run('main', []),
mock.call.move_to_run('main', [])])
def test_all_reinstallold(self):
"""Test whether only reinstall-old is called."""
cmd = dnf.cli.commands.RepoPkgsCommand.ReinstallSubCommand(self.cli)
cmd.run_on_repo('main', [])
self.assertEqual(self.mock.mock_calls,
[mock.call.reinstall_old_run('main', [])])
class RepoPkgsRemoveOrDistroSyncSubCommandTest(support.ResultTestCase):
"""Tests of ``RemoveOrDistroSyncSubCommand`` class."""
def setUp(self):
"""Prepare the test fixture."""
super(RepoPkgsRemoveOrDistroSyncSubCommandTest, self).setUp()
self.cli = support.BaseCliStub('distro').mock_cli()
self.cli.base.init_sack()
def test_run_on_repo_spec_sync(self):
"""Test running with a package which can be synchronized."""
for pkg in self.cli.base.sack.query().installed():
data = support.RPMDBAdditionalDataPackageStub()
data.from_repo = 'non-distro' if pkg.name == 'pepper' else 'distro'
self.cli.base.yumdb.db[str(pkg)] = data
cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrDistroSyncSubCommand(
self.cli)
cmd.run_on_repo('non-distro', ['pepper'])
self.assertResult(self.cli.base, itertools.chain(
self.cli.base.sack.query().installed().filter(name__neq='pepper'),
dnf.subject.Subject('pepper').get_best_query(self.cli.base.sack)
.available()))
def test_run_on_repo_spec_remove(self):
"""Test running with a package which must be removed."""
for pkg in self.cli.base.sack.query().installed():
data = support.RPMDBAdditionalDataPackageStub()
data.from_repo = 'non-distro' if pkg.name == 'hole' else 'distro'
self.cli.base.yumdb.db[str(pkg)] = data
cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrDistroSyncSubCommand(
self.cli)
cmd.run_on_repo('non-distro', ['hole'])
self.assertResult(
self.cli.base,
self.cli.base.sack.query().installed().filter(name__neq='hole'))
def test_run_on_repo_all(self):
"""Test running without a package specification."""
nondist = {'pepper', 'hole'}
for pkg in self.cli.base.sack.query().installed():
data = support.RPMDBAdditionalDataPackageStub()
data.from_repo = 'non-distro' if pkg.name in nondist else 'distro'
self.cli.base.yumdb.db[str(pkg)] = data
cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrDistroSyncSubCommand(
self.cli)
cmd.run_on_repo('non-distro', [])
self.assertResult(self.cli.base, itertools.chain(
self.cli.base.sack.query().installed().filter(name__neq='pepper')
.filter(name__neq='hole'),
dnf.subject.Subject('pepper').get_best_query(self.cli.base.sack)
.available()))
@mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)
def test_run_on_repo_spec_notinstalled(self):
"""Test running with a package which is not installed."""
stdout = dnf.pycomp.StringIO()
cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrDistroSyncSubCommand(
self.cli)
with support.wiretap_logs('dnf', logging.INFO, stdout):
self.assertRaises(dnf.exceptions.Error,
cmd.run_on_repo, 'non-distro', ['not-installed'])
self.assertIn('No match for argument: not-installed\n', stdout.getvalue(),
'mismatch not logged')
@mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)
def test_run_on_repo_all_notinstalled(self):
"""Test running with a repository from which nothing is installed."""
stdout = dnf.pycomp.StringIO()
cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrDistroSyncSubCommand(
self.cli)
with support.wiretap_logs('dnf', logging.INFO, stdout):
self.assertRaises(dnf.exceptions.Error,
cmd.run_on_repo, 'non-distro', [])
self.assertIn('No package installed from the repository.\n',
stdout.getvalue(), 'mismatch not logged')
class RepoPkgsRemoveOrReinstallSubCommandTest(support.ResultTestCase):
"""Tests of ``dnf.cli.commands.RepoPkgsCommand.RemoveOrReinstallSubCommand`` class."""
def setUp(self):
"""Prepare the test fixture."""
super(RepoPkgsRemoveOrReinstallSubCommandTest, self).setUp()
base = support.BaseCliStub('distro')
base.init_sack()
self.cli = base.mock_cli()
def test_all_not_installed(self):
"""Test whether it fails if no package is installed from the repository."""
cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrReinstallSubCommand(
self.cli)
self.assertRaises(dnf.exceptions.Error,
cmd.run_on_repo, 'non-distro', [])
self.assertResult(self.cli.base, self.cli.base.sack.query().installed())
def test_all_reinstall(self):
"""Test whether all packages from the repository are reinstalled."""
for pkg in self.cli.base.sack.query().installed():
reponame = 'distro' if pkg.name != 'tour' else 'non-distro'
self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()
self.cli.base.yumdb.get_package(pkg).from_repo = reponame
cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrReinstallSubCommand(
self.cli)
cmd.run_on_repo('non-distro', [])
self.assertResult(self.cli.base, itertools.chain(
self.cli.base.sack.query().installed().filter(name__neq='tour'),
dnf.subject.Subject('tour').get_best_query(self.cli.base.sack)
.available()))
def test_all_remove(self):
"""Test whether all packages from the repository are removed."""
for pkg in self.cli.base.sack.query().installed():
reponame = 'distro' if pkg.name != 'hole' else 'non-distro'
self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()
self.cli.base.yumdb.get_package(pkg).from_repo = reponame
cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrReinstallSubCommand(
self.cli)
cmd.run_on_repo('non-distro', [])
self.assertResult(
self.cli.base,
self.cli.base.sack.query().installed().filter(name__neq='hole'))
class RepoPkgsRemoveSubCommandTest(support.ResultTestCase):
"""Tests of ``dnf.cli.commands.RepoPkgsCommand.RemoveSubCommand`` class."""
def setUp(self):
"""Prepare the test fixture."""
super(RepoPkgsRemoveSubCommandTest, self).setUp()
base = support.BaseCliStub('main')
base.init_sack()
self.cli = base.mock_cli()
def test_all(self):
"""Test whether only packages from the repository are removed."""
for pkg in self.cli.base.sack.query().installed():
reponame = 'main' if pkg.name == 'pepper' else 'non-main'
self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()
self.cli.base.yumdb.get_package(pkg).from_repo = reponame
cmd = dnf.cli.commands.RepoPkgsCommand.RemoveSubCommand(self.cli)
cmd.run_on_repo('main', [])
self.assertResult(
self.cli.base,
self.cli.base.sack.query().installed().filter(name__neq='pepper'))
class RepoPkgsUpgradeSubCommandTest(support.ResultTestCase):
"""Tests of ``dnf.cli.commands.RepoPkgsCommand.UpgradeSubCommand`` class."""
def setUp(self):
"""Prepare the test fixture."""
super(RepoPkgsUpgradeSubCommandTest, self).setUp()
base = support.BaseCliStub('updates', 'third_party')
base.init_sack()
self.cli = base.mock_cli()
def test_all(self):
"""Test whether all packages from the repository are installed."""
cmd = dnf.cli.commands.RepoPkgsCommand.UpgradeSubCommand(self.cli)
cmd.run_on_repo('third_party', [])
self.assertResult(self.cli.base, itertools.chain(
self.cli.base.sack.query().installed().filter(name__neq='hole'),
self.cli.base.sack.query().upgrades().filter(reponame='third_party',
arch='x86_64')))
class RepoPkgsUpgradeToSubCommandTest(support.ResultTestCase):
"""Tests of ``dnf.cli.commands.RepoPkgsCommand.UpgradeToSubCommand`` class."""
def setUp(self):
"""Prepare the test fixture."""
super(RepoPkgsUpgradeToSubCommandTest, self).setUp()
base = support.BaseCliStub('updates', 'third_party')
base.init_sack()
self.cli = base.mock_cli()
def test_all(self):
"""Test whether the package from the repository is installed."""
cmd = dnf.cli.commands.RepoPkgsCommand.UpgradeToSubCommand(self.cli)
cmd.run_on_repo('updates', ['hole-1-2'])
self.assertResult(self.cli.base, itertools.chain(
self.cli.base.sack.query().installed().filter(name__neq='hole'),
dnf.subject.Subject('hole-1-2.x86_64').get_best_query(self.cli.base.sack)
.filter(reponame='updates')))
class UpgradeCommandTest(support.ResultTestCase):
"""Tests of ``dnf.cli.commands.upgrade.UpgradeCommand`` class."""
def setUp(self):
"""Prepare the test fixture."""
super(UpgradeCommandTest, self).setUp()
base = support.BaseCliStub('updates')
base.init_sack()
self.cmd = dnf.cli.commands.upgrade.UpgradeCommand(base.mock_cli())
def test_run(self):
"""Test whether a package is updated."""
self.cmd.run(['pepper'])
self.assertResult(self.cmd.base, itertools.chain(
self.cmd.base.sack.query().installed().filter(name__neq='pepper'),
self.cmd.base.sack.query().upgrades().filter(name='pepper')))
@mock.patch('dnf.cli.commands.upgrade._',
dnf.pycomp.NullTranslations().ugettext)
def test_updatePkgs_notfound(self):
"""Test whether it fails if the package cannot be found."""
stdout = dnf.pycomp.StringIO()
with support.wiretap_logs('dnf', logging.INFO, stdout):
self.assertRaises(dnf.exceptions.Error,
self.cmd.run, ['non-existent'])
self.assertEqual(stdout.getvalue(),
'No match for argument: non-existent\n')
self.assertResult(self.cmd.base, self.cmd.base.sack.query().installed())
| gpl-2.0 |
jrief/easy-thumbnails | easy_thumbnails/tests/fields.py | 2 | 3792 | import os
from django.core.files.base import ContentFile
from django.db import models
from easy_thumbnails import test
from easy_thumbnails.fields import ThumbnailerField, ThumbnailerImageField
from easy_thumbnails.exceptions import InvalidImageFormatError
class TestModel(models.Model):
avatar = ThumbnailerField(upload_to='avatars')
picture = ThumbnailerImageField(upload_to='pictures',
resize_source=dict(size=(10, 10)))
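# Illustrative sketch (annotation added for clarity, not part of the original
# tests): in application code a ThumbnailerField value produces thumbnails on
# demand, mirroring the assertions below, e.g.
#
#     instance = TestModel(avatar='avatars/avatar.jpg')
#     thumb = instance.avatar.get_thumbnail({'size': (100, 100)})
#     thumb.url  # served from the field's thumbnail storage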
class ThumbnailerFieldTest(test.BaseTest):
def setUp(self):
super(ThumbnailerFieldTest, self).setUp()
self.storage = test.TemporaryStorage()
# Save a test image.
self.create_image(self.storage, 'avatars/avatar.jpg')
# Set the test model to use the current temporary storage.
TestModel._meta.get_field('avatar').storage = self.storage
TestModel._meta.get_field('avatar').thumbnail_storage = self.storage
def tearDown(self):
self.storage.delete_temporary_storage()
super(ThumbnailerFieldTest, self).tearDown()
def test_generate_thumbnail(self):
instance = TestModel(avatar='avatars/avatar.jpg')
thumb = instance.avatar.generate_thumbnail({'size': (300, 300)})
self.assertEqual((thumb.width, thumb.height), (300, 225))
def test_generate_thumbnail_type_error(self):
text_file = ContentFile("Lorem ipsum dolor sit amet. Not an image.")
self.storage.save('avatars/invalid.jpg', text_file)
instance = TestModel(avatar='avatars/invalid.jpg')
        self.assertRaises(
            InvalidImageFormatError,
            instance.avatar.generate_thumbnail, {'size': (300, 300)})
def test_delete(self):
instance = TestModel(avatar='avatars/avatar.jpg')
source_path = instance.avatar.path
thumb_paths = (
instance.avatar.get_thumbnail({'size': (300, 300)}).path,
instance.avatar.get_thumbnail({'size': (200, 200)}).path,
instance.avatar.get_thumbnail({'size': (100, 100)}).path,
)
self.assertTrue(os.path.exists(source_path))
for path in thumb_paths:
self.assertTrue(os.path.exists(path))
instance.avatar.delete(save=False)
self.assertFalse(os.path.exists(source_path))
for path in thumb_paths:
self.assertFalse(os.path.exists(path))
def test_delete_thumbnails(self):
instance = TestModel(avatar='avatars/avatar.jpg')
source_path = instance.avatar.path
thumb_paths = (
instance.avatar.get_thumbnail({'size': (300, 300)}).path,
instance.avatar.get_thumbnail({'size': (200, 200)}).path,
instance.avatar.get_thumbnail({'size': (100, 100)}).path,
)
self.assertTrue(os.path.exists(source_path))
for path in thumb_paths:
self.assertTrue(os.path.exists(path))
instance.avatar.delete_thumbnails()
self.assertTrue(os.path.exists(source_path))
for path in thumb_paths:
self.assertFalse(os.path.exists(path))
def test_get_thumbnails(self):
instance = TestModel(avatar='avatars/avatar.jpg')
instance.avatar.get_thumbnail({'size': (300, 300)})
instance.avatar.get_thumbnail({'size': (200, 200)})
self.assertEqual(len(list(instance.avatar.get_thumbnails())), 2)
def test_saving_image_field_with_resize_source(self):
# Ensure that saving ThumbnailerImageField with resize_source enabled
# using instance.field.save() does not fail
instance = TestModel(avatar='avatars/avatar.jpg')
instance.picture.save(
'file.jpg', ContentFile(instance.avatar.file.read()), save=False)
self.assertEqual(instance.picture.width, 10)
| bsd-3-clause |
watson-developer-cloud/discovery-starter-kit | server/python/server.py | 1 | 4953 | import os
import sys
import json
from helpers import get_constants, get_questions
from flask import Flask, jsonify, render_template, request
from flask_sslify import SSLify
from flask_cors import CORS
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from requests.exceptions import HTTPError
from dotenv import load_dotenv, find_dotenv
import watson_developer_cloud.natural_language_understanding.features.v1 as features # noqa
from watson_developer_cloud import DiscoveryV1, NaturalLanguageUnderstandingV1
import metrics_tracker_client
try:
load_dotenv(find_dotenv())
except IOError:
print('warning: no .env file loaded')
# Emit Bluemix deployment event if not a demo deploy
if not(os.getenv('DEMO_DEPLOY')):
metrics_tracker_client.track()
app = Flask(
__name__,
static_folder="../../client/knowledge_base_search/build/static",
template_folder="../../client/knowledge_base_search/build"
)
# force SSL
sslify = SSLify(app)
# Limit requests
limiter = Limiter(
app,
key_func=get_remote_address,
default_limits=['240 per minute', '4 per second'],
headers_enabled=True
)
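# Note (illustrative, not part of the original app): individual routes can
# override these defaults with Flask-Limiter's decorator, e.g.
#
#     @app.route('/api/expensive')
#     @limiter.limit('10 per minute')
#     def expensive():
#         ...
#
# while the index route below opts out entirely via @limiter.exempt.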
CORS(app, resources={r"/api/*": {"origins": "*"}})
# Discovery
discovery = DiscoveryV1(
url=os.getenv('DISCOVERY_URL'),
username=os.getenv('DISCOVERY_USERNAME'),
password=os.getenv('DISCOVERY_PASSWORD'),
version="2016-12-01"
)
# NLU
nlu = NaturalLanguageUnderstandingV1(
url=os.getenv('NLU_URL'),
username=os.getenv('NLU_USERNAME'),
password=os.getenv('NLU_PASSWORD'),
version="2017-02-27"
)
"""
retrieve the following:
{
environment_id: env_id,
collection_id: {
passages: passages_id,
regular: regular_id,
trained: trained_id
}
}
"""
constants = get_constants(
discovery,
passages_name=os.getenv(
'DISCOVERY_PASSAGES_COLLECTION_NAME',
'knowledge_base_regular'
),
regular_name=os.getenv(
'DISCOVERY_REGULAR_COLLECTION_NAME',
'knowledge_base_regular'
),
trained_name=os.getenv(
'DISCOVERY_TRAINED_COLLECTION_NAME',
'knowledge_base_trained'
)
)
try:
total_questions = int(os.getenv('DISCOVERY_QUESTION_COUNT', 5000))
except ValueError:
sys.exit('DISCOVERY_QUESTION_COUNT not an integer, terminating...')
passages_question_cache = get_questions(
discovery=discovery,
constants=constants,
question_count=total_questions,
feature_type='passages')
trained_question_cache = get_questions(
discovery=discovery,
constants=constants,
question_count=total_questions,
feature_type='trained')
@app.route('/')
@limiter.exempt
def index():
return render_template('index.html')
@app.route('/api/query/<collection_type>', methods=['POST'])
def query(collection_type):
query_options = json.loads(request.data)
query_options['return'] = 'text'
if collection_type == 'passages':
query_options['passages'] = True
# retrieve more results for regular so that we can compare original rank
if collection_type == 'regular':
query_options['count'] = 100
return jsonify(
discovery.query(
environment_id=constants['environment_id'],
collection_id=constants['collection_id'][collection_type],
query_options=query_options
)
)
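# Illustrative client call (a sketch; the host, port, and query key are
# assumptions based on the defaults below and the Discovery query API):
#
#     import requests
#     r = requests.post('http://127.0.0.1:5000/api/query/regular',
#                       json={'natural_language_query': 'example question'})
#     r.json()  # Discovery results, subject to the rate limits above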
@app.route('/api/questions/<feature_type>', methods=['GET'])
def questions(feature_type):
if feature_type == 'passages':
return jsonify(passages_question_cache)
else:
return jsonify(trained_question_cache)
@app.errorhandler(429)
def ratelimit_handler(e):
return jsonify(
error="API Rate Limit exceeded: %s" % e.description,
code=429), 429
@app.errorhandler(Exception)
def handle_error(e):
code = 500
error = 'Error processing the request'
    if isinstance(e, HTTPError):
        # requests' HTTPError carries the status on its response object
        # rather than on an e.code attribute
        if e.response is not None:
            code = e.response.status_code
        error = str(e)
return jsonify(error=error, code=code), code
if __name__ == '__main__':
# If we are in the Bluemix environment
PRODUCTION = True if os.getenv('VCAP_APPLICATION') else False
# set port to 0.0.0.0, otherwise set it to localhost (127.0.0.1)
HOST = '0.0.0.0' if PRODUCTION else '127.0.0.1'
# Get port from the Bluemix environment, or default to 5000
PORT_NUMBER = int(os.getenv('PORT', '5000'))
app.run(host=HOST, port=PORT_NUMBER, debug=not(PRODUCTION))
| mit |
jordanemedlock/psychtruths | temboo/Library/Microsoft/OAuth/RefreshToken.py | 4 | 4495 | # -*- coding: utf-8 -*-
###############################################################################
#
# RefreshToken
# Retrieves a new refresh token and access token by exchanging the refresh token that is associated with the expired access token.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class RefreshToken(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the RefreshToken Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(RefreshToken, self).__init__(temboo_session, '/Library/Microsoft/OAuth/RefreshToken')
def new_input_set(self):
return RefreshTokenInputSet()
def _make_result_set(self, result, path):
return RefreshTokenResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return RefreshTokenChoreographyExecution(session, exec_id, path)
class RefreshTokenInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the RefreshToken
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((required, string) The Client ID provided by Microsoft after registering your application.)
"""
super(RefreshTokenInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((required, string) The Client Secret provided by Microsoft after registering your application.)
"""
super(RefreshTokenInputSet, self)._set_input('ClientSecret', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((required, string) An OAuth Refresh Token used to generate a new access token when the original token is expired.)
"""
super(RefreshTokenInputSet, self)._set_input('RefreshToken', value)
def set_Resource(self, value):
"""
Set the value of the Resource input for this Choreo. ((conditional, string) The App ID URI of the web API (secured resource). See Choreo notes for details.)
"""
super(RefreshTokenInputSet, self)._set_input('Resource', value)
class RefreshTokenResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the RefreshToken Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Microsoft.)
"""
return self._output.get('Response', None)
def get_Expires(self):
"""
Retrieve the value for the "Expires" output from this Choreo execution. ((integer) The remaining lifetime of the short-lived access token.)
"""
return self._output.get('Expires', None)
def get_NewRefreshToken(self):
"""
Retrieve the value for the "NewRefreshToken" output from this Choreo execution. ((string) The new Refresh Token which can be used the next time your app needs to get a new Access Token.)
"""
return self._output.get('NewRefreshToken', None)
class RefreshTokenChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return RefreshTokenResultSet(response, path)
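# Illustrative usage (a sketch, not part of the generated binding; the
# credential strings are placeholders, not real values):
#
#     from temboo.core.session import TembooSession
#     session = TembooSession('ACCOUNT', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#     choreo = RefreshToken(session)
#     inputs = choreo.new_input_set()
#     inputs.set_ClientID('...')
#     inputs.set_ClientSecret('...')
#     inputs.set_RefreshToken('...')
#     results = choreo.execute_with_results(inputs)
#     new_token = results.get_NewRefreshToken()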
| apache-2.0 |
huahbo/src | book/Recipes/m1d.py | 5 | 10916 | from rsf.proj import *
from decimal import *
# --- User set --- #
# model
model = {
'X' : 2000, # meter
'dx': 10.0,
'dt': 0.001,
'SelT': 0.25, # selected time for snapshot show
'snpintvl': 1.0, # nterval of snapshot output
'size' : 8, # FD order
'frqcut' : 1.0,
'pml' : 240,
}
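# NOTE (annotation added for clarity): setpar() below also reads 'T', 'vel',
# 'dvel' and 'den' from this dict; the SConstruct importing this module is
# expected to add those entries before calling setpar().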
# source & receiver
srp = {
'bgn' : 0.1, # s, time of maximum ricker
'frq' : 10.0, # source domain frequence
'srcmms' : 'n', # MMS
'inject': 'n', # if y, inject; if n, Initiate conditon
'slx' : 1000.0, # source location (x), meter
'gdep' : 800 # receiver location (z), meter
}
# ------------------------------------------------------------------------------
def mgraph(fin, title):
Result(fin,
'''
put label1="Depth" unit1="m" |
transp plane=23 |
graph screenratio=0.5 title="%s"
'''%str(title))
# ------------------------------------------------------------------------------
def setpar(mdl, srp):
dx = mdl['dx']
dt = mdl['dt']
objpar = {
'vel' : mdl['vel'],
'dvel': mdl['dvel'],
'den' : mdl['den'],
'nx' : mdl['X']/dx+1,
'SelT': mdl['SelT'],
'nt' : int(Decimal(str(mdl['T']))/Decimal(str(dt)))+1,
'snt' : int(Decimal(str(mdl['SelT']))/Decimal(str(dt))/ \
Decimal(str(mdl['snpintvl']))),
'snpi': mdl['snpintvl'], #snap interval
'dt' : dt,
'iwdt': dt*1000, #dt for iwave
'dx' : dx,
'dxhf': 0.5*dx,
'ox' : 0.0,
'ot' : 0.0,
# source
'frq' : srp['frq'],
'wavfrq': srp['frq']/3.0,
'bgnp' : srp['bgn']/dt+1,
'slx' : srp['slx'],
'spx' : srp['slx']/dx+1,
'gdep' : srp['gdep'],
'gp' : int(srp['gdep']/dx+0.5),
'srcmms': srp['srcmms'], # MMS
'inject': srp['inject'], # if y, inject; if n, Initiate conditon
# fd
'size' : mdl['size'],
'fdsize': mdl['size']/2,
'frqcut': mdl['frqcut'],
'pml' : mdl['pml'],
'bd' : mdl['pml']+int((mdl['size']+1)/2)
}
return objpar
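# Illustrative wiring (a sketch of how an SConstruct might drive this module;
# the duration, velocity and density values below are assumptions):
#
#     model.update({'T': 0.5, 'vel': 2000.0, 'dvel': 2000.0, 'den': 2500.0})
#     par = setpar(model, srp)
#     buildmodel(par, 'den', 'vel', 'dvel', '2500', '2000', '2000')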
def buildmodel(par, denname, velname, dvelname, denval, velval, dvelval):
name = {
'den' : denname,
'vel' : velname,
'dvel': dvelname
}
value = {
'den' : denval,
'vel' : velval,
'dvel': dvelval
}
label = {
'den': 'Density',
'vel': 'Velocity',
'dvel': 'Velocity'
}
unit = {
'den': 'lg/m\^3\_',
'vel': 'm/s',
'dvel': 'm/s'
}
for m in ['den','vel','dvel']:
Flow(name[m],None,
'''
spike d1=%(dx)g n1=%(nx)d o1=0.0
label1=Depth unit1=m |
'''%par + '''
math output="%s"
'''%value[m])
pml = name[m]+'_pml'
pmlt = name[m]+'_pmlt'
pmlb = name[m]+'_pmlb'
Flow(pmlt, name[m], 'window n1=1 f1= 0 |spray axis=1 n=%(bd)d' %par)
Flow(pmlb, name[m], 'window n1=1 f1=-1 |spray axis=1 n=%(bd)d' %par)
Flow(pml,[pmlt, name[m], pmlb],'cat ${SOURCES[1]} ${SOURCES[2]} axis=1')
for m in ['den','vel','dvel']:
Flow(name[m]+'hf',None,
'''
spike d1=%(dx)g n1=%(nx)d o1=%(dxhf)g
label1=Depth unit1=m |
'''%par + '''
math output="%s"
'''%value[m])
def buildic(par, ic):
Flow(ic,None,
'''
spike n1=%(nx)d d1=%(dx)g k1=%(spx)d|
ricker1 frequency=%(wavfrq)g |
scale axis=1 |
put lable1="Depth" unit1="m" label2="Amplitude" unit2=""
'''%par)
def buildsrcp(par, srcp):
Flow(srcp, None,
'''
spike n1=%(nt)d d1=%(dt)g k1=%(bgnp)g |
ricker1 frequency=%(frq)g |
scale axis=1 |math output=input*400
'''%par)
def buildsrcd(par, srcd, prefix, subfix):
_pf = str(prefix)
sf_ = str(subfix)
spike = '%sspike%s' %(_pf, sf_)
ricker = '%sricker%s' %(_pf, sf_)
Flow(spike,None,
'''
spike n1=%(nx)d n2=%(nt)d d1=%(dx)g d2=%(dt)g
k1=%(spx)d k2=1
'''%par)
Flow(ricker,None,
'''
spike n1=%(nt)d d1=%(dt)g k1=%(bgnp)g |
ricker1 frequency=%(frq)g |scale axis=1
'''%par)
Flow(srcd,[spike,ricker],
'''
convft other=${SOURCES[1]} axis=2 |
window n2=%(nt)d | math output=input*400
'''%par)
def buildmms(par, mms, psrc, vsrc, pint, vint, vel, dvel, den, velhf, dvelhf, denhf):
#beta = 2*3.14159265359*par['frq']
alpha = 2*3.1415926*par['frq']/4.0
alpha = alpha*alpha
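    # alpha = (2*pi*frq/4)^2 -- presumably the Gaussian decay rate of the
    # manufactured solution handed to sfmms1dexp below.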
Flow([mms, psrc, vsrc, pint, vint], [vel, dvel, den, velhf, dvelhf, denhf],
'''
sfmms1dexp nt=%d dt=%g slx=%g alpha=%g
dvel=${SOURCES[1]} den=${SOURCES[2]}
presrc=${TARGETS[1]} velsrc=${TARGETS[2]}
preinit=${TARGETS[3]} velinit=${TARGETS[4]}
velhf=${SOURCES[3]} dvelhf=${SOURCES[4]} denhf=${SOURCES[5]}|
put label1="Depth" unit1="km" label2="Time" unit2="s"
'''%(par['nt'],par['dt'],par['slx'],alpha))
# ------------------------------------------------------------------------------
def lrmodel(fwf, frec, src, ic, vel, den, mmsfiles, par, prefix, suffix):
_pf = str(prefix)
sf_ = str(suffix)
fft = '%sfft%s' %(_pf, sf_)
rt = '%srt%s' %(_pf, sf_)
lt = '%slt%s' %(_pf, sf_)
Flow(fft, vel, 'fft1')
Flow([rt, lt], [vel, fft],
'''
isolrsg1 seed=2010 dt=%(dt)g fft=${SOURCES[1]} left=${TARGETS[1]}
'''%par)
if (mmsfiles == {}):
Flow([fwf,frec], [src, lt, rt, vel, den, fft, ic],
'''
sfsglr1 verb=y rec=${TARGETS[1]}
left=${SOURCES[1]} right=${SOURCES[2]}
vel=${SOURCES[3]} den=${SOURCES[4]}
fft=${SOURCES[5]} ic=${SOURCES[6]}
gdep=%(gdep)g slx=%(slx)g
inject=%(inject)s srcmms=%(srcmms)s
'''%par)
else :
psrc = mmsfiles['presrc']
vsrc = mmsfiles['velsrc']
pint = mmsfiles['preinit']
vint = mmsfiles['velinit']
Flow([fwf,frec], [src, lt, rt, vel, den, fft, ic,
psrc, vsrc, pint, vint],
'''
sfsglr1 verb=y
rec=${TARGETS[1]}
left=${SOURCES[1]} right=${SOURCES[2]}
vel=${SOURCES[3]} den=${SOURCES[4]}
fft=${SOURCES[5]} ic=${SOURCES[6]}
presrc=${SOURCES[7]} velsrc=${SOURCES[8]}
preinit=${SOURCES[9]} velinit=${SOURCES[10]}
gdep=%(gdep)g slx=%(slx)g
inject=%(inject)s srcmms=%(srcmms)s
'''%par)
def lfdmodel(fwf, frec, src, ic, vel, den, mmsfiles, par, prefix, suffix):
_pf = str(prefix)
sf_ = str(suffix)
G = '%sG%s' %(_pf, sf_)
sx = '%ssx%s' %(_pf, sf_)
Flow([G,sx],vel,
'''
sfsglfdc1 dt=%(dt)g eps=0.00001 npk=20 seed=2012
sx=${TARGETS[1]} size=%(size)d wavnumcut=%(frqcut)g
''' %par)
if mmsfiles == {}:
Flow([fwf, frec], [src, ic, vel, den, G, sx],
'''
sfsglfd1pml rec=${TARGETS[1]} ic=${SOURCES[1]}
vel=${SOURCES[2]} den=${SOURCES[3]}
G=${SOURCES[4]} sx=${SOURCES[5]}
pmld0=20
gdep=%(gdep)g slx=%(slx)g pmlsize=%(pml)d
inject=%(inject)s srcmms=%(srcmms)s
verb=y snapinter=1
''' %par)
else:
psrc = mmsfiles['presrc']
vsrc = mmsfiles['velsrc']
pint = mmsfiles['preinit']
vint = mmsfiles['velinit']
Flow([fwf, frec], [src, ic, vel, den, G, sx,
psrc, vsrc, pint, vint],
'''
sfsglfd1pml rec=${TARGETS[1]} ic=${SOURCES[1]}
vel=${SOURCES[2]} den=${SOURCES[3]}
G=${SOURCES[4]} sx=${SOURCES[5]}
presrc=${SOURCES[6]} velsrc=${SOURCES[7]}
preinit=${SOURCES[8]} velinit=${SOURCES[9]}
pmld0=20
gdep=%(gdep)g slx=%(slx)g pmlsize=%(pml)d
inject=%(inject)s srcmms=%(srcmms)s
verb=y snapinter=1
''' %par)
def fdmodel(fwf, frec, src, ic, vel, den, mmsfiles, par):
if (mmsfiles == {}):
Flow([fwf, frec], [src, ic, vel, den],
'''
sfsgfd1 ic=${SOURCES[1]}
vel=${SOURCES[2]} den=${SOURCES[3]} rec=${TARGETS[1]}
pmld0=20 size=%(fdsize)d
gdep=%(gdep)g slx=%(slx)g pmlsize=%(pml)d
inject=%(inject)s
verb=y snapinter=1
''' %par )
else :
psrc = mmsfiles['presrc']
vsrc = mmsfiles['velsrc']
pint = mmsfiles['preinit']
vint = mmsfiles['velinit']
Flow([fwf, frec], [src, ic, vel, den, psrc, vsrc, pint, vint],
'''
sfsgfd1 ic=${SOURCES[1]}
vel=${SOURCES[2]} den=${SOURCES[3]} rec=${TARGETS[1]}
presrc=${SOURCES[4]} velsrc=${SOURCES[5]}
preinit=${SOURCES[6]} velinit=${SOURCES[7]}
pmld0=20 size=%(fdsize)d
gdep=%(gdep)g slx=%(slx)g pmlsize=%(pml)d
inject=%(inject)s srcmms=%(srcmms)s
verb=y snapinter=1
''' %par )
# ------------------------------------------------------------------------------
def analyticslt(fout, par, vel, prefix, subfix):
_pf = str(prefix)
sf_ = str(subfix)
spx = par['spx']
selt= par['SelT']
dx = par['dx']
leftp = spx - round(vel*selt/dx)
rightp = spx + round(vel*selt/dx)
    left = '%sleft%s' % (_pf, sf_)
    right = '%sright%s' % (_pf, sf_)
for fi in [left, right]:
p = (leftp, rightp)[fi==right]
Flow(fi,None,
'''
spike n1=%d d1=%g k1=%d|
ricker1 frequency=%g | math output="input"
'''%(par['nx'],par['dx'],p,par['wavfrq']))
Flow(fout,[left,right],
'''
math t=${SOURCES[1]} output="input+t" |
scale axis=2 | scale rscale=0.5 |
put label1="Distance" unit1="km"
''')
| gpl-2.0 |
stewartsmith/bzr | bzrlib/tests/http_utils.py | 2 | 20784 | # Copyright (C) 2005-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from cStringIO import StringIO
import re
import urllib2
from bzrlib import (
errors,
osutils,
tests,
transport,
)
from bzrlib.smart import (
medium,
)
from bzrlib.tests import http_server
from bzrlib.transport import chroot
class HTTPServerWithSmarts(http_server.HttpServer):
"""HTTPServerWithSmarts extends the HttpServer with POST methods that will
trigger a smart server to execute with a transport rooted at the rootdir of
the HTTP server.
"""
def __init__(self, protocol_version=None):
http_server.HttpServer.__init__(self, SmartRequestHandler,
protocol_version=protocol_version)
class SmartRequestHandler(http_server.TestingHTTPRequestHandler):
"""Extend TestingHTTPRequestHandler to support smart client POSTs.
XXX: This duplicates a fair bit of the logic in bzrlib.transport.http.wsgi.
"""
def do_POST(self):
"""Hand the request off to a smart server instance."""
backing = transport.get_transport_from_path(
self.server.test_case_server._home_dir)
chroot_server = chroot.ChrootServer(backing)
chroot_server.start_server()
try:
t = transport.get_transport_from_url(chroot_server.get_url())
self.do_POST_inner(t)
finally:
chroot_server.stop_server()
def do_POST_inner(self, chrooted_transport):
self.send_response(200)
self.send_header("Content-type", "application/octet-stream")
if not self.path.endswith('.bzr/smart'):
raise AssertionError(
'POST to path not ending in .bzr/smart: %r' % (self.path,))
t = chrooted_transport.clone(self.path[:-len('.bzr/smart')])
# if this fails, we should return 400 bad request, but failure is
# failure for now - RBC 20060919
data_length = int(self.headers['Content-Length'])
# TODO: We might like to support streaming responses. 1.0 allows no
# Content-length in this case, so for integrity we should perform our
# own chunking within the stream.
# 1.1 allows chunked responses, and in this case we could chunk using
# the HTTP chunking as this will allow HTTP persistence safely, even if
# we have to stop early due to error, but we would also have to use the
# HTTP trailer facility which may not be widely available.
request_bytes = self.rfile.read(data_length)
protocol_factory, unused_bytes = medium._get_protocol_factory_for_bytes(
request_bytes)
out_buffer = StringIO()
smart_protocol_request = protocol_factory(t, out_buffer.write, '/')
# Perhaps there should be a SmartServerHTTPMedium that takes care of
# feeding the bytes in the http request to the smart_protocol_request,
# but for now it's simpler to just feed the bytes directly.
smart_protocol_request.accept_bytes(unused_bytes)
if not (smart_protocol_request.next_read_size() == 0):
raise errors.SmartProtocolError(
"not finished reading, but all data sent to protocol.")
self.send_header("Content-Length", str(len(out_buffer.getvalue())))
self.end_headers()
self.wfile.write(out_buffer.getvalue())
class TestCaseWithWebserver(tests.TestCaseWithTransport):
"""A support class that provides readonly urls that are http://.
This is done by forcing the readonly server to be an http
one. This will currently fail if the primary transport is not
backed by regular disk files.
"""
    # These attributes can be overridden or parametrized by daughter classes if
# needed, but must exist so that the create_transport_readonly_server()
# method (or any method creating an http(s) server) can propagate it.
_protocol_version = None
_url_protocol = 'http'
def setUp(self):
super(TestCaseWithWebserver, self).setUp()
self.transport_readonly_server = http_server.HttpServer
def create_transport_readonly_server(self):
server = self.transport_readonly_server(
protocol_version=self._protocol_version)
server._url_protocol = self._url_protocol
return server
class TestCaseWithTwoWebservers(TestCaseWithWebserver):
"""A support class providing readonly urls on two servers that are http://.
    We set up two webservers to allow various tests involving
proxies or redirections from one server to the other.
"""
def setUp(self):
super(TestCaseWithTwoWebservers, self).setUp()
self.transport_secondary_server = http_server.HttpServer
self.__secondary_server = None
def create_transport_secondary_server(self):
"""Create a transport server from class defined at init.
This is mostly a hook for daughter classes.
"""
server = self.transport_secondary_server(
protocol_version=self._protocol_version)
server._url_protocol = self._url_protocol
return server
def get_secondary_server(self):
"""Get the server instance for the secondary transport."""
if self.__secondary_server is None:
self.__secondary_server = self.create_transport_secondary_server()
self.start_server(self.__secondary_server)
return self.__secondary_server
def get_secondary_url(self, relpath=None):
base = self.get_secondary_server().get_url()
return self._adjust_url(base, relpath)
def get_secondary_transport(self, relpath=None):
t = transport.get_transport_from_url(self.get_secondary_url(relpath))
self.assertTrue(t.is_readonly())
return t
class ProxyServer(http_server.HttpServer):
"""A proxy test server for http transports."""
proxy_requests = True
class RedirectRequestHandler(http_server.TestingHTTPRequestHandler):
"""Redirect all request to the specified server"""
def parse_request(self):
"""Redirect a single HTTP request to another host"""
valid = http_server.TestingHTTPRequestHandler.parse_request(self)
if valid:
tcs = self.server.test_case_server
code, target = tcs.is_redirected(self.path)
if code is not None and target is not None:
# Redirect as instructed
self.send_response(code)
self.send_header('Location', target)
# We do not send a body
self.send_header('Content-Length', '0')
self.end_headers()
return False # The job is done
else:
# We leave the parent class serve the request
pass
return valid
class HTTPServerRedirecting(http_server.HttpServer):
"""An HttpServer redirecting to another server """
def __init__(self, request_handler=RedirectRequestHandler,
protocol_version=None):
http_server.HttpServer.__init__(self, request_handler,
protocol_version=protocol_version)
# redirections is a list of tuples (source, target, code)
# - source is a regexp for the paths requested
# - target is a replacement for re.sub describing where
# the request will be redirected
# - code is the http error code associated to the
        # redirection (301 permanent, 302 temporary, etc.)
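        # Example (illustrative):
        #   self.redirections = [('/old/(.*)', r'/new/\1', 301)]
        # permanently redirects any path under /old/ to /new/.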
self.redirections = []
def redirect_to(self, host, port):
"""Redirect all requests to a specific host:port"""
self.redirections = [('(.*)',
                              r'http://%s:%s\1' % (host, port),
301)]
def is_redirected(self, path):
"""Is the path redirected by this server.
:param path: the requested relative path
:returns: a tuple (code, target) if a matching
redirection is found, (None, None) otherwise.
"""
code = None
target = None
for (rsource, rtarget, rcode) in self.redirections:
target, match = re.subn(rsource, rtarget, path)
if match:
code = rcode
break # The first match wins
else:
target = None
return code, target
class TestCaseWithRedirectedWebserver(TestCaseWithTwoWebservers):
"""A support class providing redirections from one server to another.
    We set up two webservers to allow various tests involving
redirections.
The 'old' server is redirected to the 'new' server.
"""
def setUp(self):
super(TestCaseWithRedirectedWebserver, self).setUp()
# The redirections will point to the new server
self.new_server = self.get_readonly_server()
# The requests to the old server will be redirected to the new server
self.old_server = self.get_secondary_server()
def create_transport_secondary_server(self):
"""Create the secondary server redirecting to the primary server"""
new = self.get_readonly_server()
redirecting = HTTPServerRedirecting(
protocol_version=self._protocol_version)
redirecting.redirect_to(new.host, new.port)
redirecting._url_protocol = self._url_protocol
return redirecting
def get_old_url(self, relpath=None):
base = self.old_server.get_url()
return self._adjust_url(base, relpath)
def get_old_transport(self, relpath=None):
t = transport.get_transport_from_url(self.get_old_url(relpath))
self.assertTrue(t.is_readonly())
return t
def get_new_url(self, relpath=None):
base = self.new_server.get_url()
return self._adjust_url(base, relpath)
def get_new_transport(self, relpath=None):
t = transport.get_transport_from_url(self.get_new_url(relpath))
self.assertTrue(t.is_readonly())
return t
class AuthRequestHandler(http_server.TestingHTTPRequestHandler):
"""Requires an authentication to process requests.
This is intended to be used with a server that always and
    only uses one authentication scheme (implemented by daughter
classes).
"""
# The following attributes should be defined in the server
# - auth_header_sent: the header name sent to require auth
# - auth_header_recv: the header received containing auth
# - auth_error_code: the error code to indicate auth required
def _require_authentication(self):
# Note that we must update test_case_server *before*
# sending the error or the client may try to read it
# before we have sent the whole error back.
tcs = self.server.test_case_server
tcs.auth_required_errors += 1
self.send_response(tcs.auth_error_code)
self.send_header_auth_reqed()
# We do not send a body
self.send_header('Content-Length', '0')
self.end_headers()
return
def do_GET(self):
if self.authorized():
return http_server.TestingHTTPRequestHandler.do_GET(self)
else:
return self._require_authentication()
def do_HEAD(self):
if self.authorized():
return http_server.TestingHTTPRequestHandler.do_HEAD(self)
else:
return self._require_authentication()
class BasicAuthRequestHandler(AuthRequestHandler):
"""Implements the basic authentication of a request"""
def authorized(self):
tcs = self.server.test_case_server
if tcs.auth_scheme != 'basic':
return False
auth_header = self.headers.get(tcs.auth_header_recv, None)
if auth_header:
scheme, raw_auth = auth_header.split(' ', 1)
if scheme.lower() == tcs.auth_scheme:
user, password = raw_auth.decode('base64').split(':')
return tcs.authorized(user, password)
return False
def send_header_auth_reqed(self):
tcs = self.server.test_case_server
self.send_header(tcs.auth_header_sent,
'Basic realm="%s"' % tcs.auth_realm)
# FIXME: We could send an Authentication-Info header too when
    # the authentication is successful
class DigestAuthRequestHandler(AuthRequestHandler):
"""Implements the digest authentication of a request.
We need persistence for some attributes and that can't be
achieved here since we get instantiated for each request. We
rely on the DigestAuthServer to take care of them.
"""
def authorized(self):
tcs = self.server.test_case_server
auth_header = self.headers.get(tcs.auth_header_recv, None)
if auth_header is None:
return False
scheme, auth = auth_header.split(None, 1)
if scheme.lower() == tcs.auth_scheme:
auth_dict = urllib2.parse_keqv_list(urllib2.parse_http_list(auth))
return tcs.digest_authorized(auth_dict, self.command)
return False
def send_header_auth_reqed(self):
tcs = self.server.test_case_server
header = 'Digest realm="%s", ' % tcs.auth_realm
header += 'nonce="%s", algorithm="%s", qop="auth"' % (tcs.auth_nonce,
'MD5')
        self.send_header(tcs.auth_header_sent, header)
class DigestAndBasicAuthRequestHandler(DigestAuthRequestHandler):
"""Implements a digest and basic authentication of a request.
I.e. the server proposes both schemes and the client should choose the best
one it can handle, which, in that case, should be digest, the only scheme
accepted here.
"""
def send_header_auth_reqed(self):
tcs = self.server.test_case_server
self.send_header(tcs.auth_header_sent,
'Basic realm="%s"' % tcs.auth_realm)
header = 'Digest realm="%s", ' % tcs.auth_realm
header += 'nonce="%s", algorithm="%s", qop="auth"' % (tcs.auth_nonce,
'MD5')
        self.send_header(tcs.auth_header_sent, header)
class AuthServer(http_server.HttpServer):
"""Extends HttpServer with a dictionary of passwords.
This is used as a base class for various schemes which should
    all use or redefine the associated AuthRequestHandler.
Note that no users are defined by default, so add_user should
be called before issuing the first request.
"""
    # The following attributes should be set by daughter classes
# and are used by AuthRequestHandler.
auth_header_sent = None
auth_header_recv = None
auth_error_code = None
auth_realm = "Thou should not pass"
def __init__(self, request_handler, auth_scheme,
protocol_version=None):
http_server.HttpServer.__init__(self, request_handler,
protocol_version=protocol_version)
self.auth_scheme = auth_scheme
self.password_of = {}
self.auth_required_errors = 0
def add_user(self, user, password):
"""Declare a user with an associated password.
        The password can be empty; use an empty string ('') in
        that case, not None.
"""
self.password_of[user] = password
def authorized(self, user, password):
"""Check that the given user provided the right password"""
expected_password = self.password_of.get(user, None)
return expected_password is not None and password == expected_password
# FIXME: There is some code duplication with
# _urllib2_wrappers.py.DigestAuthHandler. If that duplication
# grows, it may require a refactoring. Also, we don't implement
# SHA algorithm nor MD5-sess here, but that does not seem worth
# it.
class DigestAuthServer(AuthServer):
"""A digest authentication server"""
auth_nonce = 'now!'
def __init__(self, request_handler, auth_scheme,
protocol_version=None):
AuthServer.__init__(self, request_handler, auth_scheme,
protocol_version=protocol_version)
def digest_authorized(self, auth, command):
nonce = auth['nonce']
if nonce != self.auth_nonce:
return False
realm = auth['realm']
if realm != self.auth_realm:
return False
user = auth['username']
        if user not in self.password_of:
return False
        algorithm = auth['algorithm']
if algorithm != 'MD5':
return False
qop = auth['qop']
if qop != 'auth':
return False
password = self.password_of[user]
# Recalculate the response_digest to compare with the one
# sent by the client
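        # Per RFC 2617 (qop="auth"):
        #   response = KD(H(A1), nonce ":" nc ":" cnonce ":" qop ":" H(A2))
        # where H is MD5 and KD(secret, data) = H(secret ":" data).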
A1 = '%s:%s:%s' % (user, realm, password)
A2 = '%s:%s' % (command, auth['uri'])
H = lambda x: osutils.md5(x).hexdigest()
KD = lambda secret, data: H("%s:%s" % (secret, data))
nonce_count = int(auth['nc'], 16)
ncvalue = '%08x' % nonce_count
cnonce = auth['cnonce']
noncebit = '%s:%s:%s:%s:%s' % (nonce, ncvalue, cnonce, qop, H(A2))
response_digest = KD(H(A1), noncebit)
return response_digest == auth['response']
class HTTPAuthServer(AuthServer):
"""An HTTP server requiring authentication"""
def init_http_auth(self):
self.auth_header_sent = 'WWW-Authenticate'
self.auth_header_recv = 'Authorization'
self.auth_error_code = 401
class ProxyAuthServer(AuthServer):
"""A proxy server requiring authentication"""
def init_proxy_auth(self):
self.proxy_requests = True
self.auth_header_sent = 'Proxy-Authenticate'
self.auth_header_recv = 'Proxy-Authorization'
self.auth_error_code = 407
class HTTPBasicAuthServer(HTTPAuthServer):
"""An HTTP server requiring basic authentication"""
def __init__(self, protocol_version=None):
HTTPAuthServer.__init__(self, BasicAuthRequestHandler, 'basic',
protocol_version=protocol_version)
self.init_http_auth()
class HTTPDigestAuthServer(DigestAuthServer, HTTPAuthServer):
"""An HTTP server requiring digest authentication"""
def __init__(self, protocol_version=None):
DigestAuthServer.__init__(self, DigestAuthRequestHandler, 'digest',
protocol_version=protocol_version)
self.init_http_auth()
class HTTPBasicAndDigestAuthServer(DigestAuthServer, HTTPAuthServer):
"""An HTTP server requiring basic or digest authentication"""
def __init__(self, protocol_version=None):
DigestAuthServer.__init__(self, DigestAndBasicAuthRequestHandler,
'basicdigest',
protocol_version=protocol_version)
self.init_http_auth()
# We really accept Digest only
self.auth_scheme = 'digest'
class ProxyBasicAuthServer(ProxyAuthServer):
"""A proxy server requiring basic authentication"""
def __init__(self, protocol_version=None):
ProxyAuthServer.__init__(self, BasicAuthRequestHandler, 'basic',
protocol_version=protocol_version)
self.init_proxy_auth()
class ProxyDigestAuthServer(DigestAuthServer, ProxyAuthServer):
"""A proxy server requiring basic authentication"""
def __init__(self, protocol_version=None):
ProxyAuthServer.__init__(self, DigestAuthRequestHandler, 'digest',
protocol_version=protocol_version)
self.init_proxy_auth()
class ProxyBasicAndDigestAuthServer(DigestAuthServer, ProxyAuthServer):
"""An proxy server requiring basic or digest authentication"""
def __init__(self, protocol_version=None):
DigestAuthServer.__init__(self, DigestAndBasicAuthRequestHandler,
'basicdigest',
protocol_version=protocol_version)
self.init_proxy_auth()
# We really accept Digest only
self.auth_scheme = 'digest'
| gpl-2.0 |
radicalbit/ambari | ambari-common/src/main/python/ambari_commons/get_ambari_version.py | 3 | 1589 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import ConfigParser
from resource_management.core.logger import Logger
"""
returns the ambari version on an agent host
"""
def get_ambari_version_agent():
ambari_version = None
AMBARI_AGENT_CONF = '/etc/ambari-agent/conf/ambari-agent.ini'
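  # The agent ini is expected to contain a section like (path illustrative):
  #   [agent]
  #   prefix = /var/lib/ambari-agent/data
  # The version is then read from <prefix>/version.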
if os.path.exists(AMBARI_AGENT_CONF):
try:
ambari_agent_config = ConfigParser.RawConfigParser()
ambari_agent_config.read(AMBARI_AGENT_CONF)
data_dir = ambari_agent_config.get('agent', 'prefix')
ver_file = os.path.join(data_dir, 'version')
with open(ver_file, "r") as f:
ambari_version = f.read().strip()
except Exception, e:
Logger.info('Unable to determine ambari version from the agent version file.')
Logger.debug('Exception: %s' % str(e))
pass
pass
return ambari_version
| apache-2.0 |
abomyi/django | tests/shortcuts/views.py | 87 | 2274 | from django.shortcuts import render, render_to_response
from django.template import RequestContext
def render_to_response_view(request):
return render_to_response('shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
})
def render_to_response_view_with_multiple_templates(request):
return render_to_response([
'shortcuts/no_such_template.html',
'shortcuts/render_test.html',
], {
'foo': 'FOO',
'bar': 'BAR',
})
def render_to_response_view_with_content_type(request):
return render_to_response('shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, content_type='application/x-rendertest')
def render_to_response_view_with_status(request):
return render_to_response('shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, status=403)
def render_to_response_view_with_using(request):
using = request.GET.get('using')
return render_to_response('shortcuts/using.html', using=using)
def context_processor(request):
return {'bar': 'context processor output'}
def render_to_response_with_context_instance_misuse(request):
context_instance = RequestContext(request, {}, processors=[context_processor])
# Incorrect -- context_instance should be passed as a keyword argument.
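    # The correct call would instead be, e.g.:
    #   return render_to_response('shortcuts/render_test.html',
    #                             context_instance=context_instance)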
return render_to_response('shortcuts/render_test.html', context_instance)
def render_view(request):
return render(request, 'shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
})
def render_view_with_multiple_templates(request):
return render(request, [
'shortcuts/no_such_template.html',
'shortcuts/render_test.html',
], {
'foo': 'FOO',
'bar': 'BAR',
})
def render_view_with_content_type(request):
return render(request, 'shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, content_type='application/x-rendertest')
def render_view_with_status(request):
return render(request, 'shortcuts/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, status=403)
def render_view_with_using(request):
using = request.GET.get('using')
return render(request, 'shortcuts/using.html', using=using)
| bsd-3-clause |
wisechengyi/pants | tests/python/pants_test/repo_scripts/test_git_hooks.py | 2 | 8029 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import datetime
import os
import shutil
import subprocess
import unittest
from contextlib import contextmanager
from pathlib import Path
from textwrap import dedent
from typing import Optional, Sequence
from pants.testutil.git_util import initialize_repo
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_file_dump, safe_mkdir_for
class PreCommitHookTest(unittest.TestCase):
@contextmanager
def _create_tiny_git_repo(self, *, copy_files: Optional[Sequence[Path]] = None):
with temporary_dir() as gitdir, temporary_dir() as worktree:
# A tiny little fake git repo we will set up. initialize_repo() requires at least one file.
Path(worktree, "README").touch()
# The contextmanager interface is only necessary if an explicit gitdir is not provided.
with initialize_repo(worktree, gitdir=gitdir) as git:
if copy_files is not None:
for fp in copy_files:
new_fp = Path(worktree, fp)
safe_mkdir_for(str(new_fp))
shutil.copy(fp, new_fp)
yield git, worktree, gitdir
def _assert_subprocess_error(self, worktree, cmd, expected_excerpt):
result = subprocess.run(
cmd, cwd=worktree, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8",
)
self.assertNotEqual(0, result.returncode)
self.assertIn(expected_excerpt, f"{result.stdout}\n{result.stderr}")
def _assert_subprocess_success(self, worktree, cmd, **kwargs):
self.assertEqual(0, subprocess.check_call(cmd, cwd=worktree, **kwargs))
def _assert_subprocess_success_with_output(self, worktree, cmd, full_expected_output):
stdout = subprocess.run(
cmd, cwd=worktree, check=True, stdout=subprocess.PIPE, encoding="utf-8"
).stdout
self.assertEqual(full_expected_output, stdout)
def test_check_packages(self):
package_check_script = "build-support/bin/check_packages.sh"
with self._create_tiny_git_repo(copy_files=[Path(package_check_script)]) as (
_,
worktree,
_,
):
init_py_path = os.path.join(worktree, "subdir/__init__.py")
# Check that an invalid __init__.py errors.
safe_file_dump(init_py_path, "asdf")
self._assert_subprocess_error(
worktree,
[package_check_script, "subdir"],
"""\
ERROR: All '__init__.py' files should be empty or else only contain a namespace
declaration, but the following contain code:
---
subdir/__init__.py
""",
)
# Check that a valid empty __init__.py succeeds.
safe_file_dump(init_py_path, "")
self._assert_subprocess_success(worktree, [package_check_script, "subdir"])
# Check that a valid __init__.py with `pkg_resources` setup succeeds.
safe_file_dump(init_py_path, '__import__("pkg_resources").declare_namespace(__name__)')
self._assert_subprocess_success(worktree, [package_check_script, "subdir"])
# TODO: consider testing the degree to which copies (-C) and moves (-M) are detected by making
# some small edits to a file, then moving it, and seeing if it is detected as a new file! That's
# more testing git functionality, but since it's not clear how this is measured, it could be
# useful if correctly detecting copies and moves ever becomes a concern.
def test_added_files_correctly_detected(self):
get_added_files_script = "build-support/bin/get_added_files.sh"
with self._create_tiny_git_repo(copy_files=[Path(get_added_files_script)]) as (
git,
worktree,
_,
):
# Create a new file.
new_file = os.path.join(worktree, "wow.txt")
safe_file_dump(new_file, "")
# Stage the file.
rel_new_file = os.path.relpath(new_file, worktree)
git.add(rel_new_file)
self._assert_subprocess_success_with_output(
worktree,
[get_added_files_script],
# This should be the only entry in the index, and it is a newly added file.
full_expected_output=f"{rel_new_file}\n",
)
def test_check_headers(self):
header_check_script = "build-support/bin/check_header.py"
cur_year_num = datetime.datetime.now().year
cur_year = str(cur_year_num)
with self._create_tiny_git_repo(
copy_files=[Path(header_check_script), "build-support/bin/common.py"]
) as (_, worktree, _):
new_py_path = os.path.join(worktree, "subdir/file.py")
def assert_header_check(added_files, expected_excerpt):
self._assert_subprocess_error(
worktree=worktree,
cmd=[header_check_script, "subdir", "--files-added"] + added_files,
expected_excerpt=expected_excerpt,
)
# Check that a file with an empty header fails.
safe_file_dump(new_py_path, "")
assert_header_check(
added_files=[], expected_excerpt="subdir/file.py: missing the expected header"
)
# Check that a file with a random header fails.
safe_file_dump(new_py_path, "asdf")
assert_header_check(
added_files=[], expected_excerpt="subdir/file.py: missing the expected header"
)
# Check that a file with a typo in the header fails
safe_file_dump(
new_py_path,
dedent(
f"""\
# Copyright {cur_year} Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the MIT License, Version 3.3 (see LICENSE).
"""
),
)
assert_header_check(
added_files=[],
expected_excerpt="subdir/file.py: header does not match the expected header",
)
# Check that a file without a valid copyright year fails.
safe_file_dump(
new_py_path,
dedent(
"""\
# Copyright YYYY Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""
),
)
assert_header_check(
added_files=[],
expected_excerpt=(
r"subdir/file.py: copyright year must match '20\d\d' (was YYYY): "
f"current year is {cur_year}"
),
)
# Check that a newly added file must have the current year.
last_year = str(cur_year_num - 1)
safe_file_dump(
new_py_path,
dedent(
f"""\
# Copyright {last_year} Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""
),
)
rel_new_py_path = os.path.relpath(new_py_path, worktree)
assert_header_check(
added_files=[rel_new_py_path],
expected_excerpt=f"subdir/file.py: copyright year must be {cur_year} (was {last_year})",
)
# Check that a file isn't checked against the current year if it is not passed as an
# arg to the script.
# Use the same file as last time, with last year's copyright date.
self._assert_subprocess_success(worktree, [header_check_script, "subdir"])
| apache-2.0 |
apbard/scipy | scipy/sparse/linalg/dsolve/tests/test_linsolve.py | 1 | 24049 | from __future__ import division, print_function, absolute_import
import threading
import numpy as np
from numpy import array, finfo, arange, eye, all, unique, ones, dot, matrix
import numpy.random as random
from numpy.testing import (
assert_array_almost_equal, assert_raises, assert_almost_equal,
assert_equal, assert_array_equal, assert_, assert_allclose,
assert_warns)
import pytest
from scipy._lib._numpy_compat import assert_raises_regex
import scipy.linalg
from scipy.linalg import norm, inv
from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix,
csr_matrix, identity, isspmatrix, dok_matrix, lil_matrix, bsr_matrix)
from scipy.sparse.linalg import SuperLU
from scipy.sparse.linalg.dsolve import (spsolve, use_solver, splu, spilu,
MatrixRankWarning, _superlu, spsolve_triangular, factorized)
from scipy._lib._numpy_compat import suppress_warnings
sup_sparse_efficiency = suppress_warnings()
sup_sparse_efficiency.filter(SparseEfficiencyWarning)
# scikits.umfpack is not a SciPy dependency but it is optionally used in
# dsolve, so check whether it's available
try:
import scikits.umfpack as umfpack
has_umfpack = True
except ImportError:
has_umfpack = False
def toarray(a):
if isspmatrix(a):
return a.toarray()
else:
return a
class TestFactorized(object):
def setup_method(self):
n = 5
d = arange(n) + 1
self.n = n
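        # spdiags places d, 2*d and d[::-1] on the -3rd, 0th and 5th
        # diagonals of an n x n matrix, giving a banded, nonsymmetric
        # test matrix.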
self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc()
random.seed(1234)
def _check_singular(self):
A = csc_matrix((5,5), dtype='d')
b = ones(5)
assert_array_almost_equal(0. * b, factorized(A)(b))
def _check_non_singular(self):
        # Make the matrix diagonally dominant, to make sure it is not singular
n = 5
a = csc_matrix(random.rand(n, n))
b = ones(n)
expected = splu(a).solve(b)
assert_array_almost_equal(factorized(a)(b), expected)
def test_singular_without_umfpack(self):
use_solver(useUmfpack=False)
assert_raises_regex(RuntimeError, "Factor is exactly singular", self._check_singular)
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_singular_with_umfpack(self):
use_solver(useUmfpack=True)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars")
assert_warns(umfpack.UmfpackWarning, self._check_singular)
def test_non_singular_without_umfpack(self):
use_solver(useUmfpack=False)
self._check_non_singular()
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_non_singular_with_umfpack(self):
use_solver(useUmfpack=True)
self._check_non_singular()
def test_cannot_factorize_nonsquare_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
assert_raises_regex(ValueError, "can only factor square matrices",
factorized, self.A[:,:4])
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_factorizes_nonsquare_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
# does not raise
factorized(self.A[:,:4])
def test_call_with_incorrectly_sized_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
b = random.rand(4)
B = random.rand(4, 3)
BB = random.rand(self.n, 3, 9)
assert_raises_regex(ValueError, "is of incompatible size", solve, b)
assert_raises_regex(ValueError, "is of incompatible size", solve, B)
assert_raises_regex(ValueError, "object too deep for desired array", solve, BB)
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_incorrectly_sized_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
b = random.rand(4)
B = random.rand(4, 3)
BB = random.rand(self.n, 3, 9)
# does not raise
solve(b)
assert_raises_regex(ValueError, "object too deep for desired array", solve, B)
assert_raises_regex(ValueError, "object too deep for desired array", solve, BB)
def test_call_with_cast_to_complex_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
b = random.rand(4)
for t in [np.complex64, np.complex128]:
assert_raises_regex(TypeError, "Cannot cast array data", solve,
b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_cast_to_complex_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
b = random.rand(4)
for t in [np.complex64, np.complex128]:
assert_warns(np.ComplexWarning, solve, b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_assume_sorted_indices_flag(self):
# a sparse matrix with unsorted indices
unsorted_inds = np.array([2, 0, 1, 0])
data = np.array([10, 16, 5, 0.4])
indptr = np.array([0, 1, 2, 4])
A = csc_matrix((data, unsorted_inds, indptr), (3, 3))
b = ones(3)
# should raise when incorrectly assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=True)
assert_raises_regex(RuntimeError, "UMFPACK_ERROR_invalid_matrix", factorized, A)
# should sort indices and succeed when not assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=False)
expected = splu(A.copy()).solve(b)
assert_equal(A.has_sorted_indices, 0)
assert_array_almost_equal(factorized(A)(b), expected)
assert_equal(A.has_sorted_indices, 1)
class TestLinsolve(object):
def setup_method(self):
use_solver(useUmfpack=False)
def test_singular(self):
A = csc_matrix((5,5), dtype='d')
b = array([1, 2, 3, 4, 5],dtype='d')
with suppress_warnings() as sup:
sup.filter(MatrixRankWarning, "Matrix is exactly singular")
x = spsolve(A, b)
assert_(not np.isfinite(x).any())
def test_singular_gh_3312(self):
# "Bad" test case that leads SuperLU to call LAPACK with invalid
# arguments. Check that it fails moderately gracefully.
ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32)
v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296])
A = csc_matrix((v, ij.T), shape=(20, 20))
b = np.arange(20)
try:
            # should either raise a RuntimeError or return a value
# appropriate for singular input
x = spsolve(A, b)
assert_(not np.isfinite(x).any())
except RuntimeError:
pass
def test_twodiags(self):
A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
b = array([1, 2, 3, 4, 5])
# condition number of A
cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2)
for t in ['f','d','F','D']:
eps = finfo(t).eps # floating point epsilon
b = b.astype(t)
for format in ['csc','csr']:
Asp = A.astype(t).asformat(format)
x = spsolve(Asp,b)
assert_(norm(b - Asp*x) < 10 * cond_A * eps)
def test_bvector_smoketest(self):
Adense = matrix([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_matrix(Adense)
random.seed(1234)
x = random.randn(3)
b = As*x
x2 = spsolve(As, b)
assert_array_almost_equal(x, x2)
def test_bmatrix_smoketest(self):
Adense = matrix([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_matrix(Adense)
random.seed(1234)
x = random.randn(3, 4)
Bdense = As.dot(x)
Bs = csc_matrix(Bdense)
x2 = spsolve(As, Bs)
assert_array_almost_equal(x, x2.todense())
@sup_sparse_efficiency
def test_non_square(self):
# A is not square.
A = ones((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve, A, b)
# A2 and b2 have incompatible shapes.
A2 = csc_matrix(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve, A2, b2)
@sup_sparse_efficiency
def test_example_comparison(self):
row = array([0,0,1,2,2,2])
col = array([0,2,2,0,1,2])
data = array([1,2,3,-4,5,6])
sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float)
M = sM.todense()
row = array([0,0,1,1,0,0])
col = array([0,2,1,1,0,0])
data = array([1,1,1,1,1,1])
sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float)
N = sN.todense()
sX = spsolve(sM, sN)
X = scipy.linalg.solve(M, N)
assert_array_almost_equal(X, sX.todense())
@sup_sparse_efficiency
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_shape_compatibility(self):
use_solver(useUmfpack=True)
A = csc_matrix([[1., 0], [0, 2]])
bs = [
[1, 6],
array([1, 6]),
[[1], [6]],
array([[1], [6]]),
csc_matrix([[1], [6]]),
csr_matrix([[1], [6]]),
dok_matrix([[1], [6]]),
bsr_matrix([[1], [6]]),
array([[1., 2., 3.], [6., 8., 10.]]),
csc_matrix([[1., 2., 3.], [6., 8., 10.]]),
csr_matrix([[1., 2., 3.], [6., 8., 10.]]),
dok_matrix([[1., 2., 3.], [6., 8., 10.]]),
bsr_matrix([[1., 2., 3.], [6., 8., 10.]]),
]
for b in bs:
x = np.linalg.solve(A.toarray(), toarray(b))
for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]:
x1 = spsolve(spmattype(A), b, use_umfpack=True)
x2 = spsolve(spmattype(A), b, use_umfpack=False)
# check solution
if x.ndim == 2 and x.shape[1] == 1:
# interprets also these as "vectors"
x = x.ravel()
assert_array_almost_equal(toarray(x1), x, err_msg=repr((b, spmattype, 1)))
assert_array_almost_equal(toarray(x2), x, err_msg=repr((b, spmattype, 2)))
# dense vs. sparse output ("vectors" are always dense)
if isspmatrix(b) and x.ndim > 1:
assert_(isspmatrix(x1), repr((b, spmattype, 1)))
assert_(isspmatrix(x2), repr((b, spmattype, 2)))
else:
assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))
assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))
# check output shape
if x.ndim == 1:
# "vector"
assert_equal(x1.shape, (A.shape[1],))
assert_equal(x2.shape, (A.shape[1],))
else:
# "matrix"
assert_equal(x1.shape, x.shape)
assert_equal(x2.shape, x.shape)
A = csc_matrix((3, 3))
b = csc_matrix((1, 3))
assert_raises(ValueError, spsolve, A, b)
@sup_sparse_efficiency
def test_ndarray_support(self):
A = array([[1., 2.], [2., 0.]])
x = array([[1., 1.], [0.5, -0.5]])
b = array([[2., 0.], [2., 2.]])
assert_array_almost_equal(x, spsolve(A, b))
def test_gssv_badinput(self):
N = 10
d = arange(N) + 1.0
A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N)
for spmatrix in (csc_matrix, csr_matrix):
A = spmatrix(A)
b = np.arange(N)
def not_c_contig(x):
return x.repeat(2)[::2]
def not_1dim(x):
return x[:,None]
def bad_type(x):
return x.astype(bool)
def too_short(x):
return x[:-1]
badops = [not_c_contig, not_1dim, bad_type, too_short]
for badop in badops:
msg = "%r %r" % (spmatrix, badop)
# Not C-contiguous
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, badop(A.data), A.indices, A.indptr,
b, int(spmatrix == csc_matrix), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, badop(A.indices), A.indptr,
b, int(spmatrix == csc_matrix), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, A.indices, badop(A.indptr),
b, int(spmatrix == csc_matrix), err_msg=msg)
def test_sparsity_preservation(self):
ident = csc_matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
b = csc_matrix([
[0, 1],
[1, 0],
[0, 0]])
x = spsolve(ident, b)
assert_equal(ident.nnz, 3)
assert_equal(b.nnz, 2)
assert_equal(x.nnz, 2)
assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12)
def test_dtype_cast(self):
A_real = scipy.sparse.csr_matrix([[1, 2, 0],
[0, 0, 3],
[4, 0, 5]])
A_complex = scipy.sparse.csr_matrix([[1, 2, 0],
[0, 0, 3],
[4, 0, 5 + 1j]])
b_real = np.array([1,1,1])
b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1])
x = spsolve(A_real, b_real)
assert_(np.issubdtype(x.dtype, np.floating))
x = spsolve(A_real, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_real)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
class TestSplu(object):
def setup_method(self):
use_solver(useUmfpack=False)
n = 40
d = arange(n) + 1
self.n = n
self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
random.seed(1234)
def _smoketest(self, spxlu, check, dtype):
if np.issubdtype(dtype, np.complexfloating):
A = self.A + 1j*self.A.T
else:
A = self.A
A = A.astype(dtype)
lu = spxlu(A)
rng = random.RandomState(1234)
# Input shapes
for k in [None, 1, 2, self.n, self.n+2]:
msg = "k=%r" % (k,)
if k is None:
b = rng.rand(self.n)
else:
b = rng.rand(self.n, k)
if np.issubdtype(dtype, np.complexfloating):
b = b + 1j*rng.rand(*b.shape)
b = b.astype(dtype)
x = lu.solve(b)
check(A, b, x, msg)
x = lu.solve(b, 'T')
check(A.T, b, x, msg)
x = lu.solve(b, 'H')
check(A.T.conj(), b, x, msg)
@sup_sparse_efficiency
def test_splu_smoketest(self):
self._internal_test_splu_smoketest()
def _internal_test_splu_smoketest(self):
# Check that splu works at all
def check(A, b, x, msg=""):
eps = np.finfo(A.dtype).eps
r = A * x
assert_(abs(r - b).max() < 1e3*eps, msg)
self._smoketest(splu, check, np.float32)
self._smoketest(splu, check, np.float64)
self._smoketest(splu, check, np.complex64)
self._smoketest(splu, check, np.complex128)
@sup_sparse_efficiency
def test_spilu_smoketest(self):
self._internal_test_spilu_smoketest()
def _internal_test_spilu_smoketest(self):
errors = []
def check(A, b, x, msg=""):
r = A * x
err = abs(r - b).max()
assert_(err < 1e-2, msg)
if b.dtype in (np.float64, np.complex128):
errors.append(err)
self._smoketest(spilu, check, np.float32)
self._smoketest(spilu, check, np.float64)
self._smoketest(spilu, check, np.complex64)
self._smoketest(spilu, check, np.complex128)
assert_(max(errors) > 1e-5)
@sup_sparse_efficiency
def test_spilu_drop_rule(self):
# Test passing in the drop_rule argument to spilu.
A = identity(2)
rules = [
b'basic,area'.decode('ascii'), # unicode
b'basic,area', # ascii
[b'basic', b'area'.decode('ascii')]
]
for rule in rules:
# Argument should be accepted
assert_(isinstance(spilu(A, drop_rule=rule), SuperLU))
def test_splu_nnz0(self):
A = csc_matrix((5,5), dtype='d')
assert_raises(RuntimeError, splu, A)
def test_spilu_nnz0(self):
A = csc_matrix((5,5), dtype='d')
assert_raises(RuntimeError, spilu, A)
def test_splu_basic(self):
# Test basic splu functionality.
n = 30
rng = random.RandomState(12)
a = rng.rand(n, n)
a[a < 0.95] = 0
# First test with a singular matrix
a[:, 0] = 0
a_ = csc_matrix(a)
# Matrix is exactly singular
assert_raises(RuntimeError, splu, a_)
        # Make the matrix diagonally dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
b = ones(n)
x = lu.solve(b)
assert_almost_equal(dot(a, x), b)
def test_splu_perm(self):
# Test the permutation vectors exposed by splu.
n = 30
a = random.random((n, n))
a[a < 0.95] = 0
        # Make the matrix diagonally dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
# Check that the permutation indices do belong to [0, n-1].
for perm in (lu.perm_r, lu.perm_c):
assert_(all(perm > -1))
assert_(all(perm < n))
assert_equal(len(unique(perm)), len(perm))
# Now make a symmetric, and test that the two permutation vectors are
# the same
# Note: a += a.T relies on undefined behavior.
a = a + a.T
a_ = csc_matrix(a)
lu = splu(a_)
assert_array_equal(lu.perm_r, lu.perm_c)
def test_lu_refcount(self):
# Test that we are keeping track of the reference count with splu.
n = 30
a = random.random((n, n))
a[a < 0.95] = 0
        # Make the matrix diagonally dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
# And now test that we don't have a refcount bug
import sys
rc = sys.getrefcount(lu)
for attr in ('perm_r', 'perm_c'):
perm = getattr(lu, attr)
assert_equal(sys.getrefcount(lu), rc + 1)
del perm
assert_equal(sys.getrefcount(lu), rc)
def test_bad_inputs(self):
A = self.A.tocsc()
assert_raises(ValueError, splu, A[:,:4])
assert_raises(ValueError, spilu, A[:,:4])
for lu in [splu(A), spilu(A)]:
b = random.rand(42)
B = random.rand(42, 3)
BB = random.rand(self.n, 3, 9)
assert_raises(ValueError, lu.solve, b)
assert_raises(ValueError, lu.solve, B)
assert_raises(ValueError, lu.solve, BB)
assert_raises(TypeError, lu.solve,
b.astype(np.complex64))
assert_raises(TypeError, lu.solve,
b.astype(np.complex128))
@sup_sparse_efficiency
def test_superlu_dlamch_i386_nan(self):
# SuperLU 4.3 calls some functions returning floats without
# declaring them. On i386@linux call convention, this fails to
# clear floating point registers after call. As a result, NaN
# can appear in the next floating point operation made.
#
# Here's a test case that triggered the issue.
n = 8
d = np.arange(n) + 1
A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
A = A.astype(np.float32)
spilu(A)
A = A + 1j*A
B = A.A
assert_(not np.isnan(B).any())
@sup_sparse_efficiency
def test_lu_attr(self):
def check(dtype, complex_2=False):
A = self.A.astype(dtype)
if complex_2:
A = A + 1j*A.T
n = A.shape[0]
lu = splu(A)
# Check that the decomposition is as advertized
Pc = np.zeros((n, n))
Pc[np.arange(n), lu.perm_c] = 1
Pr = np.zeros((n, n))
Pr[lu.perm_r, np.arange(n)] = 1
Ad = A.toarray()
lhs = Pr.dot(Ad).dot(Pc)
rhs = (lu.L * lu.U).toarray()
eps = np.finfo(dtype).eps
assert_allclose(lhs, rhs, atol=100*eps)
check(np.float32)
check(np.float64)
check(np.complex64)
check(np.complex128)
check(np.complex64, True)
check(np.complex128, True)
@sup_sparse_efficiency
def test_threads_parallel(self):
oks = []
def worker():
try:
self.test_splu_basic()
self._internal_test_splu_smoketest()
self._internal_test_spilu_smoketest()
oks.append(True)
except:
pass
threads = [threading.Thread(target=worker)
for k in range(20)]
for t in threads:
t.start()
for t in threads:
t.join()
assert_equal(len(oks), 20)
class TestSpsolveTriangular(object):
def setup_method(self):
use_solver(useUmfpack=False)
def test_singular(self):
n = 5
A = csr_matrix((n, n))
b = np.arange(n)
for lower in (True, False):
assert_raises(scipy.linalg.LinAlgError, spsolve_triangular, A, b, lower=lower)
@sup_sparse_efficiency
def test_bad_shape(self):
# A is not square.
A = np.zeros((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve_triangular, A, b)
# A2 and b2 have incompatible shapes.
A2 = csr_matrix(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve_triangular, A2, b2)
@sup_sparse_efficiency
def test_input_types(self):
A = array([[1., 0.], [1., 2.]])
b = array([[2., 0.], [2., 2.]])
for matrix_type in (array, csc_matrix, csr_matrix):
x = spsolve_triangular(matrix_type(A), b, lower=True)
assert_array_almost_equal(A.dot(x), b)
@sup_sparse_efficiency
def test_random(self):
def random_triangle_matrix(n, lower=True):
A = scipy.sparse.random(n, n, density=0.1, format='coo')
if lower:
A = scipy.sparse.tril(A)
else:
A = scipy.sparse.triu(A)
A = A.tocsr(copy=False)
for i in range(n):
A[i, i] = np.random.rand() + 1
return A
np.random.seed(1234)
for lower in (True, False):
for n in (10, 10**2, 10**3):
A = random_triangle_matrix(n, lower=lower)
for m in (1, 10):
for b in (np.random.rand(n, m),
np.random.randint(-9, 9, (n, m)),
np.random.randint(-9, 9, (n, m)) +
np.random.randint(-9, 9, (n, m)) * 1j):
x = spsolve_triangular(A, b, lower=lower)
assert_array_almost_equal(A.dot(x), b)
| bsd-3-clause |
microcom/odoo | addons/survey/wizard/survey_email_compose_message.py | 29 | 10120 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.translate import _
from datetime import datetime
from openerp.exceptions import UserError
import re
import uuid
import urlparse
emails_split = re.compile(r"[;,\n\r]+")
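# e.g. emails_split.split("a@x.com; b@y.com,c@z.com")
# -> ['a@x.com', ' b@y.com', 'c@z.com'] (whitespace is stripped afterwards)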
class survey_mail_compose_message(osv.TransientModel):
_name = 'survey.mail.compose.message'
_inherit = 'mail.compose.message'
_description = 'Email composition wizard for Survey'
_log_access = True
def _get_public_url(self, cr, uid, ids, name, arg, context=None):
res = dict((id, 0) for id in ids)
survey_obj = self.pool.get('survey.survey')
for wizard in self.browse(cr, uid, ids, context=context):
res[wizard.id] = wizard.survey_id.public_url
return res
def _get_public_url_html(self, cr, uid, ids, name, arg, context=None):
""" Compute if the message is unread by the current user """
urls = self._get_public_url(cr, uid, ids, name, arg, context=context)
for key, url in urls.items():
urls[key] = '<a href="%s">%s</a>' % (url, _("Click here to start survey"))
return urls
_columns = {
'survey_id': fields.many2one('survey.survey', 'Survey', required=True),
'public': fields.selection([('public_link', 'Share the public web link to your audience.'),
('email_public_link', 'Send by email the public web link to your audience.'),
('email_private', 'Send private invitation to your audience (only one response per recipient and per invitation).')],
string='Share options', required=True),
'public_url': fields.function(_get_public_url, string="Public url", type="char"),
'public_url_html': fields.function(_get_public_url_html, string="Public HTML web link", type="char"),
'partner_ids': fields.many2many('res.partner',
'survey_mail_compose_message_res_partner_rel',
'wizard_id', 'partner_id', 'Existing contacts'),
'attachment_ids': fields.many2many('ir.attachment',
'survey_mail_compose_message_ir_attachments_rel',
'wizard_id', 'attachment_id', 'Attachments'),
        'multi_email': fields.text(string='List of emails', help="This list of recipient emails will not be converted into contacts. Emails separated by commas, semicolons or newlines."),
'date_deadline': fields.date(string="Deadline to which the invitation to respond is valid", help="Deadline to which the invitation to respond for this survey is valid. If the field is empty, the invitation is still valid."),
}
_defaults = {
'public': 'public_link',
'survey_id': lambda self, cr, uid, ctx={}: ctx.get('model') == 'survey.survey' and ctx.get('res_id') or None
}
def default_get(self, cr, uid, fields, context=None):
res = super(survey_mail_compose_message, self).default_get(cr, uid, fields, context=context)
if context.get('active_model') == 'res.partner' and context.get('active_ids'):
res.update({'partner_ids': context.get('active_ids')})
return res
def onchange_multi_email(self, cr, uid, ids, multi_email, context=None):
emails = list(set(emails_split.split(multi_email or "")))
emails_checked = []
error_message = ""
for email in emails:
email = email.strip()
if email:
if not re.search(r"^[^@]+@[^@]+$", email):
error_message += "\n'%s'" % email
else:
emails_checked.append(email)
if error_message:
raise UserError(_("One email at least is incorrect: %s") % error_message)
emails_checked.sort()
values = {'multi_email': '\n'.join(emails_checked)}
return {'value': values}
def onchange_survey_id(self, cr, uid, ids, survey_id, context=None):
""" Compute if the message is unread by the current user. """
if survey_id:
survey = self.pool.get('survey.survey').browse(cr, uid, survey_id, context=context)
return {
'value': {
'subject': survey.title,
'public_url': survey.public_url,
'public_url_html': '<a href="%s">%s</a>' % (survey.public_url, _("Click here to take survey")),
}}
else:
txt = _("Please select a survey")
return {
'value': {
'public_url': txt,
'public_url_html': txt,
}}
#------------------------------------------------------
# Wizard validation and send
#------------------------------------------------------
def send_mail(self, cr, uid, ids, auto_commit=False, context=None):
""" Process the wizard content and proceed with sending the related
email(s), rendering any template patterns on the fly if needed """
if context is None:
context = {}
survey_response_obj = self.pool.get('survey.user_input')
partner_obj = self.pool.get('res.partner')
mail_mail_obj = self.pool.get('mail.mail')
try:
model, anonymous_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'portal', 'group_anonymous')
except ValueError:
anonymous_id = None
def create_response_and_send_mail(wizard, token, partner_id, email):
""" Create one mail by recipients and replace __URL__ by link with identification token """
#set url
url = wizard.survey_id.public_url
url = urlparse.urlparse(url).path[1:] # dirty hack to avoid incorrect urls
if token:
url = url + '/' + token
# post the message
values = {
'model': None,
'res_id': None,
'subject': wizard.subject,
'body': wizard.body.replace("__URL__", url),
'body_html': wizard.body.replace("__URL__", url),
'parent_id': None,
'attachment_ids': wizard.attachment_ids and [(6, 0, wizard.attachment_ids.ids)] or None,
'email_from': wizard.email_from or None,
'auto_delete': True,
}
if partner_id:
values['recipient_ids'] = [(4, partner_id)]
else:
values['email_to'] = email
mail_id = mail_mail_obj.create(cr, uid, values, context=context)
mail_mail_obj.send(cr, uid, [mail_id], context=context)
def create_token(wizard, partner_id, email):
if context.get("survey_resent_token"):
response_ids = survey_response_obj.search(cr, uid, [('survey_id', '=', wizard.survey_id.id), ('state', 'in', ['new', 'skip']), '|', ('partner_id', '=', partner_id), ('email', '=', email)], context=context)
if response_ids:
return survey_response_obj.read(cr, uid, response_ids, ['token'], context=context)[0]['token']
if wizard.public != 'email_private':
return None
else:
token = uuid.uuid4().__str__()
# create response with token
survey_response_obj.create(cr, uid, {
'survey_id': wizard.survey_id.id,
'deadline': wizard.date_deadline,
'date_create': datetime.now(),
'type': 'link',
'state': 'new',
'token': token,
'partner_id': partner_id,
'email': email},
context=context)
return token
for wizard in self.browse(cr, uid, ids, context=context):
# check if __URL__ is in the text
if wizard.body.find("__URL__") < 0:
raise UserError(_("The content of the text don't contain '__URL__'. \
__URL__ is automaticaly converted into the special url of the survey."))
if not wizard.multi_email and not wizard.partner_ids and (context.get('default_partner_ids') or context.get('default_multi_email')):
wizard.multi_email = context.get('default_multi_email')
wizard.partner_ids = context.get('default_partner_ids')
# quick check of email list
emails_list = []
if wizard.multi_email:
emails = list(set(emails_split.split(wizard.multi_email)) - set([partner.email for partner in wizard.partner_ids]))
for email in emails:
email = email.strip()
if re.search(r"^[^@]+@[^@]+$", email):
emails_list.append(email)
# remove public anonymous access
partner_list = []
for partner in wizard.partner_ids:
if not anonymous_id or not partner.user_ids or anonymous_id not in [x.id for x in partner.user_ids[0].groups_id]:
partner_list.append({'id': partner.id, 'email': partner.email})
if not len(emails_list) and not len(partner_list):
if wizard.model == 'res.partner' and wizard.res_id:
return False
raise UserError(_("Please enter at least one valid recipient."))
for email in emails_list:
partner_id = partner_obj.search(cr, uid, [('email', '=', email)], context=context)
partner_id = partner_id and partner_id[0] or None
token = create_token(wizard, partner_id, email)
create_response_and_send_mail(wizard, token, partner_id, email)
for partner in partner_list:
token = create_token(wizard, partner['id'], partner['email'])
create_response_and_send_mail(wizard, token, partner['id'], partner['email'])
return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 |
freephys/python_ase | ase/transport/calculators.py | 2 | 11688 | import numpy as np
from numpy import linalg
from ase.transport.selfenergy import LeadSelfEnergy, BoxProbe
from ase.transport.greenfunction import GreenFunction
from ase.transport.tools import subdiagonalize, cutcoupling, tri2full, dagger
class TransportCalculator:
"""Determine transport properties of device sandwiched between
    semi-infinite leads using nonequilibrium Green function methods.
"""
def __init__(self, **kwargs):
"""Bla Bla XXX
energies is the energy grid on which the transport properties
should be determined.
h1 (h2) is a matrix representation of the Hamiltonian of two
principal layers of the left (right) lead, and the coupling
between such layers.
h is a matrix representation of the Hamiltonian of the
scattering region. This must include at least one lead
principal layer on each side. The coupling in (out) of the
scattering region is by default assumed to be identical to the
coupling between left (right) principal layers. However,
these couplings can also be specified explicitly through hc1
and hc2.
s, s1, and s2 are the overlap matrices corresponding to h, h1,
and h2. Default is the identity operator. sc1 and sc2 are the
overlap matrices corresponding to the optional couplings hc1
and hc2.
align_bf specifies the principal layer basis index used to
        align the Fermi levels of the lead and scattering regions.
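
        Example (illustrative sketch; assumes h, h1 and h2 are
        precomputed matrices as described above)::

            calc = TransportCalculator(h=h, h1=h1, h2=h2,
                                       energies=np.arange(-3.0, 3.0, 0.1))
            T_e = calc.get_transmission()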
"""
# The default values for all extra keywords
self.input_parameters = {'energies': None,
'h': None,
'h1': None,
'h2': None,
's': None,
's1': None,
's2': None,
'hc1': None,
'hc2': None,
'sc1': None,
'sc2': None,
'box': None,
'align_bf': None,
'eta1': 1e-3,
'eta2': 1e-3,
'eta': 1e-3,
'logfile': None, # '-',
'eigenchannels': 0,
'dos': False,
'pdos': [],
}
self.initialized = False # Changed Hamiltonians?
self.uptodate = False # Changed energy grid?
self.set(**kwargs)
def set(self, **kwargs):
for key in kwargs:
if key in ['h', 'h1', 'h2', 'hc1', 'hc2',
's', 's1', 's2', 'sc1', 'sc2',
'eta', 'eta1', 'eta2', 'align_bf', 'box']:
self.initialized = False
self.uptodate = False
break
elif key in ['energies', 'eigenchannels', 'dos', 'pdos']:
self.uptodate = False
elif key not in self.input_parameters:
                raise KeyError, '\'%s\' not a valid keyword' % key
self.input_parameters.update(kwargs)
log = self.input_parameters['logfile']
if log is None:
class Trash:
def write(self, s):
pass
def flush(self):
pass
self.log = Trash()
elif log == '-':
from sys import stdout
self.log = stdout
elif 'logfile' in kwargs:
self.log = open(log, 'w')
def initialize(self):
if self.initialized:
return
print >> self.log, '# Initializing calculator...'
p = self.input_parameters
        if p['s1'] is None:
            p['s1'] = np.identity(len(p['h1']))
        if p['s2'] is None:
            p['s2'] = np.identity(len(p['h2']))
        if p['s'] is None:
            p['s'] = np.identity(len(p['h']))
h_mm = p['h']
s_mm = p['s']
pl1 = len(p['h1']) / 2
pl2 = len(p['h2']) / 2
h1_ii = p['h1'][:pl1, :pl1]
h1_ij = p['h1'][:pl1, pl1:2 * pl1]
s1_ii = p['s1'][:pl1, :pl1]
s1_ij = p['s1'][:pl1, pl1:2 * pl1]
h2_ii = p['h2'][:pl2, :pl2]
h2_ij = p['h2'][pl2: 2 * pl2, :pl2]
s2_ii = p['s2'][:pl2, :pl2]
s2_ij = p['s2'][pl2: 2 * pl2, :pl2]
        nbf = len(h_mm)
        if p['hc1'] is None:
h1_im = np.zeros((pl1, nbf), complex)
s1_im = np.zeros((pl1, nbf), complex)
h1_im[:pl1, :pl1] = h1_ij
s1_im[:pl1, :pl1] = s1_ij
else:
h1_im = p['hc1']
if p['sc1'] is not None:
s1_im = p['sc1']
else:
s1_im = np.zeros(h1_im.shape, complex)
if p['hc2'] is None:
h2_im = np.zeros((pl2, nbf), complex)
s2_im = np.zeros((pl2, nbf), complex)
h2_im[-pl2:, -pl2:] = h2_ij
s2_im[-pl2:, -pl2:] = s2_ij
else:
h2_im = p['hc2']
if p['sc2'] is not None:
                s2_im = p['sc2']
else:
s2_im = np.zeros(h2_im.shape, complex)
align_bf = p['align_bf']
        if align_bf is not None:
diff = (h_mm[align_bf, align_bf] - h1_ii[align_bf, align_bf]) \
/ s_mm[align_bf, align_bf]
print >> self.log, '# Aligning scat. H to left lead H. diff=', diff
h_mm -= diff * s_mm
        # Set up the lead self-energies.
self.selfenergies = [LeadSelfEnergy((h1_ii, s1_ii),
(h1_ij, s1_ij),
(h1_im, s1_im),
p['eta1']),
LeadSelfEnergy((h2_ii, s2_ii),
(h2_ij, s2_ij),
(h2_im, s2_im),
p['eta2'])]
box = p['box']
if box is not None:
print 'Using box probe!'
self.selfenergies.append(
BoxProbe(eta=box[0], a=box[1], b=box[2], energies=box[3],
S=s_mm, T=0.3))
        # Set up the scattering Green function.
self.greenfunction = GreenFunction(selfenergies=self.selfenergies,
H=h_mm,
S=s_mm,
eta=p['eta'])
self.initialized = True
def update(self):
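        """Evaluate the transmission function (and, if requested, DOS,
        PDOS and eigenchannel transmissions) on the energy grid."""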
if self.uptodate:
return
p = self.input_parameters
self.energies = p['energies']
nepts = len(self.energies)
nchan = p['eigenchannels']
pdos = p['pdos']
self.T_e = np.empty(nepts)
if p['dos']:
self.dos_e = np.empty(nepts)
if pdos != []:
self.pdos_ne = np.empty((len(pdos), nepts))
if nchan > 0:
self.eigenchannels_ne = np.empty((nchan, nepts))
for e, energy in enumerate(self.energies):
Ginv_mm = self.greenfunction.retarded(energy, inverse=True)
lambda1_mm = self.selfenergies[0].get_lambda(energy)
lambda2_mm = self.selfenergies[1].get_lambda(energy)
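            # Caroli formula: T(E) = Tr[G Lambda1 G^dagger Lambda2], with
            # a_mm = G Lambda1 and b_mm = G^dagger Lambda2 computed by
            # solving against G^-1 instead of inverting it explicitly.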
a_mm = linalg.solve(Ginv_mm, lambda1_mm)
b_mm = linalg.solve(dagger(Ginv_mm), lambda2_mm)
T_mm = np.dot(a_mm, b_mm)
if nchan > 0:
t_n = linalg.eigvals(T_mm).real
self.eigenchannels_ne[:, e] = np.sort(t_n)[-nchan:]
self.T_e[e] = np.sum(t_n)
else:
self.T_e[e] = np.trace(T_mm).real
print >> self.log, energy, self.T_e[e]
self.log.flush()
if p['dos']:
self.dos_e[e] = self.greenfunction.dos(energy)
if pdos != []:
self.pdos_ne[:, e] = np.take(self.greenfunction.pdos(energy),
pdos)
self.uptodate = True
def print_pl_convergence(self):
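        """Print the maximum deviation between the lead and the
        scattering-region principal-layer blocks of h and s."""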
self.initialize()
pl1 = len(self.input_parameters['h1']) / 2
h_ii = self.selfenergies[0].h_ii
s_ii = self.selfenergies[0].s_ii
ha_ii = self.greenfunction.H[:pl1, :pl1]
sa_ii = self.greenfunction.S[:pl1, :pl1]
c1 = np.abs(h_ii - ha_ii).max()
c2 = np.abs(s_ii - sa_ii).max()
        print 'Conv (h, s) = %.2e, %.2e' % (c1, c2)
def plot_pl_convergence(self):
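        """Plot the onsite elements of the lead Hamiltonian against those
        of the first principal layer of the scattering region."""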
self.initialize()
pl1 = len(self.input_parameters['h1']) / 2
hlead = self.selfenergies[0].h_ii.real.diagonal()
        hprincipal = self.greenfunction.H.real.diagonal()[:pl1]
import pylab as pl
pl.plot(hlead, label='lead')
pl.plot(hprincipal, label='principal layer')
pl.axis('tight')
pl.show()
def get_transmission(self):
self.initialize()
self.update()
return self.T_e
def get_dos(self):
self.initialize()
self.update()
return self.dos_e
def get_eigenchannels(self, n=None):
"""Get ``n`` first eigenchannels."""
self.initialize()
self.update()
if n is None:
n = self.input_parameters['eigenchannels']
return self.eigenchannels_ne[:n]
def get_pdos(self):
self.initialize()
self.update()
return self.pdos_ne
def subdiagonalize_bfs(self, bfs):
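        """Subdiagonalize the Hamiltonian in the subspace spanned by the
        basis functions ``bfs``; returns the transformed h and s, the
        subspace eigenvalues and the rotation restricted to the subspace."""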
self.initialize()
bfs = np.array(bfs)
p = self.input_parameters
h_pp = p['h']
s_pp = p['s']
ht_pp, st_pp, c_pp, e_p = subdiagonalize(h_pp, s_pp, bfs)
c_pp = np.take(c_pp, bfs, axis=0)
c_pp = np.take(c_pp, bfs, axis=1)
return ht_pp, st_pp, e_p, c_pp
def cutcoupling_bfs(self, bfs):
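        """Return copies of h and s with all couplings to the basis
        functions ``bfs`` removed."""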
self.initialize()
bfs = np.array(bfs)
p = self.input_parameters
h_pp = p['h'].copy()
s_pp = p['s'].copy()
cutcoupling(h_pp, s_pp, bfs)
return h_pp, s_pp
def get_left_channels(self, energy, nchan=1):
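        """Get the ``nchan`` most transmitting eigenchannels coming in from
        the left lead at ``energy``; returns (T_n, v_in)."""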
self.initialize()
g_s_ii = self.greenfunction.retarded(energy)
lambda_l_ii = self.selfenergies[0].get_lambda(energy)
lambda_r_ii = self.selfenergies[1].get_lambda(energy)
if self.greenfunction.S is None:
            s_s_sqrt_ii = s_s_isqrt_ii = np.identity(len(g_s_ii))
else:
s_mm = self.greenfunction.S
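            # Build S^(1/2) and S^(-1/2) from the eigendecomposition of the
            # overlap matrix (Lowdin orthogonalization of the basis).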
s_s_i, s_s_ii = linalg.eig(s_mm)
s_s_i = np.abs(s_s_i)
s_s_sqrt_i = np.sqrt(s_s_i) # sqrt of eigenvalues
s_s_sqrt_ii = np.dot(s_s_ii * s_s_sqrt_i, dagger(s_s_ii))
s_s_isqrt_ii = np.dot(s_s_ii / s_s_sqrt_i, dagger(s_s_ii))
        lambdab_r_ii = np.dot(np.dot(s_s_isqrt_ii, lambda_r_ii), s_s_isqrt_ii)
a_l_ii = np.dot(np.dot(g_s_ii, lambda_l_ii), dagger(g_s_ii))
ab_l_ii = np.dot(np.dot(s_s_sqrt_ii, a_l_ii), s_s_sqrt_ii)
lambda_i, u_ii = linalg.eig(ab_l_ii)
ut_ii = np.sqrt(lambda_i / (2.0 * np.pi)) * u_ii
        m_ii = 2 * np.pi * np.dot(np.dot(dagger(ut_ii), lambdab_r_ii), ut_ii)
        T_i, c_in = linalg.eig(m_ii)
T_i = np.abs(T_i)
channels = np.argsort(-T_i)[:nchan]
c_in = np.take(c_in, channels, axis=1)
T_n = np.take(T_i, channels)
v_in = np.dot(np.dot(s_s_isqrt_ii, ut_ii), c_in)
return T_n, v_in
| gpl-3.0 |