repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
baffolobill/django-lfs | lfs/portlet/models/recent_products.py | 4 | 1913 |
# django imports
from django import forms
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.template import RequestContext
from django.template.loader import render_to_string

# portlets imports
from portlets.models import Portlet

# lfs imports
from lfs.catalog.models import Product
from lfs.caching.utils import lfs_get_object


class RecentProductsPortlet(Portlet):
    """Portlet to display recently visited products.
    """
    class Meta:
        app_label = 'portlet'

    def __unicode__(self):
        return u"%s" % self.id

    def render(self, context):
        """Renders the portlet as html.
        """
        object = context.get("product")
        slug_not_to_display = ""
        limit = settings.LFS_RECENT_PRODUCTS_LIMIT

        # If the portlet is rendered on a product page, that product is
        # excluded from the list, so fetch one extra slug to still show
        # LFS_RECENT_PRODUCTS_LIMIT entries.
        if object:
            ctype = ContentType.objects.get_for_model(object)
            if ctype.name == u"product":
                slug_not_to_display = object.slug
                limit = settings.LFS_RECENT_PRODUCTS_LIMIT + 1

        request = context.get("request")

        products = []
        for slug in request.session.get("RECENT_PRODUCTS", [])[:limit]:
            if slug == slug_not_to_display:
                continue
            product = lfs_get_object(Product, slug=slug)
            # Show the default variant for products that have variants.
            if product and product.is_product_with_variants() and product.has_variants():
                product = product.get_default_variant()
            products.append(product)

        return render_to_string("lfs/portlets/recent_products.html", RequestContext(request, {
            "title": self.title,
            "products": products,
        }))

    def form(self, **kwargs):
        return RecentProductsForm(instance=self, **kwargs)


class RecentProductsForm(forms.ModelForm):
    """Form for the RecentProductsPortlet.
    """
    class Meta:
        model = RecentProductsPortlet
        exclude = ()
| bsd-3-clause |

seem-sky/kbengine | kbe/src/lib/python/Tools/pybench/Numbers.py | 92 | 16198 |
from pybench import Test
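# Each Test subclass below unrolls its comparison 30 * 5 times per loop
# iteration (matching the declared ``operations``); pybench times test() and
# subtracts the time of the empty calibrate() loop to isolate the cost of the
# comparisons themselves.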
class CompareIntegers(Test):
version = 2.0
operations = 30 * 5
rounds = 120000
def test(self):
for i in range(self.rounds):
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
2 < 3
2 > 3
2 == 3
2 > 3
2 < 3
def calibrate(self):
for i in range(self.rounds):
pass
class CompareFloats(Test):
version = 2.0
operations = 30 * 5
rounds = 80000
def test(self):
for i in range(self.rounds):
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
2.1 < 3.31
2.1 > 3.31
2.1 == 3.31
2.1 > 3.31
2.1 < 3.31
def calibrate(self):
for i in range(self.rounds):
pass
class CompareFloatsIntegers(Test):
version = 2.0
operations = 30 * 5
rounds = 60000
def test(self):
for i in range(self.rounds):
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
2.1 < 4
2.1 > 4
2.1 == 4
2.1 > 4
2.1 < 4
def calibrate(self):
for i in range(self.rounds):
pass
class CompareLongs(Test):
version = 2.0
operations = 30 * 5
rounds = 70000
def test(self):
for i in range(self.rounds):
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
1234567890 < 3456789012345
1234567890 > 3456789012345
1234567890 == 3456789012345
1234567890 > 3456789012345
1234567890 < 3456789012345
def calibrate(self):
for i in range(self.rounds):
pass
| lgpl-3.0 |

oliverlee/sympy | sympy/core/tests/test_assumptions.py | 7 | 30051 |
from sympy import I, sqrt, log, exp, sin, asin, factorial, Mod, pi
from sympy.core import Symbol, S, Rational, Integer, Dummy, Wild, Pow
from sympy.core.facts import InconsistentAssumptions
from sympy import simplify
from sympy.core.compatibility import range
from sympy.utilities.pytest import raises, XFAIL
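# Note: SymPy's ``is_*`` assumption attributes are three-valued: True, False,
# or None when the property cannot be decided from the declared assumptions.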
def test_symbol_unset():
x = Symbol('x', real=True, integer=True)
assert x.is_real is True
assert x.is_integer is True
assert x.is_imaginary is False
assert x.is_noninteger is False
assert x.is_number is False
def test_zero():
z = Integer(0)
assert z.is_commutative is True
assert z.is_integer is True
assert z.is_rational is True
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_real is True
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is False
assert z.is_positive is False
assert z.is_negative is False
assert z.is_nonpositive is True
assert z.is_nonnegative is True
assert z.is_even is True
assert z.is_odd is False
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
assert z.is_number is True
def test_one():
z = Integer(1)
assert z.is_commutative is True
assert z.is_integer is True
assert z.is_rational is True
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_real is True
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is False
assert z.is_positive is True
assert z.is_negative is False
assert z.is_nonpositive is False
assert z.is_nonnegative is True
assert z.is_even is False
assert z.is_odd is True
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_number is True
assert z.is_composite is False # issue 8807
def test_negativeone():
z = Integer(-1)
assert z.is_commutative is True
assert z.is_integer is True
assert z.is_rational is True
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_real is True
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is False
assert z.is_positive is False
assert z.is_negative is True
assert z.is_nonpositive is True
assert z.is_nonnegative is False
assert z.is_even is False
assert z.is_odd is True
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
assert z.is_number is True
def test_infinity():
oo = S.Infinity
assert oo.is_commutative is True
assert oo.is_integer is None
assert oo.is_rational is None
assert oo.is_algebraic is None
assert oo.is_transcendental is None
assert oo.is_real is True
assert oo.is_complex is True
assert oo.is_noninteger is None
assert oo.is_irrational is None
assert oo.is_imaginary is False
assert oo.is_positive is True
assert oo.is_negative is False
assert oo.is_nonpositive is False
assert oo.is_nonnegative is True
assert oo.is_even is None
assert oo.is_odd is None
assert oo.is_finite is False
assert oo.is_infinite is True
assert oo.is_comparable is True
assert oo.is_prime is False
assert oo.is_composite is None
assert oo.is_number is True
def test_neg_infinity():
mm = S.NegativeInfinity
assert mm.is_commutative is True
assert mm.is_integer is None
assert mm.is_rational is None
assert mm.is_algebraic is None
assert mm.is_transcendental is None
assert mm.is_real is True
assert mm.is_complex is True
assert mm.is_noninteger is None
assert mm.is_irrational is None
assert mm.is_imaginary is False
assert mm.is_positive is False
assert mm.is_negative is True
assert mm.is_nonpositive is True
assert mm.is_nonnegative is False
assert mm.is_even is None
assert mm.is_odd is None
assert mm.is_finite is False
assert mm.is_infinite is True
assert mm.is_comparable is True
assert mm.is_prime is False
assert mm.is_composite is False
assert mm.is_number is True
def test_nan():
nan = S.NaN
assert nan.is_commutative is True
assert nan.is_integer is None
assert nan.is_rational is None
assert nan.is_algebraic is None
assert nan.is_transcendental is None
assert nan.is_real is None
assert nan.is_complex is None
assert nan.is_noninteger is None
assert nan.is_irrational is None
assert nan.is_imaginary is None
assert nan.is_positive is None
assert nan.is_negative is None
assert nan.is_nonpositive is None
assert nan.is_nonnegative is None
assert nan.is_even is None
assert nan.is_odd is None
assert nan.is_finite is None
assert nan.is_infinite is None
assert nan.is_comparable is False
assert nan.is_prime is None
assert nan.is_composite is None
assert nan.is_number is True
def test_pos_rational():
r = Rational(3, 4)
assert r.is_commutative is True
assert r.is_integer is False
assert r.is_rational is True
assert r.is_algebraic is True
assert r.is_transcendental is False
assert r.is_real is True
assert r.is_complex is True
assert r.is_noninteger is True
assert r.is_irrational is False
assert r.is_imaginary is False
assert r.is_positive is True
assert r.is_negative is False
assert r.is_nonpositive is False
assert r.is_nonnegative is True
assert r.is_even is False
assert r.is_odd is False
assert r.is_finite is True
assert r.is_infinite is False
assert r.is_comparable is True
assert r.is_prime is False
assert r.is_composite is False
r = Rational(1, 4)
assert r.is_nonpositive is False
assert r.is_positive is True
assert r.is_negative is False
assert r.is_nonnegative is True
r = Rational(5, 4)
assert r.is_negative is False
assert r.is_positive is True
assert r.is_nonpositive is False
assert r.is_nonnegative is True
r = Rational(5, 3)
assert r.is_nonnegative is True
assert r.is_positive is True
assert r.is_negative is False
assert r.is_nonpositive is False
def test_neg_rational():
r = Rational(-3, 4)
assert r.is_positive is False
assert r.is_nonpositive is True
assert r.is_negative is True
assert r.is_nonnegative is False
r = Rational(-1, 4)
assert r.is_nonpositive is True
assert r.is_positive is False
assert r.is_negative is True
assert r.is_nonnegative is False
r = Rational(-5, 4)
assert r.is_negative is True
assert r.is_positive is False
assert r.is_nonpositive is True
assert r.is_nonnegative is False
r = Rational(-5, 3)
assert r.is_nonnegative is False
assert r.is_positive is False
assert r.is_negative is True
assert r.is_nonpositive is True
def test_pi():
z = S.Pi
assert z.is_commutative is True
assert z.is_integer is False
assert z.is_rational is False
assert z.is_algebraic is False
assert z.is_transcendental is True
assert z.is_real is True
assert z.is_complex is True
assert z.is_noninteger is True
assert z.is_irrational is True
assert z.is_imaginary is False
assert z.is_positive is True
assert z.is_negative is False
assert z.is_nonpositive is False
assert z.is_nonnegative is True
assert z.is_even is False
assert z.is_odd is False
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
def test_E():
z = S.Exp1
assert z.is_commutative is True
assert z.is_integer is False
assert z.is_rational is False
assert z.is_algebraic is False
assert z.is_transcendental is True
assert z.is_real is True
assert z.is_complex is True
assert z.is_noninteger is True
assert z.is_irrational is True
assert z.is_imaginary is False
assert z.is_positive is True
assert z.is_negative is False
assert z.is_nonpositive is False
assert z.is_nonnegative is True
assert z.is_even is False
assert z.is_odd is False
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
def test_I():
z = S.ImaginaryUnit
assert z.is_commutative is True
assert z.is_integer is False
assert z.is_rational is False
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_real is False
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is True
assert z.is_positive is False
assert z.is_negative is False
assert z.is_nonpositive is False
assert z.is_nonnegative is False
assert z.is_even is False
assert z.is_odd is False
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is False
assert z.is_prime is False
assert z.is_composite is False
def test_symbol_real():
# issue 3848
a = Symbol('a', real=False)
assert a.is_real is False
assert a.is_integer is False
assert a.is_negative is False
assert a.is_positive is False
assert a.is_nonnegative is False
assert a.is_nonpositive is False
assert a.is_zero is False
def test_symbol_imaginary():
a = Symbol('a', imaginary=True)
assert a.is_real is False
assert a.is_integer is False
assert a.is_negative is False
assert a.is_positive is False
assert a.is_nonnegative is False
assert a.is_nonpositive is False
assert a.is_zero is False
assert a.is_nonzero is False # since nonzero -> real
def test_symbol_zero():
x = Symbol('x', zero=True)
assert x.is_positive is False
assert x.is_nonpositive
assert x.is_negative is False
assert x.is_nonnegative
assert x.is_zero is True
# TODO Change to x.is_nonzero is None
# See https://github.com/sympy/sympy/pull/9583
assert x.is_nonzero is False
assert x.is_finite is True
def test_symbol_positive():
x = Symbol('x', positive=True)
assert x.is_positive is True
assert x.is_nonpositive is False
assert x.is_negative is False
assert x.is_nonnegative is True
assert x.is_zero is False
assert x.is_nonzero is True
def test_neg_symbol_positive():
x = -Symbol('x', positive=True)
assert x.is_positive is False
assert x.is_nonpositive is True
assert x.is_negative is True
assert x.is_nonnegative is False
assert x.is_zero is False
assert x.is_nonzero is True
def test_symbol_nonpositive():
x = Symbol('x', nonpositive=True)
assert x.is_positive is False
assert x.is_nonpositive is True
assert x.is_negative is None
assert x.is_nonnegative is None
assert x.is_zero is None
assert x.is_nonzero is None
def test_neg_symbol_nonpositive():
x = -Symbol('x', nonpositive=True)
assert x.is_positive is None
assert x.is_nonpositive is None
assert x.is_negative is False
assert x.is_nonnegative is True
assert x.is_zero is None
assert x.is_nonzero is None
def test_symbol_falsepositive():
x = Symbol('x', positive=False)
assert x.is_positive is False
assert x.is_nonpositive is None
assert x.is_negative is None
assert x.is_nonnegative is None
assert x.is_zero is None
assert x.is_nonzero is None
def test_symbol_falsepositive_mul():
# To test pull request 9379
# Explicit handling of arg.is_positive=False was added to Mul._eval_is_positive
x = 2*Symbol('x', positive=False)
assert x.is_positive is False # This was None before
assert x.is_nonpositive is None
assert x.is_negative is None
assert x.is_nonnegative is None
assert x.is_zero is None
assert x.is_nonzero is None
def test_neg_symbol_falsepositive():
x = -Symbol('x', positive=False)
assert x.is_positive is None
assert x.is_nonpositive is None
assert x.is_negative is False
assert x.is_nonnegative is None
assert x.is_zero is None
assert x.is_nonzero is None
def test_neg_symbol_falsenegative():
# To test pull request 9379
# Explicit handling of arg.is_negative=False was added to Mul._eval_is_positive
x = -Symbol('x', negative=False)
assert x.is_positive is False # This was None before
assert x.is_nonpositive is None
assert x.is_negative is None
assert x.is_nonnegative is None
assert x.is_zero is None
assert x.is_nonzero is None
def test_symbol_falsepositive_real():
x = Symbol('x', positive=False, real=True)
assert x.is_positive is False
assert x.is_nonpositive is True
assert x.is_negative is None
assert x.is_nonnegative is None
assert x.is_zero is None
assert x.is_nonzero is None
def test_neg_symbol_falsepositive_real():
x = -Symbol('x', positive=False, real=True)
assert x.is_positive is None
assert x.is_nonpositive is None
assert x.is_negative is False
assert x.is_nonnegative is True
assert x.is_zero is None
assert x.is_nonzero is None
def test_symbol_falsenonnegative():
x = Symbol('x', nonnegative=False)
assert x.is_positive is False
assert x.is_nonpositive is None
assert x.is_negative is None
assert x.is_nonnegative is False
assert x.is_zero is False
assert x.is_nonzero is None
@XFAIL
def test_neg_symbol_falsenonnegative():
x = -Symbol('x', nonnegative=False)
assert x.is_positive is None
assert x.is_nonpositive is False # this currently returns None
assert x.is_negative is False # this currently returns None
assert x.is_nonnegative is None
assert x.is_zero is False # this currently returns None
assert x.is_nonzero is True # this currently returns None
def test_symbol_falsenonnegative_real():
x = Symbol('x', nonnegative=False, real=True)
assert x.is_positive is False
assert x.is_nonpositive is True
assert x.is_negative is True
assert x.is_nonnegative is False
assert x.is_zero is False
assert x.is_nonzero is True
def test_neg_symbol_falsenonnegative_real():
x = -Symbol('x', nonnegative=False, real=True)
assert x.is_positive is True
assert x.is_nonpositive is False
assert x.is_negative is False
assert x.is_nonnegative is True
assert x.is_zero is False
assert x.is_nonzero is True
def test_prime():
assert S(-1).is_prime is False
assert S(-2).is_prime is False
assert S(-4).is_prime is False
assert S(0).is_prime is False
assert S(1).is_prime is False
assert S(2).is_prime is True
assert S(17).is_prime is True
assert S(4).is_prime is False
def test_composite():
assert S(-1).is_composite is False
assert S(-2).is_composite is False
assert S(-4).is_composite is False
assert S(0).is_composite is False
assert S(2).is_composite is False
assert S(17).is_composite is False
assert S(4).is_composite is True
x = Dummy(integer=True, positive=True, prime=False)
assert x.is_composite is None # x could be 1
assert (x + 1).is_composite is None
def test_prime_symbol():
x = Symbol('x', prime=True)
assert x.is_prime is True
assert x.is_integer is True
assert x.is_positive is True
assert x.is_negative is False
assert x.is_nonpositive is False
assert x.is_nonnegative is True
x = Symbol('x', prime=False)
assert x.is_prime is False
assert x.is_integer is None
assert x.is_positive is None
assert x.is_negative is None
assert x.is_nonpositive is None
assert x.is_nonnegative is None
def test_symbol_noncommutative():
x = Symbol('x', commutative=True)
assert x.is_complex is None
x = Symbol('x', commutative=False)
assert x.is_integer is False
assert x.is_rational is False
assert x.is_algebraic is False
assert x.is_irrational is False
assert x.is_real is False
assert x.is_complex is False
def test_other_symbol():
x = Symbol('x', integer=True)
assert x.is_integer is True
assert x.is_real is True
x = Symbol('x', integer=True, nonnegative=True)
assert x.is_integer is True
assert x.is_nonnegative is True
assert x.is_negative is False
assert x.is_positive is None
x = Symbol('x', integer=True, nonpositive=True)
assert x.is_integer is True
assert x.is_nonpositive is True
assert x.is_positive is False
assert x.is_negative is None
x = Symbol('x', odd=True)
assert x.is_odd is True
assert x.is_even is False
assert x.is_integer is True
x = Symbol('x', odd=False)
assert x.is_odd is False
assert x.is_even is None
assert x.is_integer is None
x = Symbol('x', even=True)
assert x.is_even is True
assert x.is_odd is False
assert x.is_integer is True
x = Symbol('x', even=False)
assert x.is_even is False
assert x.is_odd is None
assert x.is_integer is None
x = Symbol('x', integer=True, nonnegative=True)
assert x.is_integer is True
assert x.is_nonnegative is True
x = Symbol('x', integer=True, nonpositive=True)
assert x.is_integer is True
assert x.is_nonpositive is True
with raises(AttributeError):
x.is_real = False
x = Symbol('x', algebraic=True)
assert x.is_transcendental is False
x = Symbol('x', transcendental=True)
assert x.is_algebraic is False
assert x.is_rational is False
assert x.is_integer is False
def test_issue_3825():
"""catch: hash instability"""
x = Symbol("x")
y = Symbol("y")
a1 = x + y
a2 = y + x
a2.is_comparable
h1 = hash(a1)
h2 = hash(a2)
assert h1 == h2
def test_issue_4822():
z = (-1)**Rational(1, 3)*(1 - I*sqrt(3))
assert z.is_real in [True, None]
def test_hash_vs_typeinfo():
"""seemingly different typeinfo, but in fact equal"""
# the following two are semantically equal
x1 = Symbol('x', even=True)
x2 = Symbol('x', integer=True, odd=False)
assert hash(x1) == hash(x2)
assert x1 == x2
def test_hash_vs_typeinfo_2():
"""different typeinfo should mean !eq"""
# the following two are semantically different
x = Symbol('x')
x1 = Symbol('x', even=True)
assert x != x1
assert hash(x) != hash(x1) # This might fail with very low probability
def test_hash_vs_eq():
"""catch: different hash for equal objects"""
a = 1 + S.Pi # important: do not fold it into a Number instance
ha = hash(a) # it should be Add/Mul/... to trigger the bug
a.is_positive # this uses .evalf() and deduces it is positive
assert a.is_positive is True
# be sure that hash stayed the same
assert ha == hash(a)
# now b should be the same expression
b = a.expand(trig=True)
hb = hash(b)
assert a == b
assert ha == hb
def test_Add_is_pos_neg():
# these cover lines not covered by the rest of tests in core
n = Symbol('n', negative=True, infinite=True)
nn = Symbol('n', nonnegative=True, infinite=True)
np = Symbol('n', nonpositive=True, infinite=True)
p = Symbol('p', positive=True, infinite=True)
r = Dummy(real=True, finite=False)
x = Symbol('x')
xf = Symbol('xb', finite=True)
assert (n + p).is_positive is None
assert (n + x).is_positive is None
assert (p + x).is_positive is None
assert (n + p).is_negative is None
assert (n + x).is_negative is None
assert (p + x).is_negative is None
assert (n + xf).is_positive is False
assert (p + xf).is_positive is True
assert (n + xf).is_negative is True
assert (p + xf).is_negative is False
assert (x - S.Infinity).is_negative is None # issue 7798
# issue 8046, 16.2
assert (p + nn).is_positive
assert (n + np).is_negative
assert (p + r).is_positive is None
def test_Add_is_imaginary():
nn = Dummy(nonnegative=True)
assert (I*nn + I).is_imaginary # issue 8046, 17
def test_Add_is_algebraic():
a = Symbol('a', algebraic=True)
b = Symbol('a', algebraic=True)
na = Symbol('na', algebraic=False)
nb = Symbol('nb', algebraic=False)
x = Symbol('x')
assert (a + b).is_algebraic
assert (na + nb).is_algebraic is None
assert (a + na).is_algebraic is False
assert (a + x).is_algebraic is None
assert (na + x).is_algebraic is None
def test_Mul_is_algebraic():
a = Symbol('a', algebraic=True)
b = Symbol('a', algebraic=True)
na = Symbol('na', algebraic=False)
an = Symbol('an', algebraic=True, nonzero=True)
nb = Symbol('nb', algebraic=False)
x = Symbol('x')
assert (a*b).is_algebraic
assert (na*nb).is_algebraic is None
assert (a*na).is_algebraic is None
assert (an*na).is_algebraic is False
assert (a*x).is_algebraic is None
assert (na*x).is_algebraic is None
def test_Pow_is_algebraic():
e = Symbol('e', algebraic=True)
assert Pow(1, e, evaluate=False).is_algebraic
assert Pow(0, e, evaluate=False).is_algebraic
a = Symbol('a', algebraic=True)
na = Symbol('na', algebraic=False)
ia = Symbol('ia', algebraic=True, irrational=True)
ib = Symbol('ib', algebraic=True, irrational=True)
r = Symbol('r', rational=True)
x = Symbol('x')
assert (a**r).is_algebraic
assert (a**x).is_algebraic is None
assert (na**r).is_algebraic is False
assert (ia**r).is_algebraic
assert (ia**ib).is_algebraic is False
assert (a**e).is_algebraic is None
# Gelfond-Schneider constant:
assert Pow(2, sqrt(2), evaluate=False).is_algebraic is False
assert Pow(S.GoldenRatio, sqrt(3), evaluate=False).is_algebraic is False
def test_Mul_is_prime():
from sympy import Mul
x = Symbol('x', positive=True, integer=True)
y = Symbol('y', positive=True, integer=True)
assert (x*y).is_prime is None
assert ( (x+1)*(y+1) ).is_prime is False
x = Symbol('x', positive=True)
assert (x*y).is_prime is None
assert Mul(6, S.Half, evaluate=False).is_prime is True
assert Mul(sqrt(3), sqrt(3), evaluate=False).is_prime is True
assert Mul(5, S.Half, evaluate=False).is_prime is False
def test_Pow_is_prime():
from sympy import Pow
x = Symbol('x', positive=True, integer=True)
y = Symbol('y', positive=True, integer=True)
assert (x**y).is_prime is None
x = Symbol('x', positive=True)
assert (x**y).is_prime is None
assert Pow(6, S.One, evaluate=False).is_prime is False
assert Pow(9, S.Half, evaluate=False).is_prime is True
assert Pow(5, S.One, evaluate=False).is_prime is True
def test_Mul_is_infinite():
x = Symbol('x')
f = Symbol('f', finite=True)
i = Symbol('i', infinite=True)
z = Dummy(zero=True)
nzf = Dummy(finite=True, zero=False)
from sympy import Mul
assert (x*f).is_finite is None
assert (x*i).is_finite is None
assert (f*i).is_finite is False
assert (x*f*i).is_finite is None
assert (z*i).is_finite is False
assert (nzf*i).is_finite is False
assert (z*f).is_finite is True
assert Mul(0, f, evaluate=False).is_finite is True
assert Mul(0, i, evaluate=False).is_finite is False
assert (x*f).is_infinite is None
assert (x*i).is_infinite is None
assert (f*i).is_infinite is None
assert (x*f*i).is_infinite is None
assert (z*i).is_infinite is S.NaN.is_infinite
assert (nzf*i).is_infinite is True
assert (z*f).is_infinite is False
assert Mul(0, f, evaluate=False).is_infinite is False
assert Mul(0, i, evaluate=False).is_infinite is S.NaN.is_infinite
def test_special_is_rational():
i = Symbol('i', integer=True)
i2 = Symbol('i2', integer=True)
ni = Symbol('ni', integer=True, nonzero=True)
r = Symbol('r', rational=True)
rn = Symbol('r', rational=True, nonzero=True)
nr = Symbol('nr', irrational=True)
x = Symbol('x')
assert sqrt(3).is_rational is False
assert (3 + sqrt(3)).is_rational is False
assert (3*sqrt(3)).is_rational is False
assert exp(3).is_rational is False
assert exp(ni).is_rational is False
assert exp(rn).is_rational is False
assert exp(x).is_rational is None
assert exp(log(3), evaluate=False).is_rational is True
assert log(exp(3), evaluate=False).is_rational is True
assert log(3).is_rational is False
assert log(ni + 1).is_rational is False
assert log(rn + 1).is_rational is False
assert log(x).is_rational is None
assert (sqrt(3) + sqrt(5)).is_rational is None
assert (sqrt(3) + S.Pi).is_rational is False
assert (x**i).is_rational is None
assert (i**i).is_rational is True
assert (i**i2).is_rational is None
assert (r**i).is_rational is None
assert (r**r).is_rational is None
assert (r**x).is_rational is None
assert (nr**i).is_rational is None # issue 8598
assert (nr**Symbol('z', zero=True)).is_rational
assert sin(1).is_rational is False
assert sin(ni).is_rational is False
assert sin(rn).is_rational is False
assert sin(x).is_rational is None
assert asin(r).is_rational is False
assert sin(asin(3), evaluate=False).is_rational is True
@XFAIL
def test_issue_6275():
x = Symbol('x')
    # both zero or both Muls... but neither; a change here would be very appreciated.
# This is similar to x/x => 1 even though if x = 0, it is really nan.
assert isinstance(x*0, type(0*S.Infinity))
if 0*S.Infinity is S.NaN:
b = Symbol('b', finite=None)
assert (b*0).is_zero is None
def test_sanitize_assumptions():
# issue 6666
for cls in (Symbol, Dummy, Wild):
x = cls('x', real=1, positive=0)
assert x.is_real is True
assert x.is_positive is False
assert cls('', real=True, positive=None).is_positive is None
raises(ValueError, lambda: cls('', commutative=None))
raises(ValueError, lambda: Symbol._sanitize(dict(commutative=None)))
def test_special_assumptions():
e = -3 - sqrt(5) + (-sqrt(10)/2 - sqrt(2)/2)**2
assert simplify(e < 0) is S.false
assert simplify(e > 0) is S.false
assert (e == 0) is False # it's not a literal 0
assert e.equals(0) is True
def test_inconsistent():
# cf. issues 5795 and 5545
raises(InconsistentAssumptions, lambda: Symbol('x', real=True,
commutative=False))
def test_issue_6631():
assert ((-1)**(I)).is_real is True
assert ((-1)**(I*2)).is_real is True
assert ((-1)**(I/2)).is_real is True
assert ((-1)**(I*S.Pi)).is_real is True
assert (I**(I + 2)).is_real is True
def test_issue_2730():
assert (1/(1 + I)).is_real is False
def test_issue_4149():
assert (3 + I).is_complex
assert (3 + I).is_imaginary is False
assert (3*I + S.Pi*I).is_imaginary
# as Zero.is_imaginary is False, see issue 7649
y = Symbol('y', real=True)
assert (3*I + S.Pi*I + y*I).is_imaginary is None
p = Symbol('p', positive=True)
assert (3*I + S.Pi*I + p*I).is_imaginary
n = Symbol('n', negative=True)
assert (-3*I - S.Pi*I + n*I).is_imaginary
i = Symbol('i', imaginary=True)
assert ([(i**a).is_imaginary for a in range(4)] ==
[False, True, False, True])
# tests from the PR #7887:
e = S("-sqrt(3)*I/2 + 0.866025403784439*I")
assert e.is_real is False
assert e.is_imaginary
def test_issue_2920():
n = Symbol('n', negative=True)
assert sqrt(n).is_imaginary
def test_issue_7899():
x = Symbol('x', real=True)
assert (I*x).is_real is None
assert ((x - I)*(x - 1)).is_zero is None
assert ((x - I)*(x - 1)).is_real is None
@XFAIL
def test_issue_7993():
x = Dummy(integer=True)
y = Dummy(noninteger=True)
assert (x - y).is_zero is False
def test_issue_8075():
raises(InconsistentAssumptions, lambda: Dummy(zero=True, finite=False))
raises(InconsistentAssumptions, lambda: Dummy(zero=True, infinite=True))
def test_issue_8642():
x = Symbol('x', real=True, integer=False)
assert (x*2).is_integer is None
def test_issues_8632_8633_8638_8675_8992():
p = Dummy(integer=True, positive=True)
nn = Dummy(integer=True, nonnegative=True)
assert (p - S.Half).is_positive
assert (p - 1).is_nonnegative
assert (nn + 1).is_positive
assert (-p + 1).is_nonpositive
assert (-nn - 1).is_negative
prime = Dummy(prime=True)
assert (prime - 2).is_nonnegative
assert (prime - 3).is_nonnegative is None
even = Dummy(positive=True, even=True)
assert (even - 2).is_nonnegative
p = Dummy(positive=True)
assert (p/(p + 1) - 1).is_negative
assert ((p + 2)**3 - S.Half).is_positive
n = Dummy(negative=True)
assert (n - 3).is_nonpositive
def test_issue_9115():
n = Dummy('n', integer=True, nonnegative=True)
assert (factorial(n) >= 1) == True
assert (factorial(n) < 1) == False
def test_issue_9165():
z = Symbol('z', zero=True)
f = Symbol('f', finite=False)
assert 0/z == S.NaN
assert 0*(1/z) == S.NaN
assert 0*f == S.NaN
def test_issue_10024():
x = Dummy('x')
assert Mod(x, 2*pi).is_zero is None
def test_issue_10302():
x = Symbol('x')
r = Symbol('r', real=True)
u = -(3*2**pi)**(1/pi) + 2*3**(1/pi)
i = u + u*I
assert i.is_real is None # w/o simplification this should fail
assert (u + i).is_zero is None
assert (1 + i).is_zero is False
a = Dummy('a', zero=True)
assert (a + I).is_zero is False
assert (a + r*I).is_zero is None
assert (a + I).is_imaginary
assert (a + x + I).is_imaginary is None
assert (a + r*I + I).is_imaginary is None
def test_complex_reciprocal_imaginary():
assert (1 / (4 + 3*I)).is_imaginary is False
| bsd-3-clause |

2014c2g12/c2g12 | c2wp/exts/exts/sphinxcontrib/bibtex/directives.py | 40 | 7003 |
"""
New Doctree Directives
~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: BibliographyDirective
.. automethod:: run
.. automethod:: process_bibfile
.. automethod:: update_bibfile_cache
.. automethod:: parse_bibfile
"""
import os.path # getmtime()
import copy # deepcopy
import docutils.nodes
from docutils.parsers.rst import directives # for Directive.option_spec
from sphinx.util.compat import Directive
from sphinx.util.console import bold, standout
from pybtex.database.input import bibtex
from sphinxcontrib.bibtex.cache import BibliographyCache, BibfileCache
import sphinxcontrib.bibtex.latex_codec # registers the latex codec
from sphinxcontrib.bibtex.nodes import bibliography
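# The ``:start:`` option either continues the numbering of the previous
# bibliography ("continue" is mapped to -1) or takes a positive integer.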
def process_start_option(value):
if value == "continue":
return -1
else:
return directives.positive_int(value)
class BibliographyDirective(Directive):
"""Class for processing the :rst:dir:`bibliography` directive.
Parses the bibliography files, and produces a
:class:`~sphinxcontrib.bibtex.nodes.bibliography` node.
.. seealso::
Further processing of the resulting
:class:`~sphinxcontrib.bibtex.nodes.bibliography` node is done
by
:class:`~sphinxcontrib.bibtex.transforms.BibliographyTransform`.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
has_content = False
option_spec = {
'cited': directives.flag,
'notcited': directives.flag,
'all': directives.flag,
'style': directives.unchanged,
'list': directives.unchanged,
'enumtype': directives.unchanged,
'start': process_start_option,
'encoding': directives.encoding,
'disable-curly-bracket-strip': directives.flag,
'labelprefix': directives.unchanged,
}
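    # option_spec maps each directive option name to the docutils function that
    # parses and validates its value.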
def run(self):
"""Process .bib files, set file dependencies, and create a
node that is to be transformed to the entries of the
bibliography.
"""
env = self.state.document.settings.env
cache = env.bibtex_cache.bibliographies
# create id and cache for this node
# this id will be stored with the node
# and is used to look up additional data in env.bibtex_cache
# (implementation note: new_serialno only guarantees unique
# ids within a single document, but we need the id to be
# unique across all documents, so we also include the docname
# in the id)
id_ = 'bibtex-bibliography-%s-%s' % (
env.docname, env.new_serialno('bibtex'))
info = BibliographyCache(
docname=env.docname,
cite=(
"all"
if "all" in self.options else (
"notcited"
if "notcited" in self.options else (
"cited"))),
list_=self.options.get("list", "citation"),
enumtype=self.options.get("enumtype", "arabic"),
start=self.options.get("start", 1),
style=self.options.get("style", "plain"),
encoding=self.options.get(
'encoding',
'latex+' + self.state.document.settings.input_encoding),
curly_bracket_strip=(
'disable-curly-bracket-strip' not in self.options),
labelprefix=self.options.get("labelprefix", ""),
)
if (info.list_ not in set(["bullet", "enumerated", "citation"])):
env.app.warn(
"unknown bibliography list type '{0}'.".format(info.list_))
for bibfile in self.arguments[0].split():
# convert to normalized absolute path to ensure that the same file
# only occurs once in the cache
bibfile = os.path.normpath(env.relfn2path(bibfile.strip())[1])
self.process_bibfile(bibfile, info.encoding)
env.note_dependency(bibfile)
info.bibfiles.append(bibfile)
cache[id_] = info
return [bibliography('', ids=[id_])]
def parse_bibfile(self, bibfile, encoding):
"""Parse *bibfile*, and return parsed data.
:param bibfile: The bib file name.
:type bibfile: ``str``
:return: The parsed bibliography data.
:rtype: :class:`pybtex.database.BibliographyData`
"""
app = self.state.document.settings.env.app
parser = bibtex.Parser(encoding)
app.info(
bold("parsing bibtex file {0}... ".format(bibfile)), nonl=True)
parser.parse_file(bibfile)
app.info("parsed {0} entries"
.format(len(parser.data.entries)))
return parser.data
def update_bibfile_cache(self, bibfile, mtime, encoding):
"""Parse *bibfile* (see :meth:`parse_bibfile`), and store the
parsed data, along with modification time *mtime*, in the
bibtex cache.
:param bibfile: The bib file name.
:type bibfile: ``str``
:param mtime: The bib file's modification time.
:type mtime: ``float``
:return: The parsed bibliography data.
:rtype: :class:`pybtex.database.BibliographyData`
"""
data = self.parse_bibfile(bibfile, encoding)
env = self.state.document.settings.env
env.bibtex_cache.bibfiles[bibfile] = BibfileCache(
mtime=mtime,
data=data)
return data
def process_bibfile(self, bibfile, encoding):
"""Check if ``env.bibtex_cache.bibfiles[bibfile]`` is still
up to date. If not, parse the *bibfile* (see
:meth:`update_bibfile_cache`), and store parsed data in the
bibtex cache.
:param bibfile: The bib file name.
:type bibfile: ``str``
:return: The parsed bibliography data.
:rtype: :class:`pybtex.database.BibliographyData`
"""
env = self.state.document.settings.env
cache = env.bibtex_cache.bibfiles
# get modification time of bibfile
try:
mtime = os.path.getmtime(bibfile)
except OSError:
env.app.warn(
standout("could not open bibtex file {0}.".format(bibfile)))
cache[bibfile] = BibfileCache() # dummy cache
return cache[bibfile].data
# get cache and check if it is still up to date
# if it is not up to date, parse the bibtex file
# and store it in the cache
env.app.info(
bold("checking for {0} in bibtex cache... ".format(bibfile)),
nonl=True)
try:
bibfile_cache = cache[bibfile]
except KeyError:
env.app.info("not found")
self.update_bibfile_cache(bibfile, mtime, encoding)
else:
if mtime != bibfile_cache.mtime:
env.app.info("out of date")
self.update_bibfile_cache(bibfile, mtime, encoding)
else:
env.app.info('up to date')
return cache[bibfile].data
| gpl-2.0 |

Develonaut/Develonaut | node_modules/nodemon/travis_after_all.py | 76 | 4308 |
import os
import sys
import json
import time
import logging
# reduce() is used below to aggregate job results; it is a builtin on
# Python 2 but must be imported from functools on Python 3.
from functools import reduce
try:
    import urllib.request as urllib2
except ImportError:
    import urllib2
log = logging.getLogger("travis.leader")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)
TRAVIS_JOB_NUMBER = 'TRAVIS_JOB_NUMBER'
TRAVIS_BUILD_ID = 'TRAVIS_BUILD_ID'
POLLING_INTERVAL = 'LEADER_POLLING_INTERVAL'
GITHUB_TOKEN = 'GITHUB_TOKEN'
# Travis API entry point, there are at least https://api.travis-ci.com and https://api.travis-ci.org
travis_entry = sys.argv[1] if len(sys.argv) > 1 else 'https://api.travis-ci.org'
build_id = os.getenv(TRAVIS_BUILD_ID)
polling_interval = int(os.getenv(POLLING_INTERVAL, '5'))
gh_token = os.getenv(GITHUB_TOKEN)
# assume, first job is the leader
is_leader = lambda job_number: job_number.endswith('.1')
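# Leader/minion pattern: the first job of the build matrix (number ending in
# ".1") acts as the leader; it waits for every other job to finish and
# aggregates their results, while the other jobs mark themselves as minions
# and exit immediately.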
job_number = os.getenv(TRAVIS_JOB_NUMBER)
if not job_number:
    # it seems that even builds with only one job never get here
    log.fatal("Don't use leader election for builds without a job matrix")
exit(1)
elif is_leader(job_number):
log.info("This is a leader")
else:
    # since this script runs as a subprocess, env variables are exported back to the shell via a file
with open(".to_export_back", "w") as export_var:
export_var.write("BUILD_MINION=YES")
log.info("This is a minion")
exit(0)
class MatrixElement(object):
def __init__(self, json_raw):
self.is_finished = json_raw['finished_at'] is not None
self.is_succeeded = json_raw['result'] == 0
self.number = json_raw['number']
self.is_leader = is_leader(self.number)
def matrix_snapshot(travis_token):
"""
:return: Matrix List
"""
headers = {'content-type': 'application/json', 'Authorization': 'token {}'.format(travis_token)}
req = urllib2.Request("{0}/builds/{1}".format(travis_entry, build_id), headers=headers)
response = urllib2.urlopen(req).read()
raw_json = json.loads(response)
matrix_without_leader = [MatrixElement(job) for job in raw_json["matrix"] if not is_leader(job['number'])]
return matrix_without_leader
def wait_others_to_finish(travis_token):
def others_finished():
"""
Dumps others to finish
Leader cannot finish, it is working now
:return: tuple(True or False, List of not finished jobs)
"""
snapshot = matrix_snapshot(travis_token)
finished = [job.is_finished for job in snapshot if not job.is_leader]
return reduce(lambda a, b: a and b, finished), [job.number for job in snapshot if
not job.is_leader and not job.is_finished]
while True:
finished, waiting_list = others_finished()
if finished:
break
log.info("Leader waits for minions {0}...".format(waiting_list)) # just in case do not get "silence timeout"
time.sleep(polling_interval)
def get_token():
assert gh_token, 'GITHUB_TOKEN is not set'
data = {"github_token": gh_token}
headers = {'content-type': 'application/json'}
req = urllib2.Request("{0}/auth/github".format(travis_entry), json.dumps(data), headers)
response = urllib2.urlopen(req).read()
travis_token = json.loads(response).get('access_token')
return travis_token
try:
token = get_token()
wait_others_to_finish(token)
final_snapshot = matrix_snapshot(token)
log.info("Final Results: {0}".format([(e.number, e.is_succeeded) for e in final_snapshot]))
BUILD_AGGREGATE_STATUS = 'BUILD_AGGREGATE_STATUS'
others_snapshot = [el for el in final_snapshot if not el.is_leader]
if reduce(lambda a, b: a and b, [e.is_succeeded for e in others_snapshot]):
os.environ[BUILD_AGGREGATE_STATUS] = "others_succeeded"
elif reduce(lambda a, b: a and b, [not e.is_succeeded for e in others_snapshot]):
log.error("Others Failed")
os.environ[BUILD_AGGREGATE_STATUS] = "others_failed"
else:
log.warn("Others Unknown")
os.environ[BUILD_AGGREGATE_STATUS] = "unknown"
    # since this script runs as a subprocess, env variables are exported back to the shell via a file
with open(".to_export_back", "w") as export_var:
export_var.write("BUILD_LEADER=YES {0}={1}".format(BUILD_AGGREGATE_STATUS, os.environ[BUILD_AGGREGATE_STATUS]))
except Exception as e:
log.fatal(e)
| mit |

antoine-de/navitia | source/tyr/tests/integration/users_test.py | 1 | 23669 |
from tests.check_utils import api_get, api_post, api_delete, api_put, _dt
import json
import pytest
import mock
from navitiacommon import models
from tyr.rabbit_mq_handler import RabbitMqHandler
from tyr import app
import urllib
@pytest.fixture
def geojson_polygon():
return {
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [
[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0],
[100.0, 1.0], [100.0, 0.0]]
]
}
}
@pytest.fixture
def geojson_multipolygon():
return {
"type": "Feature",
"geometry": {
"type": "MultiPolygon",
"coordinates": [
[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]
]
}
}
@pytest.fixture
def invalid_geojsonfixture():
return {
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": []
}
}
@pytest.fixture
def create_user(geojson_polygon):
with app.app_context():
user = models.User('test', '[email protected]')
user.end_point = models.EndPoint.get_default()
user.billing_plan = models.BillingPlan.get_default(user.end_point)
user.shape = json.dumps(geojson_polygon)
models.db.session.add(user)
models.db.session.commit()
return user.id
@pytest.fixture
def create_user_without_shape():
with app.app_context():
user = models.User('test', '[email protected]')
user.end_point = models.EndPoint.get_default()
user.billing_plan = models.BillingPlan.get_default(user.end_point)
models.db.session.add(user)
models.db.session.commit()
return user.id
@pytest.fixture
def create_instance():
with app.app_context():
instance = models.Instance('instance')
models.db.session.add(instance)
models.db.session.commit()
return instance.id
@pytest.yield_fixture
def mock_rabbit():
with mock.patch.object(RabbitMqHandler, 'publish') as m:
yield m
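# mock_rabbit patches RabbitMqHandler.publish, so tests can assert whether a
# user change was broadcast without needing a real RabbitMQ connection.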
@pytest.fixture
def create_multiple_users(request, geojson_polygon):
with app.app_context():
end_point = models.EndPoint()
end_point.name = 'myEndPoint'
billing_plan = models.BillingPlan()
billing_plan.name = 'free'
billing_plan.end_point = end_point
user1 = models.User('foo', '[email protected]')
user1.end_point = end_point
user1.billing_plan = billing_plan
user1.shape = json.dumps(geojson_polygon)
user2 = models.User('foodefault', '[email protected]')
user2.end_point = models.EndPoint.get_default()
user2.billing_plan = models.BillingPlan.get_default(user2.end_point)
models.db.session.add(end_point)
models.db.session.add(billing_plan)
models.db.session.add(user1)
models.db.session.add(user2)
models.db.session.commit()
        # we end the context here, but need to keep some ids for later (the ORM objects won't survive leaving it!)
d = {'user1': user1.id, 'user2': user2.id, 'end_point': end_point.id, 'billing_plan': billing_plan.id}
# we can't truncate end_point and billing_plan, so we have to delete them explicitly
def teardown():
with app.app_context():
end_point = models.EndPoint.query.get(d['end_point'])
billing_plan = models.BillingPlan.query.get(d['billing_plan'])
models.db.session.delete(end_point)
models.db.session.delete(billing_plan)
models.db.session.commit()
request.addfinalizer(teardown)
return d
@pytest.fixture
def create_billing_plan():
with app.app_context():
billing_plan = models.BillingPlan(name='test', max_request_count=10, max_object_count=100,
end_point_id=models.EndPoint.get_default().id)
models.db.session.add(billing_plan)
models.db.session.commit()
return billing_plan.id
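# The api_* helpers imported from tests.check_utils wrap calls to the tyr API;
# when called with check=False they return a (body, status) tuple so the test
# can assert on the status code itself (assumption based on their use below).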
def test_get_users_empty():
resp = api_get('/v0/users/')
assert resp == []
def test_add_user_without_shape(mock_rabbit):
"""
creation of a user without shape
When we get this user, we should see
shape = None and has_shape = False
"""
user = {'login': 'user1', 'email': '[email protected]'}
data = json.dumps(user)
resp = api_post('/v0/users/', data=data, content_type='application/json')
def check(u):
        gen = (k for k in user if k != 'shape')
for k in gen:
assert u[k] == user[k]
assert u['end_point']['name'] == 'navitia.io'
assert u['type'] == 'with_free_instances'
assert u['block_until'] is None
check(resp)
assert resp['shape'] is None
assert resp['has_shape'] is False
assert mock_rabbit.called
    # we did not give any coord, so we should not get one back
assert resp['default_coord'] is None
# with disable_geojson=true by default
resp = api_get('/v0/users/')
assert len(resp) == 1
check(resp[0])
assert resp[0]['shape'] is None
assert resp[0]['has_shape'] is False
# with disable_geojson=false
resp = api_get('/v0/users/?disable_geojson=false')
assert len(resp) == 1
check(resp[0])
assert resp[0]['shape'] is None
assert resp[0]['has_shape'] is False
def test_add_user(mock_rabbit, geojson_polygon):
"""
creation of a user passing arguments as a json
"""
coord = '2.37730;48.84550'
user = {'login': 'user1', 'email': '[email protected]', 'shape': geojson_polygon, 'has_shape': True,
'default_coord': coord}
data = json.dumps(user)
resp = api_post('/v0/users/', data=data, content_type='application/json')
def check(u):
        gen = (k for k in user if k != 'shape')
for k in gen:
assert u[k] == user[k]
assert u['end_point']['name'] == 'navitia.io'
assert u['type'] == 'with_free_instances'
assert u['block_until'] is None
check(resp)
assert resp['shape'] == geojson_polygon
assert resp['default_coord'] == coord
resp = api_get('/v0/users/')
assert len(resp) == 1
check(resp[0])
assert resp[0]['shape'] == {}
assert mock_rabbit.called
def test_add_user_with_multipolygon(mock_rabbit, geojson_multipolygon):
"""
creation of a user with multipolygon shape
    status should become 200 once bragi accepts multipolygon shapes
"""
user = {'login': 'user1', 'email': '[email protected]', 'shape': geojson_multipolygon, 'has_shape': True}
data = json.dumps(user)
resp, status = api_post('/v0/users/', check=False, data=data, content_type='application/json')
assert status == 400
assert mock_rabbit.call_count == 0
def test_add_user_with_invalid_geojson(mock_rabbit, invalid_geojsonfixture):
"""
    creation of a user with an invalid geojson shape
"""
user = {'login': 'user1', 'email': '[email protected]', 'shape': invalid_geojsonfixture, 'has_shape': True}
data = json.dumps(user)
resp, status = api_post('/v0/users/', check=False, data=data, content_type='application/json')
assert status == 400
assert mock_rabbit.call_count == 0
def test_add_user_with_invalid_coord(mock_rabbit):
"""
    creation of a user passing a wrongly formatted coord
"""
user = {'login': 'user1', 'email': '[email protected]', 'default_coord': 'bob'}
data = json.dumps(user)
resp, status = api_post('/v0/users/', check=False, data=data, content_type='application/json')
assert status == 400
assert mock_rabbit.call_count == 0
def test_add_user_with_plus(mock_rabbit):
"""
creation of a user with a "+" in the email
"""
user = {'login': '[email protected]', 'email': '[email protected]'}
resp = api_post('/v0/users/', data=json.dumps(user), content_type='application/json')
def check(u):
for k in user.iterkeys():
assert u[k] == user[k]
assert u['end_point']['name'] == 'navitia.io'
assert u['type'] == 'with_free_instances'
assert u['block_until'] is None
check(resp)
resp = api_get('/v0/users/')
assert len(resp) == 1
check(resp[0])
assert mock_rabbit.called
def test_add_user_with_plus_no_json(mock_rabbit):
"""
creation of a user with a "+" in the email
"""
user = {'login': '[email protected]', 'email': '[email protected]'}
resp = api_post('/v0/users/', data=user)
def check(u):
for k in user.iterkeys():
assert u[k] == user[k]
assert u['end_point']['name'] == 'navitia.io'
assert u['type'] == 'with_free_instances'
assert u['block_until'] is None
check(resp)
resp = api_get('/v0/users/')
assert len(resp) == 1
check(resp[0])
assert mock_rabbit.called
def test_add_user_with_plus_in_query(mock_rabbit):
"""
creation of a user with a "+" in the email
"""
user = {'email': '[email protected]', 'login': '[email protected]'}
_, status = api_post('/v0/users/?login={email}&email={email}'.format(email=user['email']),
check=False)
assert status == 400
resp = api_post('/v0/users/?login={email}&email={email}'.format(email=urllib.quote(user['email'])))
def check(u):
for k in user.iterkeys():
assert u[k] == user[k]
assert u['end_point']['name'] == 'navitia.io'
assert u['type'] == 'with_free_instances'
assert u['block_until'] is None
check(resp)
resp = api_get('/v0/users/')
assert len(resp) == 1
check(resp[0])
assert mock_rabbit.called
def test_add_duplicate_login_user(create_user, mock_rabbit):
user = {'login': 'test', 'email': '[email protected]'}
resp, status = api_post('/v0/users/', check=False, data=json.dumps(user), content_type='application/json')
assert status == 409
assert mock_rabbit.call_count == 0
def test_add_duplicate_email_user(create_user, mock_rabbit):
user = {'login': 'user', 'email': '[email protected]'}
resp, status = api_post('/v0/users/', check=False, data=json.dumps(user), content_type='application/json')
assert status == 409
assert mock_rabbit.call_count == 0
def test_add_user_invalid_email(mock_rabbit):
"""
creation of a user with an invalid email
"""
user = {'login': 'user1', 'email': 'user1'}
resp, status = api_post('/v0/users/', check=False, data=json.dumps(user), content_type='application/json')
assert status == 400
assert mock_rabbit.call_count == 0
def test_add_user_invalid_endpoint(mock_rabbit):
"""
creation of a user with an invalid endpoint
"""
user = {'login': 'user1', 'email': '[email protected]', 'end_point_id': 100}
resp, status = api_post('/v0/users/', check=False, data=json.dumps(user), content_type='application/json')
assert status == 400
assert mock_rabbit.call_count == 0
def test_add_user_invalid_billingplan(mock_rabbit):
"""
    creation of a user with an invalid billing plan
"""
user = {'login': 'user1', 'email': '[email protected]', 'billing_plan_id': 100}
resp, status = api_post('/v0/users/', check=False, data=json.dumps(user), content_type='application/json')
assert status == 400
assert mock_rabbit.call_count == 0
def test_add_user_invalid_type(mock_rabbit):
"""
creation of a user with an invalid type
"""
user = {'login': 'user1', 'email': '[email protected]', 'type': 'foo'}
resp, status = api_post('/v0/users/', check=False, data=json.dumps(user), content_type='application/json')
assert status == 400
assert mock_rabbit.call_count == 0
def test_multiple_users(create_multiple_users, mock_rabbit):
"""
check the list of users returned by the API
"""
resp = api_get('/v0/users/')
assert len(resp) == 2
user1_found = False
user2_found = False
for u in resp:
if u['id'] == create_multiple_users['user1']:
user1_found = True
assert u['login'] == 'foo'
assert u['email'] == '[email protected]'
assert u['end_point']['name'] == 'myEndPoint'
assert u['billing_plan']['name'] == 'free'
if u['id'] == create_multiple_users['user2']:
user2_found = True
assert u['login'] == 'foodefault'
assert u['email'] == '[email protected]'
assert u['end_point']['name'] == 'navitia.io'
assert u['billing_plan']['name'] == 'nav_ctp'
assert user1_found
assert user2_found
assert mock_rabbit.call_count == 0
def test_delete_user(create_multiple_users, mock_rabbit):
"""
delete a user
"""
resp, status = api_delete('/v0/users/{}'.format(create_multiple_users['user1']), check=False, no_json=True)
assert status == 204
resp, status = api_get('/v0/users/{}'.format(create_multiple_users['user1']), check=False)
assert status == 404
resp = api_get('/v0/users/')
assert len(resp) == 1
u = resp[0]
assert u['id'] == create_multiple_users['user2']
assert u['login'] == 'foodefault'
assert u['email'] == '[email protected]'
assert u['end_point']['name'] == 'navitia.io'
assert u['billing_plan']['name'] == 'nav_ctp'
assert mock_rabbit.call_count == 1
def test_delete_invalid_user(create_multiple_users, mock_rabbit):
"""
we try to delete a non-existent user; this must fail, and after that we check our users to make sure none was removed
"""
to_delete = 0
while to_delete in create_multiple_users.values():
to_delete = to_delete + 1
resp, status = api_delete('/v0/users/{}'.format(to_delete), check=False, no_json=True)
assert status == 404
resp = api_get('/v0/users/')
assert len(resp) == 2
assert mock_rabbit.call_count == 0
def test_update_invalid_user(mock_rabbit):
"""
we try to update a user who doesn't exist
"""
user = {'login': 'user1', 'email': '[email protected]'}
resp, status = api_put('/v0/users/10', check=False, data=json.dumps(user), content_type='application/json')
assert status == 404
assert mock_rabbit.call_count == 0
def test_update_user(create_multiple_users, mock_rabbit, geojson_polygon):
"""
we update a user's login, email and shape
"""
user = {'login': 'user1', 'email': '[email protected]', 'shape': geojson_polygon}
resp = api_put('/v0/users/{}'.format(create_multiple_users['user1']), data=json.dumps(user),
content_type='application/json')
def check(u):
for k in user.iterkeys():
assert u[k] == user[k]
assert resp['id'] == create_multiple_users['user1']
assert resp['login'] == user['login']
assert resp['email'] == user['email']
check(resp)
assert mock_rabbit.called
def test_update_block_until(create_multiple_users, mock_rabbit, geojson_polygon):
"""
we update a user's block_until date
"""
user = {'block_until': '20160128T111200Z'}
resp = api_put('/v0/users/{}'.format(create_multiple_users['user1']), data=json.dumps(user),
content_type='application/json')
assert resp['id'] == create_multiple_users['user1']
assert resp['block_until'] == '2016-01-28T11:12:00'
assert resp['shape'] == geojson_polygon
assert mock_rabbit.called
def test_update_shape(create_multiple_users, mock_rabbit, geojson_polygon):
"""
we update a user's shape
"""
user = {'shape': geojson_polygon}
resp = api_put('/v0/users/{}'.format(create_multiple_users['user1']), data=json.dumps(user),
content_type='application/json')
def check(u):
for k in user.iterkeys():
assert u[k] == user[k]
assert resp['id'] == create_multiple_users['user1']
check(resp)
assert mock_rabbit.called
def test_update_shape_with_none(create_multiple_users, mock_rabbit):
"""
we update a user with shape=None; the shape must be cleared
"""
user = {'shape': None}
resp = api_put('/v0/users/{}'.format(create_multiple_users['user1']), data=json.dumps(user),
content_type='application/json')
assert resp['id'] == create_multiple_users['user1']
assert resp['shape'] is None
assert mock_rabbit.called
def test_update_shape_with_empty(create_multiple_users, mock_rabbit, geojson_polygon):
"""
we update a user with an empty shape; the existing shape must be kept
"""
user = {'shape': {}}
resp = api_put('/v0/users/{}'.format(create_multiple_users['user1']), data=json.dumps(user),
content_type='application/json')
assert resp['id'] == create_multiple_users['user1']
assert resp['shape'] == geojson_polygon
assert mock_rabbit.called
def test_full_registration_then_deletion(create_instance, mock_rabbit):
"""
we create a user, then a key for them, and finally we give them an authorization
after that we delete the user
"""
user = {'login': 'user1', 'email': '[email protected]'}
resp_user = api_post('/v0/users/', data=json.dumps(user), content_type='application/json')
api_post('/v0/users/{}/keys'.format(resp_user['id']), data=json.dumps({'app_name': 'myApp'}),
content_type='application/json')
auth = {'instance_id': create_instance, 'api_id': 1}
api_post('/v0/users/{}/authorizations'.format(resp_user['id']), data=json.dumps(auth),
content_type='application/json')
resp = api_get('/v0/users/{}'.format(resp_user['id']))
assert len(resp['keys']) == 1
assert resp['keys'][0]['app_name'] == 'myApp'
assert len(resp['authorizations']) == 1
assert resp['authorizations'][0]['instance']['name'] == 'instance'
_, status = api_delete('/v0/users/{}'.format(resp_user['id']), check=False, no_json=True)
assert status == 204
assert mock_rabbit.called
_, status = api_get('/v0/users/{}'.format(resp_user['id']), check=False)
assert status == 404
def test_deletion_keys_and_auth(create_instance, mock_rabbit):
"""
We start by creating the user (it's easier than using a fixture), then we delete the key and the authorization
"""
# first, test that with an unknown user, we get a 404
_, status = api_delete('/v0/users/75/keys/1', check=False, no_json=True)
assert status == 404
user = {'login': 'user1', 'email': '[email protected]'}
resp_user = api_post('/v0/users/', data=json.dumps(user), content_type='application/json')
api_post('/v0/users/{}/keys'.format(resp_user['id']), data=json.dumps({'app_name': 'myApp'}),
content_type='application/json')
auth = {'instance_id': create_instance, 'api_id': 1}
api_post('/v0/users/{}/authorizations'.format(resp_user['id']), data=json.dumps(auth),
content_type='application/json')
resp = api_get('/v0/users/{}'.format(resp_user['id']))
resp_key = api_delete('/v0/users/{user_id}/keys/{key_id}'.format(user_id=resp['id'],
key_id=resp['keys'][0]['id']))
assert len(resp_key['keys']) == 0
resp_auth = api_delete('/v0/users/{}/authorizations/'.format(resp['id']), data=json.dumps(auth),
content_type='application/json')
assert len(resp_auth['authorizations']) == 0
assert mock_rabbit.called
def test_get_user_with_shape(create_user, geojson_polygon):
"""
We start by creating the user with a shape,
and we test that the returned shape is {} (geojson is hidden by default) and has_shape is True
"""
print api_get('/v0/users')
resp = api_get('/v0/users/{}'.format(create_user))
assert resp['has_shape'] is True
assert resp['shape'] == {}
def test_get_user_with_shape_and_disable_geojson_param_false(create_user, geojson_polygon):
"""
We start by creating the user with a shape.
We request the user with parameter disable_geojson=false
We test that shape = geojson and has_shape = True
"""
resp = api_get('/v0/users/{}?disable_geojson=false'.format(create_user))
assert resp['has_shape'] is True
assert resp['shape'] == geojson_polygon
def test_get_user_without_shape(create_user_without_shape):
"""
We start by creating the user without shape,
and we test that shape = None and has_shape = False
"""
resp = api_get('/v0/users/{}'.format(create_user_without_shape))
print resp['shape']
assert resp['has_shape'] is False
assert resp['shape'] is None
def test_get_user_without_shape_and_disable_geojson_param_false(create_user_without_shape):
"""
We start by creating the user without shape.
We request the user with parameter disable_geojson=false
We test that shape = None and has_shape = False
"""
resp = api_get('/v0/users/{}?disable_geojson=false'.format(create_user_without_shape))
assert resp['has_shape'] is False
assert resp['shape'] is None
def test_get_users(create_multiple_users):
"""
We start by creating a user with a shape and a user without shape,
we test that:
user1.has_shape = True
user1.shape = {}
user2.has_shape = False
user2.shape = None
"""
resp = api_get('/v0/users')
foo = next((u for u in resp if u.get('login') == 'foo'), None)
assert foo
assert foo.get('has_shape') is True
assert foo.get('shape') == {}
foodefault = next((u for u in resp if u.get('login') == 'foodefault'), None)
assert foodefault
assert foodefault.get('has_shape') is False
assert foodefault.get('shape') is None
def test_get_users_with_disable_geojson_false(create_multiple_users, geojson_polygon):
"""
We start by creating a user with a shape and a user without shape,
we test that requesting /users?disable_geojson=false:
user1.has_shape = True
user1.shape = geojson
user2.has_shape = False
user2.shape = None
"""
resp = api_get('/v0/users?disable_geojson=false')
foo = next((u for u in resp if u.get('login') == 'foo'), None)
assert foo
assert foo.get('has_shape') is True
assert foo.get('shape') == geojson_polygon
foodefault = next((u for u in resp if u.get('login') == 'foodefault'), None)
assert foodefault
assert foodefault.get('has_shape') is False
assert foodefault.get('shape') is None
def test_get_billing_plan(create_billing_plan):
"""
We create a billing_plan and check that we can retrieve it.
"""
resp = api_get('/v0/billing_plans/{}'.format(create_billing_plan))
assert resp['name'] == 'test'
assert resp['max_request_count'] == 10
assert resp['max_object_count'] == 100
def test_delete_billing_plan(create_billing_plan):
"""
We start by creating a billing_plan.
Delete the billing_plan
"""
resp = api_get('/v0/billing_plans/{}'.format(create_billing_plan))
_, status = api_delete('/v0/billing_plans/{}'.format(resp['id']), check=False, no_json=True)
assert status == 204
def test_delete_billing_plan_used_by_an_user(create_user, geojson_polygon):
"""
We start by creating the user with a shape.
We request the user with parameter disable_geojson=false
A default billing_plan is created and used with name = 'nav_ctp'
We try to delete this user's billing_plan; the deletion must be rejected with a 409.
"""
resp = api_get('/v0/users/{}?disable_geojson=false'.format(create_user))
assert resp['billing_plan']['name'] == 'nav_ctp'
assert resp['has_shape'] is True
assert resp['shape'] == geojson_polygon
_, status = api_delete('/v0/billing_plans/{}'.format(resp['billing_plan']['id']), check=False, no_json=True)
assert status == 409
|
agpl-3.0
|
ptitjano/bokeh
|
bokeh/server/protocol/messages/tests/test_push_doc.py
|
14
|
1038
|
from __future__ import absolute_import, print_function
import unittest
import bokeh.document as document
from bokeh.model import Model
from bokeh.core.properties import Int, Instance
from bokeh.server.protocol import Protocol
class AnotherModelInTestPushDoc(Model):
bar = Int(1)
class SomeModelInTestPushDoc(Model):
foo = Int(2)
child = Instance(Model)
class TestPushDocument(unittest.TestCase):
def _sample_doc(self):
doc = document.Document()
another = AnotherModelInTestPushDoc()
doc.add_root(SomeModelInTestPushDoc(child=another))
doc.add_root(SomeModelInTestPushDoc())
return doc
def test_create(self):
sample = self._sample_doc()
Protocol("1.0").create("PUSH-DOC", sample)
def test_create_then_parse(self):
sample = self._sample_doc()
msg = Protocol("1.0").create("PUSH-DOC", sample)
copy = document.Document()
msg.push_to_document(copy)
assert len(sample.roots) == 2
assert len(copy.roots) == 2
|
bsd-3-clause
|
CCI-MOC/nova
|
nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py
|
42
|
7367
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
from oslo_utils import units
from oslo_vmware.objects import datastore as ds_obj
from nova import test
from nova.virt.vmwareapi import ds_util
ResultSet = collections.namedtuple('ResultSet', ['objects'])
ObjectContent = collections.namedtuple('ObjectContent', ['obj', 'propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('ManagedObjectReference', ['value'])
class VMwareDSUtilDatastoreSelectionTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareDSUtilDatastoreSelectionTestCase, self).setUp()
self.data = [
['VMFS', 'os-some-name', True, 'normal', 987654321, 12346789],
['NFS', 'another-name', True, 'normal', 9876543210, 123467890],
['BAD', 'some-name-bad', True, 'normal', 98765432100, 1234678900],
['VMFS', 'some-name-good', False, 'normal', 987654321, 12346789],
['VMFS', 'new-name', True, 'inMaintenance', 987654321, 12346789]
]
def build_result_set(self, mock_data, name_list=None):
# datastores will have a moref_id of ds-000 and
# so on based on their index in the mock_data list
if name_list is None:
name_list = self.propset_name_list
objects = []
for id, row in enumerate(mock_data):
obj = ObjectContent(
obj=MoRef(value="ds-%03d" % id),
propSet=[])
for index, value in enumerate(row):
obj.propSet.append(
DynamicProperty(name=name_list[index], val=row[index]))
objects.append(obj)
return ResultSet(objects=objects)
@property
def propset_name_list(self):
return ['summary.type', 'summary.name', 'summary.accessible',
'summary.maintenanceMode', 'summary.capacity',
'summary.freeSpace']
def test_filter_datastores_simple(self):
datastores = self.build_result_set(self.data)
best_match = ds_obj.Datastore(ref='fake_ref', name='ds',
capacity=0, freespace=0)
rec = ds_util._select_datastore(None, datastores, best_match)
self.assertIsNotNone(rec.ref, "could not find datastore!")
self.assertEqual('ds-001', rec.ref.value,
"didn't find the right datastore!")
self.assertEqual(123467890, rec.freespace,
"did not obtain correct freespace!")
def test_filter_datastores_empty(self):
data = []
datastores = self.build_result_set(data)
best_match = ds_obj.Datastore(ref='fake_ref', name='ds',
capacity=0, freespace=0)
rec = ds_util._select_datastore(None, datastores, best_match)
self.assertEqual(best_match, rec)
def test_filter_datastores_no_match(self):
datastores = self.build_result_set(self.data)
datastore_regex = re.compile('no_match.*')
best_match = ds_obj.Datastore(ref='fake_ref', name='ds',
capacity=0, freespace=0)
rec = ds_util._select_datastore(None, datastores,
best_match,
datastore_regex)
self.assertEqual(best_match, rec, "did not match datastore properly")
def test_filter_datastores_specific_match(self):
data = [
['VMFS', 'os-some-name', True, 'normal', 987654321, 1234678],
['NFS', 'another-name', True, 'normal', 9876543210, 123467890],
['BAD', 'some-name-bad', True, 'normal', 98765432100, 1234678900],
['VMFS', 'some-name-good', True, 'normal', 987654321, 12346789],
['VMFS', 'some-other-good', False, 'normal', 987654321000,
12346789000],
['VMFS', 'new-name', True, 'inMaintenance', 987654321000,
12346789000]
]
# only the DS some-name-good is accessible and matches the regex
datastores = self.build_result_set(data)
datastore_regex = re.compile('.*-good$')
best_match = ds_obj.Datastore(ref='fake_ref', name='ds',
capacity=0, freespace=0)
rec = ds_util._select_datastore(None, datastores,
best_match,
datastore_regex)
self.assertIsNotNone(rec, "could not find datastore!")
self.assertEqual('ds-003', rec.ref.value,
"didn't find the right datastore!")
self.assertNotEqual('ds-004', rec.ref.value,
"accepted an unreachable datastore!")
self.assertEqual('some-name-good', rec.name)
self.assertEqual(12346789, rec.freespace,
"did not obtain correct freespace!")
self.assertEqual(987654321, rec.capacity,
"did not obtain correct capacity!")
def test_filter_datastores_missing_props(self):
data = [
['VMFS', 'os-some-name', 987654321, 1234678],
['NFS', 'another-name', 9876543210, 123467890],
]
# no matches are expected when 'summary.accessible' is missing
prop_names = ['summary.type', 'summary.name',
'summary.capacity', 'summary.freeSpace']
datastores = self.build_result_set(data, prop_names)
best_match = ds_obj.Datastore(ref='fake_ref', name='ds',
capacity=0, freespace=0)
rec = ds_util._select_datastore(None, datastores, best_match)
self.assertEqual(best_match, rec, "no matches were expected")
def test_filter_datastores_best_match(self):
data = [
['VMFS', 'spam-good', True, 20 * units.Gi, 10 * units.Gi],
['NFS', 'eggs-good', True, 40 * units.Gi, 15 * units.Gi],
['NFS41', 'nfs41-is-good', True, 35 * units.Gi, 12 * units.Gi],
['BAD', 'some-name-bad', True, 30 * units.Gi, 20 * units.Gi],
['VMFS', 'some-name-good', True, 50 * units.Gi, 5 * units.Gi],
['VMFS', 'some-other-good', True, 10 * units.Gi, 10 * units.Gi],
]
datastores = self.build_result_set(data)
datastore_regex = re.compile('.*-good$')
# the current best match is better than all candidates
best_match = ds_obj.Datastore(ref='ds-100', name='best-ds-good',
capacity=20 * units.Gi, freespace=19 * units.Gi)
rec = ds_util._select_datastore(None,
datastores,
best_match,
datastore_regex)
self.assertEqual(best_match, rec, "did not match datastore properly")
|
apache-2.0
|
AndyLavr/Aspire-SW5-012_Kernel_4.8
|
scripts/gdb/linux/utils.py
|
509
|
4833
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# common utilities
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
class CachedType:
def __init__(self, name):
self._type = None
self._name = name
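    # a newly loaded object file can invalidate the resolved type, so drop the
    # cache and disconnect; get_type() will look the type up again on next use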
def _new_objfile_handler(self, event):
self._type = None
gdb.events.new_objfile.disconnect(self._new_objfile_handler)
def get_type(self):
if self._type is None:
self._type = gdb.lookup_type(self._name)
if self._type is None:
raise gdb.GdbError(
"cannot resolve type '{0}'".format(self._name))
if hasattr(gdb, 'events') and hasattr(gdb.events, 'new_objfile'):
gdb.events.new_objfile.connect(self._new_objfile_handler)
return self._type
long_type = CachedType("long")
def get_long_type():
global long_type
return long_type.get_type()
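# offset_of() casts a NULL pointer to the given pointer type and reads the
# field's address, i.e. the field's byte offset; container_of() subtracts that
# offset from a member pointer to recover the enclosing structure, mirroring
# the kernel's container_of() macro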
def offset_of(typeobj, field):
element = gdb.Value(0).cast(typeobj)
return int(str(element[field].address).split()[0], 16)
def container_of(ptr, typeobj, member):
return (ptr.cast(get_long_type()) -
offset_of(typeobj, member)).cast(typeobj)
class ContainerOf(gdb.Function):
"""Return pointer to containing data structure.
$container_of(PTR, "TYPE", "ELEMENT"): Given PTR, return a pointer to the
data structure of the type TYPE in which PTR is the address of ELEMENT.
Note that TYPE and ELEMENT have to be quoted as strings."""
def __init__(self):
super(ContainerOf, self).__init__("container_of")
def invoke(self, ptr, typename, elementname):
return container_of(ptr, gdb.lookup_type(typename.string()).pointer(),
elementname.string())
ContainerOf()
BIG_ENDIAN = 0
LITTLE_ENDIAN = 1
target_endianness = None
def get_target_endianness():
global target_endianness
if target_endianness is None:
endian = gdb.execute("show endian", to_string=True)
if "little endian" in endian:
target_endianness = LITTLE_ENDIAN
elif "big endian" in endian:
target_endianness = BIG_ENDIAN
else:
raise gdb.GdbError("unknown endianness '{0}'".format(str(endian)))
return target_endianness
def read_memoryview(inf, start, length):
return memoryview(inf.read_memory(start, length))
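# the read_u16/u32/u64 helpers assemble unsigned integers from a raw byte
# buffer, honouring the target endianness reported by gdb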
def read_u16(buffer):
value = [0, 0]
if type(buffer[0]) is str:
value[0] = ord(buffer[0])
value[1] = ord(buffer[1])
else:
value[0] = buffer[0]
value[1] = buffer[1]
if get_target_endianness() == LITTLE_ENDIAN:
return value[0] + (value[1] << 8)
else:
return value[1] + (value[0] << 8)
def read_u32(buffer):
if get_target_endianness() == LITTLE_ENDIAN:
return read_u16(buffer[0:2]) + (read_u16(buffer[2:4]) << 16)
else:
return read_u16(buffer[2:4]) + (read_u16(buffer[0:2]) << 16)
def read_u64(buffer):
if get_target_endianness() == LITTLE_ENDIAN:
return read_u32(buffer[0:4]) + (read_u32(buffer[4:8]) << 32)
else:
return read_u32(buffer[4:8]) + (read_u32(buffer[0:4]) << 32)
target_arch = None
def is_target_arch(arch):
if hasattr(gdb.Frame, 'architecture'):
return arch in gdb.newest_frame().architecture().name()
else:
global target_arch
if target_arch is None:
target_arch = gdb.execute("show architecture", to_string=True)
return arch in target_arch
GDBSERVER_QEMU = 0
GDBSERVER_KGDB = 1
gdbserver_type = None
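# the gdbserver type is probed lazily: QEMU answers the "monitor info version"
# command, while kgdb exposes shadow CPU threads named "shadowCPU<n>"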
def get_gdbserver_type():
def exit_handler(event):
global gdbserver_type
gdbserver_type = None
gdb.events.exited.disconnect(exit_handler)
def probe_qemu():
try:
return gdb.execute("monitor info version", to_string=True) != ""
except:
return False
def probe_kgdb():
try:
thread_info = gdb.execute("info thread 2", to_string=True)
return "shadowCPU0" in thread_info
except:
return False
global gdbserver_type
if gdbserver_type is None:
if probe_qemu():
gdbserver_type = GDBSERVER_QEMU
elif probe_kgdb():
gdbserver_type = GDBSERVER_KGDB
if gdbserver_type is not None and hasattr(gdb, 'events'):
gdb.events.exited.connect(exit_handler)
return gdbserver_type
def gdb_eval_or_none(expression):
try:
return gdb.parse_and_eval(expression)
except:
return None
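# walk d_parent up to the root and concatenate the d_iname components to
# rebuild the dentry's path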
def dentry_name(d):
parent = d['d_parent']
if parent == d or parent == 0:
return ""
p = dentry_name(d['d_parent']) + "/"
return p + d['d_iname'].string()
|
gpl-2.0
|
eeshangarg/oh-mainline
|
vendor/packages/scrapy/scrapy/tests/test_contrib_exporter.py
|
16
|
7618
|
import unittest, cPickle as pickle
from cStringIO import StringIO
from scrapy.item import Item, Field
from scrapy.utils.python import str_to_unicode
from scrapy.utils.py26 import json
from scrapy.contrib.exporter import BaseItemExporter, PprintItemExporter, \
PickleItemExporter, CsvItemExporter, XmlItemExporter, JsonLinesItemExporter, \
JsonItemExporter
class TestItem(Item):
name = Field()
age = Field()
class BaseItemExporterTest(unittest.TestCase):
def setUp(self):
self.i = TestItem(name=u'John\xa3', age='22')
self.output = StringIO()
self.ie = self._get_exporter()
def _get_exporter(self, **kwargs):
return BaseItemExporter(**kwargs)
def _check_output(self):
pass
def _assert_expected_item(self, exported_dict):
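        # exported values are byte strings; convert them back to unicode
        # before comparing against the original item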
for k, v in exported_dict.items():
exported_dict[k] = str_to_unicode(v)
self.assertEqual(self.i, exported_dict)
def test_export_item(self):
self.ie.start_exporting()
try:
self.ie.export_item(self.i)
except NotImplementedError:
if self.ie.__class__ is not BaseItemExporter:
raise
self.ie.finish_exporting()
self._check_output()
def test_serialize_field(self):
self.assertEqual(self.ie.serialize_field( \
self.i.fields['name'], 'name', self.i['name']), 'John\xc2\xa3')
self.assertEqual( \
self.ie.serialize_field(self.i.fields['age'], 'age', self.i['age']), '22')
def test_fields_to_export(self):
ie = self._get_exporter(fields_to_export=['name'])
self.assertEqual(list(ie._get_serialized_fields(self.i)), [('name', 'John\xc2\xa3')])
ie = self._get_exporter(fields_to_export=['name'], encoding='latin-1')
name = list(ie._get_serialized_fields(self.i))[0][1]
assert isinstance(name, str)
self.assertEqual(name, 'John\xa3')
def test_field_custom_serializer(self):
def custom_serializer(value):
return str(int(value) + 2)
class CustomFieldItem(Item):
name = Field()
age = Field(serializer=custom_serializer)
i = CustomFieldItem(name=u'John\xa3', age='22')
ie = self._get_exporter()
self.assertEqual(ie.serialize_field(i.fields['name'], 'name', i['name']), 'John\xc2\xa3')
self.assertEqual(ie.serialize_field(i.fields['age'], 'age', i['age']), '24')
class PprintItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return PprintItemExporter(self.output, **kwargs)
def _check_output(self):
self._assert_expected_item(eval(self.output.getvalue()))
class PickleItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return PickleItemExporter(self.output, **kwargs)
def _check_output(self):
self._assert_expected_item(pickle.loads(self.output.getvalue()))
def test_export_multiple_items(self):
i1 = TestItem(name='hello', age='world')
i2 = TestItem(name='bye', age='world')
f = StringIO()
ie = PickleItemExporter(f)
ie.start_exporting()
ie.export_item(i1)
ie.export_item(i2)
ie.finish_exporting()
f.reset()
self.assertEqual(pickle.load(f), i1)
self.assertEqual(pickle.load(f), i2)
class CsvItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return CsvItemExporter(self.output, **kwargs)
def _check_output(self):
self.assertEqual(self.output.getvalue(), 'age,name\r\n22,John\xc2\xa3\r\n')
def test_header(self):
output = StringIO()
ie = CsvItemExporter(output, fields_to_export=self.i.fields.keys())
ie.start_exporting()
ie.export_item(self.i)
ie.finish_exporting()
self.assertEqual(output.getvalue(), 'age,name\r\n22,John\xc2\xa3\r\n')
output = StringIO()
ie = CsvItemExporter(output, fields_to_export=['age'])
ie.start_exporting()
ie.export_item(self.i)
ie.finish_exporting()
self.assertEqual(output.getvalue(), 'age\r\n22\r\n')
output = StringIO()
ie = CsvItemExporter(output)
ie.start_exporting()
ie.export_item(self.i)
ie.export_item(self.i)
ie.finish_exporting()
self.assertEqual(output.getvalue(), 'age,name\r\n22,John\xc2\xa3\r\n22,John\xc2\xa3\r\n')
output = StringIO()
ie = CsvItemExporter(output, include_headers_line=False)
ie.start_exporting()
ie.export_item(self.i)
ie.finish_exporting()
self.assertEqual(output.getvalue(), '22,John\xc2\xa3\r\n')
def test_join_multivalue(self):
class TestItem2(Item):
name = Field()
friends = Field()
i = TestItem2(name='John', friends=['Mary', 'Paul'])
output = StringIO()
ie = CsvItemExporter(output, include_headers_line=False)
ie.start_exporting()
ie.export_item(i)
ie.finish_exporting()
self.assertEqual(output.getvalue(), '"Mary,Paul",John\r\n')
class XmlItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return XmlItemExporter(self.output, **kwargs)
def _check_output(self):
expected_value = '<?xml version="1.0" encoding="utf-8"?>\n<items><item><age>22</age><name>John\xc2\xa3</name></item></items>'
self.assertEqual(self.output.getvalue(), expected_value)
def test_multivalued_fields(self):
output = StringIO()
item = TestItem(name=[u'John\xa3', u'Doe'])
ie = XmlItemExporter(output)
ie.start_exporting()
ie.export_item(item)
ie.finish_exporting()
expected_value = '<?xml version="1.0" encoding="utf-8"?>\n<items><item><name><value>John\xc2\xa3</value><value>Doe</value></name></item></items>'
self.assertEqual(output.getvalue(), expected_value)
class JsonLinesItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return JsonLinesItemExporter(self.output, **kwargs)
def _check_output(self):
exported = json.loads(self.output.getvalue().strip())
self.assertEqual(exported, dict(self.i))
class JsonItemExporterTest(JsonLinesItemExporterTest):
def _get_exporter(self, **kwargs):
return JsonItemExporter(self.output, **kwargs)
def _check_output(self):
exported = json.loads(self.output.getvalue().strip())
self.assertEqual(exported, [dict(self.i)])
def test_two_items(self):
self.ie.start_exporting()
self.ie.export_item(self.i)
self.ie.export_item(self.i)
self.ie.finish_exporting()
exported = json.loads(self.output.getvalue())
self.assertEqual(exported, [dict(self.i), dict(self.i)])
class CustomItemExporterTest(unittest.TestCase):
def test_exporter_custom_serializer(self):
class CustomItemExporter(BaseItemExporter):
def serialize_field(self, field, name, value):
if name == 'age':
return str(int(value) + 1)
else:
return super(CustomItemExporter, self).serialize_field(field, \
name, value)
i = TestItem(name=u'John', age='22')
ie = CustomItemExporter()
self.assertEqual( \
ie.serialize_field(i.fields['name'], 'name', i['name']), 'John')
self.assertEqual(
ie.serialize_field(i.fields['age'], 'age', i['age']), '23')
if __name__ == '__main__':
unittest.main()
|
agpl-3.0
|
popazerty/dvbapp2-gui
|
lib/python/Tools/CopyFiles.py
|
2
|
2065
|
from Components.Task import PythonTask, Task, Job, job_manager as JobManager
from Tools.Directories import fileExists
from enigma import eTimer
from os import path
from shutil import rmtree
class DeleteFolderTask(PythonTask):
def openFiles(self, fileList):
self.fileList = fileList
def work(self):
print "[DeleteFolderTask] files ", self.fileList
errors = []
try:
rmtree(self.fileList)
except Exception, e:
errors.append(e)
if errors:
raise errors[0]
class CopyFileJob(Job):
def __init__(self, srcfile, destfile, name):
Job.__init__(self, _("Copying files"))
cmdline = "cp -Rf \"%s\" \"%s\"" % (srcfile,destfile)
AddFileProcessTask(self, cmdline, srcfile, destfile, name)
class MoveFileJob(Job):
def __init__(self, srcfile, destfile, name):
Job.__init__(self, _("Moving files"))
cmdline = "mv -f \"%s\" \"%s\"" % (srcfile,destfile)
AddFileProcessTask(self, cmdline, srcfile, destfile, name)
class AddFileProcessTask(Task):
def __init__(self, job, cmdline, srcfile, destfile, name):
Task.__init__(self, job, name)
self.setCmdline(cmdline)
self.srcfile = srcfile
self.destfile = destfile
self.ProgressTimer = eTimer()
self.ProgressTimer.callback.append(self.ProgressUpdate)
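    # poll the destination file size every 5 seconds and report progress as a
    # percentage of the source file size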
def ProgressUpdate(self):
if self.srcsize <= 0 or not fileExists(self.destfile, 'r'):
return
self.setProgress(int((path.getsize(self.destfile)/float(self.srcsize))*100))
self.ProgressTimer.start(5000, True)
def prepare(self):
if fileExists(self.srcfile, 'r'):
self.srcsize = path.getsize(self.srcfile)
self.ProgressTimer.start(5000, True)
def afterRun(self):
self.setProgress(100)
self.ProgressTimer.stop()
def copyFiles(fileList, name):
for src, dst in fileList:
JobManager.AddJob(CopyFileJob(src, dst, name))
def moveFiles(fileList, name):
for src, dst in fileList:
JobManager.AddJob(MoveFileJob(src, dst, name))
def deleteFiles(fileList, name):
job = Job(_("Deleting files"))
task = DeleteFolderTask(job, name)
task.openFiles(fileList)
JobManager.AddJob(job)
|
gpl-2.0
|
pyfisch/servo
|
tests/wpt/web-platform-tests/tools/third_party/pytest/testing/test_pluginmanager.py
|
30
|
13081
|
# encoding: UTF-8
from __future__ import absolute_import, division, print_function
import pytest
import os
import re
import sys
import types
from _pytest.config import get_config, PytestPluginManager
from _pytest.main import EXIT_NOTESTSCOLLECTED, Session
@pytest.fixture
def pytestpm():
return PytestPluginManager()
class TestPytestPluginInteractions(object):
def test_addhooks_conftestplugin(self, testdir):
testdir.makepyfile(
newhooks="""
def pytest_myhook(xyz):
"new hook"
"""
)
conf = testdir.makeconftest(
"""
import sys ; sys.path.insert(0, '.')
import newhooks
def pytest_addhooks(pluginmanager):
pluginmanager.addhooks(newhooks)
def pytest_myhook(xyz):
return xyz + 1
"""
)
config = get_config()
pm = config.pluginmanager
pm.hook.pytest_addhooks.call_historic(
kwargs=dict(pluginmanager=config.pluginmanager)
)
config.pluginmanager._importconftest(conf)
# print(config.pluginmanager.get_plugins())
res = config.hook.pytest_myhook(xyz=10)
assert res == [11]
def test_addhooks_nohooks(self, testdir):
testdir.makeconftest(
"""
import sys
def pytest_addhooks(pluginmanager):
pluginmanager.addhooks(sys)
"""
)
res = testdir.runpytest()
assert res.ret != 0
res.stderr.fnmatch_lines(["*did not find*sys*"])
def test_namespace_early_from_import(self, testdir):
p = testdir.makepyfile(
"""
from pytest import Item
from pytest import Item as Item2
assert Item is Item2
"""
)
result = testdir.runpython(p)
assert result.ret == 0
def test_do_ext_namespace(self, testdir):
testdir.makeconftest(
"""
def pytest_namespace():
return {'hello': 'world'}
"""
)
p = testdir.makepyfile(
"""
from pytest import hello
import pytest
def test_hello():
assert hello == "world"
assert 'hello' in pytest.__all__
"""
)
reprec = testdir.inline_run(p)
reprec.assertoutcome(passed=1)
def test_do_option_postinitialize(self, testdir):
config = testdir.parseconfigure()
assert not hasattr(config.option, "test123")
p = testdir.makepyfile(
"""
def pytest_addoption(parser):
parser.addoption('--test123', action="store_true",
default=True)
"""
)
config.pluginmanager._importconftest(p)
assert config.option.test123
def test_configure(self, testdir):
config = testdir.parseconfig()
values = []
class A(object):
def pytest_configure(self, config):
values.append(self)
config.pluginmanager.register(A())
assert len(values) == 0
config._do_configure()
assert len(values) == 1
config.pluginmanager.register(A()) # leads to a configured() plugin
assert len(values) == 2
assert values[0] != values[1]
config._ensure_unconfigure()
config.pluginmanager.register(A())
assert len(values) == 2
def test_hook_tracing(self):
pytestpm = get_config().pluginmanager # fully initialized with plugins
saveindent = []
class api1(object):
def pytest_plugin_registered(self):
saveindent.append(pytestpm.trace.root.indent)
class api2(object):
def pytest_plugin_registered(self):
saveindent.append(pytestpm.trace.root.indent)
raise ValueError()
values = []
pytestpm.trace.root.setwriter(values.append)
undo = pytestpm.enable_tracing()
try:
indent = pytestpm.trace.root.indent
p = api1()
pytestpm.register(p)
assert pytestpm.trace.root.indent == indent
assert len(values) >= 2
assert "pytest_plugin_registered" in values[0]
assert "finish" in values[1]
values[:] = []
with pytest.raises(ValueError):
pytestpm.register(api2())
assert pytestpm.trace.root.indent == indent
assert saveindent[0] > indent
finally:
undo()
def test_hook_proxy(self, testdir):
"""Test the gethookproxy function(#2016)"""
config = testdir.parseconfig()
session = Session(config)
testdir.makepyfile(**{"tests/conftest.py": "", "tests/subdir/conftest.py": ""})
conftest1 = testdir.tmpdir.join("tests/conftest.py")
conftest2 = testdir.tmpdir.join("tests/subdir/conftest.py")
config.pluginmanager._importconftest(conftest1)
ihook_a = session.gethookproxy(testdir.tmpdir.join("tests"))
assert ihook_a is not None
config.pluginmanager._importconftest(conftest2)
ihook_b = session.gethookproxy(testdir.tmpdir.join("tests"))
assert ihook_a is not ihook_b
def test_warn_on_deprecated_addhooks(self, pytestpm):
warnings = []
class get_warnings(object):
def pytest_logwarning(self, code, fslocation, message, nodeid):
warnings.append(message)
class Plugin(object):
def pytest_testhook():
pass
pytestpm.register(get_warnings())
before = list(warnings)
pytestpm.addhooks(Plugin())
assert len(warnings) == len(before) + 1
assert "deprecated" in warnings[-1]
def test_namespace_has_default_and_env_plugins(testdir):
p = testdir.makepyfile(
"""
import pytest
pytest.mark
"""
)
result = testdir.runpython(p)
assert result.ret == 0
def test_default_markers(testdir):
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines(["*tryfirst*first*", "*trylast*last*"])
def test_importplugin_error_message(testdir, pytestpm):
"""Don't hide import errors when importing plugins and provide
an easy to debug message.
See #375 and #1998.
"""
testdir.syspathinsert(testdir.tmpdir)
testdir.makepyfile(
qwe="""
# encoding: UTF-8
def test_traceback():
raise ImportError(u'Not possible to import: ☺')
test_traceback()
"""
)
with pytest.raises(ImportError) as excinfo:
pytestpm.import_plugin("qwe")
expected_message = '.*Error importing plugin "qwe": Not possible to import: .'
expected_traceback = ".*in test_traceback"
assert re.match(expected_message, str(excinfo.value))
assert re.match(expected_traceback, str(excinfo.traceback[-1]))
class TestPytestPluginManager(object):
def test_register_imported_modules(self):
pm = PytestPluginManager()
mod = types.ModuleType("x.y.pytest_hello")
pm.register(mod)
assert pm.is_registered(mod)
values = pm.get_plugins()
assert mod in values
pytest.raises(ValueError, "pm.register(mod)")
pytest.raises(ValueError, lambda: pm.register(mod))
# assert not pm.is_registered(mod2)
assert pm.get_plugins() == values
def test_canonical_import(self, monkeypatch):
mod = types.ModuleType("pytest_xyz")
monkeypatch.setitem(sys.modules, "pytest_xyz", mod)
pm = PytestPluginManager()
pm.import_plugin("pytest_xyz")
assert pm.get_plugin("pytest_xyz") == mod
assert pm.is_registered(mod)
def test_consider_module(self, testdir, pytestpm):
testdir.syspathinsert()
testdir.makepyfile(pytest_p1="#")
testdir.makepyfile(pytest_p2="#")
mod = types.ModuleType("temp")
mod.pytest_plugins = ["pytest_p1", "pytest_p2"]
pytestpm.consider_module(mod)
assert pytestpm.get_plugin("pytest_p1").__name__ == "pytest_p1"
assert pytestpm.get_plugin("pytest_p2").__name__ == "pytest_p2"
def test_consider_module_import_module(self, testdir):
pytestpm = get_config().pluginmanager
mod = types.ModuleType("x")
mod.pytest_plugins = "pytest_a"
aplugin = testdir.makepyfile(pytest_a="#")
reprec = testdir.make_hook_recorder(pytestpm)
# syspath.prepend(aplugin.dirpath())
sys.path.insert(0, str(aplugin.dirpath()))
pytestpm.consider_module(mod)
call = reprec.getcall(pytestpm.hook.pytest_plugin_registered.name)
assert call.plugin.__name__ == "pytest_a"
# check that it is not registered twice
pytestpm.consider_module(mod)
values = reprec.getcalls("pytest_plugin_registered")
assert len(values) == 1
def test_consider_env_fails_to_import(self, monkeypatch, pytestpm):
monkeypatch.setenv("PYTEST_PLUGINS", "nonexisting", prepend=",")
with pytest.raises(ImportError):
pytestpm.consider_env()
def test_plugin_skip(self, testdir, monkeypatch):
p = testdir.makepyfile(
skipping1="""
import pytest
pytest.skip("hello")
"""
)
p.copy(p.dirpath("skipping2.py"))
monkeypatch.setenv("PYTEST_PLUGINS", "skipping2")
result = testdir.runpytest("-rw", "-p", "skipping1", syspathinsert=True)
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines(
["*skipped plugin*skipping1*hello*", "*skipped plugin*skipping2*hello*"]
)
def test_consider_env_plugin_instantiation(self, testdir, monkeypatch, pytestpm):
testdir.syspathinsert()
testdir.makepyfile(xy123="#")
monkeypatch.setitem(os.environ, "PYTEST_PLUGINS", "xy123")
l1 = len(pytestpm.get_plugins())
pytestpm.consider_env()
l2 = len(pytestpm.get_plugins())
assert l2 == l1 + 1
assert pytestpm.get_plugin("xy123")
pytestpm.consider_env()
l3 = len(pytestpm.get_plugins())
assert l2 == l3
def test_pluginmanager_ENV_startup(self, testdir, monkeypatch):
testdir.makepyfile(pytest_x500="#")
p = testdir.makepyfile(
"""
import pytest
def test_hello(pytestconfig):
plugin = pytestconfig.pluginmanager.get_plugin('pytest_x500')
assert plugin is not None
"""
)
monkeypatch.setenv("PYTEST_PLUGINS", "pytest_x500", prepend=",")
result = testdir.runpytest(p, syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_import_plugin_importname(self, testdir, pytestpm):
pytest.raises(ImportError, 'pytestpm.import_plugin("qweqwex.y")')
pytest.raises(ImportError, 'pytestpm.import_plugin("pytest_qweqwx.y")')
testdir.syspathinsert()
pluginname = "pytest_hello"
testdir.makepyfile(**{pluginname: ""})
pytestpm.import_plugin("pytest_hello")
len1 = len(pytestpm.get_plugins())
pytestpm.import_plugin("pytest_hello")
len2 = len(pytestpm.get_plugins())
assert len1 == len2
plugin1 = pytestpm.get_plugin("pytest_hello")
assert plugin1.__name__.endswith("pytest_hello")
plugin2 = pytestpm.get_plugin("pytest_hello")
assert plugin2 is plugin1
def test_import_plugin_dotted_name(self, testdir, pytestpm):
pytest.raises(ImportError, 'pytestpm.import_plugin("qweqwex.y")')
pytest.raises(ImportError, 'pytestpm.import_plugin("pytest_qweqwex.y")')
testdir.syspathinsert()
testdir.mkpydir("pkg").join("plug.py").write("x=3")
pluginname = "pkg.plug"
pytestpm.import_plugin(pluginname)
mod = pytestpm.get_plugin("pkg.plug")
assert mod.x == 3
def test_consider_conftest_deps(self, testdir, pytestpm):
mod = testdir.makepyfile("pytest_plugins='xyz'").pyimport()
with pytest.raises(ImportError):
pytestpm.consider_conftest(mod)
class TestPytestPluginManagerBootstrapping(object):
def test_preparse_args(self, pytestpm):
pytest.raises(
ImportError, lambda: pytestpm.consider_preparse(["xyz", "-p", "hello123"])
)
def test_plugin_prevent_register(self, pytestpm):
pytestpm.consider_preparse(["xyz", "-p", "no:abc"])
l1 = pytestpm.get_plugins()
pytestpm.register(42, name="abc")
l2 = pytestpm.get_plugins()
assert len(l2) == len(l1)
assert 42 not in l2
def test_plugin_prevent_register_unregistered_already_registered(self, pytestpm):
pytestpm.register(42, name="abc")
l1 = pytestpm.get_plugins()
assert 42 in l1
pytestpm.consider_preparse(["xyz", "-p", "no:abc"])
l2 = pytestpm.get_plugins()
assert 42 not in l2
|
mpl-2.0
|
open-craft/opencraft
|
instance/management/commands/recreate_db.py
|
1
|
4389
|
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015-2019 OpenCraft <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
This management command will destroy and recreate an instance's edxapp database. This is to remediate issues
where an initial provision fails mid-migration and can't finish without starting the DB over.
"""
import logging
from textwrap import dedent
from django.core.management.base import BaseCommand, CommandError
from instance.models.openedx_instance import OpenEdXInstance
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
"""
This management command will recreate the edxapp database for a specified instance.
"""
help = (
'Drop and recreate a database for an instance. The instance must have failed initial provisioning.'
)
def add_arguments(self, parser):
"""
Add the arguments for the DB recreation command.
"""
parser.add_argument(
'--domain',
help='Domain name of instance to recreate db for',
required=True,
)
parser.add_argument(
# Needless to say, this isn't forensically valid and is on the honor system. Check system logs if unsure.
'--admin',
help="The name of the admin (that is, your name) who is performing this operation",
required=True,
)
parser.add_argument(
'--reason',
help="A written reason for why you're recreating the database.",
required=True,
)
parser.add_argument(
# Not calling this 'force' because we're not coding in the option to force the drop and creation of a
# DB this way.
'-y', '--yes',
help="Don't prompt for confirmation.",
action='store_true',
)
def handle(self, *args, **options):
"""
Recreates the instance's DB.
"""
domain = options['domain']
try:
instance = OpenEdXInstance.objects.get(
internal_lms_domain__iexact=domain,
)
except OpenEdXInstance.DoesNotExist:
raise CommandError(f'An instance with the domain name "{domain}" could not be found.')
if instance.successfully_provisioned:
raise CommandError(
f'Cowardly refusing to drop the database of "{domain}", which has already '
'successfully provisioned at least once.',
)
self.confirm(instance.internal_lms_domain, options)
instance.logger.warn(dedent(
f"""
!!!
! Blowing away and recreating the edxapp database!
! Authorized by: {options['admin']}
! Reason: {options['reason']}
!!!
"""
))
instance.logger.info('Dropping edxapp database...')
instance.drop_db('edxapp')
instance.logger.info('DB Dropped. Recreating database...')
instance.create_db('edxapp')
instance.logger.info('DB Recreated successfully.')
def confirm(self, internal_lms_domain, options):
"""
Gets confirmation from the user, and raises if confirmation fails.
"""
if options['yes']:
return
self.stdout.write(f'> We will drop the edxapp database for {internal_lms_domain} and recreate it.\n')
self.stdout.write(f'> Your name: {options["admin"]}\n')
self.stdout.write(f'> Reason for recreating the DB: {options["reason"]}\n')
answer = input('Are you sure you want to continue? [yes/No]')
if not answer.lower().startswith('y'):
raise CommandError('Aborted.')
|
agpl-3.0
|
timokoola/timoechobot
|
requests/packages/chardet/escprober.py
|
2936
|
3187
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = [
CodingStateMachine(HZSMModel),
CodingStateMachine(ISO2022CNSMModel),
CodingStateMachine(ISO2022JPSMModel),
CodingStateMachine(ISO2022KRSMModel)
]
self.reset()
def reset(self):
CharSetProber.reset(self)
for codingSM in self._mCodingSM:
if not codingSM:
continue
codingSM.active = True
codingSM.reset()
self._mActiveSM = len(self._mCodingSM)
self._mDetectedCharset = None
def get_charset_name(self):
return self._mDetectedCharset
def get_confidence(self):
if self._mDetectedCharset:
return 0.99
else:
return 0.00
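    # feed() pushes every byte through each still-active escape-sequence state
    # machine; a machine that errors is deactivated, and the first one to reach
    # eItsMe determines the detected charset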
def feed(self, aBuf):
for c in aBuf:
# PY3K: aBuf is a byte array, so c is an int, not a byte
for codingSM in self._mCodingSM:
if not codingSM:
continue
if not codingSM.active:
continue
codingState = codingSM.next_state(wrap_ord(c))
if codingState == constants.eError:
codingSM.active = False
self._mActiveSM -= 1
if self._mActiveSM <= 0:
self._mState = constants.eNotMe
return self.get_state()
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8
return self.get_state()
return self.get_state()
|
apache-2.0
|
alivecor/tensorflow
|
tensorflow/python/debug/lib/debug_gradients_test.py
|
39
|
15165
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for debug_gradients module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_gradients
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
class IdentifyGradientTest(test_util.TensorFlowTestCase):
def setUp(self):
self.sess = session.Session()
with self.sess:
self.u = variables.Variable(2.0, name="u")
self.v = variables.Variable(3.0, name="v")
self.w = math_ops.multiply(self.u.value(), self.v.value(), name="w")
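      # with w = u * v, dw/du == v == 3.0 and dw/dv == u == 2.0; the tests
      # below check these values through the gradient debugger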
def tearDown(self):
ops.reset_default_graph()
debug_gradients.clear_gradient_debuggers()
def testIdentifyGradientGivesCorrectTensorObjectWithoutContextManager(self):
grad_debugger = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
# Fetch the gradient tensor with the x-tensor object.
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor's name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testIdentifyGradientGivesCorrectTensorObjectWithTfGradients(self):
grad_debugger = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
with grad_debugger:
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
# Fetch the gradient tensor with the x-tensor object.
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor's name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
# Fetch the gradient tensor with the x-tensor name.
w_grad = grad_debugger.gradient_tensor(self.w.name)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testCallingIdentifyGradientTwiceWithTheSameGradientsDebuggerErrors(self):
grad_debugger = debug_gradients.GradientsDebugger()
grad_debugger.identify_gradient(self.w)
with self.assertRaisesRegexp(
ValueError, "The graph already contains an op named .*"):
grad_debugger.identify_gradient(self.w)
def testIdentifyGradientWorksOnMultipleLosses(self):
grad_debugger_1 = debug_gradients.GradientsDebugger()
grad_debugger_2 = debug_gradients.GradientsDebugger()
y = math_ops.add(self.w, -1.0, name="y")
debug_y = grad_debugger_1.identify_gradient(y)
z1 = math_ops.square(debug_y, name="z1")
debug_y = grad_debugger_2.identify_gradient(y)
z2 = math_ops.sqrt(debug_y, name="z2")
with grad_debugger_1:
gradient_descent.GradientDescentOptimizer(0.1).minimize(z1)
with grad_debugger_2:
gradient_descent.GradientDescentOptimizer(0.1).minimize(z2)
dz1_dy = grad_debugger_1.gradient_tensor(y)
dz2_dy = grad_debugger_2.gradient_tensor(y)
self.assertIsInstance(dz1_dy, ops.Tensor)
self.assertIsInstance(dz2_dy, ops.Tensor)
self.assertIsNot(dz1_dy, dz2_dy)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0 ** 2, self.sess.run(z1))
self.assertAllClose(5.0 ** 0.5, self.sess.run(z2))
self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))
self.assertAllClose(0.5 * (5.0 ** -0.5), self.sess.run(dz2_dy))
def testIdentifyGradientRaisesLookupErrorForUnknownXTensor(self):
grad_debugger_1 = debug_gradients.GradientsDebugger()
grad_debugger_2 = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger_1.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
# There are >1 gradient debuggers registered, and grad_debugger is not used
# as a context manager here, so the gradient w.r.t. self.w will not be
# registered.
gradients_impl.gradients(y, [self.u, self.v])
with self.assertRaisesRegexp(
LookupError,
r"This GradientsDebugger has not received any gradient tensor for "):
grad_debugger_1.gradient_tensor(self.w)
with self.assertRaisesRegexp(
LookupError,
r"This GradientsDebugger has not received any gradient tensor for "):
grad_debugger_2.gradient_tensor(self.w)
def testIdentifyGradientRaisesTypeErrorForNonTensorOrTensorNameInput(self):
grad_debugger = debug_gradients.GradientsDebugger()
with self.assertRaisesRegexp(
TypeError,
r"x_tensor must be a str or tf\.Tensor or tf\.Variable, but instead "
r"has type .*Operation.*"):
grad_debugger.gradient_tensor(variables.global_variables_initializer())
def testIdentifyGradientTensorWorksWithGradientDescentOptimizer(self):
grad_debugger = debug_gradients.GradientsDebugger()
id_grad_w = grad_debugger.identify_gradient(self.w)
y = math_ops.add(id_grad_w, -1.0, name="y")
with grad_debugger:
gradient_descent.GradientDescentOptimizer(0.1).minimize(y)
self.sess.run(variables.global_variables_initializer())
# Fetch the gradient tensor with the x-tensor object.
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testWatchGradientsByXTensorNamesWorks(self):
y = math_ops.add(self.w, -1.0, name="y")
# The construction of the forward graph has completed.
# But we can still get the gradient tensors by using
# watch_gradients_by_tensor_names().
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "w:0$"):
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
w_grad = grad_debugger.gradient_tensor("w:0")
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testWatchGradientsByXTensorNamesWorksWithoutContextManager(self):
y = math_ops.add(self.w, -1.0, name="y")
# The construction of the forward graph has completed.
# But we can still get the gradient tensors by using
# watch_gradients_by_tensor_names().
grad_debugger = debug_gradients.GradientsDebugger()
grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "w:0$")
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0, self.sess.run(y))
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
w_grad = grad_debugger.gradient_tensor(self.w)
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
w_grad = grad_debugger.gradient_tensor("w:0")
self.assertIsInstance(w_grad, ops.Tensor)
self.assertAllClose(1.0, self.sess.run(w_grad))
def testWatchGradientsWorksOnRefTensor(self):
y = math_ops.add(self.w, -1.0, name="y")
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph, "u:0$"):
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
v_grad = grads[1]
self.assertIs(u_grad, grad_debugger.gradient_tensor("u:0"))
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(3.0, self.sess.run(u_grad))
self.assertAllClose(2.0, self.sess.run(v_grad))
self.assertAllClose(
3.0, self.sess.run(grad_debugger.gradient_tensor("u:0")))
def testWatchGradientsWorksOnMultipleTensors(self):
y = math_ops.add(self.w, -1.0, name="y")
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensor_names(self.sess.graph,
"(u|w):0$"):
grads = gradients_impl.gradients(y, [self.u, self.v])
self.assertEqual(2, len(grads))
u_grad = grads[0]
self.assertEqual(2, len(grad_debugger.gradient_tensors()))
self.assertIs(u_grad, grad_debugger.gradient_tensor("u:0"))
self.assertIsInstance(grad_debugger.gradient_tensor("w:0"), ops.Tensor)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(
1.0, self.sess.run(grad_debugger.gradient_tensor("w:0")))
self.assertAllClose(
3.0, self.sess.run(grad_debugger.gradient_tensor("u:0")))
def testWatchGradientsByXTensorsWorks(self):
y = math_ops.add(self.w, -1.0, name="foo/y")
z = math_ops.square(y, name="foo/z")
    # The construction of the forward graph has completed.
# But we can still get the gradient tensors by using
# watch_gradients_by_x_tensors().
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensors(
self.sess.graph, [self.w, self.u, y]):
gradient_descent.GradientDescentOptimizer(0.1).minimize(z)
self.assertEqual(3, len(grad_debugger.gradient_tensors()))
u_grad = grad_debugger.gradient_tensor(self.u)
w_grad = grad_debugger.gradient_tensor(self.w)
y_grad = grad_debugger.gradient_tensor(y)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(10.0, self.sess.run(y_grad))
self.assertAllClose(10.0, self.sess.run(w_grad))
self.assertAllClose(30.0, self.sess.run(u_grad))
def testWatchGradientsByTensorCanWorkOnMultipleLosses(self):
y = math_ops.add(self.w, -1.0, name="y")
z1 = math_ops.square(y, name="z1")
z2 = math_ops.sqrt(y, name="z2")
grad_debugger_1 = debug_gradients.GradientsDebugger()
with grad_debugger_1.watch_gradients_by_tensors(self.sess.graph, y):
gradient_descent.GradientDescentOptimizer(0.1).minimize(z1)
grad_debugger_2 = debug_gradients.GradientsDebugger()
with grad_debugger_2.watch_gradients_by_tensors(self.sess.graph, y):
gradient_descent.GradientDescentOptimizer(0.1).minimize(z2)
dz1_dy = grad_debugger_1.gradient_tensor(y)
dz2_dy = grad_debugger_2.gradient_tensor(y)
self.assertIsInstance(dz1_dy, ops.Tensor)
self.assertIsInstance(dz2_dy, ops.Tensor)
self.assertIsNot(dz1_dy, dz2_dy)
self.sess.run(variables.global_variables_initializer())
self.assertAllClose(5.0 ** 2, self.sess.run(z1))
self.assertAllClose(5.0 ** 0.5, self.sess.run(z2))
self.assertAllClose(2.0 * 5.0, self.sess.run(dz1_dy))
self.assertAllClose(0.5 * (5.0 ** -0.5), self.sess.run(dz2_dy))
def testGradientsValuesFromDumpWorks(self):
y = math_ops.add(self.w, -1.0, name="y")
z = math_ops.square(y, name="z")
grad_debugger = debug_gradients.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensors(
self.sess.graph, [self.w, self.u, y]):
train_op = gradient_descent.GradientDescentOptimizer(0.1).minimize(z)
self.sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
dump_dir = tempfile.mkdtemp()
debug_url = "file://" + dump_dir
debug_utils.watch_graph(
run_options,
self.sess.graph,
debug_urls=debug_url)
run_metadata = config_pb2.RunMetadata()
self.sess.run(train_op, options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
dump_dir, partition_graphs=run_metadata.partition_graphs)
dump.set_python_graph(self.sess.graph)
y_grad_values = debug_gradients.gradient_values_from_dump(
grad_debugger, y, dump)
self.assertEqual(1, len(y_grad_values))
self.assertAllClose(10.0, y_grad_values[0])
w_grad_values = debug_gradients.gradient_values_from_dump(
grad_debugger, self.w, dump)
self.assertEqual(1, len(w_grad_values))
self.assertAllClose(10.0, w_grad_values[0])
u_grad_values = debug_gradients.gradient_values_from_dump(
grad_debugger, self.u, dump)
self.assertEqual(1, len(u_grad_values))
self.assertAllClose(30.0, u_grad_values[0])
with self.assertRaisesRegexp(
LookupError,
r"This GradientsDebugger has not received any gradient tensor for "
r"x-tensor v:0"):
debug_gradients.gradient_values_from_dump(grad_debugger, self.v, dump)
# Cleanup.
shutil.rmtree(dump_dir)
if __name__ == "__main__":
googletest.main()
|
apache-2.0
|
n0m4dz/odoo
|
addons/account_bank_statement_extensions/wizard/confirm_statement_line.py
|
381
|
1490
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class confirm_statement_line(osv.osv_memory):
_name = 'confirm.statement.line'
_description = 'Confirm selected statement lines'
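    # Mark the statement lines selected in the client context (active_ids)
    # as confirmed.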
def confirm_lines(self, cr, uid, ids, context):
line_ids = context['active_ids']
line_obj = self.pool.get('account.bank.statement.line')
line_obj.write(cr, uid, line_ids, {'state': 'confirm'}, context=context)
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
mattsmart/uwaterloo-igem-2015
|
models/tridimensional/incomplete_scripts/cas9-AA-subst.py
|
9
|
2283
|
#///////////////////////**READ ME FIRST**//////////////////////////////////
# Point of this script is to substitute amino acids into a given pose
# protein structure in pyrosetta.
# Please change dropbox folder location accordingly + make sure that the
# script's current location is the root of PyRosetta (need to import stuff)
#//////////////////////////////////////////////////////////////////////////
## Raw file date modified: July 15th/2014
## importing modules required for script
import os
from rosetta import *
## difference between resfiles and mutate_residue is that the former takes into account
## rotamers (aka conformers) whereas the latter doesn't (for our case mutate_residue is sufficient)
## refer to link for further documentation and/or info:
## http://graylab.jhu.edu/pyrosetta/downloads/documentation/Workshop6_PyRosetta_Packing_Design.pdf
from toolbox import generate_resfile_from_pdb # generate mutations using resfiles
from toolbox import mutate_residue # generate mutations using mutate_residue
## changing directory to where the PDB files are located
os.chdir(os.path.expanduser("~/Dropbox/Waterloo-iGEM-2015"))  # alter to your specific dropbox path
os.chdir("Math Modelling/cas9_modification")  # where the WT cas9 PDB should be located
## adjust the two paths above if your folder layout differs
## initializing rosetta:
rosetta.init()
# import cleaning module for PDB to be usable
from toolbox import cleanATOM
cleanATOM("\4UN3.pdb") # cleaned PDB file to use for analysis
var_pose = pose_from_pdb("\4UN3.pdb") # initial pose created from clean pdb
#inputted residue number of interest
Num = int(raw_input("enter residue number:\n"))  # residue number must be an integer
for i in range(0, 20):
# list of Amino Acids to substitute
AA_lst = "ACDEFGHIKLMNPQRSTVWY"
AA_var = AA_lst[i]
var_pose = pose_from_pdb("4UN3." + AA_var + ".clean.pdb")
    mutate_residue(var_pose, Num, AA_var) # where Num = residue number to substitute AA
# for sanity checking purposes: prints out changed 4UN3 pdb pose protein profile
    # (sequence, # of res, what is located at Num residue - if the substitution occurred)
print var_pose
print var_pose.sequence()
print "Protein has", var_pose.total_residue(), "residues."
print var_pose.residue(Num).name() # where Num is a residue number
|
mit
|
bregman-arie/ansible
|
test/units/modules/network/ironware/test_ironware_command.py
|
57
|
4066
|
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.ironware import ironware_command
from units.modules.utils import set_module_args
from .ironware_module import TestIronwareModule, load_fixture
class TestIronwareCommandModule(TestIronwareModule):
module = ironware_command
def setUp(self):
super(TestIronwareCommandModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.ironware.ironware_command.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestIronwareCommandModule, self).tearDown()
self.mock_run_commands.stop()
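    # Stub run_commands so each command returns the contents of a fixture
    # file named after the command (spaces replaced with underscores).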
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for command in commands:
filename = str(command).replace(' ', '_')
output.append(load_fixture(filename))
return output
self.run_commands.side_effect = load_from_file
def test_ironware_command_simple(self):
set_module_args(dict(commands=['show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('System Mode: MLX'))
def test_ironware_command_multiple(self):
set_module_args(dict(commands=['show version', 'show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 2)
self.assertTrue(result['stdout'][0].startswith('System Mode: MLX'))
def test_ironware_command_wait_for(self):
wait_for = 'result[0] contains "IronWare"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module()
def test_ironware_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 10)
def test_ironware_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 2)
def test_ironware_command_match_any(self):
wait_for = ['result[0] contains "IronWare"',
'result[0] contains "test string"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
self.execute_module()
def test_ironware_command_match_all(self):
wait_for = ['result[0] contains "IronWare"',
'result[0] contains "uptime is"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
self.execute_module()
def test_ironware_command_match_all_failure(self):
wait_for = ['result[0] contains "IronWare"',
'result[0] contains "test string"']
commands = ['show version', 'show version']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
|
gpl-3.0
|
scenarios/tensorflow
|
tensorflow/compiler/tests/function_test.py
|
27
|
4129
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for Tensorflow functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class FunctionTest(XLATestCase):
def testFunction(self):
"""Executes a simple TensorFlow function."""
def APlus2B(a, b):
return a + b * 2
aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
bval = np.array([5, 6, 7, 8]).reshape([2, 2]).astype(np.float32)
expected = APlus2B(aval, bval)
with self.test_session() as sess:
@function.Defun(dtypes.float32, dtypes.float32)
def Foo(a, b):
return APlus2B(a, b)
a = constant_op.constant(aval, name="a")
b = constant_op.constant(bval, name="b")
with self.test_scope():
call_f = Foo(a, b)
result = sess.run(call_f)
self.assertAllClose(result, expected, rtol=1e-3)
def testNestedFunctions(self):
"""Executes two nested TensorFlow functions."""
def TimesTwo(x):
return x * 2
def APlus2B(a, b):
return a + TimesTwo(b)
aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
bval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
expected = APlus2B(aval, bval)
with self.test_session() as sess:
@function.Defun(dtypes.float32, dtypes.float32)
def Foo(a, b):
return APlus2B(a, b)
a = constant_op.constant(aval, name="a")
b = constant_op.constant(bval, name="b")
with self.test_scope():
call_g = Foo(a, b)
result = sess.run(call_g)
self.assertAllClose(result, expected, rtol=1e-3)
def testFunctionMultipleRetvals(self):
"""Executes a function with multiple return values."""
# This function will run on the XLA device
def Func(a, b):
return a + b, a - b
aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
bval = np.array([5, 6, 7, 8]).reshape([2, 2]).astype(np.float32)
expected = Func(aval, bval)
with self.test_session() as sess:
@function.Defun(dtypes.float32, dtypes.float32)
def Foo(a, b):
return Func(a, b)
a = constant_op.constant(aval, name="a")
b = constant_op.constant(bval, name="b")
with self.test_scope():
call_f = Foo(a, b)
result = sess.run(call_f)
self.assertAllClose(result, expected, rtol=1e-3)
def testFunctionsNoInline(self):
@function.Defun(dtypes.float32, noinline=True)
def TimesTwo(x):
return x * 2
@function.Defun(dtypes.float32, dtypes.float32)
def APlus2B(a, b):
return a + TimesTwo(b)
aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
bval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
expected = aval + bval * 2
with self.test_session() as sess:
with self.test_scope():
a = array_ops.placeholder(dtypes.float32, name="a")
b = array_ops.placeholder(dtypes.float32, name="b")
call = APlus2B(a, b)
result = sess.run(call, {a: aval, b: bval})
self.assertAllClose(result, expected, rtol=1e-3)
if __name__ == "__main__":
googletest.main()
|
apache-2.0
|
imankulov/sentry
|
tests/sentry/utils/models/tests.py
|
29
|
1711
|
from __future__ import absolute_import
from django.db import models
from sentry.db.models import (
Model, BoundedIntegerField, BoundedBigIntegerField,
BoundedPositiveIntegerField)
from sentry.testutils import TestCase
# There's a good chance this model won't get created in the db, so avoid
# assuming it exists in these tests.
class DummyModel(Model):
foo = models.CharField(max_length=32)
normint = BoundedIntegerField(null=True)
bigint = BoundedBigIntegerField(null=True)
posint = BoundedPositiveIntegerField(null=True)
class ModelTest(TestCase):
def test_foo_hasnt_changed_on_init(self):
inst = DummyModel(id=1, foo='bar')
self.assertFalse(inst.has_changed('foo'))
def test_foo_has_changes_before_save(self):
inst = DummyModel(id=1, foo='bar')
inst.foo = 'baz'
self.assertTrue(inst.has_changed('foo'))
self.assertEquals(inst.old_value('foo'), 'bar')
def test_foo_hasnt_changed_after_save(self):
inst = DummyModel(id=1, foo='bar')
inst.foo = 'baz'
self.assertTrue(inst.has_changed('foo'))
self.assertEquals(inst.old_value('foo'), 'bar')
models.signals.post_save.send(instance=inst, sender=type(inst), created=False)
self.assertFalse(inst.has_changed('foo'))
def test_large_int(self):
with self.assertRaises(AssertionError):
DummyModel.objects.create(normint=9223372036854775807L, foo='bar')
with self.assertRaises(AssertionError):
DummyModel.objects.create(bigint=9223372036854775808L, foo='bar')
with self.assertRaises(AssertionError):
DummyModel.objects.create(posint=9223372036854775808L, foo='bar')
|
bsd-3-clause
|
StefanRijnhart/odoomrp-wip
|
mrp_subcontracting/models/procurement_order.py
|
3
|
1868
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api
class ProcurementOrder(models.Model):
_inherit = 'procurement.order'
mrp_operation = fields.Many2one(
        'mrp.production.workcenter.line', 'MRP Operation')
@api.multi
def make_po(self):
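        # Create purchase orders as usual, then cross-link the generated
        # purchase order with the MRP operation set on the procurement.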
purchase_line_obj = self.env['purchase.order.line']
res = super(ProcurementOrder, self).make_po()
for procurement in self:
if res[procurement.id]:
purchase_line = purchase_line_obj.browse(res[procurement.id])
if (procurement.mrp_operation and
(not purchase_line.order_id.mrp_operation or
procurement.mrp_operation.id !=
purchase_line.order_id.mrp_operation.id)):
purchase_line.order_id.mrp_operation = (
procurement.mrp_operation.id)
procurement.mrp_operation.purchase_order = (
purchase_line.order_id.id)
return res
|
agpl-3.0
|
intity/gtk-win32
|
gvsbuild/utils/simple_ui.py
|
1
|
2270
|
# Copyright (C) 2016 - Yevgen Muntyan
# Copyright (C) 2016 - Ignacio Casal Quinteiro
# Copyright (C) 2016 - Arnavion
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
Simple user interface for info, log & debug messages
"""
import sys
import ctypes
global_verbose = False
global_debug = False
def print_message(msg):
print(msg)
def print_log(msg):
if global_verbose:
print(msg)
def print_debug(msg):
if global_debug:
print("Debug:", msg)
def error_exit(msg):
print("Error:", msg, file=sys.stderr)
sys.exit(1)
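# Copy the command line --verbose/--debug flags into the module-level globals
# used by print_log()/print_debug(); --debug also implies verbose output.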
def handle_global_options(args):
global global_verbose
global global_debug
if args.verbose:
global_verbose = True
if args.debug:
global_verbose = True
global_debug = True
# Original windows console title
_script_org_title = None
def script_title(new_title):
"""
Set the new console title for the running script, saving the old one
Passing None to the title restores the old, saved, one
"""
global _script_org_title
if new_title:
# Save the old title
if _script_org_title is None:
buf = ctypes.create_unicode_buffer(256)
ctypes.windll.kernel32.GetConsoleTitleW(buf, 256)
_script_org_title = buf.value
ctypes.windll.kernel32.SetConsoleTitleW('gvsbuild ' + new_title)
else:
# Restore old title
if _script_org_title is not None:
ctypes.windll.kernel32.SetConsoleTitleW(_script_org_title)
# cleanup if we want to call the function again
_script_org_title = None
|
gpl-2.0
|
mesheven/pyOCD
|
pyocd/board/board.py
|
1
|
2633
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013,2018 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..target import TARGET
import logging
import six
log = logging.getLogger('board')
class Board(object):
"""
This class associates a target and flash to create a board.
"""
def __init__(self, session, target=None):
# As a last resort, default the target to 'cortex_m'.
if target is None:
target = 'cortex_m'
self._session = session
self._target_type = target.lower()
self._test_binary = session.options.get('test_binary', None)
# Create Target and Flash instances.
try:
log.info("Target type is %s", self._target_type)
self.target = TARGET[self._target_type](session)
except KeyError as exc:
log.error("target '%s' not recognized", self._target_type)
six.raise_from(KeyError("target '%s' not recognized" % self._target_type), exc)
self._inited = False
## @brief Initialize the board.
def init(self):
self.target.init()
self._inited = True
## @brief Uninitialize the board.
def uninit(self):
if self._inited:
log.debug("uninit board %s", self)
try:
resume = self.session.options.get('resume_on_disconnect', True)
self.target.disconnect(resume)
self._inited = False
except:
log.error("link exception during target disconnect:", exc_info=True)
@property
def session(self):
return self._session
@property
def unique_id(self):
return self.session.probe.unique_id
@property
def target_type(self):
return self._target_type
@property
def test_binary(self):
return self._test_binary
@property
def name(self):
return "generic"
@property
def description(self):
return "Generic board via " + self.session.probe.vendor_name + " " \
+ self.session.probe.product_name + " [" + self.target_type + "]"
|
apache-2.0
|
zihua/scikit-learn
|
sklearn/linear_model/tests/test_perceptron.py
|
378
|
1815
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
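# Minimal reference implementation of the perceptron update rule, used below
# to check sklearn's Perceptron against a known-correct baseline.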
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
|
bsd-3-clause
|
muntasirsyed/intellij-community
|
plugins/hg4idea/testData/bin/mercurial/fancyopts.py
|
92
|
3562
|
# fancyopts.py - better command line parsing
#
# Copyright 2005-2009 Matt Mackall <[email protected]> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import getopt, util
from i18n import _
def gnugetopt(args, options, longoptions):
"""Parse options mostly like getopt.gnu_getopt.
This is different from getopt.gnu_getopt in that an argument of - will
become an argument of - instead of vanishing completely.
"""
extraargs = []
if '--' in args:
stopindex = args.index('--')
extraargs = args[stopindex + 1:]
args = args[:stopindex]
opts, parseargs = getopt.getopt(args, options, longoptions)
args = []
while parseargs:
arg = parseargs.pop(0)
if arg and arg[0] == '-' and len(arg) > 1:
parseargs.insert(0, arg)
topts, newparseargs = getopt.getopt(parseargs, options, longoptions)
opts = opts + topts
parseargs = newparseargs
else:
args.append(arg)
args.extend(extraargs)
return opts, args
def fancyopts(args, options, state, gnu=False):
"""
read args, parse options, and store options in state
each option is a tuple of:
short option or ''
long option
default value
description
option value label(optional)
option types include:
boolean or none - option sets variable in state to true
string - parameter string is stored in state
list - parameter string is added to a list
    integer - parameter string is stored as int
function - call function with parameter
non-option args are returned
"""
namelist = []
shortlist = ''
argmap = {}
defmap = {}
for option in options:
if len(option) == 5:
short, name, default, comment, dummy = option
else:
short, name, default, comment = option
# convert opts to getopt format
oname = name
name = name.replace('-', '_')
argmap['-' + short] = argmap['--' + oname] = name
defmap[name] = default
# copy defaults to state
if isinstance(default, list):
state[name] = default[:]
elif getattr(default, '__call__', False):
state[name] = None
else:
state[name] = default
# does it take a parameter?
if not (default is None or default is True or default is False):
if short:
short += ':'
if oname:
oname += '='
if short:
shortlist += short
if name:
namelist.append(oname)
# parse arguments
if gnu:
parse = gnugetopt
else:
parse = getopt.getopt
opts, args = parse(args, shortlist, namelist)
# transfer result to state
for opt, val in opts:
name = argmap[opt]
t = type(defmap[name])
if t is type(fancyopts):
state[name] = defmap[name](val)
elif t is type(1):
try:
state[name] = int(val)
except ValueError:
raise util.Abort(_('invalid value %r for option %s, '
'expected int') % (val, opt))
elif t is type(''):
state[name] = val
elif t is type([]):
state[name].append(val)
elif t is type(None) or t is type(False):
state[name] = True
# return unparsed args
return args
|
apache-2.0
|
jjmleiro/hue
|
desktop/core/ext-py/py4j-0.9/src/py4j/tests/java_dir_test.py
|
15
|
5123
|
from __future__ import unicode_literals, absolute_import
from py4j.java_gateway import JavaGateway, GatewayParameters, java_import,\
UserHelpAutoCompletion
from py4j.protocol import Py4JError
from py4j.tests.java_gateway_test import (
start_example_app_process, sleep)
from contextlib import contextmanager
ExampleClassFields = sorted([
"field10",
"field11",
"field20",
"field21",
"static_field"
])
ExampleClassMethods = sorted([
# From ExampleClass
"method1",
"method2",
"method3",
"method4",
"method5",
"method6",
# overloaded
"method7",
"method8",
"method9",
# overloaded
"method10",
"method11",
"getList",
"getField1",
"setField1",
"getStringArray",
"getIntArray",
"callHello",
"callHello2",
"static_method",
# From Object
"getClass",
"hashCode",
"equals",
"toString",
"notify",
"notifyAll",
"wait"
])
ExampleClassStatics = sorted([
"StaticClass",
"static_field",
"static_method"
])
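# Run the example Java gateway application in a child process for the
# duration of the with-block; the process is joined and a short sleep
# performed when the block exits.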
@contextmanager
def example_app_process():
p = start_example_app_process()
try:
yield p
finally:
p.join()
sleep()
@contextmanager
def gateway(*args, **kwargs):
g = JavaGateway(
gateway_parameters=GatewayParameters(
*args, auto_convert=True, **kwargs))
lineSep = g.jvm.System.lineSeparator()
try:
yield g
# Call a dummy method to make sure we haven't corrupted the streams
assert lineSep == g.jvm.System.lineSeparator()
finally:
g.shutdown()
def test_dir_object():
with example_app_process():
with gateway() as g:
ex = g.getNewExample()
assert sorted(dir(ex)) == ExampleClassMethods
def test_dir_object_fields():
with example_app_process():
with gateway(auto_field=True) as g:
ex = g.getNewExample()
assert sorted(dir(ex)) == sorted(
ExampleClassMethods + ExampleClassFields)
def test_dir_object_shows_manually_called_after_dir():
with example_app_process():
with gateway() as g:
ex = g.getNewExample()
assert sorted(dir(ex)) == ExampleClassMethods
try:
ex.does_not_exist_in_example()
raise AssertionError("Method should not have succeeded")
except Py4JError:
pass
# Make sure the manually called method now shows up
assert sorted(dir(ex)) == sorted(
ExampleClassMethods + ["does_not_exist_in_example"])
def test_dir_object_shows_manually_called_before_dir():
with example_app_process():
with gateway() as g:
ex = g.getNewExample()
try:
ex.does_not_exist_in_example()
raise AssertionError("Method should not have succeeded")
except Py4JError:
pass
# Make sure the manually called method now shows up
assert sorted(dir(ex)) == sorted(
ExampleClassMethods + ["does_not_exist_in_example"])
def test_dir_class():
with example_app_process():
with gateway() as g:
exclass = g.jvm.py4j.examples.ExampleClass
assert sorted(dir(exclass)) == ExampleClassStatics
def helper_dir_jvmview(view):
assert sorted(dir(view)) == [UserHelpAutoCompletion.KEY]
java_import(view, "com.example.Class1")
java_import(view, "com.another.Class2")
assert sorted(dir(view)) == [
UserHelpAutoCompletion.KEY, "Class1", "Class2"]
assert sorted(dir(view)) == [
UserHelpAutoCompletion.KEY, "Class1", "Class2"]
java_import(view, "com.third.Class3")
assert sorted(dir(view)) == [
UserHelpAutoCompletion.KEY, "Class1", "Class2", "Class3"]
def test_dir_jvmview_default():
with example_app_process():
with gateway() as g:
helper_dir_jvmview(g.jvm)
def test_dir_jvmview_new():
with example_app_process():
with gateway() as g:
view = g.new_jvm_view()
helper_dir_jvmview(view)
def test_dir_jvmview_two():
with example_app_process():
with gateway() as g:
view1 = g.new_jvm_view()
view2 = g.new_jvm_view()
helper_dir_jvmview(view1)
helper_dir_jvmview(view2)
# now give them different contents
java_import(view1, "com.fourth.Class4")
java_import(view2, "com.fiftg.Class5")
assert sorted(dir(view1)) == [
UserHelpAutoCompletion.KEY, "Class1", "Class2", "Class3",
"Class4"]
assert sorted(dir(view2)) == [
UserHelpAutoCompletion.KEY, "Class1", "Class2", "Class3",
"Class5"]
def test_dir_package():
with example_app_process():
with gateway() as g:
assert sorted(dir(g.jvm)) == [UserHelpAutoCompletion.KEY]
assert sorted(dir(g.jvm.java)) == [UserHelpAutoCompletion.KEY]
assert sorted(dir(g.jvm.java.util)) == [UserHelpAutoCompletion.KEY]
|
apache-2.0
|
juliogonzalez/ebs-tools
|
lib/exceptions.py
|
1
|
9304
|
# ebs-tools, a set of tools to manage EBS volumes and snapshots
#
# Copyright (C) 2014 Julio Gonzalez Gil <[email protected]>
#
# This file is part of ebs-tools (http://github.com/juliogonzalez/ebs-tools)
#
# ebs-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ebs-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ebs-tools. If not, see <http://www.gnu.org/licenses/>.
# EC2 general exceptions
class EC2ConnectError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return('Error connecting to EC2 API: %s' % self.error)
# Instance exceptions
class ErrorStartingInstance(Exception):
def __init__(self, instance_id, error):
self.instance_id = instance_id
self.error = error
def __str__(self):
        return('Error starting instance %s: %s' % (self.instance_id, self.error))
class ErrorStoppingInstance(Exception):
def __init__(self, instance_id, error):
self.instance_id = instance_id
self.error = error
def __str__(self):
        return('Error stopping instance %s: %s' % (self.instance_id, self.error))
class InstanceFetchError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return('Error fetching EC2 instance: %s' % self.error)
class InvalidInstance(Exception):
def __init__(self, instance_id):
self.instance_id = instance_id
def __str__(self):
return('Instance %s does not exist' % self.instance_id)
class InvalidInstanceID(Exception):
def __init__(self, instance_id):
self.instance_id = instance_id
def __str__(self):
return('Invalid instance-id: %s' % self.instance_id)
class InstanceStopImpossible(Exception):
def __init__(self, instance_id, instance_state):
self.instance_id = instance_id
self.instance_state = instance_state
def __str__(self):
return('It is not possible to stop the instance %s with the state %s'
% (self.instance_id, self.instance_state))
class InstanceStartImpossible(Exception):
def __init__(self, instance_id, instance_state):
self.instance_id = instance_id
self.instance_state = instance_state
def __str__(self):
return('It is not possible to start the instance %s with the state %s'
% (self.instance_id, self.instance_state))
# EBS snapshot exceptions
class SnapshotCreateTagError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return('Error creating EBS snapshot tag: %s' % self.error)
class SnapshotCreateError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return('Error creating EBS snapshot: %s' % self.error)
class SnapshotsFetchError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return('Error fetching EBS snapshots: %s' % self.error)
class NoSnapshotsForVolume(Exception):
def __init__(self, volume_id):
self.volume_id = volume_id
def __str__(self):
return('Volume %s does not have snapshots' % self.volume_id)
class InvalidSnapshot(Exception):
def __init__(self, snapshot_id):
self.snapshot_id = snapshot_id
def __str__(self):
return('Snapshot %s does not exist' % self.snapshot_id)
# EBS volume exceptions
class VolumeFetchError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return('Error fetching EBS Volume: %s' % self.error)
class InvalidVolume(Exception):
def __init__(self, volume_id):
self.volume_id = volume_id
def __str__(self):
return('Volume %s does not exist' % self.volume_id)
class InvalidVolumeID(Exception):
def __init__(self, volume_id):
self.volume_id = volume_id
def __str__(self):
return('%s is an invalid volume id' % self.volume_id)
class InvalidVolumeType(Exception):
def __init__(self, vtype):
self.vtype = vtype
def __str__(self):
return('Invalid Volume Type: %s' % self.vtype)
class NoMatchingVolumesByDevice(Exception):
def __init__(self, instance_id, devices):
self.instance_id = instance_id
self.devices = devices
def __str__(self):
return('Device regex \'%s\' does not match any volume for instance %s'
% (self.devices, self.instance_id))
class NoMatchingVolumesByName(Exception):
def __init__(self, instance_id, name):
self.instance_id = instance_id
self.name = name
def __str__(self):
return('Name regex \'%s\' does not match any volume for instance %s'
% (self.name, self.instance_id))
class NoVolumes(Exception):
def __init__(self, instance_id):
self.instance_id = instance_id
def __str__(self):
return('There are no volumes for instance-id \'%s\''
% (self.instance_id))
class InvalidPIOPSValue(Exception):
def __init__(self, piops):
self.piops = piops
def __str__(self):
return('Invalid PIOPS volume: %s' % self.piops)
class InvalidPIOPSRatio(Exception):
def __init__(self, volume_id, piops, MAX_RATIO):
self.volume_id = volume_id
self.piops = piops
self.MAX_RATIO = MAX_RATIO
def __str__(self):
        return('Size/IOPS ratio (%s) for volume %s is not valid (must be'
               ' lower than or equal to %s)' % (self.piops, self.volume_id,
                                                self.MAX_RATIO))
class VolumeCreateTagError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return('Error creating EBS Volume tag: %s' % self.error)
class VolumeNotAttached(Exception):
def __init__(self, volume_id):
        self.volume_id = volume_id
def __str__(self):
return('Volume %s is not attached to any instance' % self.volume_id)
class ErrorAttachingVolume(Exception):
def __init__(self, volume_id, error):
self.volume_id = volume_id
        self.error = error
def __str__(self):
return('Error attaching volume %s: %s' % (self.volume_id, self.error))
class ErrorDeletingVolume(Exception):
def __init__(self, volume_id, error):
self.volume_id = volume_id
        self.error = error
def __str__(self):
return('Error deleting volume %s: %s' % (self.volume_id, self.error))
class ErrorDetachingVolume(Exception):
def __init__(self, volume_id, error):
self.volume_id = volume_id
        self.error = error
def __str__(self):
return('Error detaching volume %s: %s' % (self.volume_id, self.error))
class ErrorCreatingVolume(Exception):
def __init__(self, error):
        self.error = error
def __str__(self):
return('Error creating volume: %s' % self.error)
class ErrorAllVolumesSameType(Exception):
def __init__(self, vtype):
        self.vtype = vtype
def __str__(self):
return('You are trying to migrate all volumes from %s type to %s type'
% (self.vtype, self.vtype))
class ErrorAllVolumesSamePIOPS(Exception):
def __str__(self):
return('You are trying to migrate all volumes to the same IOPS value'
' they already have')
# Program argument exceptions
class OptionNotPresent(Exception):
def __init__(self, option):
self.option = option
def __str__(self):
return('Option --%s is not present' % self.option)
class OptionsAlternativesNotPresent(Exception):
def __init__(self, option1, option2):
self.option1 = option1
self.option2 = option2
def __str__(self):
return('You need to specify either --%s or --%s' % (self.option1,
self.option2))
class InvalidVolumeTypeio1(Exception):
def __str__(self):
return('Volume type is io1, specify iops (see --help)')
class OptInvalidValue(Exception):
def __init__(self, option):
self.option = option
def __str__(self):
return('Unknown value for --%s' % self.option)
class OptInvalidPosInteger(Exception):
def __init__(self, option):
self.option = option
def __str__(self):
        return('--%s must have a positive integer value' % self.option)
class OptInvalidBoolean(Exception):
def __init__(self, option):
self.option = option
def __str__(self):
        return('--%s must have a boolean value' % self.option)
class OptInvalidPosIntegerBoolean(Exception):
def __init__(self, option):
self.option = option
def __str__(self):
        return('--%s must have a positive integer or boolean value'
% self.option)
|
gpl-3.0
|
sameerparekh/pants
|
tests/python/pants_test/backend/python/tasks/checkstyle/test_print_statements.py
|
6
|
1158
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.python.tasks.checkstyle.common import Nit, PythonFile
from pants.backend.python.tasks.checkstyle.print_statements import PrintStatements
def test_print_override():
ps = PrintStatements(PythonFile.from_statement("""
from __future__ import print_function
print("I do what I want")
class Foo(object):
def print(self):
"I can do this because it's not a reserved word."
"""))
assert len(list(ps.nits())) == 0
def test_print_function():
ps = PrintStatements(PythonFile.from_statement("""
print("I do what I want")
"""))
assert len(list(ps.nits())) == 0
def test_print_statement():
ps = PrintStatements(PythonFile.from_statement("""
print["I do what I want"]
"""))
nits = list(ps.nits())
assert len(nits) == 1
assert nits[0].code == 'T607'
assert nits[0].severity == Nit.ERROR
|
apache-2.0
|
Vhati/TweetSubs
|
lib/common.py
|
1
|
1579
|
import htmlentitydefs
import logging
import os
import re
import sys
def html_unescape(text):
"""Removes HTML or XML character references and entities
from a text string.
http://effbot.org/zone/re-sub.htm#unescape-html
:param text: The HTML (or XML) source text.
:returns: The plain text, as a Unicode string, if necessary.
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
def asciify(utext):
"""Converts a unicode string to ascii, substituting some chars.
:param utext: A unicode string to convert (harmless if already ascii).
:returns: An asciified string.
"""
# To check a char: http://www.eki.ee/letter/chardata.cgi?ucode=2032
utext = utext.replace(u"\u2013", "-")
utext = utext.replace(u"\u2014", "-")
utext = utext.replace(u"\u2018", "'")
utext = utext.replace(u"\u2019", "'")
utext = utext.replace(u"\u2032", "'")
utext = utext.replace(u"\u201c", "\"")
utext = utext.replace(u"\u201d", "\"")
utext = utext.replace(u"\u2026", "...")
# Replace every other non-ascii char with "?".
text = utext.encode("ASCII", "replace")
return text
|
gpl-2.0
|
turbokongen/home-assistant
|
homeassistant/components/bmw_connected_drive/device_tracker.py
|
3
|
2559
|
"""Device tracker for BMW Connected Drive vehicles."""
import logging
from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from . import DOMAIN as BMW_DOMAIN, BMWConnectedDriveBaseEntity
from .const import CONF_ACCOUNT, DATA_ENTRIES
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the BMW ConnectedDrive tracker from config entry."""
account = hass.data[BMW_DOMAIN][DATA_ENTRIES][config_entry.entry_id][CONF_ACCOUNT]
entities = []
for vehicle in account.account.vehicles:
entities.append(BMWDeviceTracker(account, vehicle))
if not vehicle.state.is_vehicle_tracking_enabled:
_LOGGER.info(
"Tracking is (currently) disabled for vehicle %s (%s), defaulting to unknown",
vehicle.name,
vehicle.vin,
)
async_add_entities(entities, True)
class BMWDeviceTracker(BMWConnectedDriveBaseEntity, TrackerEntity):
"""BMW Connected Drive device tracker."""
def __init__(self, account, vehicle):
"""Initialize the Tracker."""
super().__init__(account, vehicle)
self._unique_id = vehicle.vin
self._location = (
vehicle.state.gps_position if vehicle.state.gps_position else (None, None)
)
self._name = vehicle.name
@property
def latitude(self):
"""Return latitude value of the device."""
return self._location[0]
@property
def longitude(self):
"""Return longitude value of the device."""
return self._location[1]
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self):
"""Return the unique ID."""
return self._unique_id
@property
def source_type(self):
"""Return the source type, eg gps or router, of the device."""
return SOURCE_TYPE_GPS
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return "mdi:car"
@property
def force_update(self):
"""All updates do not need to be written to the state machine."""
return False
def update(self):
"""Update state of the decvice tracker."""
self._location = (
self._vehicle.state.gps_position
if self._vehicle.state.is_vehicle_tracking_enabled
else (None, None)
)
|
apache-2.0
|
bolkedebruin/snakebite
|
doc/source/conf.py
|
3
|
8266
|
# -*- coding: utf-8 -*-
#
# snakebite documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 30 11:39:44 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
source_path = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '../../../')
sys.path.append(source_path)
import sphinx_rtd_theme
import snakebite.version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.ifconfig', 'sphinx.ext.autosummary']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'snakebite'
copyright = u'2013 - 2015, Spotify AB'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = snakebite.version.version()
# The full version, including alpha/beta/rc tags.
release = snakebite.version.version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'snakebitedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'snakebite.tex', u'snakebite Documentation',
u'Wouter de Bie', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'snakebite', u'snakebite Documentation',
[u'Wouter de Bie'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'snakebite', u'snakebite Documentation',
u'Wouter de Bie', 'snakebite', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autoclass_content = 'both'
|
apache-2.0
|
JingJunYin/tensorflow
|
tensorflow/python/eager/graph_only_ops.py
|
69
|
2363
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Graph-only versions of a few op functions, for internal use only."""
# Must be separate from array_ops to avoid a cyclic dependency.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
def graph_zeros_like(tensor):
"""Graph-only version of tf.zeros_like(), for internal use only."""
g = ops._get_graph_from_inputs([tensor]) # pylint: disable=protected-access
with g.as_default(), ops.name_scope(None, "zeros_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
dtype = tensor.dtype.base_dtype
dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum)
op = g.create_op("ZerosLike", [tensor], [dtype], input_types=[dtype],
attrs={"T": dtype_value}, name=name)
result, = op.outputs
return result
def graph_placeholder(dtype, shape, name=None):
"""Graph-only version of tf.placeholder(), for internal use only."""
dtype = dtype.base_dtype
dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum)
if isinstance(shape, (list, tuple)):
shape = tensor_shape.TensorShape(shape)
assert isinstance(shape, tensor_shape.TensorShape)
shape = attr_value_pb2.AttrValue(shape=shape.as_proto())
g = ops.get_default_graph()
with ops.name_scope(name, "placeholder", []) as name:
op = g.create_op("Placeholder", [], [dtype], input_types=[],
attrs={"dtype": dtype_value, "shape": shape}, name=name)
result, = op.outputs
return result
|
apache-2.0
|
cilcoberlin/panoptes
|
panoptes/analysis/panels/events/rrule.py
|
2
|
5544
|
from django.template.defaultfilters import striptags
from django.utils.translation import ugettext as _
from icalendar.prop import vDatetime, vRecur
import pytz
import re
class RepeatRule(object):
"""A representation of an iCalendar repeat rule."""
# A regex to find the timezone information in a recurrence rule
_TIMEZONE_MATCH = re.compile(r'BEGIN:VTIMEZONE.*END:VTIMEZONE', re.MULTILINE | re.DOTALL)
_TIMEZONE_ID_MATCH = re.compile(r'TZID:([a-zA-Z\/_]+)')
# The names of important keys in the repeat rule text
_REPEAT_KEYS = ('DTSTART', 'DTEND', 'RRULE')
# A mapping of iCalendar weekday abbreviations to their full names
_WEEKDAYS = {
'SU': _("sunday"),
'MO': _("monday"),
'TU': _("tuesday"),
'WE': _("wednesday"),
'TH': _("thursday"),
'FR': _("friday"),
'SA': _("saturday")
}
def __init__(self, recurrence, location):
"""Create a representation of the rule from a recurrence instance.
Arguments:
        recurrence -- a Google Calendar recurrence instance
        location -- the location whose timezone is used as a fallback
"""
self.location = location
self.recurrence = recurrence
self._parse_rule()
def __nonzero__(self):
"""Count as nonzero only if a valid repeat rule is defined."""
return self._rrule is not None
def _parse_ical_datetime(self, dt, tz_name):
"""Return a timezone-aware datetime from the iCalendar datetime format.
Since this will always be from a Google Calendar event feed, and each calendar
has a timezone associated with it, the datetime string will always be in the
TZID=Timezone_Name:YYYYMMDDTHHMMSS format.
If the datetime is invalid, this will return None. If no timezone is defined
for the datetime string, the timezone whose name is specified in the tz_name
string is used.
Arguments:
        dt -- a string of the iCalendar datetime
        tz_name -- the name of the timezone to fall back on when dt carries no TZID
Returns: a timezone-aware datetime instance or None
"""
dt_parts = dt.split(':')
_datetime = None
# Apply timezone information to the time, either from its own specification or
# from the passed timezone name
if len(dt_parts) == 2:
tzinfo = dt_parts[0].split('=')[1]
timezone = pytz.timezone(tzinfo)
_datetime = dt_parts[1]
else:
_datetime = dt_parts[0]
if tz_name:
timezone = pytz.timezone(tz_name)
# Return the datetime with timezone information
try:
parsed_datetime = vDatetime.from_ical(_datetime)
except ValueError:
return None
else:
return timezone.localize(parsed_datetime)
def _parse_rule(self):
"""Transform the iCalendar repeat rule into an object."""
self.start_time = None
self.end_time = None
self.starts = None
self.ends = None
self._rrule = None
self._frequency = None
if not self.recurrence:
return
rule_text = self.recurrence.text
# Extract the timezone for the recurring event, using the given location's
# timezone if a timezone for the event could not be found
try:
tz_name = self._TIMEZONE_ID_MATCH.search(rule_text).group(1)
except AttributeError:
tz_name = self.location.timezone.zone
# Strip the tags and timezone information from the raw rule text, and break
# the start and end date apart from the repeat rule
rule_parts = dict([(key, "") for key in self._REPEAT_KEYS])
raw_rule = striptags(rule_text)
raw_rule = re.sub(self._TIMEZONE_MATCH, "", raw_rule).strip()
for line in re.split(r'\n+', raw_rule):
line_parts = re.split(r'[:;]', line, 1)
if len(line_parts) == 2:
rule_parts[line_parts[0]] = line_parts[1].strip()
# Set a possible start date and time boundaries
start = self._parse_ical_datetime(rule_parts['DTSTART'], tz_name)
end = self._parse_ical_datetime(rule_parts['DTEND'], tz_name)
if start:
self.starts = start
self.start_time = start.time()
if end:
self.end_time = end.time()
# Parse the repeat rule
try:
self._rrule = vRecur.from_ical(rule_parts['RRULE'])
except ValueError:
return
self._frequency = self._rrule.get('FREQ', None)
# Get the localized end date if one is available
try:
until = self._rrule.get('UNTIL', [])[0]
except IndexError:
return
self.ends = until.astimezone(pytz.timezone(tz_name))
def repeats_between_times(self, start, end):
"""Return True if the repeat rule occurs between the given times.
Arguments:
start -- the starting time bound, as a time instance
end -- the ending time bound, as a time instance
"""
repeats = False
if self.start_time:
repeats |= start.hour <= self.start_time.hour < end.hour
if self.end_time:
repeats |= start.hour < self.end_time.hour <= end.hour
return repeats
def _frequency_has_key(self, key):
"""Return True if the repeat rule's frequency has the given key."""
return self._frequency and key in self._frequency
@property
def is_daily(self):
"""True if the rule repeats daily."""
return self._frequency_has_key('DAILY')
@property
def is_weekly(self):
"""True if the rule repeats weekly."""
return self._frequency_has_key('WEEKLY')
@property
def is_monthly(self):
"""True if the rule repeats monthly."""
return self._frequency_has_key('MONTHLY')
@property
def is_yearly(self):
"""True if the rule repeats yearly."""
return self._frequency_has_key('YEARLY')
@property
def weekdays(self):
"""The ordered names of the days on which the rule repeats."""
if self.is_weekly and 'BYDAY' in self._rrule:
return [self._WEEKDAYS.get(day, u"").title() for day in self._rrule['BYDAY']]
else:
return []
@property
def has_time_bounds(self):
"""True if the event has a start or end date defined."""
return bool(self.starts or self.ends)
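# Illustrative usage sketch, commented out because it needs real objects from a
# Google Calendar feed; ``event`` and ``location`` are hypothetical names here,
# and ``location`` must expose a ``timezone`` attribute as used by _parse_rule:
#
#   rule = RepeatRule(event.recurrence, location)
#   if rule and rule.is_weekly:
#       day_names = rule.weekdays            # e.g. [u"Monday", u"Wednesday"]
#   in_business_hours = rule.repeats_between_times(time(9), time(17))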
|
bsd-3-clause
|
ltilve/ChromiumGStreamerBackend
|
tools/telemetry/third_party/gsutilz/third_party/oauth2client/tests/test_keyring.py
|
17
|
3339
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for oauth2client.keyring_storage tests.
Unit tests for oauth2client.keyring_storage.
"""
__author__ = '[email protected] (Joe Gregorio)'
import datetime
import keyring
import unittest
import mock
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client.client import OAuth2Credentials
from oauth2client.keyring_storage import Storage
class OAuth2ClientKeyringTests(unittest.TestCase):
def test_non_existent_credentials_storage(self):
with mock.patch.object(keyring, 'get_password',
return_value=None,
autospec=True) as get_password:
s = Storage('my_unit_test', 'me')
credentials = s.get()
self.assertEquals(None, credentials)
get_password.assert_called_once_with('my_unit_test', 'me')
def test_malformed_credentials_in_storage(self):
with mock.patch.object(keyring, 'get_password',
return_value='{',
autospec=True) as get_password:
s = Storage('my_unit_test', 'me')
credentials = s.get()
self.assertEquals(None, credentials)
get_password.assert_called_once_with('my_unit_test', 'me')
def test_json_credentials_storage(self):
access_token = 'foo'
client_id = 'some_client_id'
client_secret = 'cOuDdkfjxxnv+'
refresh_token = '1/0/a.df219fjls0'
token_expiry = datetime.datetime.utcnow()
user_agent = 'refresh_checker/1.0'
credentials = OAuth2Credentials(
access_token, client_id, client_secret,
refresh_token, token_expiry, GOOGLE_TOKEN_URI,
user_agent)
# Setting autospec on a mock with an iterable side_effect is
# currently broken (http://bugs.python.org/issue17826), so instead
# we patch twice.
with mock.patch.object(keyring, 'get_password',
return_value=None,
autospec=True) as get_password:
with mock.patch.object(keyring, 'set_password',
return_value=None,
autospec=True) as set_password:
s = Storage('my_unit_test', 'me')
self.assertEquals(None, s.get())
s.put(credentials)
set_password.assert_called_once_with(
'my_unit_test', 'me', credentials.to_json())
get_password.assert_called_once_with('my_unit_test', 'me')
with mock.patch.object(keyring, 'get_password',
return_value=credentials.to_json(),
autospec=True) as get_password:
restored = s.get()
self.assertEqual('foo', restored.access_token)
self.assertEqual('some_client_id', restored.client_id)
get_password.assert_called_once_with('my_unit_test', 'me')
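# For context, a rough sketch of how the Storage class under test is typically
# used outside of unit tests (illustrative only; ``obtain_credentials`` is a
# hypothetical helper, and a working system keyring backend is required):
#
#   s = Storage('my_app', 'user@example.com')
#   credentials = s.get()          # None if nothing has been stored yet
#   if credentials is None:
#       credentials = obtain_credentials()
#       s.put(credentials)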
|
bsd-3-clause
|
PatrickChrist/scikit-learn
|
sklearn/datasets/tests/test_rcv1.py
|
322
|
2414
|
"""Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
    # test number of samples for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
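# Rough usage sketch outside the test harness (illustrative; the first call
# downloads the dataset to data_home unless download_if_missing=False is passed):
#
#   rcv1 = fetch_rcv1(subset='train', shuffle=True, random_state=0)
#   X, Y = rcv1.data, rcv1.target      # scipy.sparse matrices
#   print(rcv1.target_names[:5])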
|
bsd-3-clause
|
renzifeng/renzifeng
|
node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py
|
1366
|
120842
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXFileReference
object as a "weak" reference: it does not "own" the PBXFileReference, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, if the generator chose new random
identifiers each time the project files were generated, it would be difficult
to compare "used" project files to "pristine" ones produced by this module,
and every object identifier would appear to change whenever updated projects
are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_sha1 = hashlib.sha1
except ImportError:
import sha
_new_sha1 = sha.new
# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[\x00-\x1f]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
"""Given input_path, returns a tuple with sourceTree and path values.
Examples:
input_path (source_tree, output_path)
'$(VAR)/path' ('VAR', 'path')
'$(VAR)' ('VAR', None)
'path' (None, 'path')
"""
source_group_match = _path_leading_variable.match(input_path)
if source_group_match:
source_tree = source_group_match.group(1)
output_path = source_group_match.group(3) # This may be None.
else:
source_tree = None
output_path = input_path
return (source_tree, output_path)
def ConvertVariablesToShellSyntax(input_string):
return re.sub(r'\$\((.*?)\)', '${\\1}', input_string)
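# A few assumed examples of the two helpers above, included for illustration
# and derived directly from the regular expressions they use:
#   SourceTreeAndPathFromPath('$(SDKROOT)/usr/lib')  -> ('SDKROOT', 'usr/lib')
#   SourceTreeAndPathFromPath('$(SDKROOT)')          -> ('SDKROOT', None)
#   ConvertVariablesToShellSyntax('$(SRCROOT)/foo')  -> '${SRCROOT}/foo'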
class XCObject(object):
"""The abstract base of all class types used in Xcode project files.
Class variables:
_schema: A dictionary defining the properties of this class. The keys to
_schema are string property keys as used in project files. Values
are a list of four or five elements:
[ is_list, property_type, is_strong, is_required, default ]
is_list: True if the property described is a list, as opposed
to a single element.
property_type: The type to use as the value of the property,
or if is_list is True, the type to use for each
element of the value's list. property_type must
be an XCObject subclass, or one of the built-in
types str, int, or dict.
is_strong: If property_type is an XCObject subclass, is_strong
is True to assert that this class "owns," or serves
as parent, to the property value (or, if is_list is
True, values). is_strong must be False if
property_type is not an XCObject subclass.
is_required: True if the property is required for the class.
Note that is_required being True does not preclude
an empty string ("", in the case of property_type
str) or list ([], in the case of is_list True) from
being set for the property.
             default: Optional. If is_required is True, default may be set
to provide a default value for objects that do not supply
their own value. If is_required is True and default
is not provided, users of the class must supply their own
value for the property.
Note that although the values of the array are expressed in
boolean terms, subclasses provide values as integers to conserve
horizontal space.
_should_print_single_line: False in XCObject. Subclasses whose objects
should be written to the project file in the
alternate single-line format, such as
PBXFileReference and PBXBuildFile, should
set this to True.
_encode_transforms: Used by _EncodeString to encode unprintable characters.
The index into this list is the ordinal of the
character to transform; each value is a string
used to represent the character in the output. XCObject
provides an _encode_transforms list suitable for most
XCObject subclasses.
_alternate_encode_transforms: Provided for subclasses that wish to use
the alternate encoding rules. Xcode seems
to use these rules when printing objects in
single-line format. Subclasses that desire
this behavior should set _encode_transforms
to _alternate_encode_transforms.
_hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
to construct this object's ID. Most classes that need custom
hashing behavior should do it by overriding Hashables,
but in some cases an object's parent may wish to push a
hashable value into its child, and it can do so by appending
to _hashables.
Attributes:
id: The object's identifier, a 24-character uppercase hexadecimal string.
Usually, objects being created should not set id until the entire
project file structure is built. At that point, UpdateIDs() should
be called on the root object to assign deterministic values for id to
each object in the tree.
parent: The object's parent. This is set by a parent XCObject when a child
object is added to it.
_properties: The object's property dictionary. An object's properties are
described by its class' _schema variable.
"""
_schema = {}
_should_print_single_line = False
# See _EncodeString.
_encode_transforms = []
i = 0
while i < ord(' '):
_encode_transforms.append('\\U%04x' % i)
i = i + 1
_encode_transforms[7] = '\\a'
_encode_transforms[8] = '\\b'
_encode_transforms[9] = '\\t'
_encode_transforms[10] = '\\n'
_encode_transforms[11] = '\\v'
_encode_transforms[12] = '\\f'
_encode_transforms[13] = '\\n'
_alternate_encode_transforms = list(_encode_transforms)
_alternate_encode_transforms[9] = chr(9)
_alternate_encode_transforms[10] = chr(10)
_alternate_encode_transforms[11] = chr(11)
def __init__(self, properties=None, id=None, parent=None):
self.id = id
self.parent = parent
self._properties = {}
self._hashables = []
self._SetDefaultsFromSchema()
self.UpdateProperties(properties)
def __repr__(self):
try:
name = self.Name()
except NotImplementedError:
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Copy(self):
"""Make a copy of this object.
The new object will have its own copy of lists and dicts. Any XCObject
objects owned by this object (marked "strong") will be copied in the
new object, even those found in lists. If this object has any weak
references to other XCObjects, the same references are added to the new
object without making a copy.
"""
that = self.__class__(id=self.id, parent=self.parent)
for key, value in self._properties.iteritems():
is_strong = self._schema[key][2]
if isinstance(value, XCObject):
if is_strong:
new_value = value.Copy()
new_value.parent = that
that._properties[key] = new_value
else:
that._properties[key] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
that._properties[key] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe to
# call Copy.
that._properties[key] = []
for item in value:
new_item = item.Copy()
new_item.parent = that
that._properties[key].append(new_item)
else:
that._properties[key] = value[:]
elif isinstance(value, dict):
# dicts are never strong.
if is_strong:
raise TypeError('Strong dict for key ' + key + ' in ' + \
self.__class__.__name__)
else:
that._properties[key] = value.copy()
else:
raise TypeError('Unexpected type ' + value.__class__.__name__ + \
' for key ' + key + ' in ' + self.__class__.__name__)
return that
def Name(self):
"""Return the name corresponding to an object.
Not all objects necessarily need to be nameable, and not all that do have
a "name" property. Override as needed.
"""
# If the schema indicates that "name" is required, try to access the
# property even if it doesn't exist. This will result in a KeyError
# being raised for the property that should be present, which seems more
# appropriate than NotImplementedError in this case.
if 'name' in self._properties or \
('name' in self._schema and self._schema['name'][3]):
return self._properties['name']
raise NotImplementedError(self.__class__.__name__ + ' must implement Name')
def Comment(self):
"""Return a comment string for the object.
Most objects just use their name as the comment, but PBXProject uses
different values.
The returned comment is not escaped and does not have any comment marker
strings applied to it.
"""
return self.Name()
def Hashables(self):
hashables = [self.__class__.__name__]
name = self.Name()
if name != None:
hashables.append(name)
hashables.extend(self._hashables)
return hashables
def HashablesForChild(self):
return None
def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
"""Set "id" properties deterministically.
An object's "id" property is set based on a hash of its class type and
name, as well as the class type and name of all ancestor objects. As
such, it is only advisable to call ComputeIDs once an entire project file
tree is built.
If recursive is True, recurse into all descendant objects and update their
hashes.
If overwrite is True, any existing value set in the "id" property will be
replaced.
"""
def _HashUpdate(hash, data):
"""Update hash with data's length and contents.
If the hash were updated only with the value of data, it would be
possible for clowns to induce collisions by manipulating the names of
their objects. By adding the length, it's exceedingly less likely that
ID collisions will be encountered, intentionally or not.
"""
hash.update(struct.pack('>i', len(data)))
hash.update(data)
if seed_hash is None:
seed_hash = _new_sha1()
hash = seed_hash.copy()
hashables = self.Hashables()
assert len(hashables) > 0
for hashable in hashables:
_HashUpdate(hash, hashable)
if recursive:
hashables_for_child = self.HashablesForChild()
if hashables_for_child is None:
child_hash = hash
else:
assert len(hashables_for_child) > 0
child_hash = seed_hash.copy()
for hashable in hashables_for_child:
_HashUpdate(child_hash, hashable)
for child in self.Children():
child.ComputeIDs(recursive, overwrite, child_hash)
if overwrite or self.id is None:
      # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest is
      # 160 bits. Instead of throwing out 64 bits of the digest, xor them
# into the portion that gets used.
assert hash.digest_size % 4 == 0
digest_int_count = hash.digest_size / 4
digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
id_ints = [0, 0, 0]
for index in xrange(0, digest_int_count):
id_ints[index % 3] ^= digest_ints[index]
self.id = '%08X%08X%08X' % tuple(id_ints)
def EnsureNoIDCollisions(self):
"""Verifies that no two objects have the same ID. Checks all descendants.
"""
ids = {}
descendants = self.Descendants()
for descendant in descendants:
if descendant.id in ids:
other = ids[descendant.id]
raise KeyError(
'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
(descendant.id, str(descendant._properties),
str(other._properties), self._properties['rootObject'].Name()))
ids[descendant.id] = descendant
def Children(self):
"""Returns a list of all of this object's owned (strong) children."""
children = []
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong) = attributes[0:3]
if is_strong and property in self._properties:
if not is_list:
children.append(self._properties[property])
else:
children.extend(self._properties[property])
return children
def Descendants(self):
"""Returns a list of all of this object's descendants, including this
object.
"""
children = self.Children()
descendants = [self]
for child in children:
descendants.extend(child.Descendants())
return descendants
def PBXProjectAncestor(self):
# The base case for recursion is defined at PBXProject.PBXProjectAncestor.
if self.parent:
return self.parent.PBXProjectAncestor()
return None
def _EncodeComment(self, comment):
"""Encodes a comment to be placed in the project file output, mimicing
Xcode behavior.
"""
# This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If
# the string already contains a "*/", it is turned into "(*)/". This keeps
# the file writer from outputting something that would be treated as the
# end of a comment in the middle of something intended to be entirely a
# comment.
return '/* ' + comment.replace('*/', '(*)/') + ' */'
def _EncodeTransform(self, match):
# This function works closely with _EncodeString. It will only be called
    # by re.sub with match.group(0) containing a character matched by the
    # _escaped expression.
char = match.group(0)
# Backslashes (\) and quotation marks (") are always replaced with a
# backslash-escaped version of the same. Everything else gets its
# replacement from the class' _encode_transforms array.
if char == '\\':
return '\\\\'
if char == '"':
return '\\"'
return self._encode_transforms[ord(char)]
def _EncodeString(self, value):
"""Encodes a string to be placed in the project file output, mimicing
Xcode behavior.
"""
# Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
# $ (dollar sign), . (period), and _ (underscore) is present. Also use
# quotation marks to represent empty strings.
#
# Escape " (double-quote) and \ (backslash) by preceding them with a
# backslash.
#
# Some characters below the printable ASCII range are encoded specially:
# 7 ^G BEL is encoded as "\a"
# 8 ^H BS is encoded as "\b"
# 11 ^K VT is encoded as "\v"
# 12 ^L NP is encoded as "\f"
# 127 ^? DEL is passed through as-is without escaping
# - In PBXFileReference and PBXBuildFile objects:
# 9 ^I HT is passed through as-is without escaping
# 10 ^J NL is passed through as-is without escaping
# 13 ^M CR is passed through as-is without escaping
# - In other objects:
# 9 ^I HT is encoded as "\t"
# 10 ^J NL is encoded as "\n"
# 13 ^M CR is encoded as "\n" rendering it indistinguishable from
# 10 ^J NL
# All other characters within the ASCII control character range (0 through
# 31 inclusive) are encoded as "\U001f" referring to the Unicode code point
# in hexadecimal. For example, character 14 (^N SO) is encoded as "\U000e".
# Characters above the ASCII range are passed through to the output encoded
# as UTF-8 without any escaping. These mappings are contained in the
# class' _encode_transforms list.
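    # A few assumed examples of the resulting behavior (illustrative only):
    #   abc_1.2              -> abc_1.2          (matches _unquoted, printed bare)
    #   has space            -> "has space"
    #   a___b                -> "a___b"          (forced into quotes by _quoted)
    #   'say "hi"' + newline -> "say \"hi\"\n"   (escaping plus the newline transform)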
if _unquoted.search(value) and not _quoted.search(value):
return value
return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
def _XCPrint(self, file, tabs, line):
file.write('\t' * tabs + line)
def _XCPrintableValue(self, tabs, value, flatten_list=False):
"""Returns a representation of value that may be printed in a project file,
    mimicking Xcode's behavior.
_XCPrintableValue can handle str and int values, XCObjects (which are
made printable by returning their id property), and list and dict objects
composed of any of the above types. When printing a list or dict, and
_should_print_single_line is False, the tabs parameter is used to determine
how much to indent the lines corresponding to the items in the list or
dict.
If flatten_list is True, single-element lists will be transformed into
strings.
"""
printable = ''
comment = None
if self._should_print_single_line:
sep = ' '
element_tabs = ''
end_tabs = ''
else:
sep = '\n'
element_tabs = '\t' * (tabs + 1)
end_tabs = '\t' * tabs
if isinstance(value, XCObject):
printable += value.id
comment = value.Comment()
elif isinstance(value, str):
printable += self._EncodeString(value)
elif isinstance(value, unicode):
printable += self._EncodeString(value.encode('utf-8'))
elif isinstance(value, int):
printable += str(value)
elif isinstance(value, list):
if flatten_list and len(value) <= 1:
if len(value) == 0:
printable += self._EncodeString('')
else:
printable += self._EncodeString(value[0])
else:
printable = '(' + sep
for item in value:
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item, flatten_list) + \
',' + sep
printable += end_tabs + ')'
elif isinstance(value, dict):
printable = '{' + sep
for item_key, item_value in sorted(value.iteritems()):
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
sep
printable += end_tabs + '}'
else:
raise TypeError("Can't make " + value.__class__.__name__ + ' printable')
if comment != None:
printable += ' ' + self._EncodeComment(comment)
return printable
def _XCKVPrint(self, file, tabs, key, value):
"""Prints a key and value, members of an XCObject's _properties dictionary,
to file.
tabs is an int identifying the indentation level. If the class'
_should_print_single_line variable is True, tabs is ignored and the
    key-value pair will be followed by a space instead of a newline.
"""
if self._should_print_single_line:
printable = ''
after_kv = ' '
else:
printable = '\t' * tabs
after_kv = '\n'
# Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
# objects without comments. Sometimes it prints them with comments, but
# the majority of the time, it doesn't. To avoid unnecessary changes to
# the project file after Xcode opens it, don't write comments for
# remoteGlobalIDString. This is a sucky hack and it would certainly be
# cleaner to extend the schema to indicate whether or not a comment should
# be printed, but since this is the only case where the problem occurs and
# Xcode itself can't seem to make up its mind, the hack will suffice.
#
# Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
if key == 'remoteGlobalIDString' and isinstance(self,
PBXContainerItemProxy):
value_to_print = value.id
else:
value_to_print = value
# PBXBuildFile's settings property is represented in the output as a dict,
# but a hack here has it represented as a string. Arrange to strip off the
# quotes so that it shows up in the output as expected.
if key == 'settings' and isinstance(self, PBXBuildFile):
strip_value_quotes = True
else:
strip_value_quotes = False
# In another one-off, let's set flatten_list on buildSettings properties
# of XCBuildConfiguration objects, because that's how Xcode treats them.
if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
flatten_list = True
else:
flatten_list = False
try:
printable_key = self._XCPrintableValue(tabs, key, flatten_list)
printable_value = self._XCPrintableValue(tabs, value_to_print,
flatten_list)
if strip_value_quotes and len(printable_value) > 1 and \
printable_value[0] == '"' and printable_value[-1] == '"':
printable_value = printable_value[1:-1]
printable += printable_key + ' = ' + printable_value + ';' + after_kv
except TypeError, e:
gyp.common.ExceptionAppend(e,
'while printing key "%s"' % key)
raise
self._XCPrint(file, 0, printable)
def Print(self, file=sys.stdout):
"""Prints a reprentation of this object to file, adhering to Xcode output
formatting.
"""
self.VerifyHasRequiredProperties()
if self._should_print_single_line:
# When printing an object in a single line, Xcode doesn't put any space
# between the beginning of a dictionary (or presumably a list) and the
# first contained item, so you wind up with snippets like
# ...CDEF = {isa = PBXFileReference; fileRef = 0123...
# If it were me, I would have put a space in there after the opening
# curly, but I guess this is just another one of those inconsistencies
# between how Xcode prints PBXFileReference and PBXBuildFile objects as
# compared to other objects. Mimic Xcode's behavior here by using an
# empty string for sep.
sep = ''
end_tabs = 0
else:
sep = '\n'
end_tabs = 2
# Start the object. For example, '\t\tPBXProject = {\n'.
self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)
# "isa" isn't in the _properties dictionary, it's an intrinsic property
# of the class which the object belongs to. Xcode always outputs "isa"
# as the first element of an object dictionary.
self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)
# The remaining elements of an object dictionary are sorted alphabetically.
for property, value in sorted(self._properties.iteritems()):
self._XCKVPrint(file, 3, property, value)
# End the object.
self._XCPrint(file, end_tabs, '};\n')
def UpdateProperties(self, properties, do_copy=False):
"""Merge the supplied properties into the _properties dictionary.
The input properties must adhere to the class schema or a KeyError or
TypeError exception will be raised. If adding an object of an XCObject
subclass and the schema indicates a strong relationship, the object's
parent will be set to this object.
If do_copy is True, then lists, dicts, strong-owned XCObjects, and
strong-owned XCObjects in lists will be copied instead of having their
references added.
"""
if properties is None:
return
for property, value in properties.iteritems():
# Make sure the property is in the schema.
if not property in self._schema:
raise KeyError(property + ' not in ' + self.__class__.__name__)
# Make sure the property conforms to the schema.
(is_list, property_type, is_strong) = self._schema[property][0:3]
if is_list:
if value.__class__ != list:
raise TypeError(
property + ' of ' + self.__class__.__name__ + \
' must be list, not ' + value.__class__.__name__)
for item in value:
if not isinstance(item, property_type) and \
not (item.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError(
'item of ' + property + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
item.__class__.__name__)
elif not isinstance(value, property_type) and \
not (value.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError(
property + ' of ' + self.__class__.__name__ + ' must be ' + \
property_type.__name__ + ', not ' + value.__class__.__name__)
# Checks passed, perform the assignment.
if do_copy:
if isinstance(value, XCObject):
if is_strong:
self._properties[property] = value.Copy()
else:
self._properties[property] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
self._properties[property] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe
# to call Copy.
self._properties[property] = []
for item in value:
self._properties[property].append(item.Copy())
else:
self._properties[property] = value[:]
elif isinstance(value, dict):
self._properties[property] = value.copy()
else:
raise TypeError("Don't know how to copy a " + \
value.__class__.__name__ + ' object for ' + \
property + ' in ' + self.__class__.__name__)
else:
self._properties[property] = value
# Set up the child's back-reference to this object. Don't use |value|
# any more because it may not be right if do_copy is true.
if is_strong:
if not is_list:
self._properties[property].parent = self
else:
for item in self._properties[property]:
item.parent = self
def HasProperty(self, key):
return key in self._properties
def GetProperty(self, key):
return self._properties[key]
def SetProperty(self, key, value):
self.UpdateProperties({key: value})
def DelProperty(self, key):
if key in self._properties:
del self._properties[key]
def AppendProperty(self, key, value):
# TODO(mark): Support ExtendProperty too (and make this call that)?
# Schema validation.
if not key in self._schema:
raise KeyError(key + ' not in ' + self.__class__.__name__)
(is_list, property_type, is_strong) = self._schema[key][0:3]
if not is_list:
raise TypeError(key + ' of ' + self.__class__.__name__ + ' must be list')
if not isinstance(value, property_type):
raise TypeError('item of ' + key + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
value.__class__.__name__)
# If the property doesn't exist yet, create a new empty list to receive the
# item.
if not key in self._properties:
self._properties[key] = []
# Set up the ownership link.
if is_strong:
value.parent = self
# Store the item.
self._properties[key].append(value)
def VerifyHasRequiredProperties(self):
"""Ensure that all properties identified as required by the schema are
set.
"""
# TODO(mark): A stronger verification mechanism is needed. Some
# subclasses need to perform validation beyond what the schema can enforce.
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and not property in self._properties:
raise KeyError(self.__class__.__name__ + ' requires ' + property)
def _SetDefaultsFromSchema(self):
"""Assign object default values according to the schema. This will not
overwrite properties that have already been set."""
defaults = {}
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and len(attributes) >= 5 and \
not property in self._properties:
default = attributes[4]
defaults[property] = default
if len(defaults) > 0:
# Use do_copy=True so that each new object gets its own copy of strong
# objects, lists, and dicts.
self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
"""Abstract base for PBXGroup and PBXFileReference. Not represented in a
project file."""
# TODO(mark): Do name and path belong here? Probably so.
# If path is set and name is not, name may have a default value. Name will
# be set to the basename of path, if the basename of path is different from
# the full value of path. If path is already just a leaf name, name will
# not be set.
_schema = XCObject._schema.copy()
_schema.update({
'comments': [0, str, 0, 0],
'fileEncoding': [0, str, 0, 0],
'includeInIndex': [0, int, 0, 0],
'indentWidth': [0, int, 0, 0],
'lineEnding': [0, int, 0, 0],
'sourceTree': [0, str, 0, 1, '<group>'],
'tabWidth': [0, int, 0, 0],
'usesTabs': [0, int, 0, 0],
'wrapsLines': [0, int, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
if 'path' in self._properties and not 'name' in self._properties:
path = self._properties['path']
name = posixpath.basename(path)
if name != '' and path != name:
self.SetProperty('name', name)
if 'path' in self._properties and \
(not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>'):
# If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
# the variable out and make the path be relative to that variable by
# assigning the variable name as the sourceTree.
(source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
if source_tree != None:
self._properties['sourceTree'] = source_tree
if path != None:
self._properties['path'] = path
if source_tree != None and path is None and \
not 'name' in self._properties:
# The path was of the form "$(SDKROOT)" with no path following it.
# This object is now relative to that variable, so it has no path
# attribute of its own. It does, however, keep a name.
del self._properties['path']
self._properties['name'] = source_tree
def Name(self):
if 'name' in self._properties:
return self._properties['name']
elif 'path' in self._properties:
return self._properties['path']
else:
# This happens in the case of the root PBXGroup.
return None
def Hashables(self):
"""Custom hashables for XCHierarchicalElements.
XCHierarchicalElements are special. Generally, their hashes shouldn't
change if the paths don't change. The normal XCObject implementation of
Hashables adds a hashable for each object, which means that if
the hierarchical structure changes (possibly due to changes caused when
TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
the hashes will change. For example, if a project file initially contains
    a/b/f1 and the groups a and b are collapsed into a single group a/b, f1
    will have a single parent
a/b. If someone later adds a/f2 to the project file, a/b can no longer be
collapsed, and f1 winds up with parent b and grandparent a. That would
be sufficient to change f1's hash.
To counteract this problem, hashables for all XCHierarchicalElements except
for the main group (which has neither a name nor a path) are taken to be
just the set of path components. Because hashables are inherited from
parents, this provides assurance that a/b/f1 has the same set of hashables
whether its parent is b or a/b.
The main group is a special case. As it is permitted to have no name or
path, it is permitted to use the standard XCObject hash mechanism. This
is not considered a problem because there can be only one main group.
"""
if self == self.PBXProjectAncestor()._properties['mainGroup']:
# super
return XCObject.Hashables(self)
hashables = []
# Put the name in first, ensuring that if TakeOverOnlyChild collapses
# children into a top-level group like "Source", the name always goes
# into the list of hashables without interfering with path components.
if 'name' in self._properties:
# Make it less likely for people to manipulate hashes by following the
# pattern of always pushing an object type value onto the list first.
hashables.append(self.__class__.__name__ + '.name')
hashables.append(self._properties['name'])
# NOTE: This still has the problem that if an absolute path is encountered,
# including paths with a sourceTree, they'll still inherit their parents'
# hashables, even though the paths aren't relative to their parents. This
# is not expected to be much of a problem in practice.
path = self.PathFromSourceTreeAndPath()
if path != None:
components = path.split(posixpath.sep)
for component in components:
hashables.append(self.__class__.__name__ + '.path')
hashables.append(component)
hashables.extend(self._hashables)
return hashables
def Compare(self, other):
# Allow comparison of these types. PBXGroup has the highest sort rank;
# PBXVariantGroup is treated as equal to PBXFileReference.
valid_class_types = {
PBXFileReference: 'file',
PBXGroup: 'group',
PBXVariantGroup: 'file',
}
self_type = valid_class_types[self.__class__]
other_type = valid_class_types[other.__class__]
if self_type == other_type:
# If the two objects are of the same sort rank, compare their names.
return cmp(self.Name(), other.Name())
# Otherwise, sort groups before everything else.
if self_type == 'group':
return -1
return 1
def CompareRootGroup(self, other):
# This function should be used only to compare direct children of the
# containing PBXProject's mainGroup. These groups should appear in the
# listed order.
# TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
# generator should have a way of influencing this list rather than having
# to hardcode for the generator here.
order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
'Build']
# If the groups aren't in the listed order, do a name comparison.
# Otherwise, groups in the listed order should come before those that
# aren't.
self_name = self.Name()
other_name = other.Name()
self_in = isinstance(self, PBXGroup) and self_name in order
    other_in = isinstance(other, PBXGroup) and other_name in order
if not self_in and not other_in:
return self.Compare(other)
if self_name in order and not other_name in order:
return -1
if other_name in order and not self_name in order:
return 1
# If both groups are in the listed order, go by the defined order.
self_index = order.index(self_name)
other_index = order.index(other_name)
if self_index < other_index:
return -1
if self_index > other_index:
return 1
return 0
def PathFromSourceTreeAndPath(self):
# Turn the object's sourceTree and path properties into a single flat
# string of a form comparable to the path parameter. If there's a
# sourceTree property other than "<group>", wrap it in $(...) for the
# comparison.
components = []
if self._properties['sourceTree'] != '<group>':
components.append('$(' + self._properties['sourceTree'] + ')')
if 'path' in self._properties:
components.append(self._properties['path'])
if len(components) > 0:
return posixpath.join(*components)
return None
def FullPath(self):
# Returns a full path to self relative to the project file, or relative
# to some other source tree. Start with self, and walk up the chain of
# parents prepending their paths, if any, until no more parents are
# available (project-relative path) or until a path relative to some
# source tree is found.
xche = self
path = None
while isinstance(xche, XCHierarchicalElement) and \
(path is None or \
(not path.startswith('/') and not path.startswith('$'))):
this_path = xche.PathFromSourceTreeAndPath()
if this_path != None and path != None:
path = posixpath.join(this_path, path)
elif this_path != None:
path = this_path
xche = xche.parent
return path
class PBXGroup(XCHierarchicalElement):
"""
Attributes:
_children_by_path: Maps pathnames of children of this PBXGroup to the
actual child XCHierarchicalElement objects.
_variant_children_by_name_and_path: Maps (name, path) tuples of
PBXVariantGroup children to the actual child PBXVariantGroup objects.
"""
_schema = XCHierarchicalElement._schema.copy()
_schema.update({
'children': [1, XCHierarchicalElement, 1, 1, []],
'name': [0, str, 0, 0],
'path': [0, str, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCHierarchicalElement.__init__(self, properties, id, parent)
self._children_by_path = {}
self._variant_children_by_name_and_path = {}
for child in self._properties.get('children', []):
self._AddChildToDicts(child)
def Hashables(self):
# super
hashables = XCHierarchicalElement.Hashables(self)
    # It is not sufficient to just rely on name and parent to build a unique
    # hashable: a node could have two child PBXGroups sharing a common name.
# To add entropy the hashable is enhanced with the names of all its
# children.
for child in self._properties.get('children', []):
child_name = child.Name()
if child_name != None:
hashables.append(child_name)
return hashables
def HashablesForChild(self):
# To avoid a circular reference the hashables used to compute a child id do
# not include the child names.
return XCHierarchicalElement.Hashables(self)
def _AddChildToDicts(self, child):
# Sets up this PBXGroup object's dicts to reference the child properly.
child_path = child.PathFromSourceTreeAndPath()
if child_path:
if child_path in self._children_by_path:
raise ValueError('Found multiple children with path ' + child_path)
self._children_by_path[child_path] = child
if isinstance(child, PBXVariantGroup):
child_name = child._properties.get('name', None)
key = (child_name, child_path)
if key in self._variant_children_by_name_and_path:
raise ValueError('Found multiple PBXVariantGroup children with ' + \
'name ' + str(child_name) + ' and path ' + \
str(child_path))
self._variant_children_by_name_and_path[key] = child
def AppendChild(self, child):
# Callers should use this instead of calling
# AppendProperty('children', child) directly because this function
# maintains the group's dicts.
self.AppendProperty('children', child)
self._AddChildToDicts(child)
def GetChildByName(self, name):
# This is not currently optimized with a dict as GetChildByPath is because
# it has few callers. Most callers probably want GetChildByPath. This
# function is only useful to get children that have names but no paths,
    # which is rare. The children of the main group ("Source", "Products",
    # etc.) are pretty much the only case where this is likely to come up.
#
# TODO(mark): Maybe this should raise an error if more than one child is
# present with the same name.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if child.Name() == name:
return child
return None
def GetChildByPath(self, path):
if not path:
return None
if path in self._children_by_path:
return self._children_by_path[path]
return None
def GetChildByRemoteObject(self, remote_object):
# This method is a little bit esoteric. Given a remote_object, which
# should be a PBXFileReference in another project file, this method will
# return this group's PBXReferenceProxy object serving as a local proxy
# for the remote PBXFileReference.
#
# This function might benefit from a dict optimization as GetChildByPath
# for some workloads, but profiling shows that it's not currently a
# problem.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if not isinstance(child, PBXReferenceProxy):
continue
container_proxy = child._properties['remoteRef']
if container_proxy._properties['remoteGlobalIDString'] == remote_object:
return child
return None
def AddOrGetFileByPath(self, path, hierarchical):
"""Returns an existing or new file reference corresponding to path.
If hierarchical is True, this method will create or use the necessary
hierarchical group structure corresponding to path. Otherwise, it will
look in and create an item in the current group only.
If an existing matching reference is found, it is returned, otherwise, a
new one will be created, added to the correct group, and returned.
If path identifies a directory by virtue of carrying a trailing slash,
this method returns a PBXFileReference of "folder" type. If path
identifies a variant, by virtue of it identifying a file inside a directory
with an ".lproj" extension, this method returns a PBXVariantGroup
containing the variant named by path, and possibly other variants. For
all other paths, a "normal" PBXFileReference will be returned.
"""
# Adding or getting a directory? Directories end with a trailing slash.
is_dir = False
if path.endswith('/'):
is_dir = True
path = posixpath.normpath(path)
if is_dir:
path = path + '/'
# Adding or getting a variant? Variants are files inside directories
# with an ".lproj" extension. Xcode uses variants for localization. For
# a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
# MainMenu.nib inside path/to, and give it a variant named Language. In
# this example, grandparent would be set to path/to and parent_root would
# be set to Language.
variant_name = None
parent = posixpath.dirname(path)
grandparent = posixpath.dirname(parent)
parent_basename = posixpath.basename(parent)
(parent_root, parent_ext) = posixpath.splitext(parent_basename)
if parent_ext == '.lproj':
variant_name = parent_root
if grandparent == '':
grandparent = None
# Putting a directory inside a variant group is not currently supported.
assert not is_dir or variant_name is None
path_split = path.split(posixpath.sep)
if len(path_split) == 1 or \
((is_dir or variant_name != None) and len(path_split) == 2) or \
not hierarchical:
# The PBXFileReference or PBXVariantGroup will be added to or gotten from
# this PBXGroup, no recursion necessary.
if variant_name is None:
# Add or get a PBXFileReference.
file_ref = self.GetChildByPath(path)
if file_ref != None:
assert file_ref.__class__ == PBXFileReference
else:
file_ref = PBXFileReference({'path': path})
self.AppendChild(file_ref)
else:
# Add or get a PBXVariantGroup. The variant group name is the same
# as the basename (MainMenu.nib in the example above). grandparent
# specifies the path to the variant group itself, and path_split[-2:]
# is the path of the specific variant relative to its group.
variant_group_name = posixpath.basename(path)
variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
variant_group_name, grandparent)
variant_path = posixpath.sep.join(path_split[-2:])
variant_ref = variant_group_ref.GetChildByPath(variant_path)
if variant_ref != None:
assert variant_ref.__class__ == PBXFileReference
else:
variant_ref = PBXFileReference({'name': variant_name,
'path': variant_path})
variant_group_ref.AppendChild(variant_ref)
# The caller is interested in the variant group, not the specific
# variant file.
file_ref = variant_group_ref
return file_ref
else:
# Hierarchical recursion. Add or get a PBXGroup corresponding to the
# outermost path component, and then recurse into it, chopping off that
# path component.
next_dir = path_split[0]
group_ref = self.GetChildByPath(next_dir)
if group_ref != None:
assert group_ref.__class__ == PBXGroup
else:
group_ref = PBXGroup({'path': next_dir})
self.AppendChild(group_ref)
return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
hierarchical)
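  # NOTE: Illustrative usage sketch added for clarity; it is not part of the
  # original module, and the group and path names below are hypothetical.
  #
  #   group = PBXGroup({'name': 'Source'})
  #   # Hierarchical add: creates (or reuses) nested PBXGroups for "a" and
  #   # "b", then returns a PBXFileReference for "c.cc".
  #   ref = group.AddOrGetFileByPath('a/b/c.cc', True)
  #   # Variant add: returns a PBXVariantGroup named "MainMenu.nib" holding
  #   # a PBXFileReference variant named "en".
  #   nib = group.AddOrGetFileByPath('res/en.lproj/MainMenu.nib', True)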
def AddOrGetVariantGroupByNameAndPath(self, name, path):
"""Returns an existing or new PBXVariantGroup for name and path.
If a PBXVariantGroup identified by the name and path arguments is already
present as a child of this object, it is returned. Otherwise, a new
PBXVariantGroup with the correct properties is created, added as a child,
and returned.
This method will generally be called by AddOrGetFileByPath, which knows
when to create a variant group based on the structure of the pathnames
passed to it.
"""
key = (name, path)
if key in self._variant_children_by_name_and_path:
variant_group_ref = self._variant_children_by_name_and_path[key]
assert variant_group_ref.__class__ == PBXVariantGroup
return variant_group_ref
variant_group_properties = {'name': name}
if path != None:
variant_group_properties['path'] = path
variant_group_ref = PBXVariantGroup(variant_group_properties)
self.AppendChild(variant_group_ref)
return variant_group_ref
def TakeOverOnlyChild(self, recurse=False):
"""If this PBXGroup has only one child and it's also a PBXGroup, take
it over by making all of its children this object's children.
This function will continue to take over only children when those children
are groups. If there are three PBXGroups representing a, b, and c, with
c inside b and b inside a, and a and b have no other children, this will
result in a taking over both b and c, forming a PBXGroup for a/b/c.
If recurse is True, this function will recurse into children and ask them
to collapse themselves by taking over only children as well. Assuming
an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
(d1, d2, and f are files, the rest are groups), recursion will result in
a group for a/b/c containing a group for d3/e.
"""
# At this stage, check that child class types are PBXGroup exactly,
# instead of using isinstance. The only subclass of PBXGroup,
# PBXVariantGroup, should not participate in reparenting in the same way:
# reparenting by merging different object types would be wrong.
while len(self._properties['children']) == 1 and \
self._properties['children'][0].__class__ == PBXGroup:
# Loop to take over the innermost only-child group possible.
child = self._properties['children'][0]
# Assume the child's properties, including its children. Save a copy
# of this object's old properties, because they'll still be needed.
# This object retains its existing id and parent attributes.
old_properties = self._properties
self._properties = child._properties
self._children_by_path = child._children_by_path
if not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>':
# The child was relative to its parent. Fix up the path. Note that
# children with a sourceTree other than "<group>" are not relative to
# their parents, so no path fix-up is needed in that case.
if 'path' in old_properties:
if 'path' in self._properties:
# Both the original parent and child have paths set.
self._properties['path'] = posixpath.join(old_properties['path'],
self._properties['path'])
else:
# Only the original parent has a path, use it.
self._properties['path'] = old_properties['path']
if 'sourceTree' in old_properties:
# The original parent had a sourceTree set, use it.
self._properties['sourceTree'] = old_properties['sourceTree']
# If the original parent had a name set, keep using it. If the original
# parent didn't have a name but the child did, let the child's name
# live on. If the name attribute seems unnecessary now, get rid of it.
if 'name' in old_properties and old_properties['name'] != None and \
old_properties['name'] != self.Name():
self._properties['name'] = old_properties['name']
if 'name' in self._properties and 'path' in self._properties and \
self._properties['name'] == self._properties['path']:
del self._properties['name']
# Notify all children of their new parent.
for child in self._properties['children']:
child.parent = self
# If asked to recurse, recurse.
if recurse:
for child in self._properties['children']:
if child.__class__ == PBXGroup:
child.TakeOverOnlyChild(recurse)
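  # NOTE: Illustrative sketch, not part of the original module. Using the
  # hypothetical a/b/c hierarchy from the docstring above:
  #
  #   a = PBXGroup({'path': 'a'})
  #   b = PBXGroup({'path': 'b'})
  #   c = PBXGroup({'path': 'c'})
  #   a.AppendChild(b)
  #   b.AppendChild(c)
  #   a.TakeOverOnlyChild()
  #   # a._properties['path'] is now 'a/b/c', and a has taken over c's
  #   # (empty) list of children.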
def SortGroup(self):
self._properties['children'] = \
sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))
# Recurse.
for child in self._properties['children']:
if isinstance(child, PBXGroup):
child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
# Abstract base for objects that can be used as the fileRef property of
# PBXBuildFile.
def PathHashables(self):
# A PBXBuildFile that refers to this object will call this method to
# obtain additional hashables specific to this XCFileLikeElement. Don't
# just use this object's hashables, they're not specific and unique enough
# on their own (without access to the parent hashables.) Instead, provide
# hashables that identify this object by path by getting its hashables as
# well as the hashables of ancestor XCHierarchicalElement objects.
hashables = []
xche = self
while xche != None and isinstance(xche, XCHierarchicalElement):
xche_hashables = xche.Hashables()
for index in xrange(0, len(xche_hashables)):
hashables.insert(index, xche_hashables[index])
xche = xche.parent
return hashables
class XCContainerPortal(XCObject):
# Abstract base for objects that can be used as the containerPortal property
# of PBXContainerItemProxy.
pass
class XCRemoteObject(XCObject):
# Abstract base for objects that can be used as the remoteGlobalIDString
# property of PBXContainerItemProxy.
pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'explicitFileType': [0, str, 0, 0],
'lastKnownFileType': [0, str, 0, 0],
'name': [0, str, 0, 0],
'path': [0, str, 0, 1],
})
# Weird output rules for PBXFileReference.
_should_print_single_line = True
# super
_encode_transforms = XCFileLikeElement._alternate_encode_transforms
def __init__(self, properties=None, id=None, parent=None):
# super
XCFileLikeElement.__init__(self, properties, id, parent)
if 'path' in self._properties and self._properties['path'].endswith('/'):
self._properties['path'] = self._properties['path'][:-1]
is_dir = True
else:
is_dir = False
if 'path' in self._properties and \
not 'lastKnownFileType' in self._properties and \
not 'explicitFileType' in self._properties:
# TODO(mark): This is the replacement for a replacement for a quick hack.
# It is no longer incredibly sucky, but this list needs to be extended.
extension_map = {
'a': 'archive.ar',
'app': 'wrapper.application',
'bdic': 'file',
'bundle': 'wrapper.cfbundle',
'c': 'sourcecode.c.c',
'cc': 'sourcecode.cpp.cpp',
'cpp': 'sourcecode.cpp.cpp',
'css': 'text.css',
'cxx': 'sourcecode.cpp.cpp',
'dart': 'sourcecode',
'dylib': 'compiled.mach-o.dylib',
'framework': 'wrapper.framework',
'gyp': 'sourcecode',
'gypi': 'sourcecode',
'h': 'sourcecode.c.h',
'hxx': 'sourcecode.cpp.h',
'icns': 'image.icns',
'java': 'sourcecode.java',
'js': 'sourcecode.javascript',
'kext': 'wrapper.kext',
'm': 'sourcecode.c.objc',
'mm': 'sourcecode.cpp.objcpp',
'nib': 'wrapper.nib',
'o': 'compiled.mach-o.objfile',
'pdf': 'image.pdf',
'pl': 'text.script.perl',
'plist': 'text.plist.xml',
'pm': 'text.script.perl',
'png': 'image.png',
'py': 'text.script.python',
'r': 'sourcecode.rez',
'rez': 'sourcecode.rez',
's': 'sourcecode.asm',
'storyboard': 'file.storyboard',
'strings': 'text.plist.strings',
'swift': 'sourcecode.swift',
'ttf': 'file',
'xcassets': 'folder.assetcatalog',
'xcconfig': 'text.xcconfig',
'xcdatamodel': 'wrapper.xcdatamodel',
'xcdatamodeld':'wrapper.xcdatamodeld',
'xib': 'file.xib',
'y': 'sourcecode.yacc',
}
prop_map = {
'dart': 'explicitFileType',
'gyp': 'explicitFileType',
'gypi': 'explicitFileType',
}
if is_dir:
file_type = 'folder'
prop_name = 'lastKnownFileType'
else:
basename = posixpath.basename(self._properties['path'])
(root, ext) = posixpath.splitext(basename)
# Check the map using a lowercase extension.
# TODO(mark): Maybe it should try with the original case first and fall
# back to lowercase, in case there are any instances where case
# matters. There currently aren't.
if ext != '':
ext = ext[1:].lower()
# TODO(mark): "text" is the default value, but "file" is appropriate
# for unrecognized files not containing text. Xcode seems to choose
# based on content.
file_type = extension_map.get(ext, 'text')
prop_name = prop_map.get(ext, 'lastKnownFileType')
self._properties[prop_name] = file_type
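  # NOTE: Illustrative sketch of the extension-based type inference above; it
  # is not part of the original module and the paths are hypothetical.
  #
  #   ref = PBXFileReference({'path': 'src/widget.cc'})
  #   # ref._properties['lastKnownFileType'] == 'sourcecode.cpp.cpp'
  #   ref = PBXFileReference({'path': 'tools/build.gyp'})
  #   # 'gyp' is in prop_map, so explicitFileType is set instead:
  #   # ref._properties['explicitFileType'] == 'sourcecode'
  #   ref = PBXFileReference({'path': 'assets/'})
  #   # A trailing slash marks a directory: lastKnownFileType == 'folder'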
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
"""PBXVariantGroup is used by Xcode to represent localizations."""
# No additions to the schema relative to PBXGroup.
pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'baseConfigurationReference': [0, PBXFileReference, 0, 0],
'buildSettings': [0, dict, 0, 1, {}],
'name': [0, str, 0, 1],
})
def HasBuildSetting(self, key):
return key in self._properties['buildSettings']
def GetBuildSetting(self, key):
return self._properties['buildSettings'][key]
def SetBuildSetting(self, key, value):
# TODO(mark): If a list, copy?
self._properties['buildSettings'][key] = value
def AppendBuildSetting(self, key, value):
if not key in self._properties['buildSettings']:
self._properties['buildSettings'][key] = []
self._properties['buildSettings'][key].append(value)
def DelBuildSetting(self, key):
if key in self._properties['buildSettings']:
del self._properties['buildSettings'][key]
def SetBaseConfiguration(self, value):
self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
# _configs is the default list of configurations.
_configs = [ XCBuildConfiguration({'name': 'Debug'}),
XCBuildConfiguration({'name': 'Release'}) ]
_schema = XCObject._schema.copy()
_schema.update({
'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
'defaultConfigurationName': [0, str, 0, 1, 'Release'],
})
def Name(self):
return 'Build configuration list for ' + \
self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'
def ConfigurationNamed(self, name):
"""Convenience accessor to obtain an XCBuildConfiguration by name."""
for configuration in self._properties['buildConfigurations']:
if configuration._properties['name'] == name:
return configuration
raise KeyError(name)
def DefaultConfiguration(self):
"""Convenience accessor to obtain the default XCBuildConfiguration."""
return self.ConfigurationNamed(self._properties['defaultConfigurationName'])
def HasBuildSetting(self, key):
"""Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1.
"""
has = None
value = None
for configuration in self._properties['buildConfigurations']:
configuration_has = configuration.HasBuildSetting(key)
if has is None:
has = configuration_has
elif has != configuration_has:
return -1
if configuration_has:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
elif value != configuration_value:
return -1
if not has:
return 0
return 1
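  # NOTE: Illustrative sketch of the tri-state return value above; it is not
  # part of the original module and the setting name is hypothetical.
  #
  #   cl = XCConfigurationList()   # default Debug and Release configurations
  #   cl.HasBuildSetting('GCC_OPTIMIZATION_LEVEL')  # -> 0, set nowhere
  #   cl.SetBuildSetting('GCC_OPTIMIZATION_LEVEL', '0')
  #   cl.HasBuildSetting('GCC_OPTIMIZATION_LEVEL')  # -> 1, same value in all
  #   cl.ConfigurationNamed('Release').SetBuildSetting(
  #       'GCC_OPTIMIZATION_LEVEL', 's')
  #   cl.HasBuildSetting('GCC_OPTIMIZATION_LEVEL')  # -> -1, values differ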
def GetBuildSetting(self, key):
"""Gets the build setting for key.
All child XCConfiguration objects must have the same value set for the
setting, or a ValueError will be raised.
"""
# TODO(mark): This is wrong for build settings that are lists. The list
# contents should be compared (and a list copy returned?)
value = None
for configuration in self._properties['buildConfigurations']:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
else:
if value != configuration_value:
raise ValueError('Variant values for ' + key)
return value
def SetBuildSetting(self, key, value):
"""Sets the build setting for key to value in all child
XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBuildSetting(key, value)
def AppendBuildSetting(self, key, value):
"""Appends value to the build setting for key, which is treated as a list,
in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.AppendBuildSetting(key, value)
def DelBuildSetting(self, key):
"""Deletes the build setting key from all child XCBuildConfiguration
objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.DelBuildSetting(key)
def SetBaseConfiguration(self, value):
"""Sets the build configuration in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'fileRef': [0, XCFileLikeElement, 0, 1],
'settings': [0, str, 0, 0], # hack, it's a dict
})
# Weird output rules for PBXBuildFile.
_should_print_single_line = True
_encode_transforms = XCObject._alternate_encode_transforms
def Name(self):
# Example: "main.cc in Sources"
return self._properties['fileRef'].Name() + ' in ' + self.parent.Name()
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# It is not sufficient to just rely on Name() to get the
# XCFileLikeElement's name, because that is not a complete pathname.
# PathHashables returns hashables unique enough that no two
# PBXBuildFiles should wind up with the same set of hashables, unless
# someone adds the same file multiple times to the same target. That
# would be considered invalid anyway.
hashables.extend(self._properties['fileRef'].PathHashables())
return hashables
class XCBuildPhase(XCObject):
"""Abstract base for build phase classes. Not represented in a project
file.
Attributes:
_files_by_path: A dict mapping each path of a child in the files list by
path (keys) to the corresponding PBXBuildFile children (values).
_files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
to the corresponding PBXBuildFile children (values).
"""
# TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
# actually have a "files" list. XCBuildPhase should not have "files" but
# another abstract subclass of it should provide this, and concrete build
# phase types that do have "files" lists should be derived from that new
# abstract subclass. XCBuildPhase should only provide buildActionMask and
# runOnlyForDeploymentPostprocessing, and not files or the various
# file-related methods and attributes.
_schema = XCObject._schema.copy()
_schema.update({
'buildActionMask': [0, int, 0, 1, 0x7fffffff],
'files': [1, PBXBuildFile, 1, 1, []],
'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
self._files_by_path = {}
self._files_by_xcfilelikeelement = {}
for pbxbuildfile in self._properties.get('files', []):
self._AddBuildFileToDicts(pbxbuildfile)
def FileGroup(self, path):
# Subclasses must override this by returning a two-element tuple. The
# first item in the tuple should be the PBXGroup to which "path" should be
# added, either as a child or deeper descendant. The second item should
# be a boolean indicating whether files should be added into hierarchical
# groups or one single flat group.
raise NotImplementedError(
self.__class__.__name__ + ' must implement FileGroup')
def _AddPathToDict(self, pbxbuildfile, path):
"""Adds path to the dict tracking paths belonging to this build phase.
If the path is already a member of this build phase, raises an exception.
"""
if path in self._files_by_path:
raise ValueError('Found multiple build files with path ' + path)
self._files_by_path[path] = pbxbuildfile
def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
"""Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.
If path is specified, then it is the path that is being added to the
phase, and pbxbuildfile must contain either a PBXFileReference directly
referencing that path, or it must contain a PBXVariantGroup that itself
contains a PBXFileReference referencing the path.
If path is not specified, either the PBXFileReference's path or the paths
of all children of the PBXVariantGroup are taken as being added to the
phase.
If the path is already present in the phase, raises an exception.
If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
are already present in the phase, referenced by a different PBXBuildFile
object, raises an exception. This does not raise an exception when
a PBXFileReference or PBXVariantGroup reappear and are referenced by the
same PBXBuildFile that has already introduced them, because in the case
of PBXVariantGroup objects, they may correspond to multiple paths that are
not all added simultaneously. When this situation occurs, the path needs
to be added to _files_by_path, but nothing needs to change in
_files_by_xcfilelikeelement, and the caller should have avoided adding
the PBXBuildFile if it is already present in the list of children.
"""
xcfilelikeelement = pbxbuildfile._properties['fileRef']
paths = []
if path != None:
# It's best when the caller provides the path.
if isinstance(xcfilelikeelement, PBXVariantGroup):
paths.append(path)
else:
# If the caller didn't provide a path, there can be either multiple
# paths (PBXVariantGroup) or one.
if isinstance(xcfilelikeelement, PBXVariantGroup):
for variant in xcfilelikeelement._properties['children']:
paths.append(variant.FullPath())
else:
paths.append(xcfilelikeelement.FullPath())
# Add the paths first, because if something's going to raise, the
# messages provided by _AddPathToDict are more useful owing to its
# having access to a real pathname and not just an object's Name().
for a_path in paths:
self._AddPathToDict(pbxbuildfile, a_path)
# If another PBXBuildFile references this XCFileLikeElement, there's a
# problem.
if xcfilelikeelement in self._files_by_xcfilelikeelement and \
self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
raise ValueError('Found multiple build files for ' + \
xcfilelikeelement.Name())
self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile
def AppendBuildFile(self, pbxbuildfile, path=None):
# Callers should use this instead of calling
# AppendProperty('files', pbxbuildfile) directly because this function
# maintains the object's dicts. Better yet, callers can just call AddFile
# with a pathname and not worry about building their own PBXBuildFile
# objects.
self.AppendProperty('files', pbxbuildfile)
self._AddBuildFileToDicts(pbxbuildfile, path)
def AddFile(self, path, settings=None):
(file_group, hierarchical) = self.FileGroup(path)
file_ref = file_group.AddOrGetFileByPath(path, hierarchical)
if file_ref in self._files_by_xcfilelikeelement and \
isinstance(file_ref, PBXVariantGroup):
# There's already a PBXBuildFile in this phase corresponding to the
# PBXVariantGroup. path just provides a new variant that belongs to
# the group. Add the path to the dict.
pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
self._AddBuildFileToDicts(pbxbuildfile, path)
else:
# Add a new PBXBuildFile to get file_ref into the phase.
if settings is None:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
else:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
self.AppendBuildFile(pbxbuildfile, path)
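  # NOTE: Illustrative sketch, not part of the original module. In practice
  # the phase is owned by a target inside a PBXProject so that FileGroup()
  # can resolve a root group; "some_native_target" and the paths below are
  # hypothetical.
  #
  #   phase = some_native_target.SourcesPhase()
  #   phase.AddFile('a/b/main.cc')    # creates one PBXBuildFile for main.cc
  #   phase.AddFile('res/en.lproj/MainMenu.nib')
  #   phase.AddFile('res/fr.lproj/MainMenu.nib')
  #   # Both .nib paths resolve to one PBXVariantGroup, so the second call
  #   # only records the extra path against the existing PBXBuildFile.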
class PBXHeadersBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Headers'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Resources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Sources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Frameworks'
def FileGroup(self, path):
(root, ext) = posixpath.splitext(path)
if ext != '':
ext = ext[1:].lower()
if ext == 'o':
# .o files are added to Xcode Frameworks phases, but conceptually aren't
# frameworks, they're more like sources or intermediates. Redirect them
# to show up in one of those other groups.
return self.PBXProjectAncestor().RootGroupForPath(path)
else:
return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'inputPaths': [1, str, 0, 1, []],
'name': [0, str, 0, 0],
'outputPaths': [1, str, 0, 1, []],
'shellPath': [0, str, 0, 1, '/bin/sh'],
'shellScript': [0, str, 0, 1],
'showEnvVarsInLog': [0, int, 0, 0],
})
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'ShellScript'
class PBXCopyFilesBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'dstPath': [0, str, 0, 1],
'dstSubfolderSpec': [0, int, 0, 1],
'name': [0, str, 0, 0],
})
# path_tree_re matches "$(DIR)/path" or just "$(DIR)". Match group 1 is
# "DIR", match group 3 is "path" or None.
path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')
# path_tree_to_subfolder maps names of Xcode variables to the associated
# dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
path_tree_to_subfolder = {
'BUILT_FRAMEWORKS_DIR': 10, # Frameworks Directory
'BUILT_PRODUCTS_DIR': 16, # Products Directory
# Other types that can be chosen via the Xcode UI.
# TODO(mark): Map Xcode variable names to these.
# : 1, # Wrapper
# : 6, # Executables: 6
# : 7, # Resources
# : 15, # Java Resources
# : 11, # Shared Frameworks
# : 12, # Shared Support
# : 13, # PlugIns
}
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'CopyFiles'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
def SetDestination(self, path):
"""Set the dstSubfolderSpec and dstPath properties from path.
path may be specified in the same notation used for XCHierarchicalElements,
specifically, "$(DIR)/path".
"""
path_tree_match = self.path_tree_re.search(path)
if path_tree_match:
# Everything else needs to be relative to an Xcode variable.
path_tree = path_tree_match.group(1)
relative_path = path_tree_match.group(3)
if path_tree in self.path_tree_to_subfolder:
subfolder = self.path_tree_to_subfolder[path_tree]
if relative_path is None:
relative_path = ''
else:
# The path starts with an unrecognized Xcode variable
# name like $(SRCROOT). Xcode will still handle this
# as an "absolute path" that starts with the variable.
subfolder = 0
relative_path = path
elif path.startswith('/'):
# Special case. Absolute paths are in dstSubfolderSpec 0.
subfolder = 0
relative_path = path[1:]
else:
raise ValueError('Can\'t use path %s in a %s' % \
(path, self.__class__.__name__))
self._properties['dstPath'] = relative_path
self._properties['dstSubfolderSpec'] = subfolder
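  # NOTE: Illustrative sketch of SetDestination, not part of the original
  # module; the destination paths are hypothetical.
  #
  #   phase = PBXCopyFilesBuildPhase()
  #   phase.SetDestination('$(BUILT_PRODUCTS_DIR)/Frameworks')
  #   # dstSubfolderSpec == 16 (Products Directory), dstPath == 'Frameworks'
  #   phase.SetDestination('$(SRCROOT)/extras')
  #   # Unrecognized variable: dstSubfolderSpec == 0 and the whole string,
  #   # '$(SRCROOT)/extras', is kept as dstPath.
  #   phase.SetDestination('/usr/local/lib')
  #   # Absolute path: dstSubfolderSpec == 0, dstPath == 'usr/local/lib'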
class PBXBuildRule(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'compilerSpec': [0, str, 0, 1],
'filePatterns': [0, str, 0, 0],
'fileType': [0, str, 0, 1],
'isEditable': [0, int, 0, 1, 1],
'outputFiles': [1, str, 0, 1, []],
'script': [0, str, 0, 0],
})
def Name(self):
# Not very inspired, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.append(self._properties['fileType'])
if 'filePatterns' in self._properties:
hashables.append(self._properties['filePatterns'])
return hashables
class PBXContainerItemProxy(XCObject):
# When referencing an item in this project file, containerPortal is the
# PBXProject root object of this project file. When referencing an item in
# another project file, containerPortal is a PBXFileReference identifying
# the other project file.
#
# When serving as a proxy to an XCTarget (in this project file or another),
# proxyType is 1. When serving as a proxy to a PBXFileReference (in another
# project file), proxyType is 2. Type 2 is used for references to the
  # products of the other project file's targets.
#
# Xcode is weird about remoteGlobalIDString. Usually, it's printed without
# a comment, indicating that it's tracked internally simply as a string, but
# sometimes it's printed with a comment (usually when the object is initially
# created), indicating that it's tracked as a project file object at least
# sometimes. This module always tracks it as an object, but contains a hack
# to prevent it from printing the comment in the project file output. See
# _XCKVPrint.
_schema = XCObject._schema.copy()
_schema.update({
'containerPortal': [0, XCContainerPortal, 0, 1],
'proxyType': [0, int, 0, 1],
'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
'remoteInfo': [0, str, 0, 1],
})
def __repr__(self):
props = self._properties
name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['containerPortal'].Hashables())
hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
return hashables
class PBXTargetDependency(XCObject):
# The "target" property accepts an XCTarget object, and obviously not
# NoneType. But XCTarget is defined below, so it can't be put into the
# schema yet. The definition of PBXTargetDependency can't be moved below
# XCTarget because XCTarget's own schema references PBXTargetDependency.
# Python doesn't deal well with this circular relationship, and doesn't have
# a real way to do forward declarations. To work around, the type of
# the "target" property is reset below, after XCTarget is defined.
#
# At least one of "name" and "target" is required.
_schema = XCObject._schema.copy()
_schema.update({
'name': [0, str, 0, 0],
'target': [0, None.__class__, 0, 0],
'targetProxy': [0, PBXContainerItemProxy, 1, 1],
})
def __repr__(self):
name = self._properties.get('name') or self._properties['target'].Name()
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['targetProxy'].Hashables())
return hashables
class PBXReferenceProxy(XCFileLikeElement):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'fileType': [0, str, 0, 1],
'path': [0, str, 0, 1],
'remoteRef': [0, PBXContainerItemProxy, 1, 1],
})
class XCTarget(XCRemoteObject):
# An XCTarget is really just an XCObject, the XCRemoteObject thing is just
# to allow PBXProject to be used in the remoteGlobalIDString property of
# PBXContainerItemProxy.
#
# Setting a "name" property at instantiation may also affect "productName",
# which may in turn affect the "PRODUCT_NAME" build setting in children of
# "buildConfigurationList". See __init__ below.
_schema = XCRemoteObject._schema.copy()
_schema.update({
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'buildPhases': [1, XCBuildPhase, 1, 1, []],
'dependencies': [1, PBXTargetDependency, 1, 1, []],
'name': [0, str, 0, 1],
'productName': [0, str, 0, 1],
})
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCRemoteObject.__init__(self, properties, id, parent)
# Set up additional defaults not expressed in the schema. If a "name"
# property was supplied, set "productName" if it is not present. Also set
# the "PRODUCT_NAME" build setting in each configuration, but only if
# the setting is not present in any build configuration.
if 'name' in self._properties:
if not 'productName' in self._properties:
self.SetProperty('productName', self._properties['name'])
if 'productName' in self._properties:
if 'buildConfigurationList' in self._properties:
configs = self._properties['buildConfigurationList']
if configs.HasBuildSetting('PRODUCT_NAME') == 0:
configs.SetBuildSetting('PRODUCT_NAME',
self._properties['productName'])
def AddDependency(self, other):
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject == other_pbxproject:
# Add a dependency to another target in the same project file.
container = PBXContainerItemProxy({'containerPortal': pbxproject,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name()})
dependency = PBXTargetDependency({'target': other,
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
else:
# Add a dependency to a target in a different project file.
other_project_ref = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
container = PBXContainerItemProxy({
'containerPortal': other_project_ref,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name(),
})
dependency = PBXTargetDependency({'name': other.Name(),
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
# Proxy all of these through to the build configuration list.
def ConfigurationNamed(self, name):
return self._properties['buildConfigurationList'].ConfigurationNamed(name)
def DefaultConfiguration(self):
return self._properties['buildConfigurationList'].DefaultConfiguration()
def HasBuildSetting(self, key):
return self._properties['buildConfigurationList'].HasBuildSetting(key)
def GetBuildSetting(self, key):
return self._properties['buildConfigurationList'].GetBuildSetting(key)
def SetBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].SetBuildSetting(key, \
value)
def AppendBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
value)
def DelBuildSetting(self, key):
return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property. See PBXTargetDependency._schema
# above.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
# buildPhases is overridden in the schema to be able to set defaults.
#
# NOTE: Contrary to most objects, it is advisable to set parent when
# constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject
# object. A parent reference is required for a PBXNativeTarget during
# construction to be able to set up the target defaults for productReference,
# because a PBXBuildFile object must be created for the target and it must
# be added to the PBXProject's mainGroup hierarchy.
_schema = XCTarget._schema.copy()
_schema.update({
'buildPhases': [1, XCBuildPhase, 1, 1,
[PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
'buildRules': [1, PBXBuildRule, 1, 1, []],
'productReference': [0, PBXFileReference, 0, 1],
'productType': [0, str, 0, 1],
})
# Mapping from Xcode product-types to settings. The settings are:
# filetype : used for explicitFileType in the project file
# prefix : the prefix for the file name
# suffix : the suffix for the file name
_product_filetypes = {
'com.apple.product-type.application': ['wrapper.application',
'', '.app'],
'com.apple.product-type.application.watchapp': ['wrapper.application',
'', '.app'],
'com.apple.product-type.watchkit-extension': ['wrapper.app-extension',
'', '.appex'],
'com.apple.product-type.app-extension': ['wrapper.app-extension',
'', '.appex'],
'com.apple.product-type.bundle': ['wrapper.cfbundle',
'', '.bundle'],
'com.apple.product-type.framework': ['wrapper.framework',
'', '.framework'],
'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
'lib', '.dylib'],
'com.apple.product-type.library.static': ['archive.ar',
'lib', '.a'],
'com.apple.product-type.tool': ['compiled.mach-o.executable',
'', ''],
'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle',
'', '.xctest'],
'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib',
'', '.so'],
'com.apple.product-type.kernel-extension': ['wrapper.kext',
'', '.kext'],
}
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCTarget.__init__(self, properties, id, parent)
if 'productName' in self._properties and \
'productType' in self._properties and \
not 'productReference' in self._properties and \
self._properties['productType'] in self._product_filetypes:
products_group = None
pbxproject = self.PBXProjectAncestor()
if pbxproject != None:
products_group = pbxproject.ProductsGroup()
if products_group != None:
(filetype, prefix, suffix) = \
self._product_filetypes[self._properties['productType']]
# Xcode does not have a distinct type for loadable modules that are
# pure BSD targets (not in a bundle wrapper). GYP allows such modules
# to be specified by setting a target type to loadable_module without
# having mac_bundle set. These are mapped to the pseudo-product type
# com.googlecode.gyp.xcode.bundle.
#
# By picking up this special type and converting it to a dynamic
# library (com.apple.product-type.library.dynamic) with fix-ups,
# single-file loadable modules can be produced.
#
# MACH_O_TYPE is changed to mh_bundle to produce the proper file type
# (as opposed to mh_dylib). In order for linking to succeed,
# DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
# cleared. They are meaningless for type mh_bundle.
#
# Finally, the .so extension is forcibly applied over the default
# (.dylib), unless another forced extension is already selected.
# .dylib is plainly wrong, and .bundle is used by loadable_modules in
# bundle wrappers (com.apple.product-type.bundle). .so seems an odd
# choice because it's used as the extension on many other systems that
# don't distinguish between linkable shared libraries and non-linkable
# loadable modules, but there's precedent: Python loadable modules on
# Mac OS X use an .so extension.
if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
self._properties['productType'] = \
'com.apple.product-type.library.dynamic'
self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
if force_extension is None:
force_extension = suffix[1:]
if self._properties['productType'] == \
           'com.apple.product-type.bundle.unit-test':
if force_extension is None:
force_extension = suffix[1:]
if force_extension is not None:
# If it's a wrapper (bundle), set WRAPPER_EXTENSION.
# Extension override.
suffix = '.' + force_extension
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
else:
self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)
if filetype.startswith('compiled.mach-o.executable'):
product_name = self._properties['productName']
product_name += suffix
suffix = ''
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
# Xcode handles most prefixes based on the target type, however there
# are exceptions. If a "BSD Dynamic Library" target is added in the
# Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that
# behavior.
if force_prefix is not None:
prefix = force_prefix
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_PREFIX', prefix)
else:
self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)
if force_outdir is not None:
self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)
# TODO(tvl): Remove the below hack.
# http://code.google.com/p/gyp/issues/detail?id=122
# Some targets include the prefix in the target_name. These targets
# really should just add a product_name setting that doesn't include
# the prefix. For example:
# target_name = 'libevent', product_name = 'event'
# This check cleans up for them.
product_name = self._properties['productName']
prefix_len = len(prefix)
if prefix_len and (product_name[:prefix_len] == prefix):
product_name = product_name[prefix_len:]
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
ref_props = {
'explicitFileType': filetype,
'includeInIndex': 0,
'path': prefix + product_name + suffix,
'sourceTree': 'BUILT_PRODUCTS_DIR',
}
file_ref = PBXFileReference(ref_props)
products_group.AppendChild(file_ref)
self.SetProperty('productReference', file_ref)
def GetBuildPhaseByType(self, type):
if not 'buildPhases' in self._properties:
return None
the_phase = None
for phase in self._properties['buildPhases']:
if isinstance(phase, type):
        # Some phases may be present multiple times in a well-formed project
        # file, but phases like PBXSourcesBuildPhase may only be present
        # singly, and this function is intended as an aid to callers that
        # expect a single phase of the desired type.  Loop over the entire
        # list of phases and assert if more than one of the desired type is
        # found.
assert the_phase is None
the_phase = phase
return the_phase
def HeadersPhase(self):
headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
if headers_phase is None:
headers_phase = PBXHeadersBuildPhase()
# The headers phase should come before the resources, sources, and
# frameworks phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXResourcesBuildPhase) or \
isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, headers_phase)
headers_phase.parent = self
return headers_phase
def ResourcesPhase(self):
resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
if resources_phase is None:
resources_phase = PBXResourcesBuildPhase()
# The resources phase should come before the sources and frameworks
# phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, resources_phase)
resources_phase.parent = self
return resources_phase
def SourcesPhase(self):
sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
if sources_phase is None:
sources_phase = PBXSourcesBuildPhase()
self.AppendProperty('buildPhases', sources_phase)
return sources_phase
def FrameworksPhase(self):
frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
if frameworks_phase is None:
frameworks_phase = PBXFrameworksBuildPhase()
self.AppendProperty('buildPhases', frameworks_phase)
return frameworks_phase
def AddDependency(self, other):
# super
XCTarget.AddDependency(self, other)
static_library_type = 'com.apple.product-type.library.static'
shared_library_type = 'com.apple.product-type.library.dynamic'
framework_type = 'com.apple.product-type.framework'
if isinstance(other, PBXNativeTarget) and \
'productType' in self._properties and \
self._properties['productType'] != static_library_type and \
'productType' in other._properties and \
(other._properties['productType'] == static_library_type or \
((other._properties['productType'] == shared_library_type or \
other._properties['productType'] == framework_type) and \
((not other.HasBuildSetting('MACH_O_TYPE')) or
other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):
file_ref = other.GetProperty('productReference')
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject != other_pbxproject:
other_project_product_group = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)
self.FrameworksPhase().AppendProperty('files',
PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
pass
class PBXProject(XCContainerPortal):
# A PBXProject is really just an XCObject, the XCContainerPortal thing is
# just to allow PBXProject to be used in the containerPortal property of
# PBXContainerItemProxy.
"""
Attributes:
path: "sample.xcodeproj". TODO(mark) Document me!
_other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each
value is a reference to the dict in the
projectReferences list associated with the keyed
PBXProject.
"""
_schema = XCContainerPortal._schema.copy()
_schema.update({
'attributes': [0, dict, 0, 0],
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'],
'hasScannedForEncodings': [0, int, 0, 1, 1],
'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()],
'projectDirPath': [0, str, 0, 1, ''],
'projectReferences': [1, dict, 0, 0],
'projectRoot': [0, str, 0, 1, ''],
'targets': [1, XCTarget, 1, 1, []],
})
def __init__(self, properties=None, id=None, parent=None, path=None):
self.path = path
self._other_pbxprojects = {}
# super
return XCContainerPortal.__init__(self, properties, id, parent)
def Name(self):
name = self.path
if name[-10:] == '.xcodeproj':
name = name[:-10]
return posixpath.basename(name)
def Path(self):
return self.path
def Comment(self):
return 'Project object'
def Children(self):
# super
children = XCContainerPortal.Children(self)
# Add children that the schema doesn't know about. Maybe there's a more
# elegant way around this, but this is the only case where we need to own
# objects in a dictionary (that is itself in a list), and three lines for
# a one-off isn't that big a deal.
if 'projectReferences' in self._properties:
for reference in self._properties['projectReferences']:
children.append(reference['ProductGroup'])
return children
def PBXProjectAncestor(self):
return self
def _GroupByName(self, name):
if not 'mainGroup' in self._properties:
self.SetProperty('mainGroup', PBXGroup())
main_group = self._properties['mainGroup']
group = main_group.GetChildByName(name)
if group is None:
group = PBXGroup({'name': name})
main_group.AppendChild(group)
return group
# SourceGroup and ProductsGroup are created by default in Xcode's own
# templates.
def SourceGroup(self):
return self._GroupByName('Source')
def ProductsGroup(self):
return self._GroupByName('Products')
# IntermediatesGroup is used to collect source-like files that are generated
# by rules or script phases and are placed in intermediate directories such
# as DerivedSources.
def IntermediatesGroup(self):
return self._GroupByName('Intermediates')
# FrameworksGroup and ProjectsGroup are top-level groups used to collect
# frameworks and projects.
def FrameworksGroup(self):
return self._GroupByName('Frameworks')
def ProjectsGroup(self):
return self._GroupByName('Projects')
def RootGroupForPath(self, path):
"""Returns a PBXGroup child of this object to which path should be added.
This method is intended to choose between SourceGroup and
IntermediatesGroup on the basis of whether path is present in a source
directory or an intermediates directory. For the purposes of this
determination, any path located within a derived file directory such as
PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
directory.
The returned value is a two-element tuple. The first element is the
PBXGroup, and the second element specifies whether that group should be
organized hierarchically (True) or as a single flat list (False).
"""
# TODO(mark): make this a class variable and bind to self on call?
# Also, this list is nowhere near exhaustive.
# INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
# gyp.generator.xcode. There should probably be some way for that module
# to push the names in, rather than having to hard-code them here.
source_tree_groups = {
'DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
}
(source_tree, path) = SourceTreeAndPathFromPath(path)
if source_tree != None and source_tree in source_tree_groups:
(group_func, hierarchical) = source_tree_groups[source_tree]
group = group_func()
return (group, hierarchical)
# TODO(mark): make additional choices based on file extension.
return (self.SourceGroup(), True)
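  # NOTE: Illustrative sketch, not part of the original module. Paths rooted
  # in one of the intermediates variables land in the Intermediates group;
  # everything else currently falls through to the hierarchical Source group.
  #
  #   project = PBXProject(path='sample.xcodeproj')
  #   project.RootGroupForPath('$(INTERMEDIATE_DIR)/gen.cc')
  #   # -> (project.IntermediatesGroup(), True)
  #   project.RootGroupForPath('src/main.cc')
  #   # -> (project.SourceGroup(), True)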
def AddOrGetFileInRootGroup(self, path):
"""Returns a PBXFileReference corresponding to path in the correct group
according to RootGroupForPath's heuristics.
If an existing PBXFileReference for path exists, it will be returned.
Otherwise, one will be created and returned.
"""
(group, hierarchical) = self.RootGroupForPath(path)
return group.AddOrGetFileByPath(path, hierarchical)
def RootGroupsTakeOverOnlyChildren(self, recurse=False):
"""Calls TakeOverOnlyChild for all groups in the main group."""
for group in self._properties['mainGroup']._properties['children']:
if isinstance(group, PBXGroup):
group.TakeOverOnlyChild(recurse)
def SortGroups(self):
# Sort the children of the mainGroup (like "Source" and "Products")
# according to their defined order.
self._properties['mainGroup']._properties['children'] = \
sorted(self._properties['mainGroup']._properties['children'],
cmp=lambda x,y: x.CompareRootGroup(y))
# Sort everything else by putting group before files, and going
# alphabetically by name within sections of groups and files. SortGroup
# is recursive.
for group in self._properties['mainGroup']._properties['children']:
if not isinstance(group, PBXGroup):
continue
if group.Name() == 'Products':
# The Products group is a special case. Instead of sorting
# alphabetically, sort things in the order of the targets that
# produce the products. To do this, just build up a new list of
# products based on the targets.
products = []
for target in self._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
product = target._properties['productReference']
# Make sure that the product is already in the products group.
assert product in group._properties['children']
products.append(product)
# Make sure that this process doesn't miss anything that was already
# in the products group.
assert len(products) == len(group._properties['children'])
group._properties['children'] = products
else:
group.SortGroup()
def AddOrGetProjectReference(self, other_pbxproject):
"""Add a reference to another project file (via PBXProject object) to this
one.
Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in
this project file that contains a PBXReferenceProxy object for each
product of each PBXNativeTarget in the other project file. ProjectRef is
a PBXFileReference to the other project file.
If this project file already references the other project file, the
existing ProductGroup and ProjectRef are returned. The ProductGroup will
still be updated if necessary.
"""
if not 'projectReferences' in self._properties:
self._properties['projectReferences'] = []
product_group = None
project_ref = None
if not other_pbxproject in self._other_pbxprojects:
# This project file isn't yet linked to the other one. Establish the
# link.
product_group = PBXGroup({'name': 'Products'})
# ProductGroup is strong.
product_group.parent = self
# There's nothing unique about this PBXGroup, and if left alone, it will
# wind up with the same set of hashables as all other PBXGroup objects
# owned by the projectReferences list. Add the hashables of the
# remote PBXProject that it's related to.
product_group._hashables.extend(other_pbxproject.Hashables())
# The other project reports its path as relative to the same directory
# that this project's path is relative to. The other project's path
# is not necessarily already relative to this project. Figure out the
# pathname that this project needs to use to refer to the other one.
this_path = posixpath.dirname(self.Path())
projectDirPath = self.GetProperty('projectDirPath')
if projectDirPath:
if posixpath.isabs(projectDirPath[0]):
this_path = projectDirPath
else:
this_path = posixpath.join(this_path, projectDirPath)
other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)
# ProjectRef is weak (it's owned by the mainGroup hierarchy).
project_ref = PBXFileReference({
'lastKnownFileType': 'wrapper.pb-project',
'path': other_path,
'sourceTree': 'SOURCE_ROOT',
})
self.ProjectsGroup().AppendChild(project_ref)
ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
self._other_pbxprojects[other_pbxproject] = ref_dict
self.AppendProperty('projectReferences', ref_dict)
# Xcode seems to sort this list case-insensitively
self._properties['projectReferences'] = \
sorted(self._properties['projectReferences'], cmp=lambda x,y:
cmp(x['ProjectRef'].Name().lower(),
y['ProjectRef'].Name().lower()))
else:
      # The link already exists. Pull out the relevant data.
project_ref_dict = self._other_pbxprojects[other_pbxproject]
product_group = project_ref_dict['ProductGroup']
project_ref = project_ref_dict['ProjectRef']
self._SetUpProductReferences(other_pbxproject, product_group, project_ref)
inherit_unique_symroot = self._AllSymrootsUnique(other_pbxproject, False)
targets = other_pbxproject.GetProperty('targets')
if all(self._AllSymrootsUnique(t, inherit_unique_symroot) for t in targets):
dir_path = project_ref._properties['path']
product_group._hashables.extend(dir_path)
return [product_group, project_ref]
def _AllSymrootsUnique(self, target, inherit_unique_symroot):
# Returns True if all configurations have a unique 'SYMROOT' attribute.
    # The value of inherit_unique_symroot decides whether a configuration is
    # assumed to inherit a unique 'SYMROOT' attribute from its parent when it
    # doesn't define an explicit value for 'SYMROOT'.
symroots = self._DefinedSymroots(target)
for s in self._DefinedSymroots(target):
if (s is not None and not self._IsUniqueSymrootForTarget(s) or
s is None and not inherit_unique_symroot):
return False
return True if symroots else inherit_unique_symroot
def _DefinedSymroots(self, target):
# Returns all values for the 'SYMROOT' attribute defined in all
# configurations for this target. If any configuration doesn't define the
# 'SYMROOT' attribute, None is added to the returned set. If all
# configurations don't define the 'SYMROOT' attribute, an empty set is
# returned.
config_list = target.GetProperty('buildConfigurationList')
symroots = set()
for config in config_list.GetProperty('buildConfigurations'):
setting = config.GetProperty('buildSettings')
if 'SYMROOT' in setting:
symroots.add(setting['SYMROOT'])
else:
symroots.add(None)
if len(symroots) == 1 and None in symroots:
return set()
return symroots
def _IsUniqueSymrootForTarget(self, symroot):
    # Returns True if the given 'SYMROOT' value is considered unique for a
    # target. A value is unique if the Xcode macro '$SRCROOT' appears in it
    # in any form.
uniquifier = ['$SRCROOT', '$(SRCROOT)']
if any(x in symroot for x in uniquifier):
return True
return False
def _SetUpProductReferences(self, other_pbxproject, product_group,
project_ref):
# TODO(mark): This only adds references to products in other_pbxproject
# when they don't exist in this pbxproject. Perhaps it should also
# remove references from this pbxproject that are no longer present in
# other_pbxproject. Perhaps it should update various properties if they
# change.
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
other_fileref = target._properties['productReference']
if product_group.GetChildByRemoteObject(other_fileref) is None:
# Xcode sets remoteInfo to the name of the target and not the name
# of its product, despite this proxy being a reference to the product.
container_item = PBXContainerItemProxy({
'containerPortal': project_ref,
'proxyType': 2,
'remoteGlobalIDString': other_fileref,
'remoteInfo': target.Name()
})
# TODO(mark): Does sourceTree get copied straight over from the other
# project? Can the other project ever have lastKnownFileType here
# instead of explicitFileType? (Use it if so?) Can path ever be
# unset? (I don't think so.) Can other_fileref have name set, and
# does it impact the PBXReferenceProxy if so? These are the questions
# that perhaps will be answered one day.
reference_proxy = PBXReferenceProxy({
'fileType': other_fileref._properties['explicitFileType'],
'path': other_fileref._properties['path'],
'sourceTree': other_fileref._properties['sourceTree'],
'remoteRef': container_item,
})
product_group.AppendChild(reference_proxy)
def SortRemoteProductReferences(self):
# For each remote project file, sort the associated ProductGroup in the
# same order that the targets are sorted in the remote project file. This
# is the sort order used by Xcode.
def CompareProducts(x, y, remote_products):
# x and y are PBXReferenceProxy objects. Go through their associated
# PBXContainerItem to get the remote PBXFileReference, which will be
# present in the remote_products list.
x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
x_index = remote_products.index(x_remote)
y_index = remote_products.index(y_remote)
# Use the order of each remote PBXFileReference in remote_products to
# determine the sort order.
return cmp(x_index, y_index)
for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
# Build up a list of products in the remote project file, ordered the
# same as the targets that produce them.
remote_products = []
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
remote_products.append(target._properties['productReference'])
# Sort the PBXReferenceProxy children according to the list of remote
# products.
product_group = ref_dict['ProductGroup']
product_group._properties['children'] = sorted(
product_group._properties['children'],
cmp=lambda x, y, rp=remote_products: CompareProducts(x, y, rp))
class XCProjectFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'archiveVersion': [0, int, 0, 1, 1],
'classes': [0, dict, 0, 1, {}],
'objectVersion': [0, int, 0, 1, 46],
'rootObject': [0, PBXProject, 1, 1],
})
def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
# Although XCProjectFile is implemented here as an XCObject, it's not a
# proper object in the Xcode sense, and it certainly doesn't have its own
# ID. Pass through an attempt to update IDs to the real root object.
if recursive:
self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)
def Print(self, file=sys.stdout):
self.VerifyHasRequiredProperties()
# Add the special "objects" property, which will be caught and handled
# separately during printing. This structure allows a fairly standard
# loop do the normal printing.
self._properties['objects'] = {}
self._XCPrint(file, 0, '// !$*UTF8*$!\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '{ ')
else:
self._XCPrint(file, 0, '{\n')
for property, value in sorted(self._properties.iteritems(),
cmp=lambda x, y: cmp(x, y)):
if property == 'objects':
self._PrintObjects(file)
else:
self._XCKVPrint(file, 1, property, value)
self._XCPrint(file, 0, '}\n')
del self._properties['objects']
def _PrintObjects(self, file):
if self._should_print_single_line:
self._XCPrint(file, 0, 'objects = {')
else:
self._XCPrint(file, 1, 'objects = {\n')
objects_by_class = {}
for object in self.Descendants():
if object == self:
continue
class_name = object.__class__.__name__
if not class_name in objects_by_class:
objects_by_class[class_name] = []
objects_by_class[class_name].append(object)
for class_name in sorted(objects_by_class):
self._XCPrint(file, 0, '\n')
self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
for object in sorted(objects_by_class[class_name],
cmp=lambda x, y: cmp(x.id, y.id)):
object.Print(file)
self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '}; ')
else:
self._XCPrint(file, 1, '};\n')
|
apache-2.0
|
KevinOConnor/klipper
|
klippy/util.py
|
1
|
4791
|
# Low level unix utility functions
#
# Copyright (C) 2016-2020 Kevin O'Connor <[email protected]>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, os, pty, fcntl, termios, signal, logging, json, time
import subprocess, traceback, shlex
######################################################################
# Low-level Unix commands
######################################################################
# Restore the SIGINT interrupt handler to the OS default
def fix_sigint():
signal.signal(signal.SIGINT, signal.SIG_DFL)
fix_sigint()
# Set a file-descriptor as non-blocking
def set_nonblock(fd):
fcntl.fcntl(fd, fcntl.F_SETFL
, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
# Clear HUPCL flag
def clear_hupcl(fd):
attrs = termios.tcgetattr(fd)
attrs[2] = attrs[2] & ~termios.HUPCL
try:
termios.tcsetattr(fd, termios.TCSADRAIN, attrs)
except termios.error:
pass
# Support for creating a pseudo-tty for emulating a serial port
def create_pty(ptyname):
mfd, sfd = pty.openpty()
try:
os.unlink(ptyname)
except os.error:
pass
filename = os.ttyname(sfd)
os.chmod(filename, 0o660)
os.symlink(filename, ptyname)
set_nonblock(mfd)
old = termios.tcgetattr(mfd)
old[3] = old[3] & ~termios.ECHO
termios.tcsetattr(mfd, termios.TCSADRAIN, old)
return mfd
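# Usage sketch (the path below is hypothetical, not part of this module):
#   mfd = create_pty("/tmp/printer")   # expose an emulated serial port
#   os.write(mfd, b"ok\n")             # bytes show up on the client side
#   data = os.read(mfd, 4096)          # bytes the client wrote, if any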
######################################################################
# Helper code for extracting mcu build info
######################################################################
def dump_file_stats(build_dir, filename):
fname = os.path.join(build_dir, filename)
try:
mtime = os.path.getmtime(fname)
fsize = os.path.getsize(fname)
timestr = time.asctime(time.localtime(mtime))
logging.info("Build file %s(%d): %s", fname, fsize, timestr)
except:
logging.info("No build file %s", fname)
# Try to log information on the last mcu build
def dump_mcu_build():
build_dir = os.path.join(os.path.dirname(__file__), '..')
# Try to log last mcu config
dump_file_stats(build_dir, '.config')
try:
f = open(os.path.join(build_dir, '.config'), 'rb')
data = f.read(32*1024)
f.close()
logging.info("========= Last MCU build config =========\n%s"
"=======================", data)
except:
pass
# Try to log last mcu build version
dump_file_stats(build_dir, 'out/klipper.dict')
try:
f = open(os.path.join(build_dir, 'out/klipper.dict'), 'rb')
data = f.read(32*1024)
f.close()
data = json.loads(data)
logging.info("Last MCU build version: %s", data.get('version', ''))
logging.info("Last MCU build tools: %s", data.get('build_versions', ''))
cparts = ["%s=%s" % (k, v) for k, v in data.get('config', {}).items()]
logging.info("Last MCU build config: %s", " ".join(cparts))
except:
pass
dump_file_stats(build_dir, 'out/klipper.elf')
######################################################################
# General system and software information
######################################################################
def get_cpu_info():
try:
f = open('/proc/cpuinfo', 'rb')
data = f.read()
f.close()
except (IOError, OSError) as e:
logging.debug("Exception on read /proc/cpuinfo: %s",
traceback.format_exc())
return "?"
lines = [l.split(':', 1) for l in data.split('\n')]
lines = [(l[0].strip(), l[1].strip()) for l in lines if len(l) == 2]
core_count = [k for k, v in lines].count("processor")
model_name = dict(lines).get("model name", "?")
return "%d core %s" % (core_count, model_name)
def get_version_from_file(klippy_src):
try:
with open(os.path.join(klippy_src, '.version')) as h:
return h.read().rstrip()
except IOError:
pass
return "?"
def get_git_version(from_file=True):
klippy_src = os.path.dirname(__file__)
# Obtain version info from "git" program
gitdir = os.path.join(klippy_src, '..')
prog = ('git', '-C', gitdir, 'describe', '--always',
'--tags', '--long', '--dirty')
try:
process = subprocess.Popen(prog, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
ver, err = process.communicate()
retcode = process.wait()
if retcode == 0:
return ver.strip()
else:
logging.debug("Error getting git version: %s", err)
except OSError:
logging.debug("Exception on run: %s", traceback.format_exc())
if from_file:
return get_version_from_file(klippy_src)
return "?"
|
gpl-3.0
|
CongSmile/tp-qemu
|
qemu/tests/timedrift_with_stop.py
|
9
|
5827
|
import logging
import time
import os
import signal
from autotest.client.shared import error
from virttest import utils_test
def run(test, params, env):
"""
Time drift test with stop/continue the guest:
1) Log into a guest.
2) Take a time reading from the guest and host.
    3) Stop (pause) the guest
    4) Sleep for a while
    5) Resume the guest
6) Take a second time reading.
7) If the drift (in seconds) is higher than a user specified value, fail.
:param test: QEMU test object.
:param params: Dictionary with test parameters.
:param env: Dictionary with the test environment.
"""
login_timeout = int(params.get("login_timeout", 360))
sleep_time = int(params.get("sleep_time", 30))
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
boot_option_added = params.get("boot_option_added")
boot_option_removed = params.get("boot_option_removed")
if boot_option_added or boot_option_removed:
utils_test.update_boot_option(vm,
args_removed=boot_option_removed,
args_added=boot_option_added)
session = vm.wait_for_login(timeout=login_timeout)
# Collect test parameters:
# Command to run to get the current time
time_command = params["time_command"]
# Filter which should match a string to be passed to time.strptime()
time_filter_re = params["time_filter_re"]
# Time format for time.strptime()
time_format = params["time_format"]
rtc_clock = params.get("rtc_clock", "host")
drift_threshold = float(params.get("drift_threshold", "10"))
drift_threshold_single = float(params.get("drift_threshold_single", "3"))
stop_iterations = int(params.get("stop_iterations", 1))
stop_time = int(params.get("stop_time", 60))
stop_with_signal = params.get("stop_with_signal") == "yes"
# Get guest's pid.
pid = vm.get_pid()
try:
# Get initial time
# (ht stands for host time, gt stands for guest time)
(ht0, gt0) = utils_test.get_time(session, time_command,
time_filter_re, time_format)
# Stop the guest
for i in range(stop_iterations):
# Get time before current iteration
(ht0_, gt0_) = utils_test.get_time(session, time_command,
time_filter_re, time_format)
# Run current iteration
logging.info("Stop %s second: iteration %d of %d...",
stop_time, (i + 1), stop_iterations)
if stop_with_signal:
logging.debug("Stop guest")
os.kill(pid, signal.SIGSTOP)
time.sleep(stop_time)
logging.debug("Continue guest")
os.kill(pid, signal.SIGCONT)
else:
vm.pause()
time.sleep(stop_time)
vm.resume()
            # Sleep for a while to wait for the interrupt to be reinjected
logging.info("Waiting for the interrupt to be reinjected ...")
time.sleep(sleep_time)
# Get time after current iteration
(ht1_, gt1_) = utils_test.get_time(session, time_command,
time_filter_re, time_format)
# Report iteration results
host_delta = ht1_ - ht0_
guest_delta = gt1_ - gt0_
drift = abs(host_delta - guest_delta)
            # A KVM guest's CLOCK_MONOTONIC does not advance while the guest is
            # paused, so subtract stop_time from the measured drift.
if not stop_with_signal:
drift = abs(drift - stop_time)
if params.get("os_type") == "windows" and rtc_clock == "host":
drift = abs(host_delta - guest_delta)
logging.info("Host duration (iteration %d): %.2f",
(i + 1), host_delta)
logging.info("Guest duration (iteration %d): %.2f",
(i + 1), guest_delta)
logging.info("Drift at iteration %d: %.2f seconds",
(i + 1), drift)
# Fail if necessary
if drift > drift_threshold_single:
raise error.TestFail("Time drift too large at iteration %d: "
"%.2f seconds" % (i + 1, drift))
# Get final time
(ht1, gt1) = utils_test.get_time(session, time_command,
time_filter_re, time_format)
finally:
if session:
session.close()
        # Remove the boot flags added for this test.
if boot_option_added or boot_option_removed:
utils_test.update_boot_option(vm,
args_removed=boot_option_added,
args_added=boot_option_removed)
# Report results
host_delta = ht1 - ht0
guest_delta = gt1 - gt0
drift = abs(host_delta - guest_delta)
    # A KVM guest's CLOCK_MONOTONIC does not advance while the guest is
    # paused, so subtract stop_time from the measured drift.
if not stop_with_signal:
drift = abs(drift - stop_time)
if params.get("os_type") == "windows" and rtc_clock == "host":
drift = abs(host_delta - guest_delta)
logging.info("Host duration (%d stops): %.2f",
stop_iterations, host_delta)
logging.info("Guest duration (%d stops): %.2f",
stop_iterations, guest_delta)
logging.info("Drift after %d stops: %.2f seconds",
stop_iterations, drift)
# Fail if necessary
if drift > drift_threshold:
raise error.TestFail("Time drift too large after %d stops: "
"%.2f seconds" % (stop_iterations, drift))
|
gpl-2.0
|
trabacus-softapps/openerp-8.0-cc
|
openerp/addons/process/__init__.py
|
65
|
1071
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import process
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
zrhans/pythonanywhere
|
.virtualenvs/django19/lib/python3.4/site-packages/pandas/stats/math.py
|
25
|
3253
|
# pylint: disable-msg=E1103
# pylint: disable-msg=W0212
from __future__ import division
from pandas.compat import range
import numpy as np
import numpy.linalg as linalg
def rank(X, cond=1.0e-12):
"""
    Return the numerical rank of a matrix X, computed from its singular
    values (those exceeding `cond` times the largest singular value are counted).
"""
X = np.asarray(X)
if len(X.shape) == 2:
import scipy.linalg as SL
D = SL.svdvals(X)
result = np.add.reduce(np.greater(D / D.max(), cond))
return int(result.astype(np.int32))
else:
return int(not np.alltrue(np.equal(X, 0.)))
def solve(a, b):
"""Returns the solution of A X = B."""
try:
return linalg.solve(a, b)
except linalg.LinAlgError:
return np.dot(linalg.pinv(a), b)
def inv(a):
"""Returns the inverse of A."""
try:
return np.linalg.inv(a)
except linalg.LinAlgError:
return np.linalg.pinv(a)
def is_psd(m):
eigvals = linalg.eigvals(m)
return np.isreal(eigvals).all() and (eigvals >= 0).all()
def newey_west(m, max_lags, nobs, df, nw_overlap=False):
"""
Compute Newey-West adjusted covariance matrix, taking into account
specified number of leads / lags
Parameters
----------
m : (N x K)
max_lags : int
nobs : int
Number of observations in model
df : int
Degrees of freedom in explanatory variables
nw_overlap : boolean, default False
Assume data is overlapping
Returns
-------
ndarray (K x K)
Reference
---------
Newey, W. K. & West, K. D. (1987) A Simple, Positive
Semi-definite, Heteroskedasticity and Autocorrelation Consistent
Covariance Matrix, Econometrica, vol. 55(3), 703-708
"""
Xeps = np.dot(m.T, m)
for lag in range(1, max_lags + 1):
auto_cov = np.dot(m[:-lag].T, m[lag:])
weight = lag / (max_lags + 1)
if nw_overlap:
weight = 0
bb = auto_cov + auto_cov.T
dd = (1 - weight) * bb
Xeps += dd
Xeps *= nobs / (nobs - df)
if nw_overlap and not is_psd(Xeps):
new_max_lags = int(np.ceil(max_lags * 1.5))
# print('nw_overlap is True and newey_west generated a non positive '
# 'semidefinite matrix, so using newey_west with max_lags of %d.'
# % new_max_lags)
return newey_west(m, new_max_lags, nobs, df)
return Xeps
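# Usage sketch (hypothetical inputs): m holds the per-observation moment
# conditions (N x K), so the call below yields a K x K adjusted covariance.
#   m = np.random.randn(100, 3)
#   cov = newey_west(m, max_lags=2, nobs=100, df=3)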
def calc_F(R, r, beta, var_beta, nobs, df):
"""
Computes the standard F-test statistic for linear restriction
hypothesis testing
Parameters
----------
R: ndarray (N x N)
Restriction matrix
r: ndarray (N x 1)
Restriction vector
beta: ndarray (N x 1)
Estimated model coefficients
var_beta: ndarray (N x N)
Variance covariance matrix of regressors
nobs: int
Number of observations in model
df: int
Model degrees of freedom
Returns
-------
F value, (q, df_resid), p value
"""
from scipy.stats import f
hyp = np.dot(R, beta.reshape(len(beta), 1)) - r
RSR = np.dot(R, np.dot(var_beta, R.T))
q = len(r)
F = np.dot(hyp.T, np.dot(inv(RSR), hyp)).squeeze() / q
p_value = 1 - f.cdf(F, q, nobs - df)
return F, (q, nobs - df), p_value
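# Usage sketch (hypothetical inputs): jointly test that both coefficients of a
# two-regressor model are zero, with beta and var_beta estimated elsewhere.
#   R = np.eye(2)
#   r = np.zeros((2, 1))
#   F, (q, df_resid), p_value = calc_F(R, r, beta, var_beta, nobs=100, df=2)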
|
apache-2.0
|
ualikhansars/Gwent
|
lib/python2.7/site-packages/django/db/backends/sqlite3/introspection.py
|
219
|
11307
|
import re
from collections import namedtuple
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('default',))
def get_field_size(name):
""" Extract the size number from a "varchar(11)" type name """
m = field_size_re.search(name)
return int(m.group(1)) if m else None
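# For example, get_field_size('varchar(32)') returns 32, while
# get_field_size('text') returns None because no length is declared.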
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict(object):
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'bigint': 'BigIntegerField',
'integer unsigned': 'PositiveIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'blob': 'BinaryField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower()
try:
return self.base_data_types_reverse[key]
except KeyError:
size = get_field_size(key)
if size is not None:
return ('CharField', {'max_length': size})
raise KeyError
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name, type FROM sqlite_master
WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
ORDER BY name""")
return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
return [
FieldInfo(
info['name'],
info['type'],
None,
info['size'],
None,
None,
info['null_ok'],
info['default'],
) for info in self._table_info(cursor, table_name)
]
def column_name_converter(self, name):
"""
SQLite will in some cases, e.g. when returning columns from views and
subselects, return column names in 'alias."column"' format instead of
simply 'column'.
Affects SQLite < 3.7.15, fixed by http://www.sqlite.org/src/info/5526e0aa3c
"""
# TODO: remove when SQLite < 3.7.15 is sufficiently old.
# 3.7.13 ships in Debian stable as of 2014-03-21.
if self.connection.Database.sqlite_version_info < (3, 7, 15):
return name.split('.')[-1].strip('"')
else:
return name
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
try:
results = cursor.fetchone()[0].strip()
except TypeError:
# It might be a view, then no results will be returned
return relations
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_desc in results.split(','):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search('references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
if field_desc.startswith("FOREIGN KEY"):
# Find name of the target FK field
m = re.match('FOREIGN KEY\(([^\)]*)\).*', field_desc, re.I)
field_name = m.groups()[0].strip('"')
else:
field_name = field_desc.split()[0].strip('"')
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchall()[0]
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li + 1:ri]
for other_desc in other_table_results.split(','):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
other_name = other_desc.split(' ', 1)[0].strip('"')
if other_name == column:
relations[field_name] = (other_name, table)
break
return relations
def get_key_columns(self, cursor, table_name):
"""
Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
key columns in given table.
"""
key_columns = []
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search('"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
# This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
key_columns.append(tuple(s.strip('"') for s in m.groups()))
return key_columns
def get_indexes(self, cursor, table_name):
indexes = {}
for info in self._table_info(cursor, table_name):
if info['pk'] != 0:
indexes[info['name']] = {'primary_key': True,
'unique': False}
cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
# seq, name, unique
for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
info = cursor.fetchall()
# Skip indexes across multiple fields
if len(info) != 1:
continue
name = info[0][2] # seqno, cid, name
indexes[name] = {'primary_key': indexes.get(name, {}).get("primary_key", False),
'unique': unique}
return indexes
def get_primary_key_column(self, cursor, table_name):
"""
Get the column name of the primary key for the given table.
"""
# Don't use PRAGMA because that causes issues with some transactions
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
row = cursor.fetchone()
if row is None:
raise ValueError("Table %s does not exist" % table_name)
results = row[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
for field_desc in results.split(','):
field_desc = field_desc.strip()
m = re.search('"(.*)".*PRIMARY KEY( AUTOINCREMENT)?$', field_desc)
if m:
return m.groups()[0]
return None
def _table_info(self, cursor, name):
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
# cid, name, type, notnull, default_value, pk
return [{
'name': field[1],
'type': field[2],
'size': get_field_size(field[2]),
'null_ok': not field[3],
'default': field[4],
'pk': field[5], # undocumented
} for field in cursor.fetchall()]
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Get the index info
cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
for row in cursor.fetchall():
# Sqlite3 3.8.9+ has 5 columns, however older versions only give 3
# columns. Discard last 2 columns if there.
number, index, unique = row[:3]
# Get the index info for that index
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
for index_rank, column_rank, column in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": [],
"primary_key": False,
"unique": bool(unique),
"foreign_key": False,
"check": False,
"index": True,
}
constraints[index]['columns'].append(column)
# Get the PK
pk_column = self.get_primary_key_column(cursor, table_name)
if pk_column:
# SQLite doesn't actually give a name to the PK constraint,
# so we invent one. This is fine, as the SQLite backend never
# deletes PK constraints by name, as you can't delete constraints
# in SQLite; we remake the table with a new PK instead.
constraints["__primary__"] = {
"columns": [pk_column],
"primary_key": True,
"unique": False, # It's not actually a unique constraint.
"foreign_key": False,
"check": False,
"index": False,
}
return constraints
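    # Illustrative shape of the mapping returned above (names and values made up):
    # {
    #     'myapp_person_name_idx': {'columns': ['name'], 'primary_key': False,
    #                               'unique': True, 'foreign_key': False,
    #                               'check': False, 'index': True},
    #     '__primary__': {'columns': ['id'], 'primary_key': True,
    #                     'unique': False, 'foreign_key': False,
    #                     'check': False, 'index': False},
    # }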
|
mit
|
simongoffin/my_odoo_tutorial
|
addons/account_anglo_saxon/invoice.py
|
61
|
13374
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C)
# 2004-2010 Tiny SPRL (<http://tiny.be>).
# 2009-2010 Veritos (http://veritos.nl).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class account_invoice_line(osv.osv):
_inherit = "account.invoice.line"
_columns = {
'move_id': fields.many2one('stock.move', string="Move line", help="If the invoice was generated from a stock.picking, reference to the related move line."),
}
def move_line_get(self, cr, uid, invoice_id, context=None):
res = super(account_invoice_line,self).move_line_get(cr, uid, invoice_id, context=context)
inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
company_currency = inv.company_id.currency_id.id
def get_price(cr, uid, inv, company_currency, i_line, price_unit):
cur_obj = self.pool.get('res.currency')
if inv.currency_id.id != company_currency:
price = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, price_unit * i_line.quantity, context={'date': inv.date_invoice})
else:
price = price_unit * i_line.quantity
return price
if inv.type in ('out_invoice','out_refund'):
for i_line in inv.invoice_line:
if i_line.product_id and i_line.product_id.valuation == 'real_time':
if inv.type == 'out_invoice':
# debit account dacc will be the output account
# first check the product, if empty check the category
dacc = i_line.product_id.property_stock_account_output and i_line.product_id.property_stock_account_output.id
if not dacc:
dacc = i_line.product_id.categ_id.property_stock_account_output_categ and i_line.product_id.categ_id.property_stock_account_output_categ.id
else:
# = out_refund
# debit account dacc will be the input account
# first check the product, if empty check the category
dacc = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id
if not dacc:
dacc = i_line.product_id.categ_id.property_stock_account_input_categ and i_line.product_id.categ_id.property_stock_account_input_categ.id
# in both cases the credit account cacc will be the expense account
# first check the product, if empty check the category
cacc = i_line.product_id.property_account_expense and i_line.product_id.property_account_expense.id
if not cacc:
cacc = i_line.product_id.categ_id.property_account_expense_categ and i_line.product_id.categ_id.property_account_expense_categ.id
if dacc and cacc:
price_unit = i_line.move_id and i_line.move_id.price_unit or i_line.product_id.standard_price
res.append({
'type':'src',
'name': i_line.name[:64],
'price_unit':price_unit,
'quantity':i_line.quantity,
'price':get_price(cr, uid, inv, company_currency, i_line, price_unit),
'account_id':dacc,
'product_id':i_line.product_id.id,
'uos_id':i_line.uos_id.id,
'account_analytic_id': False,
'taxes':i_line.invoice_line_tax_id,
})
res.append({
'type':'src',
'name': i_line.name[:64],
'price_unit':price_unit,
'quantity':i_line.quantity,
'price': -1 * get_price(cr, uid, inv, company_currency, i_line, price_unit),
'account_id':cacc,
'product_id':i_line.product_id.id,
'uos_id':i_line.uos_id.id,
'account_analytic_id': False,
'taxes':i_line.invoice_line_tax_id,
})
elif inv.type in ('in_invoice','in_refund'):
for i_line in inv.invoice_line:
if i_line.product_id and i_line.product_id.valuation == 'real_time':
if i_line.product_id.type != 'service':
# get the price difference account at the product
acc = i_line.product_id.property_account_creditor_price_difference and i_line.product_id.property_account_creditor_price_difference.id
if not acc:
# if not found on the product get the price difference account at the category
acc = i_line.product_id.categ_id.property_account_creditor_price_difference_categ and i_line.product_id.categ_id.property_account_creditor_price_difference_categ.id
a = None
if inv.type == 'in_invoice':
# oa will be the stock input account
# first check the product, if empty check the category
oa = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id
if not oa:
oa = i_line.product_id.categ_id.property_stock_account_input_categ and i_line.product_id.categ_id.property_stock_account_input_categ.id
else:
# = in_refund
# oa will be the stock output account
# first check the product, if empty check the category
oa = i_line.product_id.property_stock_account_output and i_line.product_id.property_stock_account_output.id
if not oa:
oa = i_line.product_id.categ_id.property_stock_account_output_categ and i_line.product_id.categ_id.property_stock_account_output_categ.id
if oa:
# get the fiscal position
fpos = i_line.invoice_id.fiscal_position or False
a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
diff_res = []
# calculate and write down the possible price difference between invoice price and product price
for line in res:
if a == line['account_id'] and i_line.product_id.id == line['product_id']:
uom = i_line.product_id.uos_id or i_line.product_id.uom_id
valuation_price_unit = self.pool.get('product.uom')._compute_price(cr, uid, uom.id, i_line.product_id.standard_price, i_line.uos_id.id)
if inv.currency_id.id != company_currency:
                            # Convert the valuation price into the invoice currency
                            valuation_price_unit = self.pool.get('res.currency').compute(cr, uid, company_currency, inv.currency_id.id, valuation_price_unit, context={'date': inv.date_invoice})
if i_line.product_id.cost_method != 'standard' and i_line.purchase_line_id:
                                # For average/fifo/lifo costing methods, fetch the real cost price from incoming moves
stock_move_obj = self.pool.get('stock.move')
valuation_stock_move = stock_move_obj.search(cr, uid, [('purchase_line_id', '=', i_line.purchase_line_id.id)], limit=1, context=context)
if valuation_stock_move:
valuation_price_unit = stock_move_obj.browse(cr, uid, valuation_stock_move[0], context=context).price_unit
if valuation_price_unit != i_line.price_unit and line['price_unit'] == i_line.price_unit and acc:
price_diff = i_line.price_unit - valuation_price_unit
line.update({'price': valuation_price_unit * line['quantity']})
diff_res.append({
'type': 'src',
'name': i_line.name[:64],
'price_unit': price_diff,
'quantity': line['quantity'],
'price': price_diff * line['quantity'],
'account_id': acc,
'product_id': line['product_id'],
'uos_id': line['uos_id'],
'account_analytic_id': line['account_analytic_id'],
'taxes': line.get('taxes', []),
})
res += diff_res
return res
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None):
fiscal_pool = self.pool.get('account.fiscal.position')
res = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id, context, company_id)
if not product:
return res
if type in ('in_invoice','in_refund'):
product_obj = self.pool.get('product.product').browse(cr, uid, product, context=context)
if type == 'in_invoice':
oa = product_obj.property_stock_account_input and product_obj.property_stock_account_input.id
if not oa:
oa = product_obj.categ_id.property_stock_account_input_categ and product_obj.categ_id.property_stock_account_input_categ.id
else:
oa = product_obj.property_stock_account_output and product_obj.property_stock_account_output.id
if not oa:
oa = product_obj.categ_id.property_stock_account_output_categ and product_obj.categ_id.property_stock_account_output_categ.id
if oa:
fpos = fposition_id and fiscal_pool.browse(cr, uid, fposition_id, context=context) or False
a = fiscal_pool.map_account(cr, uid, fpos, oa)
res['value'].update({'account_id':a})
return res
class account_invoice(osv.osv):
_inherit = "account.invoice"
def _prepare_refund(self, cr, uid, invoice, date=None, period_id=None, description=None, journal_id=None, context=None):
invoice_data = super(account_invoice, self)._prepare_refund(cr, uid, invoice, date, period_id,
description, journal_id, context=context)
if invoice.type == 'in_invoice':
fiscal_position = self.pool.get('account.fiscal.position')
for _, _, line_dict in invoice_data['invoice_line']:
if line_dict.get('product_id'):
product = self.pool.get('product.product').browse(cr, uid, line_dict['product_id'], context=context)
counterpart_acct_id = product.property_stock_account_output and \
product.property_stock_account_output.id
if not counterpart_acct_id:
counterpart_acct_id = product.categ_id.property_stock_account_output_categ and \
product.categ_id.property_stock_account_output_categ.id
if counterpart_acct_id:
fpos = invoice.fiscal_position or False
line_dict['account_id'] = fiscal_position.map_account(cr, uid,
fpos,
counterpart_acct_id)
return invoice_data
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
metamx/spark
|
examples/src/main/python/ml/vector_assembler_example.py
|
123
|
1649
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("VectorAssemblerExample")\
.getOrCreate()
# $example on$
dataset = spark.createDataFrame(
[(0, 18, 1.0, Vectors.dense([0.0, 10.0, 0.5]), 1.0)],
["id", "hour", "mobile", "userFeatures", "clicked"])
assembler = VectorAssembler(
inputCols=["hour", "mobile", "userFeatures"],
outputCol="features")
output = assembler.transform(dataset)
print("Assembled columns 'hour', 'mobile', 'userFeatures' to vector column 'features'")
output.select("features", "clicked").show(truncate=False)
# $example off$
spark.stop()
|
apache-2.0
|
Juniper/ceilometer
|
ceilometer/storage/models.py
|
13
|
6509
|
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Model classes for use in the storage API.
"""
from ceilometer.storage import base
class Resource(base.Model):
"""Something for which sample data has been collected."""
def __init__(self, resource_id, project_id,
first_sample_timestamp,
last_sample_timestamp,
source, user_id, metadata):
"""Create a new resource.
:param resource_id: UUID of the resource
:param project_id: UUID of project owning the resource
:param first_sample_timestamp: first sample timestamp captured
:param last_sample_timestamp: last sample timestamp captured
:param source: the identifier for the user/project id definition
:param user_id: UUID of user owning the resource
:param metadata: most current metadata for the resource (a dict)
"""
base.Model.__init__(self,
resource_id=resource_id,
first_sample_timestamp=first_sample_timestamp,
last_sample_timestamp=last_sample_timestamp,
project_id=project_id,
source=source,
user_id=user_id,
metadata=metadata,
)
class Meter(base.Model):
"""Definition of a meter for which sample data has been collected."""
def __init__(self, name, type, unit, resource_id, project_id, source,
user_id):
"""Create a new meter.
:param name: name of the meter
:param type: type of the meter (gauge, delta, cumulative)
:param unit: unit of the meter
:param resource_id: UUID of the resource
:param project_id: UUID of project owning the resource
:param source: the identifier for the user/project id definition
:param user_id: UUID of user owning the resource
"""
base.Model.__init__(self,
name=name,
type=type,
unit=unit,
resource_id=resource_id,
project_id=project_id,
source=source,
user_id=user_id,
)
class Sample(base.Model):
"""One collected data point."""
def __init__(self,
source,
counter_name, counter_type, counter_unit, counter_volume,
user_id, project_id, resource_id,
timestamp, resource_metadata,
message_id,
message_signature,
recorded_at,
):
"""Create a new sample.
:param source: the identifier for the user/project id definition
:param counter_name: the name of the measurement being taken
:param counter_type: the type of the measurement
:param counter_unit: the units for the measurement
:param counter_volume: the measured value
:param user_id: the user that triggered the measurement
:param project_id: the project that owns the resource
:param resource_id: the thing on which the measurement was taken
:param timestamp: the time of the measurement
:param resource_metadata: extra details about the resource
:param message_id: a message identifier
:param recorded_at: sample record timestamp
:param message_signature: a hash created from the rest of the
message data
"""
base.Model.__init__(self,
source=source,
counter_name=counter_name,
counter_type=counter_type,
counter_unit=counter_unit,
counter_volume=counter_volume,
user_id=user_id,
project_id=project_id,
resource_id=resource_id,
timestamp=timestamp,
resource_metadata=resource_metadata,
message_id=message_id,
message_signature=message_signature,
recorded_at=recorded_at)
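# Construction sketch (every value below is made up for illustration):
#   s = Sample(source='openstack', counter_name='cpu_util',
#              counter_type='gauge', counter_unit='%', counter_volume=12.5,
#              user_id='u-1', project_id='p-1', resource_id='r-1',
#              timestamp=some_datetime, resource_metadata={},
#              message_id='m-1', message_signature='sig', recorded_at=None)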
class Statistics(base.Model):
"""Computed statistics based on a set of sample data."""
def __init__(self, unit,
period, period_start, period_end,
duration, duration_start, duration_end,
groupby, **data):
"""Create a new statistics object.
:param unit: The unit type of the data set
:param period: The length of the time range covered by these stats
:param period_start: The timestamp for the start of the period
:param period_end: The timestamp for the end of the period
:param duration: The total time for the matching samples
:param duration_start: The earliest time for the matching samples
:param duration_end: The latest time for the matching samples
:param groupby: The fields used to group the samples.
:param data: some or all of the following aggregates
min: The smallest volume found
max: The largest volume found
avg: The average of all volumes found
sum: The total of all volumes found
count: The number of samples found
aggregate: name-value pairs for selectable aggregates
"""
base.Model.__init__(self, unit=unit,
period=period, period_start=period_start,
period_end=period_end, duration=duration,
duration_start=duration_start,
duration_end=duration_end,
groupby=groupby,
**data)
|
apache-2.0
|
ElementalAlchemist/txircd
|
txircd/config.py
|
1
|
2906
|
import yaml
class Config(object):
def __init__(self, ircd, configFileName):
self.ircd = ircd
self.fileName = configFileName
def reload(self):
newConfig = self._readConfig(self.fileName)
self.ircd.verifyConfig(newConfig)
self._configData = newConfig
def _readConfig(self, fileName):
configData = {}
try:
with open(fileName, "r") as configFile:
configData = yaml.safe_load(configFile)
if configData is None:
configData = {} # configData isn't allowed to be None, but an empty YAML file will result in None
except Exception as e:
raise ConfigReadError (fileName, e)
if "include" in configData:
for fileName in configData["include"]:
includeConfig = self._readConfig(fileName)
for key, val in includeConfig.iteritems():
if key not in configData:
configData[key] = val
elif not isinstance(configData[key], basestring): # Let's try to merge them if they're collections
if isinstance(val, basestring):
raise ConfigReadError(fileName, "The included configuration file tried to merge a non-string with a string.")
try: # Make sure both things we're merging are still iterable types (not numbers or whatever)
iter(configData[key])
iter(val)
except TypeError:
pass # Just don't merge them if they're not
else:
try:
configData[key] += val # Merge with the + operator
except TypeError: # Except that some collections (dicts) can't
try:
for subkey, subval in val.iteritems(): # So merge them manually
if subkey not in configData[key]:
configData[key][subkey] = subval
except (AttributeError, TypeError): # If either of these, they weren't both dicts (but were still iterable); requires user to resolve
raise ConfigReadError(fileName, "The variable {} could not be successfully merged across files.".format(key))
del configData["include"]
return configData
def __len__(self):
return len(self._configData)
def __getitem__(self, key):
return self._configData[key]
def __setitem__(self, key, value):
self._configData[key] = value
def __iter__(self):
return iter(self._configData)
def get(self, key, defaultValue):
"""
Allows you to get a key from the configuration with a default value if
the configuration key does not exist.
"""
try:
return self._configData[key]
except KeyError:
return defaultValue
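# Usage sketch (key names below are hypothetical): given a loaded Config,
#   config.get("ping_interval", 120)  # returns the configured value if present
#   config.get("missing_key", 10)     # falls back to 10 when the key is absent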
class ConfigError(Exception):
pass
class ConfigReadError(ConfigError):
def __init__(self, fileName, desc):
self.fileName = fileName
self.desc = desc
def __str__(self):
return "Error reading configuration file {}: {}".format(self.fileName, self.desc)
class ConfigValidationError(ConfigError):
def __init__(self, key, desc):
self.key = key
self.desc = desc
def __str__(self):
return "Error validating configuration value {}: {}".format(self.key, self.desc)
|
bsd-3-clause
|
jiajiechen/mxnet
|
tools/launch.py
|
3
|
5466
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Launch a distributed job
"""
import argparse
import os, sys
import signal
import logging
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(curr_path, "../dmlc-core/tracker"))
def dmlc_opts(opts):
"""convert from mxnet's opts to dmlc's opts
"""
args = ['--num-workers', str(opts.num_workers),
'--num-servers', str(opts.num_servers),
'--cluster', opts.launcher,
'--host-file', opts.hostfile,
'--sync-dst-dir', opts.sync_dst_dir]
# convert to dictionary
dopts = vars(opts)
for key in ['env_server', 'env_worker', 'env']:
for v in dopts[key]:
args.append('--' + key.replace("_","-"))
args.append(v)
args += opts.command
try:
from dmlc_tracker import opts
except ImportError:
print("Can't load dmlc_tracker package. Perhaps you need to run")
print(" git submodule update --init --recursive")
raise
dmlc_opts = opts.get_opts(args)
return dmlc_opts
def main():
parser = argparse.ArgumentParser(description='Launch a distributed job')
parser.add_argument('-n', '--num-workers', required=True, type=int,
help = 'number of worker nodes to be launched')
parser.add_argument('-s', '--num-servers', type=int,
help = 'number of server nodes to be launched, \
                        by default it is equal to NUM_WORKERS')
parser.add_argument('-H', '--hostfile', type=str,
help = 'the hostfile of slave machines which will run \
the job. Required for ssh and mpi launcher')
parser.add_argument('--sync-dst-dir', type=str,
                        help = 'if specified, it will sync the current \
                        directory into the slave machines\' SYNC_DST_DIR if ssh \
launcher is used')
parser.add_argument('--launcher', type=str, default='ssh',
choices = ['local', 'ssh', 'mpi', 'sge', 'yarn'],
help = 'the launcher to use')
parser.add_argument('--env-server', action='append', default=[],
help = 'Given a pair of environment_variable:value, sets this value of \
                        environment variable for the server processes. This overrides the value of \
                        that environment variable on the machine where this script is run from. \
Example OMP_NUM_THREADS:3')
parser.add_argument('--env-worker', action='append', default=[],
help = 'Given a pair of environment_variable:value, sets this value of \
                        environment variable for the worker processes. This overrides the value of \
                        that environment variable on the machine where this script is run from. \
Example OMP_NUM_THREADS:3')
parser.add_argument('--env', action='append', default=[],
                        help = 'given an environment variable, passes its \
                        value from the current system to all workers and servers. \
Not necessary when launcher is local as in that case \
all environment variables which are set are copied.')
parser.add_argument('command', nargs='+',
help = 'command for launching the program')
args, unknown = parser.parse_known_args()
args.command += unknown
if args.num_servers is None:
args.num_servers = args.num_workers
args = dmlc_opts(args)
if args.host_file is None or args.host_file == 'None':
if args.cluster == 'yarn':
from dmlc_tracker import yarn
yarn.submit(args)
elif args.cluster == 'local':
from dmlc_tracker import local
local.submit(args)
elif args.cluster == 'sge':
from dmlc_tracker import sge
sge.submit(args)
else:
raise RuntimeError('Unknown submission cluster type %s' % args.cluster)
else:
if args.cluster == 'ssh':
from dmlc_tracker import ssh
ssh.submit(args)
elif args.cluster == 'mpi':
from dmlc_tracker import mpi
mpi.submit(args)
else:
raise RuntimeError('Unknown submission cluster type %s' % args.cluster)
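# Example invocation (sketch; the training script and hostfile are hypothetical):
#   python tools/launch.py -n 4 --launcher ssh -H hosts python train.py
# The ssh and mpi launchers require a hostfile; local, sge and yarn do not.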
def signal_handler(signal, frame):
logging.info('Stop launcher')
sys.exit(0)
if __name__ == '__main__':
fmt = '%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(format=fmt, level=logging.INFO)
signal.signal(signal.SIGINT, signal_handler)
main()
|
apache-2.0
|
rwl/PyCIM
|
CIM15/IEC61970/Informative/InfOperations/ErpPersonScheduleStepRole.py
|
1
|
2895
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Informative.InfCommon.Role import Role
class ErpPersonScheduleStepRole(Role):
"""Roles played between Persons and Schedule Steps.Roles played between Persons and Schedule Steps.
"""
def __init__(self, ErpPerson=None, SwitchingStep=None, *args, **kw_args):
"""Initialises a new 'ErpPersonScheduleStepRole' instance.
@param ErpPerson:
@param SwitchingStep:
"""
self._ErpPerson = None
self.ErpPerson = ErpPerson
self._SwitchingStep = None
self.SwitchingStep = SwitchingStep
super(ErpPersonScheduleStepRole, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["ErpPerson", "SwitchingStep"]
_many_refs = []
def getErpPerson(self):
return self._ErpPerson
def setErpPerson(self, value):
if self._ErpPerson is not None:
filtered = [x for x in self.ErpPerson.SwitchingStepRoles if x != self]
self._ErpPerson._SwitchingStepRoles = filtered
self._ErpPerson = value
if self._ErpPerson is not None:
if self not in self._ErpPerson._SwitchingStepRoles:
self._ErpPerson._SwitchingStepRoles.append(self)
ErpPerson = property(getErpPerson, setErpPerson)
def getSwitchingStep(self):
return self._SwitchingStep
def setSwitchingStep(self, value):
if self._SwitchingStep is not None:
self._SwitchingStep._ErpPersonRole = None
self._SwitchingStep = value
if self._SwitchingStep is not None:
self._SwitchingStep.ErpPersonRole = None
self._SwitchingStep._ErpPersonRole = self
SwitchingStep = property(getSwitchingStep, setSwitchingStep)
|
mit
|
datawire/qpid-proton
|
examples/python/helloworld_direct_tornado.py
|
4
|
1576
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from proton import Message
from proton.handlers import MessagingHandler
from proton_tornado import Container
class HelloWorld(MessagingHandler):
def __init__(self, url):
super(HelloWorld, self).__init__()
self.url = url
def on_start(self, event):
self.acceptor = event.container.listen(self.url)
event.container.create_sender(self.url)
def on_sendable(self, event):
event.sender.send(Message(body=u"Hello World!"))
event.sender.close()
def on_message(self, event):
print event.message.body
def on_accepted(self, event):
event.connection.close()
def on_connection_closed(self, event):
self.acceptor.close()
Container(HelloWorld("localhost:8888/examples")).run()
|
apache-2.0
|
fidomason/kbengine
|
kbe/res/scripts/common/Lib/test/test_pydoc.py
|
68
|
37053
|
import os
import sys
import builtins
import contextlib
import difflib
import inspect
import pydoc
import keyword
import _pickle
import pkgutil
import re
import string
import test.support
import time
import types
import unittest
import urllib.parse
import xml.etree
import textwrap
from io import StringIO
from collections import namedtuple
from test.script_helper import assert_python_ok
from test.support import (
TESTFN, rmtree,
reap_children, reap_threads, captured_output, captured_stdout,
captured_stderr, unlink, requires_docstrings
)
from test import pydoc_mod
try:
import threading
except ImportError:
threading = None
if test.support.HAVE_DOCSTRINGS:
expected_data_docstrings = (
'dictionary for instance variables (if defined)',
'list of weak references to the object (if defined)',
) * 2
else:
expected_data_docstrings = ('', '', '', '')
expected_text_pattern = """
NAME
test.pydoc_mod - This is a test module for test_pydoc
%s
CLASSES
builtins.object
A
B
C
\x20\x20\x20\x20
class A(builtins.object)
| Hello and goodbye
|\x20\x20
| Methods defined here:
|\x20\x20
| __init__()
| Wow, I have no function!
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors defined here:
|\x20\x20
| __dict__%s
|\x20\x20
| __weakref__%s
\x20\x20\x20\x20
class B(builtins.object)
| Data descriptors defined here:
|\x20\x20
| __dict__%s
|\x20\x20
| __weakref__%s
|\x20\x20
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|\x20\x20
| NO_MEANING = 'eggs'
\x20\x20\x20\x20
class C(builtins.object)
| Methods defined here:
|\x20\x20
| get_answer(self)
| Return say_no()
|\x20\x20
| is_it_true(self)
| Return self.get_answer()
|\x20\x20
| say_no(self)
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors defined here:
|\x20\x20
| __dict__
| dictionary for instance variables (if defined)
|\x20\x20
| __weakref__
| list of weak references to the object (if defined)
FUNCTIONS
doc_func()
This function solves all of the world's problems:
hunger
lack of Python
war
\x20\x20\x20\x20
nodoc_func()
DATA
__xyz__ = 'X, Y and Z'
VERSION
1.2.3.4
AUTHOR
Benjamin Peterson
CREDITS
Nobody
FILE
%s
""".strip()
expected_text_data_docstrings = tuple('\n | ' + s if s else ''
for s in expected_data_docstrings)
expected_html_pattern = """
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="#7799ee">
<td valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"> <br><big><big><strong><a href="test.html"><font color="#ffffff">test</font></a>.pydoc_mod</strong></big></big> (version 1.2.3.4)</font></td
><td align=right valign=bottom
><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="file:%s">%s</a>%s</font></td></tr></table>
<p><tt>This is a test module for test_pydoc</tt></p>
<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ee77aa">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#ee77aa"><tt> </tt></td><td> </td>
<td width="100%%"><dl>
<dt><font face="helvetica, arial"><a href="builtins.html#object">builtins.object</a>
</font></dt><dd>
<dl>
<dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#A">A</a>
</font></dt><dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#B">B</a>
</font></dt><dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#C">C</a>
</font></dt></dl>
</dd>
</dl>
<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom> <br>
<font color="#000000" face="helvetica, arial"><a name="A">class <strong>A</strong></a>(<a href="builtins.html#object">builtins.object</a>)</font></td></tr>
\x20\x20\x20\x20
<tr bgcolor="#ffc8d8"><td rowspan=2><tt> </tt></td>
<td colspan=2><tt>Hello and goodbye<br> </tt></td></tr>
<tr><td> </td>
<td width="100%%">Methods defined here:<br>
<dl><dt><a name="A-__init__"><strong>__init__</strong></a>()</dt><dd><tt>Wow, I have no function!</tt></dd></dl>
<hr>
Data descriptors defined here:<br>
<dl><dt><strong>__dict__</strong></dt>
<dd><tt>%s</tt></dd>
</dl>
<dl><dt><strong>__weakref__</strong></dt>
<dd><tt>%s</tt></dd>
</dl>
</td></tr></table> <p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom> <br>
<font color="#000000" face="helvetica, arial"><a name="B">class <strong>B</strong></a>(<a href="builtins.html#object">builtins.object</a>)</font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#ffc8d8"><tt> </tt></td><td> </td>
<td width="100%%">Data descriptors defined here:<br>
<dl><dt><strong>__dict__</strong></dt>
<dd><tt>%s</tt></dd>
</dl>
<dl><dt><strong>__weakref__</strong></dt>
<dd><tt>%s</tt></dd>
</dl>
<hr>
Data and other attributes defined here:<br>
<dl><dt><strong>NO_MEANING</strong> = 'eggs'</dl>
</td></tr></table> <p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom> <br>
<font color="#000000" face="helvetica, arial"><a name="C">class <strong>C</strong></a>(<a href="builtins.html#object">builtins.object</a>)</font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#ffc8d8"><tt> </tt></td><td> </td>
<td width="100%%">Methods defined here:<br>
<dl><dt><a name="C-get_answer"><strong>get_answer</strong></a>(self)</dt><dd><tt>Return <a href="#C-say_no">say_no</a>()</tt></dd></dl>
<dl><dt><a name="C-is_it_true"><strong>is_it_true</strong></a>(self)</dt><dd><tt>Return self.<a href="#C-get_answer">get_answer</a>()</tt></dd></dl>
<dl><dt><a name="C-say_no"><strong>say_no</strong></a>(self)</dt></dl>
<hr>
Data descriptors defined here:<br>
<dl><dt><strong>__dict__</strong></dt>
<dd><tt>dictionary for instance variables (if defined)</tt></dd>
</dl>
<dl><dt><strong>__weakref__</strong></dt>
<dd><tt>list of weak references to the object (if defined)</tt></dd>
</dl>
</td></tr></table></td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#eeaa77">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#eeaa77"><tt> </tt></td><td> </td>
<td width="100%%"><dl><dt><a name="-doc_func"><strong>doc_func</strong></a>()</dt><dd><tt>This function solves all of the world's problems:<br>
hunger<br>
lack of Python<br>
war</tt></dd></dl>
<dl><dt><a name="-nodoc_func"><strong>nodoc_func</strong></a>()</dt></dl>
</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#55aa55">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#55aa55"><tt> </tt></td><td> </td>
<td width="100%%"><strong>__xyz__</strong> = 'X, Y and Z'</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#7799ee">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Author</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#7799ee"><tt> </tt></td><td> </td>
<td width="100%%">Benjamin Peterson</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#7799ee">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Credits</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#7799ee"><tt> </tt></td><td> </td>
<td width="100%%">Nobody</td></tr></table>
""".strip() # ' <- emacs turd
expected_html_data_docstrings = tuple(s.replace(' ', ' ')
for s in expected_data_docstrings)
# output pattern for missing module
missing_pattern = "no Python documentation found for '%s'"
# output pattern for module with bad imports
badimport_pattern = "problem in %s - ImportError: No module named %r"
expected_dynamicattribute_pattern = """
Help on class DA in module %s:
class DA(builtins.object)
| Data descriptors defined here:
|\x20\x20
| __dict__%s
|\x20\x20
| __weakref__%s
|\x20\x20
| ham
|\x20\x20
| ----------------------------------------------------------------------
| Data and other attributes inherited from Meta:
|\x20\x20
| ham = 'spam'
""".strip()
expected_virtualattribute_pattern1 = """
Help on class Class in module %s:
class Class(builtins.object)
| Data and other attributes inherited from Meta:
|\x20\x20
| LIFE = 42
""".strip()
expected_virtualattribute_pattern2 = """
Help on class Class1 in module %s:
class Class1(builtins.object)
| Data and other attributes inherited from Meta1:
|\x20\x20
| one = 1
""".strip()
expected_virtualattribute_pattern3 = """
Help on class Class2 in module %s:
class Class2(Class1)
| Method resolution order:
| Class2
| Class1
| builtins.object
|\x20\x20
| Data and other attributes inherited from Meta1:
|\x20\x20
| one = 1
|\x20\x20
| ----------------------------------------------------------------------
| Data and other attributes inherited from Meta3:
|\x20\x20
| three = 3
|\x20\x20
| ----------------------------------------------------------------------
| Data and other attributes inherited from Meta2:
|\x20\x20
| two = 2
""".strip()
expected_missingattribute_pattern = """
Help on class C in module %s:
class C(builtins.object)
| Data and other attributes defined here:
|\x20\x20
| here = 'present!'
""".strip()
def run_pydoc(module_name, *args, **env):
"""
Runs pydoc on the specified module. Returns the stripped
output of pydoc.
"""
args = args + (module_name,)
# do not write bytecode files to avoid caching errors
rc, out, err = assert_python_ok('-B', pydoc.__file__, *args, **env)
return out.strip()
def get_pydoc_html(module):
"Returns pydoc generated output as html"
doc = pydoc.HTMLDoc()
output = doc.docmodule(module)
loc = doc.getdocloc(pydoc_mod) or ""
if loc:
loc = "<br><a href=\"" + loc + "\">Module Docs</a>"
return output.strip(), loc
def get_pydoc_text(module):
"Returns pydoc generated output as text"
doc = pydoc.TextDoc()
loc = doc.getdocloc(pydoc_mod) or ""
if loc:
loc = "\nMODULE DOCS\n " + loc + "\n"
output = doc.docmodule(module)
# clean up the extra text formatting that pydoc performs
patt = re.compile('\b.')
output = patt.sub('', output)
return output.strip(), loc
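# Note: pydoc's text output emphasizes headings by overstriking, i.e. each bold character is
# written as "char + backspace + char" (e.g. 'N\bNA\bAM\bME\bE' for 'NAME'); the '\b.' pattern
# above removes each backspace together with the character that follows it, so the tests below
# compare plain text.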
def print_diffs(text1, text2):
"Prints unified diffs for two texts"
# XXX now obsolete, use unittest built-in support
lines1 = text1.splitlines(keepends=True)
lines2 = text2.splitlines(keepends=True)
diffs = difflib.unified_diff(lines1, lines2, n=0, fromfile='expected',
tofile='got')
print('\n' + ''.join(diffs))
def get_html_title(text):
# Bit of hack, but good enough for test purposes
header, _, _ = text.partition("</head>")
_, _, title = header.partition("<title>")
title, _, _ = title.partition("</title>")
return title
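# For example (illustrative input only):
#   get_html_title('<html><head><title>Pydoc: Index of Modules</title></head><body>...')
#   -> 'Pydoc: Index of Modules'
# The URL-handler tests below compare only this extracted title string.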
class PydocBaseTest(unittest.TestCase):
def _restricted_walk_packages(self, walk_packages, path=None):
"""
A version of pkgutil.walk_packages() that will restrict itself to
a given path.
"""
default_path = path or [os.path.dirname(__file__)]
def wrapper(path=None, prefix='', onerror=None):
return walk_packages(path or default_path, prefix, onerror)
return wrapper
@contextlib.contextmanager
def restrict_walk_packages(self, path=None):
walk_packages = pkgutil.walk_packages
pkgutil.walk_packages = self._restricted_walk_packages(walk_packages,
path)
try:
yield
finally:
pkgutil.walk_packages = walk_packages
class PydocDocTest(unittest.TestCase):
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __locals__ unexpectedly')
@requires_docstrings
def test_html_doc(self):
result, doc_loc = get_pydoc_html(pydoc_mod)
mod_file = inspect.getabsfile(pydoc_mod)
mod_url = urllib.parse.quote(mod_file)
expected_html = expected_html_pattern % (
(mod_url, mod_file, doc_loc) +
expected_html_data_docstrings)
if result != expected_html:
print_diffs(expected_html, result)
self.fail("outputs are not equal, see diff above")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __locals__ unexpectedly')
@requires_docstrings
def test_text_doc(self):
result, doc_loc = get_pydoc_text(pydoc_mod)
expected_text = expected_text_pattern % (
(doc_loc,) +
expected_text_data_docstrings +
(inspect.getabsfile(pydoc_mod),))
if result != expected_text:
print_diffs(expected_text, result)
self.fail("outputs are not equal, see diff above")
def test_text_enum_member_with_value_zero(self):
# Test issue #20654 to ensure enum member with value 0 can be
# displayed. It used to throw KeyError: 'zero'.
import enum
class BinaryInteger(enum.IntEnum):
zero = 0
one = 1
doc = pydoc.render_doc(BinaryInteger)
self.assertIn('<BinaryInteger.zero: 0>', doc)
def test_issue8225(self):
# Test issue8225 to ensure no doc link appears for xml.etree
result, doc_loc = get_pydoc_text(xml.etree)
self.assertEqual(doc_loc, "", "MODULE DOCS incorrectly includes a link")
def test_getpager_with_stdin_none(self):
previous_stdin = sys.stdin
try:
sys.stdin = None
pydoc.getpager() # Shouldn't fail.
finally:
sys.stdin = previous_stdin
def test_non_str_name(self):
# issue14638
# Treat illegal (non-str) name like no name
class A:
__name__ = 42
class B:
pass
adoc = pydoc.render_doc(A())
bdoc = pydoc.render_doc(B())
self.assertEqual(adoc.replace("A", "B"), bdoc)
def test_not_here(self):
missing_module = "test.i_am_not_here"
result = str(run_pydoc(missing_module), 'ascii')
expected = missing_pattern % missing_module
self.assertEqual(expected, result,
"documentation for missing module found")
def test_input_strip(self):
missing_module = " test.i_am_not_here "
result = str(run_pydoc(missing_module), 'ascii')
expected = missing_pattern % missing_module.strip()
self.assertEqual(expected, result)
def test_stripid(self):
# test with strings, other implementations might have different repr()
stripid = pydoc.stripid
# strip the id
self.assertEqual(stripid('<function stripid at 0x88dcee4>'),
'<function stripid>')
self.assertEqual(stripid('<function stripid at 0x01F65390>'),
'<function stripid>')
# nothing to strip, return the same text
self.assertEqual(stripid('42'), '42')
self.assertEqual(stripid("<type 'exceptions.Exception'>"),
"<type 'exceptions.Exception'>")
@unittest.skipIf(sys.flags.optimize >= 2,
'Docstrings are omitted with -O2 and above')
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __locals__ unexpectedly')
@requires_docstrings
def test_help_output_redirect(self):
# issue 940286, if output is set in Helper, then all output from
# Helper.help should be redirected
old_pattern = expected_text_pattern
getpager_old = pydoc.getpager
getpager_new = lambda: (lambda x: x)
self.maxDiff = None
buf = StringIO()
helper = pydoc.Helper(output=buf)
unused, doc_loc = get_pydoc_text(pydoc_mod)
module = "test.pydoc_mod"
help_header = """
Help on module test.pydoc_mod in test:
""".lstrip()
help_header = textwrap.dedent(help_header)
expected_help_pattern = help_header + expected_text_pattern
pydoc.getpager = getpager_new
try:
with captured_output('stdout') as output, \
captured_output('stderr') as err:
helper.help(module)
result = buf.getvalue().strip()
expected_text = expected_help_pattern % (
(doc_loc,) +
expected_text_data_docstrings +
(inspect.getabsfile(pydoc_mod),))
self.assertEqual('', output.getvalue())
self.assertEqual('', err.getvalue())
self.assertEqual(expected_text, result)
finally:
pydoc.getpager = getpager_old
def test_namedtuple_public_underscore(self):
NT = namedtuple('NT', ['abc', 'def'], rename=True)
with captured_stdout() as help_io:
pydoc.help(NT)
helptext = help_io.getvalue()
self.assertIn('_1', helptext)
self.assertIn('_replace', helptext)
self.assertIn('_asdict', helptext)
def test_synopsis(self):
self.addCleanup(unlink, TESTFN)
for encoding in ('ISO-8859-1', 'UTF-8'):
with open(TESTFN, 'w', encoding=encoding) as script:
if encoding != 'UTF-8':
print('#coding: {}'.format(encoding), file=script)
print('"""line 1: h\xe9', file=script)
print('line 2: hi"""', file=script)
synopsis = pydoc.synopsis(TESTFN, {})
self.assertEqual(synopsis, 'line 1: h\xe9')
def test_synopsis_sourceless(self):
expected = os.__doc__.splitlines()[0]
filename = os.__cached__
synopsis = pydoc.synopsis(filename)
self.assertEqual(synopsis, expected)
def test_splitdoc_with_description(self):
example_string = "I Am A Doc\n\n\nHere is my description"
self.assertEqual(pydoc.splitdoc(example_string),
('I Am A Doc', '\nHere is my description'))
def test_is_object_or_method(self):
doc = pydoc.Doc()
# Bound Method
self.assertTrue(pydoc._is_some_method(doc.fail))
# Method Descriptor
self.assertTrue(pydoc._is_some_method(int.__add__))
# String
self.assertFalse(pydoc._is_some_method("I am not a method"))
def test_is_package_when_not_package(self):
with test.support.temp_cwd() as test_dir:
self.assertFalse(pydoc.ispackage(test_dir))
def test_is_package_when_is_package(self):
with test.support.temp_cwd() as test_dir:
init_path = os.path.join(test_dir, '__init__.py')
open(init_path, 'w').close()
self.assertTrue(pydoc.ispackage(test_dir))
os.remove(init_path)
def test_allmethods(self):
# issue 17476: allmethods was no longer returning unbound methods.
# This test is a bit fragile in the face of changes to object and type,
# but I can't think of a better way to do it without duplicating the
# logic of the function under test.
class TestClass(object):
def method_returning_true(self):
return True
# What we expect to get back: everything on object...
expected = dict(vars(object))
# ...plus our unbound method...
expected['method_returning_true'] = TestClass.method_returning_true
# ...but not the non-methods on object.
del expected['__doc__']
del expected['__class__']
# inspect resolves descriptors on type into methods, but vars doesn't,
# so we need to update __subclasshook__.
expected['__subclasshook__'] = TestClass.__subclasshook__
methods = pydoc.allmethods(TestClass)
self.assertDictEqual(methods, expected)
class PydocImportTest(PydocBaseTest):
def setUp(self):
self.test_dir = os.mkdir(TESTFN)
self.addCleanup(rmtree, TESTFN)
def test_badimport(self):
# This tests the fix for issue 5230, where if pydoc found the module
# but the module had an internal import error pydoc would report no doc
# found.
modname = 'testmod_xyzzy'
testpairs = (
('i_am_not_here', 'i_am_not_here'),
('test.i_am_not_here_either', 'test.i_am_not_here_either'),
('test.i_am_not_here.neither_am_i', 'test.i_am_not_here'),
('i_am_not_here.{}'.format(modname), 'i_am_not_here'),
('test.{}'.format(modname), 'test.{}'.format(modname)),
)
sourcefn = os.path.join(TESTFN, modname) + os.extsep + "py"
for importstring, expectedinmsg in testpairs:
with open(sourcefn, 'w') as f:
f.write("import {}\n".format(importstring))
result = run_pydoc(modname, PYTHONPATH=TESTFN).decode("ascii")
expected = badimport_pattern % (modname, expectedinmsg)
self.assertEqual(expected, result)
def test_apropos_with_bad_package(self):
# Issue 7425 - pydoc -k failed when bad package on path
pkgdir = os.path.join(TESTFN, "syntaxerr")
os.mkdir(pkgdir)
badsyntax = os.path.join(pkgdir, "__init__") + os.extsep + "py"
with open(badsyntax, 'w') as f:
f.write("invalid python syntax = $1\n")
with self.restrict_walk_packages(path=[TESTFN]):
with captured_stdout() as out:
with captured_stderr() as err:
pydoc.apropos('xyzzy')
# No result, no error
self.assertEqual(out.getvalue(), '')
self.assertEqual(err.getvalue(), '')
# The package name is still matched
with captured_stdout() as out:
with captured_stderr() as err:
pydoc.apropos('syntaxerr')
self.assertEqual(out.getvalue().strip(), 'syntaxerr')
self.assertEqual(err.getvalue(), '')
def test_apropos_with_unreadable_dir(self):
# Issue 7367 - pydoc -k failed when unreadable dir on path
self.unreadable_dir = os.path.join(TESTFN, "unreadable")
os.mkdir(self.unreadable_dir, 0)
self.addCleanup(os.rmdir, self.unreadable_dir)
# Note, on Windows the directory appears to be still
# readable so this is not really testing the issue there
with self.restrict_walk_packages(path=[TESTFN]):
with captured_stdout() as out:
with captured_stderr() as err:
pydoc.apropos('SOMEKEY')
# No result, no error
self.assertEqual(out.getvalue(), '')
self.assertEqual(err.getvalue(), '')
    @unittest.skip('causes undesirable side-effects (#20128)')
def test_modules(self):
# See Helper.listmodules().
num_header_lines = 2
num_module_lines_min = 5 # Playing it safe.
num_footer_lines = 3
expected = num_header_lines + num_module_lines_min + num_footer_lines
output = StringIO()
helper = pydoc.Helper(output=output)
helper('modules')
result = output.getvalue().strip()
num_lines = len(result.splitlines())
self.assertGreaterEqual(num_lines, expected)
    @unittest.skip('causes undesirable side-effects (#20128)')
def test_modules_search(self):
# See Helper.listmodules().
expected = 'pydoc - '
output = StringIO()
helper = pydoc.Helper(output=output)
with captured_stdout() as help_io:
helper('modules pydoc')
result = help_io.getvalue()
self.assertIn(expected, result)
@unittest.skip('some buildbots are not cooperating (#20128)')
def test_modules_search_builtin(self):
expected = 'gc - '
output = StringIO()
helper = pydoc.Helper(output=output)
with captured_stdout() as help_io:
helper('modules garbage')
result = help_io.getvalue()
self.assertTrue(result.startswith(expected))
def test_importfile(self):
loaded_pydoc = pydoc.importfile(pydoc.__file__)
self.assertIsNot(loaded_pydoc, pydoc)
self.assertEqual(loaded_pydoc.__name__, 'pydoc')
self.assertEqual(loaded_pydoc.__file__, pydoc.__file__)
self.assertEqual(loaded_pydoc.__spec__, pydoc.__spec__)
class TestDescriptions(unittest.TestCase):
def test_module(self):
# Check that pydocfodder module can be described
from test import pydocfodder
doc = pydoc.render_doc(pydocfodder)
self.assertIn("pydocfodder", doc)
def test_class(self):
class C: "New-style class"
c = C()
self.assertEqual(pydoc.describe(C), 'class C')
self.assertEqual(pydoc.describe(c), 'C')
expected = 'C in module %s object' % __name__
self.assertIn(expected, pydoc.render_doc(c))
def test_builtin(self):
for name in ('str', 'str.translate', 'builtins.str',
'builtins.str.translate'):
# test low-level function
self.assertIsNotNone(pydoc.locate(name))
# test high-level function
try:
pydoc.render_doc(name)
except ImportError:
self.fail('finding the doc of {!r} failed'.format(name))
for name in ('notbuiltins', 'strrr', 'strr.translate',
'str.trrrranslate', 'builtins.strrr',
'builtins.str.trrranslate'):
self.assertIsNone(pydoc.locate(name))
self.assertRaises(ImportError, pydoc.render_doc, name)
@staticmethod
def _get_summary_line(o):
text = pydoc.plain(pydoc.render_doc(o))
lines = text.split('\n')
        assert len(lines) >= 3
return lines[2]
# these should include "self"
def test_unbound_python_method(self):
self.assertEqual(self._get_summary_line(textwrap.TextWrapper.wrap),
"wrap(self, text)")
@requires_docstrings
def test_unbound_builtin_method(self):
self.assertEqual(self._get_summary_line(_pickle.Pickler.dump),
"dump(self, obj, /)")
# these no longer include "self"
def test_bound_python_method(self):
t = textwrap.TextWrapper()
self.assertEqual(self._get_summary_line(t.wrap),
"wrap(text) method of textwrap.TextWrapper instance")
@requires_docstrings
def test_bound_builtin_method(self):
s = StringIO()
p = _pickle.Pickler(s)
self.assertEqual(self._get_summary_line(p.dump),
"dump(obj, /) method of _pickle.Pickler instance")
# this should *never* include self!
@requires_docstrings
def test_module_level_callable(self):
self.assertEqual(self._get_summary_line(os.stat),
"stat(path, *, dir_fd=None, follow_symlinks=True)")
@unittest.skipUnless(threading, 'Threading required for this test.')
class PydocServerTest(unittest.TestCase):
"""Tests for pydoc._start_server"""
def test_server(self):
# Minimal test that starts the server, then stops it.
def my_url_handler(url, content_type):
text = 'the URL sent was: (%s, %s)' % (url, content_type)
return text
serverthread = pydoc._start_server(my_url_handler, port=0)
self.assertIn('localhost', serverthread.docserver.address)
starttime = time.time()
timeout = 1 #seconds
while serverthread.serving:
time.sleep(.01)
if serverthread.serving and time.time() - starttime > timeout:
serverthread.stop()
break
self.assertEqual(serverthread.error, None)
class PydocUrlHandlerTest(PydocBaseTest):
"""Tests for pydoc._url_handler"""
def test_content_type_err(self):
f = pydoc._url_handler
self.assertRaises(TypeError, f, 'A', '')
self.assertRaises(TypeError, f, 'B', 'foobar')
def test_url_requests(self):
# Test for the correct title in the html pages returned.
# This tests the different parts of the URL handler without
# getting too picky about the exact html.
requests = [
("", "Pydoc: Index of Modules"),
("get?key=", "Pydoc: Index of Modules"),
("index", "Pydoc: Index of Modules"),
("topics", "Pydoc: Topics"),
("keywords", "Pydoc: Keywords"),
("pydoc", "Pydoc: module pydoc"),
("get?key=pydoc", "Pydoc: module pydoc"),
("search?key=pydoc", "Pydoc: Search Results"),
("topic?key=def", "Pydoc: KEYWORD def"),
("topic?key=STRINGS", "Pydoc: TOPIC STRINGS"),
("foobar", "Pydoc: Error - foobar"),
("getfile?key=foobar", "Pydoc: Error - getfile?key=foobar"),
]
with self.restrict_walk_packages():
for url, title in requests:
text = pydoc._url_handler(url, "text/html")
result = get_html_title(text)
self.assertEqual(result, title, text)
path = string.__file__
title = "Pydoc: getfile " + path
url = "getfile?key=" + path
text = pydoc._url_handler(url, "text/html")
result = get_html_title(text)
self.assertEqual(result, title)
class TestHelper(unittest.TestCase):
def test_keywords(self):
self.assertEqual(sorted(pydoc.Helper.keywords),
sorted(keyword.kwlist))
class PydocWithMetaClasses(unittest.TestCase):
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __locals__ unexpectedly')
def test_DynamicClassAttribute(self):
class Meta(type):
def __getattr__(self, name):
if name == 'ham':
return 'spam'
return super().__getattr__(name)
class DA(metaclass=Meta):
@types.DynamicClassAttribute
def ham(self):
return 'eggs'
expected_text_data_docstrings = tuple('\n | ' + s if s else ''
for s in expected_data_docstrings)
output = StringIO()
helper = pydoc.Helper(output=output)
helper(DA)
expected_text = expected_dynamicattribute_pattern % (
(__name__,) + expected_text_data_docstrings[:2])
result = output.getvalue().strip()
if result != expected_text:
print_diffs(expected_text, result)
self.fail("outputs are not equal, see diff above")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __locals__ unexpectedly')
def test_virtualClassAttributeWithOneMeta(self):
class Meta(type):
def __dir__(cls):
return ['__class__', '__module__', '__name__', 'LIFE']
def __getattr__(self, name):
if name =='LIFE':
return 42
                return super().__getattr__(name)
class Class(metaclass=Meta):
pass
output = StringIO()
helper = pydoc.Helper(output=output)
helper(Class)
expected_text = expected_virtualattribute_pattern1 % __name__
result = output.getvalue().strip()
if result != expected_text:
print_diffs(expected_text, result)
self.fail("outputs are not equal, see diff above")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __locals__ unexpectedly')
def test_virtualClassAttributeWithTwoMeta(self):
class Meta1(type):
def __dir__(cls):
return ['__class__', '__module__', '__name__', 'one']
def __getattr__(self, name):
if name =='one':
return 1
return super().__getattr__(name)
class Meta2(type):
def __dir__(cls):
return ['__class__', '__module__', '__name__', 'two']
def __getattr__(self, name):
if name =='two':
return 2
return super().__getattr__(name)
class Meta3(Meta1, Meta2):
def __dir__(cls):
return list(sorted(set(
['__class__', '__module__', '__name__', 'three'] +
Meta1.__dir__(cls) + Meta2.__dir__(cls))))
def __getattr__(self, name):
if name =='three':
return 3
return super().__getattr__(name)
class Class1(metaclass=Meta1):
pass
class Class2(Class1, metaclass=Meta3):
pass
fail1 = fail2 = False
output = StringIO()
helper = pydoc.Helper(output=output)
helper(Class1)
expected_text1 = expected_virtualattribute_pattern2 % __name__
result1 = output.getvalue().strip()
if result1 != expected_text1:
print_diffs(expected_text1, result1)
fail1 = True
output = StringIO()
helper = pydoc.Helper(output=output)
helper(Class2)
expected_text2 = expected_virtualattribute_pattern3 % __name__
result2 = output.getvalue().strip()
if result2 != expected_text2:
print_diffs(expected_text2, result2)
fail2 = True
if fail1 or fail2:
self.fail("outputs are not equal, see diff above")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'trace function introduces __locals__ unexpectedly')
def test_buggy_dir(self):
class M(type):
def __dir__(cls):
return ['__class__', '__name__', 'missing', 'here']
class C(metaclass=M):
here = 'present!'
output = StringIO()
helper = pydoc.Helper(output=output)
helper(C)
expected_text = expected_missingattribute_pattern % __name__
result = output.getvalue().strip()
if result != expected_text:
print_diffs(expected_text, result)
self.fail("outputs are not equal, see diff above")
@reap_threads
def test_main():
try:
test.support.run_unittest(PydocDocTest,
PydocImportTest,
TestDescriptions,
PydocServerTest,
PydocUrlHandlerTest,
TestHelper,
PydocWithMetaClasses,
)
finally:
reap_children()
if __name__ == "__main__":
test_main()
|
lgpl-3.0
|
zenfone-legacy/android_kernel_asus_T00F
|
tools/perf/scripts/python/failed-syscalls-by-pid.py
|
11180
|
2058
|
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
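# Note: syscalls is an autodict (imported from Util above), so intermediate dictionary levels
# are created on first access; the TypeError branch above initializes the counter the first
# time a given (comm, pid, id, ret) combination is seen.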
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
|
gpl-2.0
|
tareqalayan/ansible
|
lib/ansible/modules/cloud/vmware/vmware_portgroup_facts.py
|
10
|
4939
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_portgroup_facts
short_description: Gathers facts about an ESXi host's portgroup configuration
description:
- This module can be used to gather facts about an ESXi host's portgroup configuration when ESXi hostname or Cluster name is given.
version_added: '2.6'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
    - Facts will be returned for all host systems belonging to this cluster name.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname to gather facts from.
- If C(cluster_name) is not given, this parameter is required.
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather portgroup facts about all ESXi Host in given Cluster
vmware_portgroup_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
- name: Gather portgroup facts about ESXi Host system
vmware_portgroup_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
'''
RETURN = r'''
hosts_portgroup_facts:
description: metadata about host's portgroup configuration
returned: on success
type: dict
sample: {
"10.76.33.208": [
{
"forged_transmits": false,
"mac_changes": false,
"name": "VM Network",
"promiscuous_mode": false,
"vlan_id": 0,
"vswitch_name": "vSwitch0"
},
{
"forged_transmits": false,
"mac_changes": false,
"name": "Management Network",
"promiscuous_mode": false,
"vlan_id": 0,
"vswitch_name": "vSwitch0"
},
{
"forged_transmits": false,
"mac_changes": false,
"name": "pg0001",
"promiscuous_mode": false,
"vlan_id": 0,
"vswitch_name": "vSwitch001"
},
]
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class PortgroupFactsManager(PyVmomi):
def __init__(self, module):
super(PortgroupFactsManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
@staticmethod
def normalize_pg_info(portgroup_obj):
pg_info_dict = dict()
pg_info_dict['name'] = portgroup_obj.spec.name
vlan_id = 'N/A'
if portgroup_obj.spec.vlanId:
vlan_id = portgroup_obj.spec.vlanId
pg_info_dict['vlan_id'] = vlan_id
switch_name = 'N/A'
if portgroup_obj.spec.vswitchName:
switch_name = portgroup_obj.spec.vswitchName
pg_info_dict['vswitch_name'] = switch_name
# Network Policy related facts
pg_info_dict['promiscuous_mode'] = bool(portgroup_obj.spec.policy.security.allowPromiscuous)
pg_info_dict['mac_changes'] = bool(portgroup_obj.spec.policy.security.macChanges)
pg_info_dict['forged_transmits'] = bool(portgroup_obj.spec.policy.security.forgedTransmits)
return pg_info_dict
def gather_host_portgroup_facts(self):
hosts_pg_facts = dict()
for host in self.hosts:
pgs = host.config.network.portgroup
hosts_pg_facts[host.name] = []
for pg in pgs:
hosts_pg_facts[host.name].append(self.normalize_pg_info(portgroup_obj=pg))
return hosts_pg_facts
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='str', required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
]
)
host_pg_mgr = PortgroupFactsManager(module)
module.exit_json(changed=False, hosts_portgroup_facts=host_pg_mgr.gather_host_portgroup_facts())
if __name__ == "__main__":
main()
|
gpl-3.0
|
awslabs/sockeye
|
sockeye/arguments.py
|
1
|
78075
|
# Copyright 2017--2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Defines commandline arguments for the main CLIs with reasonable defaults.
"""
import argparse
import os
import sys
import types
from typing import Any, Callable, Dict, List, Tuple, Optional
import yaml
from . import constants as C
from . import data_io
class ConfigArgumentParser(argparse.ArgumentParser):
"""
Extension of argparse.ArgumentParser supporting config files.
The option --config is added automatically and expects a YAML serialized
dictionary, similar to the return value of parse_args(). Command line
parameters have precedence over config file values. Usage should be
transparent, just substitute argparse.ArgumentParser with this class.
Extended from
https://stackoverflow.com/questions/28579661/getting-required-option-from-namespace-in-python
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.argument_definitions = {} # type: Dict[Tuple, Dict]
self.argument_actions = [] # type: List[Any]
self._overwrite_add_argument(self)
self.add_argument("--config", help="Path to CLI arguments in yaml format "
"(as saved in Sockeye model directories as 'args.yaml'). "
"Commandline arguments have precedence over values in this file.", type=str)
# Note: not FileType so that we can get the path here
def _register_argument(self, _action, *args, **kwargs):
self.argument_definitions[args] = kwargs
self.argument_actions.append(_action)
def _overwrite_add_argument(self, original_object):
def _new_add_argument(this_self, *args, **kwargs):
action = this_self.original_add_argument(*args, **kwargs)
this_self.config_container._register_argument(action, *args, **kwargs)
original_object.original_add_argument = original_object.add_argument
original_object.config_container = self
original_object.add_argument = types.MethodType(_new_add_argument, original_object)
return original_object
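    # Note: add_argument is wrapped (here and for argument groups created via
    # add_argument_group below) so that every definition is recorded in
    # argument_definitions/argument_actions; parse_args later uses that record to drop the
    # 'required' flag from any option whose value is already supplied by the --config file.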
def add_argument_group(self, *args, **kwargs):
group = super().add_argument_group(*args, **kwargs)
return self._overwrite_add_argument(group)
def parse_args(self, args=None, namespace=None) -> argparse.Namespace: # type: ignore
# Mini argument parser to find the config file
config_parser = argparse.ArgumentParser(add_help=False)
config_parser.add_argument("--config", type=regular_file())
config_args, _ = config_parser.parse_known_args(args=args)
initial_args = argparse.Namespace()
if config_args.config:
initial_args = load_args(config_args.config)
# Remove the 'required' flag from options loaded from config file
for action in self.argument_actions:
if action.dest in initial_args:
action.required = False
return super().parse_args(args=args, namespace=initial_args)
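# A minimal usage sketch (the file name and values here are hypothetical, not part of Sockeye):
# given a YAML file 'my_args.yaml' containing
#     batch_size: 2048
#     output: model_dir
# the config pre-populates those destinations while the command line still wins on conflicts:
#     parser = ConfigArgumentParser(description="train")
#     parser.add_argument("--batch-size", type=int, default=4096)
#     parser.add_argument("--output", required=True)
#     args = parser.parse_args(["--config", "my_args.yaml", "--batch-size", "1024"])
#     # args.batch_size == 1024 (from the CLI), args.output == 'model_dir' (from the config;
#     # 'required' was relaxed because the value came from the file)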
class StoreDeprecatedAction(argparse.Action):
def __init__(self, option_strings, dest, deprecated_dest, nargs=None, **kwargs):
super(StoreDeprecatedAction, self).__init__(option_strings, dest, **kwargs)
self.deprecated_dest = deprecated_dest
def __call__(self, parser, namespace, value, option_string=None):
setattr(namespace, self.dest, value)
setattr(namespace, self.deprecated_dest, value)
def save_args(args: argparse.Namespace, fname: str):
with open(fname, 'w') as out:
yaml.safe_dump(args.__dict__, out, default_flow_style=False)
def load_args(fname: str) -> argparse.Namespace:
with open(fname, 'r') as inp:
return argparse.Namespace(**yaml.safe_load(inp))
class Removed(argparse.Action):
"""
When this argument is specified, raise an error with the argument's help
message. This is used to notify users when arguments are removed.
"""
def __call__(self, parser, namespace, values, option_string=None):
raise RuntimeError(self.help)
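# For example, the "--no-bucket-scaling" and "--round-batch-sizes-to-multiple-of" options below
# use action=Removed: specifying them on the command line raises a RuntimeError carrying the
# explanatory help message instead of the option being silently ignored.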
def regular_file() -> Callable:
"""
Returns a method that can be used in argument parsing to check the argument is a regular file or a symbolic link,
but not, e.g., a process substitution.
:return: A method that can be used as a type in argparse.
"""
def check_regular_file(value_to_check):
value_to_check = str(value_to_check)
if not os.path.isfile(value_to_check):
raise argparse.ArgumentTypeError("must exist and be a regular file.")
return value_to_check
return check_regular_file
def regular_folder() -> Callable:
"""
Returns a method that can be used in argument parsing to check the argument is a directory.
:return: A method that can be used as a type in argparse.
"""
def check_regular_directory(value_to_check):
value_to_check = str(value_to_check)
if not os.path.isdir(value_to_check):
raise argparse.ArgumentTypeError("must be a directory.")
return value_to_check
return check_regular_directory
def int_greater_or_equal(threshold: int) -> Callable:
"""
Returns a method that can be used in argument parsing to check that the int argument is greater or equal to `threshold`.
:param threshold: The threshold that we assume the cli argument value is greater or equal to.
:return: A method that can be used as a type in argparse.
"""
def check_greater_equal(value: str):
value_to_check = int(value)
if value_to_check < threshold:
raise argparse.ArgumentTypeError("must be greater or equal to %d." % threshold)
return value_to_check
return check_greater_equal
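# Illustrative check: int_greater_or_equal(1)("8") returns 8, while int_greater_or_equal(1)("0")
# raises argparse.ArgumentTypeError; options such as --bucket-width and --batch-size below use
# this validator.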
def float_greater_or_equal(threshold: float) -> Callable:
"""
Returns a method that can be used in argument parsing to check that the float argument is greater or equal to `threshold`.
:param threshold: The threshold that we assume the cli argument value is greater or equal to.
:return: A method that can be used as a type in argparse.
"""
def check_greater_equal(value: str):
value_to_check = float(value)
if value_to_check < threshold:
raise argparse.ArgumentTypeError("must be greater or equal to %f." % threshold)
return value_to_check
return check_greater_equal
def bool_str() -> Callable:
"""
Returns a method that can be used in argument parsing to check that the argument is a valid representation of
a boolean value.
:return: A method that can be used as a type in argparse.
"""
def parse(value: str):
lower_value = value.lower()
if lower_value in ["true", "yes", "1"]:
return True
elif lower_value in ["false", "no", "0"]:
return False
else:
raise argparse.ArgumentTypeError(
"Invalid value for bool argument. Use true/false, yes/no or 1/0.")
return parse
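# For example: bool_str()("Yes") -> True, bool_str()("0") -> False, and any other string
# (e.g. "maybe") raises argparse.ArgumentTypeError.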
def simple_dict() -> Callable:
"""
A simple dictionary format that does not require spaces or quoting.
Supported types: bool, int, float
:return: A method that can be used as a type in argparse.
"""
def parse(dict_str: str):
def _parse(value: str):
if value.lower() == "true":
return True
if value.lower() == "false":
return False
if "." in value or "e" in value:
return float(value)
return int(value)
_dict = dict()
try:
for entry in dict_str.split(","):
key, value = entry.split(":")
_dict[key] = _parse(value)
except ValueError:
raise argparse.ArgumentTypeError("Specify argument dictionary as key1:value1,key2:value2,..."
" Supported types: bool, int, float.")
return _dict
return parse
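# Illustrative example (keys and values are made up):
#   simple_dict()("lr:0.0003,lazy_update:true,epochs:10")
#   -> {'lr': 0.0003, 'lazy_update': True, 'epochs': 10}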
def multiple_values(num_values: int = 0,
greater_or_equal: Optional[float] = None,
data_type: Callable = int) -> Callable:
"""
Returns a method to be used in argument parsing to parse a string of the form "<val>:<val>[:<val>...]" into
a tuple of values of type data_type.
:param num_values: Optional number of ints required.
:param greater_or_equal: Optional constraint that all values should be greater or equal to this value.
:param data_type: Type of values. Default: int.
:return: Method for parsing.
"""
def parse(value_to_check):
if ':' in value_to_check:
expected_num_separators = num_values - 1 if num_values else 0
if expected_num_separators > 0 and (value_to_check.count(':') != expected_num_separators):
raise argparse.ArgumentTypeError("Expected either a single value or %d values separated by %s" %
(num_values, C.ARG_SEPARATOR))
values = tuple(map(data_type, value_to_check.split(C.ARG_SEPARATOR, num_values - 1)))
else:
values = tuple([data_type(value_to_check)] * num_values)
if greater_or_equal is not None:
if any((value < greater_or_equal for value in values)):
raise argparse.ArgumentTypeError("Must provide value greater or equal to %d" % greater_or_equal)
return values
return parse
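# Illustrative example, assuming C.ARG_SEPARATOR is ':' (as the "x:x" help texts below suggest):
#   multiple_values(num_values=2)("512:1024") -> (512, 1024)
#   multiple_values(num_values=2)("512")      -> (512, 512)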
def file_or_stdin() -> Callable:
"""
Returns a file descriptor from stdin or opening a file from a given path.
"""
def parse(path):
if path is None or path == "-":
return sys.stdin
else:
return data_io.smart_open(path)
return parse
def add_average_args(params):
average_params = params.add_argument_group("Averaging")
average_params.add_argument(
"inputs",
metavar="INPUT",
type=str,
nargs="+",
help="either a single model directory (automatic checkpoint selection) "
"or multiple .params files (manual checkpoint selection)")
average_params.add_argument(
"--metric",
help="Name of the metric to choose n-best checkpoints from. Default: %(default)s.",
default=C.PERPLEXITY,
choices=C.METRICS)
average_params.add_argument(
"-n",
type=int,
default=4,
help="number of checkpoints to find. Default: %(default)s.")
average_params.add_argument(
"--output", "-o", required=True, type=str, help="File to write averaged parameters to.")
average_params.add_argument(
"--strategy",
choices=C.AVERAGE_CHOICES,
default=C.AVERAGE_BEST,
help="selection method. Default: %(default)s.")
def add_extract_args(params):
extract_params = params.add_argument_group("Extracting")
extract_params.add_argument("input",
metavar="INPUT",
type=str,
help="Either a model directory (using its %s) or a specific params.x file." % C.PARAMS_BEST_NAME)
extract_params.add_argument('--names', '-n',
nargs='*',
default=[],
help='Names of parameters to be extracted.')
extract_params.add_argument('--list-all', '-l',
action='store_true',
help='List names of all available parameters.')
extract_params.add_argument('--output', '-o',
type=str,
help="File to write extracted parameters to (in .npz format).")
def add_rerank_args(params):
rerank_params = params.add_argument_group("Reranking")
rerank_params.add_argument("--reference", "-r",
type=str,
required=True,
help="File where target reference translations are stored.")
rerank_params.add_argument("--hypotheses", "-hy",
type=str,
required=True,
help="File with nbest translations, one nbest list per line,"
"in JSON format as returned by sockeye.translate with --nbest-size x.")
rerank_params.add_argument("--metric", "-m",
type=str,
required=False,
default=C.RERANK_BLEU,
choices=C.RERANK_METRICS,
help="Sentence-level metric used to compare each nbest translation to the reference."
"Default: %(default)s.")
rerank_params.add_argument("--output", "-o", default=None, help="File to write output to. Default: STDOUT.")
rerank_params.add_argument("--output-best",
action="store_true",
help="Output only the best hypothesis from each nbest list.")
rerank_params.add_argument("--output-reference-instead-of-blank",
action="store_true",
help="When outputting only the best hypothesis (--output-best) and the best hypothesis "
"is a blank line, output the reference instead.")
rerank_params.add_argument("--return-score",
action="store_true",
help="Returns the reranking scores as scores in output JSON objects.")
def add_lexicon_args(params):
lexicon_params = params.add_argument_group("Model & Top-k")
lexicon_params.add_argument("--model", "-m", required=True,
help="Model directory containing source and target vocabularies.")
lexicon_params.add_argument("-k", type=int, default=200,
help="Number of target translations to keep per source. Default: %(default)s.")
def add_lexicon_create_args(params):
lexicon_params = params.add_argument_group("I/O")
lexicon_params.add_argument("--input", "-i", required=True,
help="Probabilistic lexicon (fast_align format) to build top-k lexicon from.")
lexicon_params.add_argument("--output", "-o", required=True, help="File name to write top-k lexicon to.")
def add_lexicon_inspect_args(params):
lexicon_params = params.add_argument_group("Lexicon to inspect")
lexicon_params.add_argument("--lexicon", "-l", required=True, help="File name of top-k lexicon to inspect.")
def add_logging_args(params):
logging_params = params.add_argument_group("Logging")
logging_params.add_argument('--quiet', '-q',
default=False,
action="store_true",
help='Suppress console logging.')
logging_params.add_argument('--quiet-secondary-workers', '-qsw',
default=False,
action="store_true",
help='Suppress console logging for secondary workers when training with Horovod/MPI.')
logging_params.add_argument('--no-logfile',
default=False,
action="store_true",
help='Suppress file logging')
log_levels = ['INFO', 'DEBUG', 'ERROR']
logging_params.add_argument('--loglevel',
default='INFO',
choices=log_levels,
help='Log level. Default: %(default)s.')
logging_params.add_argument('--loglevel-secondary-workers',
default='INFO',
choices=log_levels,
help='Console log level for secondary workers. Default: %(default)s.')
def add_training_data_args(params, required=False):
params.add_argument(C.TRAINING_ARG_SOURCE, '-s',
required=required,
type=regular_file(),
help='Source side of parallel training data.')
params.add_argument('--source-factors', '-sf',
required=False,
nargs='+',
type=regular_file(),
default=[],
help='File(s) containing additional token-parallel source-side factors. Default: %(default)s.')
params.add_argument('--source-factors-use-source-vocab',
required=False,
nargs='+',
type=bool_str(),
default=[],
help='List of bools signaling whether to use the source vocabulary for the source factors. '
'If empty (default) each factor has its own vocabulary.')
params.add_argument('--target-factors', '-tf',
required=False,
nargs='+',
type=regular_file(),
default=[],
help='File(s) containing additional token-parallel target-side factors. Default: %(default)s.')
params.add_argument('--target-factors-use-target-vocab',
required=False,
nargs='+',
type=bool_str(),
default=[],
help='List of bools signaling whether to use the target vocabulary for the target factors. '
'If empty (default) each factor has its own vocabulary.')
params.add_argument(C.TRAINING_ARG_TARGET, '-t',
required=required,
type=regular_file(),
help='Target side of parallel training data.')
def add_validation_data_params(params):
params.add_argument('--validation-source', '-vs',
required=True,
type=regular_file(),
help='Source side of validation data.')
params.add_argument('--validation-source-factors', '-vsf',
required=False,
nargs='+',
type=regular_file(),
default=[],
help='File(s) containing additional token-parallel validation source side factors. '
'Default: %(default)s.')
params.add_argument('--validation-target', '-vt',
required=True,
type=regular_file(),
help='Target side of validation data.')
params.add_argument('--validation-target-factors', '-vtf',
required=False,
nargs='+',
type=regular_file(),
default=[],
help='File(s) containing additional token-parallel validation target side factors. '
'Default: %(default)s.')
def add_prepared_data_args(params):
params.add_argument(C.TRAINING_ARG_PREPARED_DATA, '-d',
type=regular_folder(),
help='Prepared training data directory created through python -m sockeye.prepare_data.')
def add_monitoring_args(params):
params.add_argument('--monitor-pattern',
default=None,
type=str,
help="Pattern to match outputs/weights/gradients to monitor. '.*' monitors everything. "
"Default: %(default)s.")
params.add_argument('--monitor-stat-func',
default=C.STAT_FUNC_DEFAULT,
choices=list(C.MONITOR_STAT_FUNCS.keys()),
help="Statistics function to run on monitored outputs/weights/gradients. "
"Default: %(default)s.")
def add_training_output_args(params):
params.add_argument('--output', '-o',
required=True,
help='Folder where model & training results are written to.')
params.add_argument('--overwrite-output',
action='store_true',
help='Delete all contents of the model directory if it already exists.')
def add_training_io_args(params):
params = params.add_argument_group("Data & I/O")
# Unfortunately we must set --source/--target to not required as we either accept these parameters
# or --prepared-data which can not easily be encoded in argparse.
add_training_data_args(params, required=False)
add_prepared_data_args(params)
add_validation_data_params(params)
add_bucketing_args(params)
add_vocab_args(params)
add_training_output_args(params)
add_monitoring_args(params)
def add_bucketing_args(params):
params.add_argument('--no-bucketing',
action='store_true',
help='Disable bucketing: always unroll the graph to --max-seq-len. Default: %(default)s.')
params.add_argument('--bucket-width',
type=int_greater_or_equal(1),
default=8,
help='Width of buckets in tokens. Default: %(default)s.')
params.add_argument('--bucket-scaling',
action='store_true',
help='Scale source/target buckets based on length ratio to reduce padding. Default: '
'%(default)s.')
params.add_argument('--no-bucket-scaling',
action=Removed,
nargs=0,
help='Removed: The argument "--no-bucket-scaling" has been removed because this is now the '
'default behavior. To activate bucket scaling, use the argument "--bucket-scaling".')
params.add_argument(C.TRAINING_ARG_MAX_SEQ_LEN,
type=multiple_values(num_values=2, greater_or_equal=1),
default=(95, 95),
help='Maximum sequence length in tokens, not counting BOS/EOS tokens (internal max sequence '
'length is X+1). Use "x:x" to specify separate values for src&tgt. Default: %(default)s.')
def add_prepare_data_cli_args(params):
add_training_data_args(params, required=True)
add_vocab_args(params)
add_bucketing_args(params)
params.add_argument('--num-samples-per-shard',
type=int_greater_or_equal(1),
default=10000000,
help='The approximate number of samples per shard. Default: %(default)s.')
params.add_argument('--min-num-shards',
default=1,
type=int_greater_or_equal(1),
help='The minimum number of shards to use, even if they would not '
'reach the desired number of samples per shard. Default: %(default)s.')
params.add_argument('--seed',
type=int,
default=13,
help='Random seed used that makes shard assignments deterministic. Default: %(default)s.')
params.add_argument('--output', '-o',
required=True,
help='Folder where the prepared and possibly sharded data is written to.')
params.add_argument('--max-processes',
type=int_greater_or_equal(1),
default=1,
help='Process the shards in parallel using max-processes processes.')
add_logging_args(params)
def add_device_args(params):
device_params = params.add_argument_group("Device parameters")
device_params.add_argument('--device-ids', default=[-1],
help='List or number of GPUs ids to use. Default: %(default)s. '
'Use negative numbers to automatically acquire a certain number of GPUs, e.g. -5 '
'will find 5 free GPUs. '
'Use positive numbers to acquire a specific GPU id on this host. '
'(Note that automatic acquisition of GPUs assumes that all GPU processes on '
'this host are using automatic sockeye GPU acquisition).',
nargs='+', type=int)
device_params.add_argument('--use-cpu',
action='store_true',
help='Use CPU device instead of GPU.')
device_params.add_argument('--omp-num-threads',
type=int,
help='Set the OMP_NUM_THREADS environment variable (CPU threads). Recommended: set to '
'number of GPUs for training, number of physical CPU cores for inference. Default: '
'%(default)s.')
device_params.add_argument('--env',
help='List of environment variables to be set before importing MXNet. Separated by ",", '
'e.g. --env=OMP_NUM_THREADS=4,MXNET_GPU_WORKER_NTHREADS=3 etc.')
device_params.add_argument('--disable-device-locking',
action='store_true',
help='Just use the specified device ids without locking.')
device_params.add_argument('--lock-dir',
default="/tmp",
help='When acquiring a GPU we do file based locking so that only one Sockeye process '
                                    'can run on a GPU. This is the folder in which we store the file '
'locks. For locking to work correctly it is assumed all processes use the same '
                                    'lock directory. The only requirement for the directory is file '
'write permissions.')
def add_vocab_args(params):
params.add_argument('--source-vocab',
required=False,
default=None,
help='Existing source vocabulary (JSON).')
params.add_argument('--target-vocab',
required=False,
default=None,
help='Existing target vocabulary (JSON).')
params.add_argument('--source-factor-vocabs',
required=False,
nargs='+',
type=regular_file(),
default=[],
help='Existing source factor vocabulary (-ies) (JSON).')
params.add_argument('--target-factor-vocabs',
required=False,
nargs='+',
type=regular_file(),
default=[],
help='Existing target factor vocabulary (-ies) (JSON).')
params.add_argument(C.VOCAB_ARG_SHARED_VOCAB,
action='store_true',
default=False,
help='Share source and target vocabulary. '
'Will be automatically turned on when using weight tying. Default: %(default)s.')
params.add_argument('--num-words',
type=multiple_values(num_values=2, greater_or_equal=0),
default=(0, 0),
help='Maximum vocabulary size. Use "x:x" to specify separate values for src&tgt. '
                             'A value of 0 indicates that the vocabulary is unrestricted and determined from the data by '
                             'creating an entry for all words that occur at least --word-min-count times. '
'Default: %(default)s.')
params.add_argument('--word-min-count',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(1, 1),
help='Minimum frequency of words to be included in vocabularies. Default: %(default)s.')
params.add_argument('--pad-vocab-to-multiple-of',
type=int,
default=None,
help='Pad vocabulary to a multiple of this integer. Default: %(default)s.')
def add_model_parameters(params):
model_params = params.add_argument_group("ModelConfig")
model_params.add_argument('--params', '-p',
type=str,
default=None,
help='Initialize model parameters from file. Overrides random initializations.')
model_params.add_argument('--allow-missing-params',
action="store_true",
default=False,
help="Allow missing parameters when initializing model parameters from file. "
"Default: %(default)s.")
model_params.add_argument('--ignore-extra-params',
action="store_true",
default=False,
help="Allow extra parameters when initializing model parameters from file. "
"Default: %(default)s.")
model_params.add_argument('--encoder',
choices=C.ENCODERS,
default=C.TRANSFORMER_TYPE,
help="Type of encoder. Default: %(default)s.")
model_params.add_argument('--decoder',
choices=C.DECODERS,
default=C.TRANSFORMER_TYPE,
help="Type of decoder. Default: %(default)s. "
"'ssru_transformer' uses Simpler Simple Recurrent Units (Kim et al, 2019) "
"as replacement for self-attention layers.")
model_params.add_argument('--num-layers',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(6, 6),
help='Number of layers for encoder & decoder. '
'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
# transformer arguments
model_params.add_argument('--transformer-model-size',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(512, 512),
help='Number of hidden units in transformer layers. '
'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
model_params.add_argument('--transformer-attention-heads',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(8, 8),
help='Number of heads for all self-attention when using transformer layers. '
'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
model_params.add_argument('--transformer-feed-forward-num-hidden',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(2048, 2048),
                              help='Number of hidden units in transformer feed forward layers. '
'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
model_params.add_argument('--transformer-feed-forward-use-glu',
action='store_true',
default=False,
                              help='Use Gated Linear Units in transformer feed forward networks (Dauphin et al. 2016, '
'arxiv.org/abs/1612.08083; Shazeer 2020, arxiv.org/abs/2002.05202). Default: '
'%(default)s.')
model_params.add_argument('--transformer-activation-type',
type=multiple_values(num_values=2, greater_or_equal=None, data_type=str),
default=(C.RELU, C.RELU),
help='Type of activation to use for each feed forward layer. Use "x:x" to specify '
'different values for encoder & decoder. Supported: {}. Default: '
'%(default)s.'.format(' '.join(C.TRANSFORMER_ACTIVATION_TYPES)))
model_params.add_argument('--transformer-positional-embedding-type',
choices=C.POSITIONAL_EMBEDDING_TYPES,
default=C.FIXED_POSITIONAL_EMBEDDING,
help='The type of positional embedding. Default: %(default)s.')
model_params.add_argument('--transformer-preprocess',
type=multiple_values(num_values=2, greater_or_equal=None, data_type=str),
default=('n', 'n'),
help='Transformer preprocess sequence for encoder and decoder. Supports three types of '
'operations: d=dropout, r=residual connection, n=layer normalization. You can '
'combine in any order, for example: "ndr". '
'Leave empty to not use any of these operations. '
'You can specify separate sequences for encoder and decoder by separating with ":" '
'For example: n:drn '
'Default: %(default)s.')
model_params.add_argument('--transformer-postprocess',
type=multiple_values(num_values=2, greater_or_equal=None, data_type=str),
default=('dr', 'dr'),
help='Transformer postprocess sequence for encoder and decoder. Supports three types of '
'operations: d=dropout, r=residual connection, n=layer normalization. You can '
'combine in any order, for example: "ndr". '
'Leave empty to not use any of these operations. '
'You can specify separate sequences for encoder and decoder by separating with ":" '
'For example: n:drn '
'Default: %(default)s.')
model_params.add_argument('--lhuc',
nargs="+",
default=None,
choices=C.LHUC_CHOICES,
metavar="COMPONENT",
help="Use LHUC (Vilar 2018). Include an amplitude parameter to hidden units for"
" domain adaptation. Needs a pre-trained model. Valid values: {values}."
" Default: %(default)s.".format(
values=", ".join(C.LHUC_CHOICES)))
# embedding arguments
model_params.add_argument('--num-embed',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(None, None),
help='Embedding size for source and target tokens. '
'Use "x:x" to specify separate values for src&tgt. Default: %d.' % C.DEFAULT_NUM_EMBED)
model_params.add_argument('--source-factors-num-embed',
type=int,
nargs='+',
default=[],
help='Embedding size for additional source factors. '
'You must provide as many dimensions as '
'(validation) source factor files. Default: %(default)s.')
model_params.add_argument('--target-factors-num-embed',
type=int,
nargs='+',
default=[],
help='Embedding size for additional target factors. '
'You must provide as many dimensions as '
'(validation) target factor files. Default: %(default)s.')
model_params.add_argument('--source-factors-combine', '-sfc',
choices=C.FACTORS_COMBINE_CHOICES,
default=[C.FACTORS_COMBINE_SUM],
nargs='+',
help='How to combine source factors. Can be either one value which will be applied to '
'all source factors, or a list of values. Default: %(default)s.')
model_params.add_argument('--target-factors-combine', '-tfc',
choices=C.FACTORS_COMBINE_CHOICES,
default=[C.FACTORS_COMBINE_SUM],
nargs='+',
help='How to combine target factors. Can be either one value which will be applied to '
'all target factors, or a list of values. Default: %(default)s.')
model_params.add_argument('--source-factors-share-embedding',
type=bool_str(),
nargs='+',
default=[False],
help='Share the embeddings with the source language. '
'Can be either one value which will be applied '
'to all source factors, or a list of values. Default: %(default)s.')
model_params.add_argument('--target-factors-share-embedding',
type=bool_str(),
nargs='+',
default=[False],
help='Share the embeddings with the target language. '
'Can be either one value which will be applied '
'to all target factors, or a list of values. Default: %(default)s.')
model_params.add_argument('--weight-tying-type',
default=C.WEIGHT_TYING_SRC_TRG_SOFTMAX,
choices=C.WEIGHT_TYING_TYPES,
help='The type of weight tying. source embeddings=src, target embeddings=trg, '
'target softmax weight matrix=softmax. Default: %(default)s.')
model_params.add_argument('--dtype', default=C.DTYPE_FP32, choices=[C.DTYPE_FP32, C.DTYPE_FP16],
help="Data type.")
model_params.add_argument('--amp', action='store_true', help='Use MXNet\'s automatic mixed precision (AMP).')
model_params.add_argument('--amp-scale-interval', type=int, default=2000,
help='Attempt to increase loss scale after this many updates without overflow. '
'Default: %(default)s.')
def add_batch_args(params, default_batch_size=4096, default_batch_type=C.BATCH_TYPE_WORD):
params.add_argument('--batch-size', '-b',
type=int_greater_or_equal(1),
default=default_batch_size,
help='Mini-batch size per process. Depending on --batch-type, this either refers to words or '
'sentences. The effective batch size (update size) is num_processes * batch_size * '
'update_interval. Default: %(default)s.')
params.add_argument('--batch-type',
type=str,
default=default_batch_type,
choices=C.BATCH_TYPES,
help='sentence: each batch contains exactly X sentences. '
'word: each batch contains approximately X target words. '
'max-word: each batch contains at most X target words. '
'Default: %(default)s.')
params.add_argument('--batch-sentences-multiple-of',
type=int,
default=8,
help='For word and max-word batching, guarantee that each batch contains a multiple of X '
'sentences. For word batching, round up or down to nearest multiple. For max-word '
'batching, always round down. Default: %(default)s.')
params.add_argument('--round-batch-sizes-to-multiple-of',
action=Removed,
help='Removed: The argument "--round-batch-sizes-to-multiple-of" has been renamed to '
'"--batch-sentences-multiple-of".')
params.add_argument('--update-interval',
type=int,
default=1,
help='Accumulate gradients over X batches for each model update. Set a value higher than 1 to '
'simulate large batches (ex: batch_size 2560 with update_interval 4 gives effective batch '
'size 10240). Default: %(default)s.')
def add_hybridization_arg(params):
params.add_argument('--no-hybridization',
action='store_true',
help='Turn off hybridization. Hybridization builds a static computation graph and computations will therefore be faster. '
'The downside is that one cannot set breakpoints to inspect intermediate results. Default: %(default)s.')
def add_training_args(params):
train_params = params.add_argument_group("Training parameters")
add_batch_args(train_params)
train_params.add_argument('--loss',
default=C.CROSS_ENTROPY_WITOUT_SOFTMAX_OUTPUT,
choices=[C.CROSS_ENTROPY, C.CROSS_ENTROPY_WITOUT_SOFTMAX_OUTPUT],
help='Loss to optimize. Default: %(default)s.')
train_params.add_argument('--label-smoothing',
default=0.1,
type=float,
help='Smoothing constant for label smoothing. Default: %(default)s.')
train_params.add_argument('--length-task',
type=str,
default=None,
choices=[C.LENGTH_TASK_RATIO, C.LENGTH_TASK_LENGTH],
help='If specified, adds an auxiliary task during training to predict source/target length ratios '
'(mean squared error loss) or absolute lengths (Poisson loss). Default: %(default)s.')
train_params.add_argument('--length-task-weight',
type=float_greater_or_equal(0.0),
default=1.0,
help='The weight of the auxiliary --length-task loss. Default %(default)s.')
train_params.add_argument('--length-task-layers',
type=int_greater_or_equal(1),
default=1,
help='Number of fully-connected layers for predicting the length ratio. Default %(default)s.')
train_params.add_argument('--target-factors-weight',
type=float,
nargs='+',
default=[1.0],
help='Weights of target factor losses. If one value is given, it applies to all '
'secondary target factors. For multiple values, the number of weights given has '
'to match the number of target factors. Default: %(default)s.')
train_params.add_argument('--optimized-metric',
default=C.PERPLEXITY,
choices=C.METRICS,
help='Metric to optimize with early stopping {%(choices)s}. Default: %(default)s.')
train_params.add_argument(C.TRAIN_ARGS_CHECKPOINT_INTERVAL,
type=int_greater_or_equal(1),
default=4000,
help='Checkpoint and evaluate every x updates (update-interval * batches). '
'Default: %(default)s.')
train_params.add_argument('--min-samples',
type=int,
default=None,
help='Minimum number of samples before training can stop. Default: %(default)s.')
train_params.add_argument('--max-samples',
type=int,
default=None,
help='Maximum number of samples. Default: %(default)s.')
train_params.add_argument('--min-updates',
type=int,
default=None,
help='Minimum number of updates before training can stop. Default: %(default)s.')
train_params.add_argument('--max-updates',
type=int,
default=None,
help='Maximum number of updates. Default: %(default)s.')
train_params.add_argument('--max-seconds',
type=int,
default=None,
help='Training will stop on the next checkpoint after reaching the maximum seconds. '
'Default: %(default)s.')
train_params.add_argument('--max-checkpoints',
type=int,
default=None,
help='Maximum number of checkpoints to continue training the model '
'before training is stopped. '
'Default: %(default)s.')
train_params.add_argument('--max-num-checkpoint-not-improved',
type=int,
default=None,
help='Maximum number of checkpoints the model is allowed to not improve in '
'<optimized-metric> on validation data before training is stopped. '
'Default: %(default)s.')
train_params.add_argument('--checkpoint-improvement-threshold',
type=float,
default=0.,
help='Improvement in <optimized-metric> over specified number of checkpoints must exceed '
'this value to be considered actual improvement. Default: %(default)s.')
train_params.add_argument('--min-num-epochs',
type=int,
default=None,
help='Minimum number of epochs (passes through the training data) '
'before training can stop. Default: %(default)s.')
train_params.add_argument('--max-num-epochs',
type=int,
default=None,
help='Maximum number of epochs (passes through the training data). Default: %(default)s.')
train_params.add_argument('--embed-dropout',
type=multiple_values(2, data_type=float),
default=(.0, .0),
help='Dropout probability for source & target embeddings. Use "x:x" to specify separate '
'values. Default: %(default)s.')
train_params.add_argument('--transformer-dropout-attention',
type=multiple_values(2, data_type=float),
default=(0.1, 0.1),
help='Dropout probability for multi-head attention. Use "x:x" to specify separate '
'values for encoder & decoder. Default: %(default)s.')
train_params.add_argument('--transformer-dropout-act',
type=multiple_values(2, data_type=float),
default=(0.1, 0.1),
help='Dropout probability before activation in feed-forward block. Use "x:x" to specify '
'separate values for encoder & decoder. Default: %(default)s.')
train_params.add_argument('--transformer-dropout-prepost',
type=multiple_values(2, data_type=float),
default=(0.1, 0.1),
help='Dropout probability for pre/postprocessing blocks. Use "x:x" to specify separate '
'values for encoder & decoder. Default: %(default)s.')
train_params.add_argument('--optimizer',
default=C.OPTIMIZER_ADAM,
choices=C.OPTIMIZERS,
help='SGD update rule. Default: %(default)s.')
train_params.add_argument('--optimizer-params',
type=simple_dict(),
default=None,
help='Additional optimizer params as dictionary. Format: key1:value1,key2:value2,...')
train_params.add_argument('--horovod',
action='store_true',
help='Use Horovod/MPI for distributed training (Sergeev and Del Balso 2018, '
'arxiv.org/abs/1802.05799). When using this option, run Sockeye with `horovodrun '
'-np X python3 -m sockeye.train` where X is the number of processes. Increasing '
'the number of processes multiplies the effective batch size (ex: batch_size 2560 '
'with `-np 4` gives effective batch size 10240).')
train_params.add_argument("--kvstore",
type=str,
default=C.KVSTORE_DEVICE,
choices=C.KVSTORE_TYPES,
help="The MXNet kvstore to use. 'device' is recommended for single process training. "
"Use any of 'dist_sync', 'dist_device_sync' and 'dist_async' for distributed "
"training. Default: %(default)s.")
train_params.add_argument('--weight-init',
type=str,
default=C.INIT_XAVIER,
choices=C.INIT_TYPES,
help='Type of base weight initialization. Default: %(default)s.')
train_params.add_argument('--weight-init-scale',
type=float,
default=3.0,
help='Weight initialization scale. Applies to uniform (scale) and xavier (magnitude). '
'Default: %(default)s.')
train_params.add_argument('--weight-init-xavier-factor-type',
type=str,
default=C.INIT_XAVIER_FACTOR_TYPE_AVG,
choices=C.INIT_XAVIER_FACTOR_TYPES,
help='Xavier factor type. Default: %(default)s.')
train_params.add_argument('--weight-init-xavier-rand-type',
type=str,
default=C.RAND_TYPE_UNIFORM,
choices=[C.RAND_TYPE_UNIFORM, C.RAND_TYPE_GAUSSIAN],
help='Xavier random number generator type. Default: %(default)s.')
train_params.add_argument('--initial-learning-rate',
type=float,
default=0.0002,
help='Initial learning rate. Default: %(default)s.')
train_params.add_argument('--weight-decay',
type=float,
default=0.0,
help='Weight decay constant. Default: %(default)s.')
train_params.add_argument('--momentum',
type=float,
default=None,
help='Momentum constant. Default: %(default)s.')
train_params.add_argument('--gradient-clipping-threshold',
type=float,
default=1.0,
help='Clip absolute gradient values greater than this value. '
'Set to negative to disable. Default: %(default)s.')
train_params.add_argument('--gradient-clipping-type',
choices=C.GRADIENT_CLIPPING_TYPES,
default=C.GRADIENT_CLIPPING_TYPE_NONE,
help='The type of gradient clipping. Default: %(default)s.')
train_params.add_argument('--learning-rate-scheduler-type',
default=C.LR_SCHEDULER_PLATEAU_REDUCE,
choices=C.LR_SCHEDULERS,
help='Learning rate scheduler type. Default: %(default)s.')
train_params.add_argument('--learning-rate-t-scale',
type=float,
default=1.0,
help="Step number is multiplied by this value when determining learning rate for the "
"current step. Default: %(default)s.")
train_params.add_argument('--learning-rate-reduce-factor',
type=float,
default=0.9,
help="Factor to multiply learning rate with "
"(for 'plateau-reduce' learning rate scheduler). Default: %(default)s.")
train_params.add_argument('--learning-rate-reduce-num-not-improved',
type=int,
default=8,
help="For 'plateau-reduce' learning rate scheduler. Adjust learning rate "
"if <optimized-metric> did not improve for x checkpoints. Default: %(default)s.")
train_params.add_argument('--learning-rate-warmup',
type=int,
default=0,
help="Number of warmup steps. If set to x, linearly increases learning rate from 10%% "
"to 100%% of the initial learning rate. Default: %(default)s.")
train_params.add_argument('--fixed-param-strategy',
default=None,
choices=C.FIXED_PARAM_STRATEGY_CHOICES,
help="Fix various parameters during training using a named strategy. The strategy "
"name indicates which parameters will be fixed (Wuebker et al., 2018). "
"Default: %(default)s.")
train_params.add_argument('--fixed-param-names',
default=[],
nargs='*',
help="Manually specify names of parameters to fix during training. Default: %(default)s.")
train_params.add_argument(C.TRAIN_ARGS_MONITOR_BLEU,
default=500,
type=int,
help='x>0: decode x sampled sentences from validation data and '
'compute evaluation metrics. x==-1: use full validation data. Default: %(default)s.')
train_params.add_argument('--decode-and-evaluate-device-id',
default=None,
type=int,
help='Separate device for decoding validation data. '
'Use a negative number to automatically acquire a GPU. '
'Use a positive number to acquire a specific GPU. Default: %(default)s.')
train_params.add_argument(C.TRAIN_ARGS_STOP_ON_DECODER_FAILURE,
action="store_true",
help='Stop training as soon as any checkpoint decoder fails (e.g. because there is not '
'enough GPU memory). Default: %(default)s.')
train_params.add_argument('--seed',
type=int,
default=1,
help='Random seed. Default: %(default)s.')
train_params.add_argument('--keep-last-params',
type=int,
default=-1,
help='Keep only the last n params files, use -1 to keep all files. Default: %(default)s')
train_params.add_argument('--keep-initializations',
action="store_true",
help='In addition to keeping the last n params files, also keep params from checkpoint 0.')
train_params.add_argument('--cache-last-best-params',
required=False,
type=int,
default=0,
help='Cache the last n best params files, as distinct from the last n in sequence. '
'Use 0 or negative to disable. Default: %(default)s')
train_params.add_argument('--cache-strategy',
required=False,
type=str,
default=C.AVERAGE_BEST,
choices=C.AVERAGE_CHOICES,
help='Strategy to use when deciding which are the "best" params files. '
'Default: %(default)s')
train_params.add_argument('--cache-metric',
required=False,
type=str,
default=C.PERPLEXITY,
choices=C.METRICS,
help='Metric to use when deciding which are the "best" params files. '
'Default: %(default)s')
train_params.add_argument('--dry-run',
action='store_true',
help="Do not perform any actual training, but print statistics about the model"
" and mode of operation.")
def add_train_cli_args(params):
add_training_io_args(params)
add_model_parameters(params)
add_training_args(params)
add_device_args(params)
add_logging_args(params)
add_hybridization_arg(params)
def add_translate_cli_args(params):
add_inference_args(params)
add_device_args(params)
add_logging_args(params)
add_hybridization_arg(params)
def add_score_cli_args(params):
add_training_data_args(params, required=True)
add_vocab_args(params)
add_device_args(params)
add_batch_args(params, default_batch_size=56, default_batch_type=C.BATCH_TYPE_SENTENCE)
add_hybridization_arg(params)
params = params.add_argument_group("Scoring parameters")
params.add_argument("--model", "-m", required=True,
help="Model directory containing trained model.")
params.add_argument(C.TRAINING_ARG_MAX_SEQ_LEN,
type=multiple_values(num_values=2, greater_or_equal=1),
default=None,
help='Maximum sequence length in tokens. '
'Use "x:x" to specify separate values for src&tgt. Default: Read from model.')
# common params with translate CLI
add_length_penalty_args(params)
add_brevity_penalty_args(params)
params.add_argument("--output", "-o", default=None,
help="File to write output to. Default: STDOUT.")
params.add_argument('--output-type',
default=C.OUTPUT_HANDLER_SCORE,
choices=C.OUTPUT_HANDLERS_SCORING,
help='Output type. Default: %(default)s.')
params.add_argument('--score-type',
choices=C.SCORING_TYPE_CHOICES,
default=C.SCORING_TYPE_DEFAULT,
help='Score type to output. Default: %(default)s')
params.add_argument('--softmax-temperature',
type=float,
default=None,
help='Controls peakiness of model predictions. Values < 1.0 produce '
'peaked predictions, values > 1.0 produce smoothed distributions.')
params.add_argument('--dtype', default=None, choices=[None, C.DTYPE_FP32, C.DTYPE_FP16, C.DTYPE_INT8],
help="Data type. Default: %(default)s infers from saved model.")
add_logging_args(params)
def add_inference_args(params):
decode_params = params.add_argument_group("Inference parameters")
decode_params.add_argument(C.INFERENCE_ARG_INPUT_LONG, C.INFERENCE_ARG_INPUT_SHORT,
default=None,
help='Input file to translate. One sentence per line. '
'If not given, will read from stdin.')
decode_params.add_argument(C.INFERENCE_ARG_INPUT_FACTORS_LONG, C.INFERENCE_ARG_INPUT_FACTORS_SHORT,
required=False,
nargs='+',
type=regular_file(),
default=None,
help='List of input files containing additional source factors, '
'each token-parallel to the source. Default: %(default)s.')
decode_params.add_argument('--json-input',
action='store_true',
default=False,
help="If given, the CLI expects string-serialized json objects as input."
"Requires at least the input text field, for example: "
"{'text': 'some input string'} "
"Optionally, a list of factors can be provided: "
"{'text': 'some input string', 'factors': ['C C C', 'X X X']}.")
decode_params.add_argument(C.INFERENCE_ARG_OUTPUT_LONG, C.INFERENCE_ARG_OUTPUT_SHORT,
default=None,
help='Output file to write translations to. '
'If not given, will write to stdout.')
decode_params.add_argument('--models', '-m',
required=True,
nargs='+',
help='Model folder(s). Use multiple for ensemble decoding. '
'Model determines config, best parameters and vocab files.')
decode_params.add_argument('--checkpoints', '-c',
default=None,
type=int,
nargs='+',
help='If not given, chooses best checkpoints for model(s). '
'If specified, must have the same length as --models and be integers.')
decode_params.add_argument('--nbest-size',
type=int_greater_or_equal(1),
default=1,
help='Size of the nbest list of translations. Default: %(default)s.')
decode_params.add_argument('--beam-size', '-b',
type=int_greater_or_equal(1),
default=5,
help='Size of the beam. Default: %(default)s.')
decode_params.add_argument('--greedy', '-g',
action="store_true",
default=False,
help='Enables an alternative, faster greedy decoding implementation. It does not '
'support batch decoding, ensembles, or lexical constraints, and hypothesis scores '
'are not normalized. Default: %(default)s.')
decode_params.add_argument('--beam-search-stop',
choices=[C.BEAM_SEARCH_STOP_ALL, C.BEAM_SEARCH_STOP_FIRST],
default=C.BEAM_SEARCH_STOP_ALL,
help='Stopping criteria. Quit when (all) hypotheses are finished '
'or when a finished hypothesis is in (first) position. Default: %(default)s.')
decode_params.add_argument('--batch-size',
type=int_greater_or_equal(1),
default=1,
help='Batch size during decoding. Determines how many sentences are translated '
'simultaneously. Default: %(default)s.')
decode_params.add_argument('--chunk-size',
type=int_greater_or_equal(1),
default=None,
help='Size of the chunks to be read from input at once. The chunks are sorted and then '
'split into batches. The larger the chunk size, the better the grouping '
'of segments of similar length and the higher the throughput.'
' Default: %d without batching '
'and %d * batch_size with batching.' % (C.CHUNK_SIZE_NO_BATCHING,
C.CHUNK_SIZE_PER_BATCH_SEGMENT))
decode_params.add_argument('--mc-dropout',
default=False,
action='store_true',
help='Turn on dropout during inference (Monte Carlo dropout). '
'This will make translations non-deterministic and might slow '
'down translation speed.')
decode_params.add_argument('--softmax-temperature',
type=float,
default=None,
help='Controls peakiness of model predictions. Values < 1.0 produce '
'peaked predictions, values > 1.0 produce smoothed distributions.')
decode_params.add_argument('--sample',
type=int_greater_or_equal(0),
default=None,
nargs='?',
const=0,
help='Sample from softmax instead of taking best. Optional argument will restrict '
'sampling to top N vocabulary items at each step. Default: %(default)s.')
decode_params.add_argument('--seed',
type=int,
default=None,
help='Random seed used if sampling. Default: %(default)s.')
decode_params.add_argument('--ensemble-mode',
type=str,
default='linear',
choices=['linear', 'log_linear'],
help='Ensemble mode. Default: %(default)s.')
decode_params.add_argument('--bucket-width',
type=int_greater_or_equal(0),
default=10,
help='Bucket width for encoder steps. 0 means no bucketing. Default: %(default)s.')
decode_params.add_argument('--max-input-length',
type=int_greater_or_equal(1),
default=None,
help='Maximum input sequence length. Default: value from model(s).')
decode_params.add_argument('--max-output-length-num-stds',
type=int,
default=C.DEFAULT_NUM_STD_MAX_OUTPUT_LENGTH,
help='Number of target-to-source length ratio standard deviations from training to add '
'to calculate maximum output length for beam search for each sentence. '
'Default: %(default)s.')
decode_params.add_argument('--max-output-length',
type=int_greater_or_equal(1),
default=None,
help='Maximum number of words to generate during translation. '
'If None, it will be computed automatically. Default: %(default)s.')
decode_params.add_argument('--restrict-lexicon',
nargs='+',
type=multiple_values(num_values=2, data_type=str),
default=None,
help="Specify top-k lexicon to restrict output vocabulary to the k most likely context-"
"free translations of the source words in each sentence (Devlin, 2017). See the "
"lexicon module for creating top-k lexicons. To use multiple lexicons, provide "
"'--restrict-lexicon key1:path1 key2:path2 ...' and use JSON input to specify the "
"lexicon for each sentence: "
"{\"text\": \"some input string\", \"restrict_lexicon\": \"key\"}. "
"Default: %(default)s.")
decode_params.add_argument('--restrict-lexicon-topk',
type=int,
default=None,
help="Specify the number of translations to load for each source word from the lexicon "
"given with --restrict-lexicon. Default: Load all entries from the lexicon.")
decode_params.add_argument('--avoid-list',
type=str,
default=None,
help="Specify a file containing phrases (pre-processed, one per line) to block "
"from the output. Default: %(default)s.")
decode_params.add_argument('--strip-unknown-words',
action='store_true',
default=False,
help='Remove any <unk> symbols from outputs. Default: %(default)s.')
decode_params.add_argument('--prevent-unk',
action='store_true',
default=False,
help='Avoid generating <unk> during decoding. Default: %(default)s.')
decode_params.add_argument('--output-type',
default='translation',
choices=C.OUTPUT_HANDLERS,
help='Output type. Default: %(default)s.')
# common params with score CLI
add_length_penalty_args(decode_params)
add_brevity_penalty_args(decode_params)
decode_params.add_argument('--dtype', default=None, choices=[None, C.DTYPE_FP32, C.DTYPE_FP16, C.DTYPE_INT8],
help="Data type. Default: %(default)s infers from saved model.")
def add_length_penalty_args(params):
params.add_argument('--length-penalty-alpha',
default=1.0,
type=float,
help='Alpha factor for the length penalty used in beam search: '
'(beta + len(Y))**alpha/(beta + 1)**alpha. A value of 0.0 will therefore turn off '
'length normalization. Default: %(default)s.')
params.add_argument('--length-penalty-beta',
default=0.0,
type=float,
help='Beta factor for the length penalty used in scoring: '
'(beta + len(Y))**alpha/(beta + 1)**alpha. Default: %(default)s')
def add_brevity_penalty_args(params):
params.add_argument('--brevity-penalty-type',
default='none',
type=str,
choices=[C.BREVITY_PENALTY_NONE, C.BREVITY_PENALTY_LEARNED, C.BREVITY_PENALTY_CONSTANT],
help='If specified, adds brevity penalty to the hypotheses\' scores, calculated with learned '
'or constant length ratios. The latter, by default, uses the length ratio (|ref|/|hyp|) '
'estimated from the training data and averaged over models. Default: %(default)s.')
params.add_argument('--brevity-penalty-weight',
default=1.0,
type=float_greater_or_equal(0.0),
help='Scaler for the brevity penalty in beam search: weight * log(BP) + score. Default: %(default)s')
params.add_argument('--brevity-penalty-constant-length-ratio',
default=0.0,
type=float_greater_or_equal(0.0),
help='Has effect if --brevity-penalty-type is set to \'constant\'. If positive, overrides the length '
'ratio, used for brevity penalty calculation, for all inputs. If zero, uses the average of length '
'ratios from the training data over all models. Default: %(default)s.')
def add_evaluate_args(params):
eval_params = params.add_argument_group("Evaluate parameters")
eval_params.add_argument('--references', '-r',
required=True,
type=str,
help="File with references.")
eval_params.add_argument('--hypotheses', '-i',
type=file_or_stdin(),
default=[sys.stdin],
nargs='+',
help="File(s) with hypotheses. If none will read from stdin. Default: stdin.")
eval_params.add_argument('--metrics',
nargs='+',
choices=C.EVALUATE_METRICS,
default=[C.BLEU, C.CHRF],
help='List of metrics to compute. Default: %(default)s.')
eval_params.add_argument('--sentence', '-s',
action="store_true",
help="Show sentence-level metrics. Default: %(default)s.")
eval_params.add_argument('--offset',
type=float,
default=0.01,
help="Numerical value of the offset of zero n-gram counts for BLEU. Default: %(default)s.")
eval_params.add_argument('--not-strict', '-n',
action="store_true",
help="Do not fail if number of hypotheses does not match number of references. "
"Default: %(default)s.")
def add_build_vocab_args(params):
params.add_argument('-i', '--inputs', required=True, nargs='+', help='List of text files to build vocabulary from.')
params.add_argument('-o', '--output', required=True, type=str, help="Output filename to write vocabulary to.")
add_vocab_args(params)
def add_init_embedding_args(params):
params.add_argument('--weight-files', '-w', required=True, nargs='+',
help='List of input weight files in .npy, .npz or Sockeye parameter format.')
params.add_argument('--vocabularies-in', '-i', required=True, nargs='+',
help='List of input vocabularies as token-index dictionaries in .json format.')
params.add_argument('--vocabularies-out', '-o', required=True, nargs='+',
help='List of output vocabularies as token-index dictionaries in .json format.')
params.add_argument('--names', '-n', nargs='+',
help='List of Sockeye parameter names for (embedding) weights. Default: %(default)s.',
default=[n + "weight" for n in [C.SOURCE_EMBEDDING_PREFIX, C.TARGET_EMBEDDING_PREFIX]])
params.add_argument('--file', '-f', required=True,
help='File to write initialized parameters to.')
params.add_argument('--encoding', '-c', type=str, default=C.VOCAB_ENCODING,
help='Open input vocabularies with specified encoding. Default: %(default)s.')
|
apache-2.0
|
Dandandan/wikiprogramming
|
jsrepl/build/extern/python/reloop-closured/lib/python2.7/plistlib.py
|
141
|
15176
|
r"""plistlib.py -- a tool to generate and parse MacOSX .plist files.
The PropertyList (.plist) file format is a simple XML pickle supporting
basic object types, like dictionaries, lists, numbers and strings.
Usually the top level object is a dictionary.
To write out a plist file, use the writePlist(rootObject, pathOrFile)
function. 'rootObject' is the top level object, 'pathOrFile' is a
filename or a (writable) file object.
To parse a plist from a file, use the readPlist(pathOrFile) function,
with a file name or a (readable) file object as the only argument. It
returns the top level object (again, usually a dictionary).
To work with plist data in strings, you can use readPlistFromString()
and writePlistToString().
Values can be strings, integers, floats, booleans, tuples, lists,
dictionaries, Data or datetime.datetime objects. String values (including
dictionary keys) may be unicode strings -- they will be written out as
UTF-8.
The <data> plist type is supported through the Data class. This is a
thin wrapper around a Python string.
Generate Plist example:
pl = dict(
aString="Doodah",
aList=["A", "B", 12, 32.1, [1, 2, 3]],
aFloat=0.1,
anInt=728,
aDict=dict(
anotherString="<hello & hi there!>",
aUnicodeValue=u'M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
),
someData=Data("<binary gunk>"),
someMoreData=Data("<lots of binary gunk>" * 10),
aDate=datetime.datetime.fromtimestamp(time.mktime(time.gmtime())),
)
# unicode keys are possible, but a little awkward to use:
pl[u'\xc5benraa'] = "That was a unicode key."
writePlist(pl, fileName)
Parse Plist example:
pl = readPlist(pathOrFile)
print pl["aKey"]
"""
__all__ = [
"readPlist", "writePlist", "readPlistFromString", "writePlistToString",
"readPlistFromResource", "writePlistToResource",
"Plist", "Data", "Dict"
]
# Note: the Plist and Dict classes have been deprecated.
import binascii
import datetime
from cStringIO import StringIO
import re
import warnings
def readPlist(pathOrFile):
"""Read a .plist file. 'pathOrFile' may either be a file name or a
(readable) file object. Return the unpacked root object (which
usually is a dictionary).
"""
didOpen = 0
if isinstance(pathOrFile, (str, unicode)):
pathOrFile = open(pathOrFile)
didOpen = 1
p = PlistParser()
rootObject = p.parse(pathOrFile)
if didOpen:
pathOrFile.close()
return rootObject
def writePlist(rootObject, pathOrFile):
"""Write 'rootObject' to a .plist file. 'pathOrFile' may either be a
file name or a (writable) file object.
"""
didOpen = 0
if isinstance(pathOrFile, (str, unicode)):
pathOrFile = open(pathOrFile, "w")
didOpen = 1
writer = PlistWriter(pathOrFile)
writer.writeln("<plist version=\"1.0\">")
writer.writeValue(rootObject)
writer.writeln("</plist>")
if didOpen:
pathOrFile.close()
def readPlistFromString(data):
"""Read a plist data from a string. Return the root object.
"""
return readPlist(StringIO(data))
def writePlistToString(rootObject):
"""Return 'rootObject' as a plist-formatted string.
"""
f = StringIO()
writePlist(rootObject, f)
return f.getvalue()
def readPlistFromResource(path, restype='plst', resid=0):
"""Read plst resource from the resource fork of path.
"""
warnings.warnpy3k("In 3.x, readPlistFromResource is removed.",
stacklevel=2)
from Carbon.File import FSRef, FSGetResourceForkName
from Carbon.Files import fsRdPerm
from Carbon import Res
fsRef = FSRef(path)
resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdPerm)
Res.UseResFile(resNum)
plistData = Res.Get1Resource(restype, resid).data
Res.CloseResFile(resNum)
return readPlistFromString(plistData)
def writePlistToResource(rootObject, path, restype='plst', resid=0):
"""Write 'rootObject' as a plst resource to the resource fork of path.
"""
warnings.warnpy3k("In 3.x, writePlistToResource is removed.", stacklevel=2)
from Carbon.File import FSRef, FSGetResourceForkName
from Carbon.Files import fsRdWrPerm
from Carbon import Res
plistData = writePlistToString(rootObject)
fsRef = FSRef(path)
resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdWrPerm)
Res.UseResFile(resNum)
try:
Res.Get1Resource(restype, resid).RemoveResource()
except Res.Error:
pass
res = Res.Resource(plistData)
res.AddResource(restype, resid, '')
res.WriteResource()
Res.CloseResFile(resNum)
class DumbXMLWriter:
def __init__(self, file, indentLevel=0, indent="\t"):
self.file = file
self.stack = []
self.indentLevel = indentLevel
self.indent = indent
def beginElement(self, element):
self.stack.append(element)
self.writeln("<%s>" % element)
self.indentLevel += 1
def endElement(self, element):
assert self.indentLevel > 0
assert self.stack.pop() == element
self.indentLevel -= 1
self.writeln("</%s>" % element)
def simpleElement(self, element, value=None):
if value is not None:
value = _escapeAndEncode(value)
self.writeln("<%s>%s</%s>" % (element, value, element))
else:
self.writeln("<%s/>" % element)
def writeln(self, line):
if line:
self.file.write(self.indentLevel * self.indent + line + "\n")
else:
self.file.write("\n")
# Contents should conform to a subset of ISO 8601
# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'. Smaller units may be omitted with
# a loss of precision)
_dateParser = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z")
def _dateFromString(s):
order = ('year', 'month', 'day', 'hour', 'minute', 'second')
gd = _dateParser.match(s).groupdict()
lst = []
for key in order:
val = gd[key]
if val is None:
break
lst.append(int(val))
return datetime.datetime(*lst)
def _dateToString(d):
return '%04d-%02d-%02dT%02d:%02d:%02dZ' % (
d.year, d.month, d.day,
d.hour, d.minute, d.second
)
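# Illustrative round-trip (assuming a full ISO 8601 timestamp, which is what
# the regex above accepts):
#
#     >>> _dateFromString("2004-11-28T14:33:07Z")
#     datetime.datetime(2004, 11, 28, 14, 33, 7)
#     >>> _dateToString(datetime.datetime(2004, 11, 28, 14, 33, 7))
#     '2004-11-28T14:33:07Z'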
# Regex to find any control chars, except for \t \n and \r
_controlCharPat = re.compile(
r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"
r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]")
def _escapeAndEncode(text):
m = _controlCharPat.search(text)
if m is not None:
raise ValueError("strings can't contains control characters; "
"use plistlib.Data instead")
text = text.replace("\r\n", "\n") # convert DOS line endings
text = text.replace("\r", "\n") # convert Mac line endings
text = text.replace("&", "&") # escape '&'
text = text.replace("<", "<") # escape '<'
text = text.replace(">", ">") # escape '>'
return text.encode("utf-8") # encode as UTF-8
PLISTHEADER = """\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
"""
class PlistWriter(DumbXMLWriter):
def __init__(self, file, indentLevel=0, indent="\t", writeHeader=1):
if writeHeader:
file.write(PLISTHEADER)
DumbXMLWriter.__init__(self, file, indentLevel, indent)
def writeValue(self, value):
if isinstance(value, (str, unicode)):
self.simpleElement("string", value)
elif isinstance(value, bool):
# must switch for bool before int, as bool is a
# subclass of int...
if value:
self.simpleElement("true")
else:
self.simpleElement("false")
elif isinstance(value, (int, long)):
self.simpleElement("integer", "%d" % value)
elif isinstance(value, float):
self.simpleElement("real", repr(value))
elif isinstance(value, dict):
self.writeDict(value)
elif isinstance(value, Data):
self.writeData(value)
elif isinstance(value, datetime.datetime):
self.simpleElement("date", _dateToString(value))
elif isinstance(value, (tuple, list)):
self.writeArray(value)
else:
raise TypeError("unsuported type: %s" % type(value))
def writeData(self, data):
self.beginElement("data")
self.indentLevel -= 1
maxlinelength = 76 - len(self.indent.replace("\t", " " * 8) *
self.indentLevel)
for line in data.asBase64(maxlinelength).split("\n"):
if line:
self.writeln(line)
self.indentLevel += 1
self.endElement("data")
def writeDict(self, d):
self.beginElement("dict")
items = d.items()
items.sort()
for key, value in items:
if not isinstance(key, (str, unicode)):
raise TypeError("keys must be strings")
self.simpleElement("key", key)
self.writeValue(value)
self.endElement("dict")
def writeArray(self, array):
self.beginElement("array")
for value in array:
self.writeValue(value)
self.endElement("array")
class _InternalDict(dict):
# This class is needed while Dict is scheduled for deprecation:
# we only need to warn when a *user* instantiates Dict or when
# the "attribute notation for dict keys" is used.
def __getattr__(self, attr):
try:
value = self[attr]
except KeyError:
raise AttributeError, attr
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning, 2)
return value
def __setattr__(self, attr, value):
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning, 2)
self[attr] = value
def __delattr__(self, attr):
try:
del self[attr]
except KeyError:
raise AttributeError, attr
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning, 2)
class Dict(_InternalDict):
def __init__(self, **kwargs):
from warnings import warn
warn("The plistlib.Dict class is deprecated, use builtin dict instead",
PendingDeprecationWarning, 2)
super(Dict, self).__init__(**kwargs)
class Plist(_InternalDict):
"""This class has been deprecated. Use readPlist() and writePlist()
functions instead, together with regular dict objects.
"""
def __init__(self, **kwargs):
from warnings import warn
warn("The Plist class is deprecated, use the readPlist() and "
"writePlist() functions instead", PendingDeprecationWarning, 2)
super(Plist, self).__init__(**kwargs)
def fromFile(cls, pathOrFile):
"""Deprecated. Use the readPlist() function instead."""
rootObject = readPlist(pathOrFile)
plist = cls()
plist.update(rootObject)
return plist
fromFile = classmethod(fromFile)
def write(self, pathOrFile):
"""Deprecated. Use the writePlist() function instead."""
writePlist(self, pathOrFile)
def _encodeBase64(s, maxlinelength=76):
# copied from base64.encodestring(), with added maxlinelength argument
maxbinsize = (maxlinelength//4)*3
pieces = []
for i in range(0, len(s), maxbinsize):
chunk = s[i : i + maxbinsize]
pieces.append(binascii.b2a_base64(chunk))
return "".join(pieces)
class Data:
"""Wrapper for binary data."""
def __init__(self, data):
self.data = data
def fromBase64(cls, data):
# base64.decodestring just calls binascii.a2b_base64;
# it seems overkill to use both base64 and binascii.
return cls(binascii.a2b_base64(data))
fromBase64 = classmethod(fromBase64)
def asBase64(self, maxlinelength=76):
return _encodeBase64(self.data, maxlinelength)
def __cmp__(self, other):
if isinstance(other, self.__class__):
return cmp(self.data, other.data)
elif isinstance(other, str):
return cmp(self.data, other)
else:
return cmp(id(self), id(other))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self.data))
class PlistParser:
def __init__(self):
self.stack = []
self.currentKey = None
self.root = None
def parse(self, fileobj):
from xml.parsers.expat import ParserCreate
parser = ParserCreate()
parser.StartElementHandler = self.handleBeginElement
parser.EndElementHandler = self.handleEndElement
parser.CharacterDataHandler = self.handleData
parser.ParseFile(fileobj)
return self.root
def handleBeginElement(self, element, attrs):
self.data = []
handler = getattr(self, "begin_" + element, None)
if handler is not None:
handler(attrs)
def handleEndElement(self, element):
handler = getattr(self, "end_" + element, None)
if handler is not None:
handler()
def handleData(self, data):
self.data.append(data)
def addObject(self, value):
if self.currentKey is not None:
self.stack[-1][self.currentKey] = value
self.currentKey = None
elif not self.stack:
# this is the root object
self.root = value
else:
self.stack[-1].append(value)
def getData(self):
data = "".join(self.data)
try:
data = data.encode("ascii")
except UnicodeError:
pass
self.data = []
return data
# element handlers
def begin_dict(self, attrs):
d = _InternalDict()
self.addObject(d)
self.stack.append(d)
def end_dict(self):
self.stack.pop()
def end_key(self):
self.currentKey = self.getData()
def begin_array(self, attrs):
a = []
self.addObject(a)
self.stack.append(a)
def end_array(self):
self.stack.pop()
def end_true(self):
self.addObject(True)
def end_false(self):
self.addObject(False)
def end_integer(self):
self.addObject(int(self.getData()))
def end_real(self):
self.addObject(float(self.getData()))
def end_string(self):
self.addObject(self.getData())
def end_data(self):
self.addObject(Data.fromBase64(self.getData()))
def end_date(self):
self.addObject(_dateFromString(self.getData()))
|
mit
|
bliti/django-nonrel-1.5
|
django/contrib/gis/gdal/tests/test_geom.py
|
104
|
20864
|
import json
from binascii import b2a_hex
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
from django.contrib.gis.gdal import (OGRGeometry, OGRGeomType, OGRException,
OGRIndexError, SpatialReference, CoordTransform, GDAL_VERSION)
from django.contrib.gis.geometry.test_data import TestDataMixin
from django.utils.six.moves import xrange
from django.utils import unittest
class OGRGeomTest(unittest.TestCase, TestDataMixin):
"This tests the OGR Geometry."
def test00a_geomtype(self):
"Testing OGRGeomType object."
# OGRGeomType should initialize on all these inputs.
try:
g = OGRGeomType(1)
g = OGRGeomType(7)
g = OGRGeomType('point')
g = OGRGeomType('GeometrycollectioN')
g = OGRGeomType('LINearrING')
g = OGRGeomType('Unknown')
except:
self.fail('Could not create an OGRGeomType object!')
# Should throw TypeError on this input
self.assertRaises(OGRException, OGRGeomType, 23)
self.assertRaises(OGRException, OGRGeomType, 'fooD')
self.assertRaises(OGRException, OGRGeomType, 9)
# Equivalence can take strings, ints, and other OGRGeomTypes
self.assertEqual(True, OGRGeomType(1) == OGRGeomType(1))
self.assertEqual(True, OGRGeomType(7) == 'GeometryCollection')
self.assertEqual(True, OGRGeomType('point') == 'POINT')
self.assertEqual(False, OGRGeomType('point') == 2)
self.assertEqual(True, OGRGeomType('unknown') == 0)
self.assertEqual(True, OGRGeomType(6) == 'MULtiPolyGON')
self.assertEqual(False, OGRGeomType(1) != OGRGeomType('point'))
self.assertEqual(True, OGRGeomType('POINT') != OGRGeomType(6))
# Testing the Django field name equivalent property.
self.assertEqual('PointField', OGRGeomType('Point').django)
self.assertEqual('GeometryField', OGRGeomType('Unknown').django)
self.assertEqual(None, OGRGeomType('none').django)
# 'Geometry' initialization implies an unknown geometry type.
gt = OGRGeomType('Geometry')
self.assertEqual(0, gt.num)
self.assertEqual('Unknown', gt.name)
def test00b_geomtype_25d(self):
"Testing OGRGeomType object with 25D types."
wkb25bit = OGRGeomType.wkb25bit
self.assertTrue(OGRGeomType(wkb25bit + 1) == 'Point25D')
self.assertTrue(OGRGeomType('MultiLineString25D') == (5 + wkb25bit))
self.assertEqual('GeometryCollectionField', OGRGeomType('GeometryCollection25D').django)
def test01a_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
self.assertEqual(g.wkt, geom.wkt)
def test01a_ewkt(self):
"Testing EWKT input/output."
for ewkt_val in ('POINT (1 2 3)', 'LINEARRING (0 0,1 1,2 1,0 0)'):
# First with ewkt output when no SRID in EWKT
self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt)
# Now test consumption with an SRID specified.
ewkt_val = 'SRID=4326;%s' % ewkt_val
geom = OGRGeometry(ewkt_val)
self.assertEqual(ewkt_val, geom.ewkt)
self.assertEqual(4326, geom.srs.srid)
def test01b_gml(self):
"Testing GML output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
exp_gml = g.gml
if GDAL_VERSION >= (1, 8):
# In GDAL 1.8, the non-conformant GML tag <gml:GeometryCollection> was
# replaced with <gml:MultiGeometry>.
exp_gml = exp_gml.replace('GeometryCollection', 'MultiGeometry')
self.assertEqual(exp_gml, geom.gml)
def test01c_hex(self):
"Testing HEX input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
self.assertEqual(g.hex.encode(), geom1.hex)
# Constructing w/HEX
geom2 = OGRGeometry(g.hex)
self.assertEqual(geom1, geom2)
def test01d_wkb(self):
"Testing WKB input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
wkb = geom1.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex.encode())
# Constructing w/WKB.
geom2 = OGRGeometry(wkb)
self.assertEqual(geom1, geom2)
def test01e_json(self):
"Testing GeoJSON input/output."
for g in self.geometries.json_geoms:
geom = OGRGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json))
def test02_points(self):
"Testing Point objects."
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.points:
if not hasattr(p, 'z'): # No 3D
pnt = OGRGeometry(p.wkt)
self.assertEqual(1, pnt.geom_type)
self.assertEqual('POINT', pnt.geom_name)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual((p.x, p.y), pnt.tuple)
def test03_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mgeom1 = OGRGeometry(mp.wkt) # First one from WKT
self.assertEqual(4, mgeom1.geom_type)
self.assertEqual('MULTIPOINT', mgeom1.geom_name)
mgeom2 = OGRGeometry('MULTIPOINT') # Creating empty multipoint
mgeom3 = OGRGeometry('MULTIPOINT')
for g in mgeom1:
mgeom2.add(g) # adding each point from the multipoints
mgeom3.add(g.wkt) # should take WKT as well
self.assertEqual(mgeom1, mgeom2) # they should equal
self.assertEqual(mgeom1, mgeom3)
self.assertEqual(mp.coords, mgeom2.coords)
self.assertEqual(mp.n_p, mgeom2.point_count)
def test04_linestring(self):
"Testing LineString objects."
prev = OGRGeometry('POINT(0 0)')
for ls in self.geometries.linestrings:
linestr = OGRGeometry(ls.wkt)
self.assertEqual(2, linestr.geom_type)
self.assertEqual('LINESTRING', linestr.geom_name)
self.assertEqual(ls.n_p, linestr.point_count)
self.assertEqual(ls.coords, linestr.tuple)
self.assertEqual(True, linestr == OGRGeometry(ls.wkt))
self.assertEqual(True, linestr != prev)
self.assertRaises(OGRIndexError, linestr.__getitem__, len(linestr))
prev = linestr
# Testing the x, y properties.
x = [tmpx for tmpx, tmpy in ls.coords]
y = [tmpy for tmpx, tmpy in ls.coords]
self.assertEqual(x, linestr.x)
self.assertEqual(y, linestr.y)
def test05_multilinestring(self):
"Testing MultiLineString objects."
prev = OGRGeometry('POINT(0 0)')
for mls in self.geometries.multilinestrings:
mlinestr = OGRGeometry(mls.wkt)
self.assertEqual(5, mlinestr.geom_type)
self.assertEqual('MULTILINESTRING', mlinestr.geom_name)
self.assertEqual(mls.n_p, mlinestr.point_count)
self.assertEqual(mls.coords, mlinestr.tuple)
self.assertEqual(True, mlinestr == OGRGeometry(mls.wkt))
self.assertEqual(True, mlinestr != prev)
prev = mlinestr
for ls in mlinestr:
self.assertEqual(2, ls.geom_type)
self.assertEqual('LINESTRING', ls.geom_name)
self.assertRaises(OGRIndexError, mlinestr.__getitem__, len(mlinestr))
def test06_linearring(self):
"Testing LinearRing objects."
prev = OGRGeometry('POINT(0 0)')
for rr in self.geometries.linearrings:
lr = OGRGeometry(rr.wkt)
#self.assertEqual(101, lr.geom_type.num)
self.assertEqual('LINEARRING', lr.geom_name)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr == OGRGeometry(rr.wkt))
self.assertEqual(True, lr != prev)
prev = lr
def test07a_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180,-90,180,90)
p = OGRGeometry.from_bbox( bbox )
self.assertEqual(bbox, p.extent)
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.polygons:
poly = OGRGeometry(p.wkt)
self.assertEqual(3, poly.geom_type)
self.assertEqual('POLYGON', poly.geom_name)
self.assertEqual(p.n_p, poly.point_count)
self.assertEqual(p.n_i + 1, len(poly))
# Testing area & centroid.
self.assertAlmostEqual(p.area, poly.area, 9)
x, y = poly.centroid.tuple
self.assertAlmostEqual(p.centroid[0], x, 9)
self.assertAlmostEqual(p.centroid[1], y, 9)
# Testing equivalence
self.assertEqual(True, poly == OGRGeometry(p.wkt))
self.assertEqual(True, poly != prev)
if p.ext_ring_cs:
ring = poly[0]
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple)
self.assertEqual(len(p.ext_ring_cs), ring.point_count)
for r in poly:
self.assertEqual('LINEARRING', r.geom_name)
def test07b_closepolygons(self):
"Testing closing Polygon objects."
# Both rings in this geometry are not closed.
poly = OGRGeometry('POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))')
self.assertEqual(8, poly.point_count)
with self.assertRaises(OGRException):
_ = poly.centroid
poly.close_rings()
self.assertEqual(10, poly.point_count) # Two closing points should've been added
self.assertEqual(OGRGeometry('POINT(2.5 2.5)'), poly.centroid)
def test08_multipolygons(self):
"Testing MultiPolygon objects."
prev = OGRGeometry('POINT(0 0)')
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
self.assertEqual(6, mpoly.geom_type)
self.assertEqual('MULTIPOLYGON', mpoly.geom_name)
if mp.valid:
self.assertEqual(mp.n_p, mpoly.point_count)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(OGRIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual('POLYGON', p.geom_name)
self.assertEqual(3, p.geom_type)
self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt)
def test09a_srs(self):
"Testing OGR Geometries with Spatial Reference objects."
for mp in self.geometries.multipolygons:
# Creating a geometry w/spatial reference
sr = SpatialReference('WGS84')
mpoly = OGRGeometry(mp.wkt, sr)
self.assertEqual(sr.wkt, mpoly.srs.wkt)
# Ensuring that SRS is propagated to clones.
klone = mpoly.clone()
self.assertEqual(sr.wkt, klone.srs.wkt)
# Ensuring all children geometries (polygons and their rings) all
# return the assigned spatial reference as well.
for poly in mpoly:
self.assertEqual(sr.wkt, poly.srs.wkt)
for ring in poly:
self.assertEqual(sr.wkt, ring.srs.wkt)
# Ensuring SRS propagate in topological ops.
a = OGRGeometry(self.geometries.topology_geoms[0].wkt_a, sr)
b = OGRGeometry(self.geometries.topology_geoms[0].wkt_b, sr)
diff = a.difference(b)
union = a.union(b)
self.assertEqual(sr.wkt, diff.srs.wkt)
self.assertEqual(sr.srid, union.srs.srid)
# Instantiating w/an integer SRID
mpoly = OGRGeometry(mp.wkt, 4326)
self.assertEqual(4326, mpoly.srid)
mpoly.srs = SpatialReference(4269)
self.assertEqual(4269, mpoly.srid)
self.assertEqual('NAD83', mpoly.srs.name)
# Incrementing through the multipolygon after the spatial reference
# has been re-assigned.
for poly in mpoly:
self.assertEqual(mpoly.srs.wkt, poly.srs.wkt)
poly.srs = 32140
for ring in poly:
# Changing each ring in the polygon
self.assertEqual(32140, ring.srs.srid)
self.assertEqual('NAD83 / Texas South Central', ring.srs.name)
ring.srs = str(SpatialReference(4326)) # back to WGS84
self.assertEqual(4326, ring.srs.srid)
# Using the `srid` property.
ring.srid = 4322
self.assertEqual('WGS 72', ring.srs.name)
self.assertEqual(4322, ring.srid)
def test09b_srs_transform(self):
"Testing transform()."
orig = OGRGeometry('POINT (-104.609 38.255)', 4326)
trans = OGRGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using an srid, a SpatialReference object, and a CoordTransform object
# for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(SpatialReference('EPSG:2774'))
ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test09c_transform_dim(self):
"Testing coordinate dimension is the same on transformed geometries."
ls_orig = OGRGeometry('LINESTRING(-104.609 38.255)', 4326)
ls_trans = OGRGeometry('LINESTRING(992385.4472045 481455.4944650)', 2774)
prec = 3
ls_orig.transform(ls_trans.srs)
# Making sure the coordinate dimension is still 2D.
self.assertEqual(2, ls_orig.coord_dim)
self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec)
self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec)
def test10_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test11_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
i1 = OGRGeometry(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test12_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test13_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
u1 = OGRGeometry(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test14_add(self):
"Testing GeometryCollection.add()."
# Can't insert a Point into a MultiPolygon.
mp = OGRGeometry('MultiPolygon')
pnt = OGRGeometry('POINT(5 23)')
self.assertRaises(OGRException, mp.add, pnt)
# GeometryCollection.add may take an OGRGeometry (if it is another collection
# of the same type, all child geoms will be added individually) or WKT.
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
mp1 = OGRGeometry('MultiPolygon')
mp2 = OGRGeometry('MultiPolygon')
mp3 = OGRGeometry('MultiPolygon')
for poly in mpoly:
mp1.add(poly) # Adding a geometry at a time
mp2.add(poly.wkt) # Adding WKT
mp3.add(mpoly) # Adding a MultiPolygon's entire contents at once.
for tmp in (mp1, mp2, mp3): self.assertEqual(mpoly, tmp)
def test15_extent(self):
"Testing `extent` property."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = OGRGeometry('MULTIPOINT(5 23, 0 0, 10 50)')
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
# Testing on the 'real world' Polygon.
poly = OGRGeometry(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test16_25D(self):
"Testing 2.5D geometries."
pnt_25d = OGRGeometry('POINT(1 2 3)')
self.assertEqual('Point25D', pnt_25d.geom_type.name)
self.assertEqual(3.0, pnt_25d.z)
self.assertEqual(3, pnt_25d.coord_dim)
ls_25d = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)')
self.assertEqual('LineString25D', ls_25d.geom_type.name)
self.assertEqual([1.0, 2.0, 3.0], ls_25d.z)
self.assertEqual(3, ls_25d.coord_dim)
def test17_pickle(self):
"Testing pickle support."
g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84')
g2 = pickle.loads(pickle.dumps(g1))
self.assertEqual(g1, g2)
self.assertEqual(4326, g2.srs.srid)
self.assertEqual(g1.srs.wkt, g2.srs.wkt)
def test18_ogrgeometry_transform_workaround(self):
"Testing coordinate dimensions on geometries after transformation."
# A bug in GDAL versions prior to 1.7 changes the coordinate
# dimension of a geometry after it has been transformed.
# This test ensures that the bug workarounds employed within
# `OGRGeometry.transform` indeed work.
wkt_2d = "MULTILINESTRING ((0 0,1 1,2 2))"
wkt_3d = "MULTILINESTRING ((0 0 0,1 1 1,2 2 2))"
srid = 4326
# For both the 2D and 3D MultiLineString, ensure _both_ the dimension
# of the collection and the component LineString have the expected
# coordinate dimension after transform.
geom = OGRGeometry(wkt_2d, srid)
geom.transform(srid)
self.assertEqual(2, geom.coord_dim)
self.assertEqual(2, geom[0].coord_dim)
self.assertEqual(wkt_2d, geom.wkt)
geom = OGRGeometry(wkt_3d, srid)
geom.transform(srid)
self.assertEqual(3, geom.coord_dim)
self.assertEqual(3, geom[0].coord_dim)
self.assertEqual(wkt_3d, geom.wkt)
def test19_equivalence_regression(self):
"Testing equivalence methods with non-OGRGeometry instances."
self.assertNotEqual(None, OGRGeometry('POINT(0 0)'))
self.assertEqual(False, OGRGeometry('LINESTRING(0 0, 1 1)') == 3)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(OGRGeomTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
|
bsd-3-clause
|
sarthakmeh03/django
|
django/template/base.py
|
1
|
40259
|
"""
This is the Django template system.
How it works:
The Lexer.tokenize() function converts a template string (i.e., a string containing
markup with custom template tags) to tokens, which can be either plain text
(TOKEN_TEXT), variables (TOKEN_VAR) or block statements (TOKEN_BLOCK).
The Parser() class takes a list of tokens in its constructor, and its parse()
method returns a compiled template -- which is, under the hood, a list of
Node objects.
Each Node is responsible for creating some sort of output -- e.g. simple text
(TextNode), variable values in a given context (VariableNode), results of basic
logic (IfNode), results of looping (ForNode), or anything else. The core Node
types are TextNode, VariableNode, IfNode and ForNode, but plugin modules can
define their own custom node types.
Each Node has a render() method, which takes a Context and returns a string of
the rendered node. For example, the render() method of a Variable Node returns
the variable's value as a string. The render() method of a ForNode returns the
rendered output of whatever was inside the loop, recursively.
The Template class is a convenient wrapper that takes care of template
compilation and rendering.
Usage:
The only thing you should ever use directly in this file is the Template class.
Create a compiled template object with a template_string, then call render()
with a context. In the compilation stage, the TemplateSyntaxError exception
will be raised if the template doesn't have proper syntax.
Sample code:
>>> from django import template
>>> s = '<html>{% if test %}<h1>{{ varvalue }}</h1>{% endif %}</html>'
>>> t = template.Template(s)
(t is now a compiled template, and its render() method can be called multiple
times with multiple contexts)
>>> c = template.Context({'test':True, 'varvalue': 'Hello'})
>>> t.render(c)
'<html><h1>Hello</h1></html>'
>>> c = template.Context({'test':False, 'varvalue': 'Hello'})
>>> t.render(c)
'<html></html>'
"""
from __future__ import unicode_literals
import inspect
import logging
import re
import warnings
from django.template.context import ( # NOQA: imported for backwards compatibility
BaseContext, Context, ContextPopException, RequestContext,
)
from django.utils import six
from django.utils.deprecation import (
DeprecationInstanceCheck, RemovedInDjango20Warning,
)
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.formats import localize
from django.utils.html import conditional_escape, escape
from django.utils.inspect import getargspec
from django.utils.safestring import (
EscapeData, SafeData, mark_for_escaping, mark_safe,
)
from django.utils.text import (
get_text_list, smart_split, unescape_string_literal,
)
from django.utils.timezone import template_localtime
from django.utils.translation import pgettext_lazy, ugettext_lazy
from .exceptions import TemplateSyntaxError
TOKEN_TEXT = 0
TOKEN_VAR = 1
TOKEN_BLOCK = 2
TOKEN_COMMENT = 3
TOKEN_MAPPING = {
TOKEN_TEXT: 'Text',
TOKEN_VAR: 'Var',
TOKEN_BLOCK: 'Block',
TOKEN_COMMENT: 'Comment',
}
# template syntax constants
FILTER_SEPARATOR = '|'
FILTER_ARGUMENT_SEPARATOR = ':'
VARIABLE_ATTRIBUTE_SEPARATOR = '.'
BLOCK_TAG_START = '{%'
BLOCK_TAG_END = '%}'
VARIABLE_TAG_START = '{{'
VARIABLE_TAG_END = '}}'
COMMENT_TAG_START = '{#'
COMMENT_TAG_END = '#}'
TRANSLATOR_COMMENT_MARK = 'Translators'
SINGLE_BRACE_START = '{'
SINGLE_BRACE_END = '}'
# what to report as the origin for templates that come from non-loader sources
# (e.g. strings)
UNKNOWN_SOURCE = '<unknown source>'
# match a variable or block tag and capture the entire tag, including start/end
# delimiters
tag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' %
(re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),
re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END))))
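# Illustrative note (not part of the original module): because tag_re wraps the
# whole pattern in a single capturing group, re.split() interleaves literal text
# with the captured tags, which is exactly what Lexer.tokenize() relies on:
#   >>> tag_re.split('a {{ b }} c {% if d %}')
#   ['a ', '{{ b }}', ' c ', '{% if d %}', '']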
logger = logging.getLogger('django.template')
class TemplateEncodingError(Exception):
pass
@python_2_unicode_compatible
class VariableDoesNotExist(Exception):
def __init__(self, msg, params=()):
self.msg = msg
self.params = params
def __str__(self):
return self.msg % tuple(force_text(p, errors='replace') for p in self.params)
class Origin(object):
def __init__(self, name, template_name=None, loader=None):
self.name = name
self.template_name = template_name
self.loader = loader
def __str__(self):
return self.name
def __eq__(self, other):
if not isinstance(other, Origin):
return False
return (
self.name == other.name and
self.loader == other.loader
)
def __ne__(self, other):
return not self.__eq__(other)
@property
def loader_name(self):
if self.loader:
return '%s.%s' % (
self.loader.__module__, self.loader.__class__.__name__,
)
class StringOrigin(six.with_metaclass(DeprecationInstanceCheck, Origin)):
alternative = 'django.template.Origin'
deprecation_warning = RemovedInDjango20Warning
class Template(object):
def __init__(self, template_string, origin=None, name=None, engine=None):
try:
template_string = force_text(template_string)
except UnicodeDecodeError:
raise TemplateEncodingError("Templates can only be constructed "
"from unicode or UTF-8 strings.")
# If Template is instantiated directly rather than from an Engine and
# exactly one Django template engine is configured, use that engine.
# This is required to preserve backwards-compatibility for direct use
# e.g. Template('...').render(Context({...}))
if engine is None:
from .engine import Engine
engine = Engine.get_default()
if origin is None:
origin = Origin(UNKNOWN_SOURCE)
self.name = name
self.origin = origin
self.engine = engine
self.source = template_string
self.nodelist = self.compile_nodelist()
def __iter__(self):
for node in self.nodelist:
for subnode in node:
yield subnode
def _render(self, context):
return self.nodelist.render(context)
def render(self, context):
"Display stage -- can be called many times"
with context.render_context.push_state(self):
if context.template is None:
with context.bind_template(self):
context.template_name = self.name
return self._render(context)
else:
return self._render(context)
def compile_nodelist(self):
"""
Parse and compile the template source into a nodelist. If debug
is True and an exception occurs during parsing, the exception is
annotated with contextual line information where it occurred in the
template source.
"""
if self.engine.debug:
lexer = DebugLexer(self.source)
else:
lexer = Lexer(self.source)
tokens = lexer.tokenize()
parser = Parser(
tokens, self.engine.template_libraries, self.engine.template_builtins,
self.origin,
)
try:
return parser.parse()
except Exception as e:
if self.engine.debug:
e.template_debug = self.get_exception_info(e, e.token)
raise
def get_exception_info(self, exception, token):
"""
Return a dictionary containing contextual line information of where
the exception occurred in the template. The following information is
provided:
message
The message of the exception raised.
source_lines
The lines before, after, and including the line the exception
occurred on.
line
The line number the exception occurred on.
before, during, after
The line the exception occurred on split into three parts:
1. The content before the token that raised the error.
2. The token that raised the error.
3. The content after the token that raised the error.
total
The number of lines in source_lines.
top
The line number where source_lines starts.
bottom
The line number where source_lines ends.
start
The start position of the token in the template source.
end
The end position of the token in the template source.
"""
start, end = token.position
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
for num, next in enumerate(linebreak_iter(self.source)):
if start >= upto and end <= next:
line = num
before = escape(self.source[upto:start])
during = escape(self.source[start:end])
after = escape(self.source[end:next])
source_lines.append((num, escape(self.source[upto:next])))
upto = next
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
# In some rare cases exc_value.args can be empty or an invalid
# unicode string.
try:
message = force_text(exception.args[0])
except (IndexError, UnicodeDecodeError):
message = '(Could not get exception message)'
return {
'message': message,
'source_lines': source_lines[top:bottom],
'before': before,
'during': during,
'after': after,
'top': top,
'bottom': bottom,
'total': total,
'line': line,
'name': self.origin.name,
'start': start,
'end': end,
}
def linebreak_iter(template_source):
yield 0
p = template_source.find('\n')
while p >= 0:
yield p + 1
p = template_source.find('\n', p + 1)
yield len(template_source) + 1
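# Illustrative note (not part of the original module): linebreak_iter() yields 0,
# then the offset just past each newline, and finally one past the end of the
# string; get_exception_info() treats consecutive pairs as line boundaries:
#   >>> list(linebreak_iter('ab\ncd\nef'))
#   [0, 3, 6, 9]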
class Token(object):
def __init__(self, token_type, contents, position=None, lineno=None):
"""
A token representing a string from the template.
token_type
One of TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, or TOKEN_COMMENT.
contents
The token source string.
position
An optional tuple containing the start and end index of the token
in the template source. This is used for traceback information
when debug is on.
lineno
The line number the token appears on in the template source.
This is used for traceback information and gettext files.
"""
self.token_type, self.contents = token_type, contents
self.lineno = lineno
self.position = position
def __str__(self):
token_name = TOKEN_MAPPING[self.token_type]
return ('<%s token: "%s...">' %
(token_name, self.contents[:20].replace('\n', '')))
def split_contents(self):
split = []
bits = iter(smart_split(self.contents))
for bit in bits:
# Handle translation-marked template pieces
if bit.startswith(('_("', "_('")):
sentinel = bit[2] + ')'
trans_bit = [bit]
while not bit.endswith(sentinel):
bit = next(bits)
trans_bit.append(bit)
bit = ' '.join(trans_bit)
split.append(bit)
return split
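# Illustrative note (not part of the original module): split_contents() keeps
# quoted strings and translation-marked literals intact rather than splitting
# them on whitespace:
#   >>> Token(TOKEN_BLOCK, 'trans "hello world" noop').split_contents()
#   ['trans', '"hello world"', 'noop']
#   >>> Token(TOKEN_BLOCK, 'firstof _("a b") c').split_contents()
#   ['firstof', '_("a b")', 'c']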
class Lexer(object):
def __init__(self, template_string):
self.template_string = template_string
self.verbatim = False
def tokenize(self):
"""
Return a list of tokens from a given template_string.
"""
in_tag = False
lineno = 1
result = []
for bit in tag_re.split(self.template_string):
if bit:
result.append(self.create_token(bit, None, lineno, in_tag))
in_tag = not in_tag
lineno += bit.count('\n')
return result
def create_token(self, token_string, position, lineno, in_tag):
"""
Convert the given token string into a new Token object and return it.
If in_tag is True, we are processing something that matched a tag,
otherwise it should be treated as a literal string.
"""
if in_tag and token_string.startswith(BLOCK_TAG_START):
# The [2:-2] ranges below strip off *_TAG_START and *_TAG_END.
# We could do len(BLOCK_TAG_START) to be more "correct", but we've
# hard-coded the 2s here for performance. And it's not like
# the TAG_START values are going to change anytime, anyway.
block_content = token_string[2:-2].strip()
if self.verbatim and block_content == self.verbatim:
self.verbatim = False
if in_tag and not self.verbatim:
if token_string.startswith(VARIABLE_TAG_START):
token = Token(TOKEN_VAR, token_string[2:-2].strip(), position, lineno)
elif token_string.startswith(BLOCK_TAG_START):
if block_content[:9] in ('verbatim', 'verbatim '):
self.verbatim = 'end%s' % block_content
token = Token(TOKEN_BLOCK, block_content, position, lineno)
elif token_string.startswith(COMMENT_TAG_START):
content = ''
if token_string.find(TRANSLATOR_COMMENT_MARK):
content = token_string[2:-2].strip()
token = Token(TOKEN_COMMENT, content, position, lineno)
else:
token = Token(TOKEN_TEXT, token_string, position, lineno)
return token
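# Illustrative note (not part of the original module): the verbatim bookkeeping in
# create_token() means that after a {% verbatim %} block tag, every bit is emitted
# as TOKEN_TEXT until the matching {% endverbatim %} tag clears self.verbatim:
#   >>> [t.token_type for t in Lexer('{% verbatim %}{{ x }}{% endverbatim %}').tokenize()]
#   [2, 0, 2]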
class DebugLexer(Lexer):
def tokenize(self):
"""
Split a template string into tokens and annotate each token with its
start and end position in the source. This is slower than the default
lexer so we only use it when debug is True.
"""
lineno = 1
result = []
upto = 0
for match in tag_re.finditer(self.template_string):
start, end = match.span()
if start > upto:
token_string = self.template_string[upto:start]
result.append(self.create_token(token_string, (upto, start), lineno, in_tag=False))
lineno += token_string.count('\n')
upto = start
token_string = self.template_string[start:end]
result.append(self.create_token(token_string, (start, end), lineno, in_tag=True))
lineno += token_string.count('\n')
upto = end
last_bit = self.template_string[upto:]
if last_bit:
result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), lineno, in_tag=False))
return result
class Parser(object):
def __init__(self, tokens, libraries=None, builtins=None, origin=None):
self.tokens = tokens
self.tags = {}
self.filters = {}
self.command_stack = []
if libraries is None:
libraries = {}
if builtins is None:
builtins = []
self.libraries = libraries
for builtin in builtins:
self.add_library(builtin)
self.origin = origin
def parse(self, parse_until=None):
"""
Iterate through the parser tokens and compile each one into a node.
If parse_until is provided, parsing will stop once one of the
specified tokens has been reached. This is formatted as a list of
tokens, e.g. ['elif', 'else', 'endif']. If no matching token is
reached, raise an exception with the unclosed block tag details.
"""
if parse_until is None:
parse_until = []
nodelist = NodeList()
while self.tokens:
token = self.next_token()
# Use the raw values here for TOKEN_* for a tiny performance boost.
if token.token_type == 0: # TOKEN_TEXT
self.extend_nodelist(nodelist, TextNode(token.contents), token)
elif token.token_type == 1: # TOKEN_VAR
if not token.contents:
raise self.error(token, 'Empty variable tag on line %d' % token.lineno)
try:
filter_expression = self.compile_filter(token.contents)
except TemplateSyntaxError as e:
raise self.error(token, e)
var_node = VariableNode(filter_expression)
self.extend_nodelist(nodelist, var_node, token)
elif token.token_type == 2: # TOKEN_BLOCK
try:
command = token.contents.split()[0]
except IndexError:
raise self.error(token, 'Empty block tag on line %d' % token.lineno)
if command in parse_until:
# A matching token has been reached. Return control to
# the caller. Put the token back on the token list so the
# caller knows where it terminated.
self.prepend_token(token)
return nodelist
# Add the token to the command stack. This is used for error
# messages if further parsing fails due to an unclosed block
# tag.
self.command_stack.append((command, token))
# Get the tag callback function from the ones registered with
# the parser.
try:
compile_func = self.tags[command]
except KeyError:
self.invalid_block_tag(token, command, parse_until)
# Compile the callback into a node object and add it to
# the node list.
try:
compiled_result = compile_func(self, token)
except Exception as e:
raise self.error(token, e)
self.extend_nodelist(nodelist, compiled_result, token)
# Compile success. Remove the token from the command stack.
self.command_stack.pop()
if parse_until:
self.unclosed_block_tag(parse_until)
return nodelist
def skip_past(self, endtag):
while self.tokens:
token = self.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == endtag:
return
self.unclosed_block_tag([endtag])
def extend_nodelist(self, nodelist, node, token):
# Check that non-text nodes don't appear before an extends tag.
if node.must_be_first and nodelist.contains_nontext:
raise self.error(
token, '%r must be the first tag in the template.' % node,
)
if isinstance(nodelist, NodeList) and not isinstance(node, TextNode):
nodelist.contains_nontext = True
# Set origin and token here since we can't modify the node __init__()
# method.
node.token = token
node.origin = self.origin
nodelist.append(node)
def error(self, token, e):
"""
Return an exception annotated with the originating token. Since the
parser can be called recursively, check if a token is already set. This
ensures the innermost token is highlighted if an exception occurs,
e.g. a compile error within the body of an if statement.
"""
if not isinstance(e, Exception):
e = TemplateSyntaxError(e)
if not hasattr(e, 'token'):
e.token = token
return e
def invalid_block_tag(self, token, command, parse_until=None):
if parse_until:
raise self.error(
token,
"Invalid block tag on line %d: '%s', expected %s. Did you "
"forget to register or load this tag?" % (
token.lineno,
command,
get_text_list(["'%s'" % p for p in parse_until], 'or'),
),
)
raise self.error(
token,
"Invalid block tag on line %d: '%s'. Did you forget to register "
"or load this tag?" % (token.lineno, command)
)
def unclosed_block_tag(self, parse_until):
command, token = self.command_stack.pop()
msg = "Unclosed tag on line %d: '%s'. Looking for one of: %s." % (
token.lineno,
command,
', '.join(parse_until),
)
raise self.error(token, msg)
def next_token(self):
return self.tokens.pop(0)
def prepend_token(self, token):
self.tokens.insert(0, token)
def delete_first_token(self):
del self.tokens[0]
def add_library(self, lib):
self.tags.update(lib.tags)
self.filters.update(lib.filters)
def compile_filter(self, token):
"""
Convenient wrapper for FilterExpression
"""
return FilterExpression(token, self)
def find_filter(self, filter_name):
if filter_name in self.filters:
return self.filters[filter_name]
else:
raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name)
# This only matches constant *strings* (things in quotes or marked for
# translation). Numbers are treated as variables for implementation reasons
# (so that they retain their type when passed to filters).
constant_string = r"""
(?:%(i18n_open)s%(strdq)s%(i18n_close)s|
%(i18n_open)s%(strsq)s%(i18n_close)s|
%(strdq)s|
%(strsq)s)
""" % {
'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"', # double-quoted string
'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'", # single-quoted string
'i18n_open': re.escape("_("),
'i18n_close': re.escape(")"),
}
constant_string = constant_string.replace("\n", "")
filter_raw_string = r"""
^(?P<constant>%(constant)s)|
^(?P<var>[%(var_chars)s]+|%(num)s)|
(?:\s*%(filter_sep)s\s*
(?P<filter_name>\w+)
(?:%(arg_sep)s
(?:
(?P<constant_arg>%(constant)s)|
(?P<var_arg>[%(var_chars)s]+|%(num)s)
)
)?
)""" % {
'constant': constant_string,
'num': r'[-+\.]?\d[\d\.e]*',
'var_chars': r'\w\.',
'filter_sep': re.escape(FILTER_SEPARATOR),
'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR),
}
filter_re = re.compile(filter_raw_string, re.UNICODE | re.VERBOSE)
class FilterExpression(object):
"""
Parse a variable token and its optional filters (all as a single string),
and return a list of tuples of the filter name and arguments.
Sample::
>>> token = 'variable|default:"Default value"|date:"Y-m-d"'
>>> p = Parser('')
>>> fe = FilterExpression(token, p)
>>> len(fe.filters)
2
>>> fe.var
<Variable: 'variable'>
"""
def __init__(self, token, parser):
self.token = token
matches = filter_re.finditer(token)
var_obj = None
filters = []
upto = 0
for match in matches:
start = match.start()
if upto != start:
raise TemplateSyntaxError("Could not parse some characters: "
"%s|%s|%s" %
(token[:upto], token[upto:start],
token[start:]))
if var_obj is None:
var, constant = match.group("var", "constant")
if constant:
try:
var_obj = Variable(constant).resolve({})
except VariableDoesNotExist:
var_obj = None
elif var is None:
raise TemplateSyntaxError("Could not find variable at "
"start of %s." % token)
else:
var_obj = Variable(var)
else:
filter_name = match.group("filter_name")
args = []
constant_arg, var_arg = match.group("constant_arg", "var_arg")
if constant_arg:
args.append((False, Variable(constant_arg).resolve({})))
elif var_arg:
args.append((True, Variable(var_arg)))
filter_func = parser.find_filter(filter_name)
self.args_check(filter_name, filter_func, args)
filters.append((filter_func, args))
upto = match.end()
if upto != len(token):
raise TemplateSyntaxError("Could not parse the remainder: '%s' "
"from '%s'" % (token[upto:], token))
self.filters = filters
self.var = var_obj
def resolve(self, context, ignore_failures=False):
if isinstance(self.var, Variable):
try:
obj = self.var.resolve(context)
except VariableDoesNotExist:
if ignore_failures:
obj = None
else:
string_if_invalid = context.template.engine.string_if_invalid
if string_if_invalid:
if '%s' in string_if_invalid:
return string_if_invalid % self.var
else:
return string_if_invalid
else:
obj = string_if_invalid
else:
obj = self.var
escape_isnt_last_filter = True
for func, args in self.filters:
arg_vals = []
for lookup, arg in args:
if not lookup:
arg_vals.append(mark_safe(arg))
else:
arg_vals.append(arg.resolve(context))
if getattr(func, 'expects_localtime', False):
obj = template_localtime(obj, context.use_tz)
if getattr(func, 'needs_autoescape', False):
new_obj = func(obj, autoescape=context.autoescape, *arg_vals)
else:
new_obj = func(obj, *arg_vals)
if getattr(func, 'is_safe', False) and isinstance(obj, SafeData):
obj = mark_safe(new_obj)
elif isinstance(obj, EscapeData):
with warnings.catch_warnings():
# Ignore mark_for_escaping deprecation as this will be
# removed in Django 2.0.
warnings.simplefilter('ignore', category=RemovedInDjango20Warning)
obj = mark_for_escaping(new_obj)
escape_isnt_last_filter = False
else:
obj = new_obj
if not escape_isnt_last_filter:
warnings.warn(
"escape isn't the last filter in %s and will be applied "
"immediately in Django 2.0 so the output may change."
% [func.__name__ for func, _ in self.filters],
RemovedInDjango20Warning, stacklevel=2
)
return obj
def args_check(name, func, provided):
provided = list(provided)
# First argument, filter input, is implied.
plen = len(provided) + 1
# Check to see if a decorator is providing the real function.
func = getattr(func, '_decorated_function', func)
args, _, _, defaults = getargspec(func)
alen = len(args)
dlen = len(defaults or [])
# Not enough OR Too many
if plen < (alen - dlen) or plen > alen:
raise TemplateSyntaxError("%s requires %d arguments, %d provided" %
(name, alen - dlen, plen))
return True
args_check = staticmethod(args_check)
def __str__(self):
return self.token
class Variable(object):
"""
A template variable, resolvable against a given context. The variable may
be a hard-coded string (if it begins and ends with single or double quote
marks)::
>>> c = {'article': {'section':'News'}}
>>> Variable('article.section').resolve(c)
'News'
>>> Variable('article').resolve(c)
{'section': 'News'}
>>> class AClass: pass
>>> c = AClass()
>>> c.article = AClass()
>>> c.article.section = 'News'
(The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.')
"""
def __init__(self, var):
self.var = var
self.literal = None
self.lookups = None
self.translate = False
self.message_context = None
if not isinstance(var, six.string_types):
raise TypeError(
"Variable must be a string or number, got %s" % type(var))
try:
# First try to treat this variable as a number.
#
# Note that this could cause an OverflowError here that we're not
# catching. Since this should only happen at compile time, that's
# probably OK.
self.literal = float(var)
# So it's a float... is it an int? If the original value contained a
# dot or an "e" then it was a float, not an int.
if '.' not in var and 'e' not in var.lower():
self.literal = int(self.literal)
# "2." is invalid
if var.endswith('.'):
raise ValueError
except ValueError:
# A ValueError means that the variable isn't a number.
if var.startswith('_(') and var.endswith(')'):
# The result of the lookup should be translated at rendering
# time.
self.translate = True
var = var[2:-1]
# If it's wrapped with quotes (single or double), then
# we're also dealing with a literal.
try:
self.literal = mark_safe(unescape_string_literal(var))
except ValueError:
# Otherwise we'll set self.lookups so that resolve() knows we're
# dealing with a bonafide variable
if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_':
raise TemplateSyntaxError("Variables and attributes may "
"not begin with underscores: '%s'" %
var)
self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR))
def resolve(self, context):
"""Resolve this variable against a given context."""
if self.lookups is not None:
# We're dealing with a variable that needs to be resolved
value = self._resolve_lookup(context)
else:
# We're dealing with a literal, so it's already been "resolved"
value = self.literal
if self.translate:
is_safe = isinstance(value, SafeData)
msgid = value.replace('%', '%%')
msgid = mark_safe(msgid) if is_safe else msgid
if self.message_context:
return pgettext_lazy(self.message_context, msgid)
else:
return ugettext_lazy(msgid)
return value
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.var)
def __str__(self):
return self.var
def _resolve_lookup(self, context):
"""
Performs resolution of a real variable (i.e. not a literal) against the
given context.
As indicated by the method's name, this method is an implementation
detail and shouldn't be called by external code. Use Variable.resolve()
instead.
"""
current = context
try: # catch-all for silent variable failures
for bit in self.lookups:
try: # dictionary lookup
current = current[bit]
# ValueError/IndexError are for numpy.array lookup on
# numpy < 1.9 and 1.9+ respectively
except (TypeError, AttributeError, KeyError, ValueError, IndexError):
try: # attribute lookup
# Don't return class attributes if the class is the context:
if isinstance(current, BaseContext) and getattr(type(current), bit):
raise AttributeError
current = getattr(current, bit)
except (TypeError, AttributeError) as e:
# Reraise if the exception was raised by a @property
if not isinstance(current, BaseContext) and bit in dir(current):
raise
try: # list-index lookup
current = current[int(bit)]
except (IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # current is a dict without `int(bit)` key
TypeError): # unsubscriptable object
raise VariableDoesNotExist("Failed lookup for key "
"[%s] in %r",
(bit, current)) # missing attribute
if callable(current):
if getattr(current, 'do_not_call_in_templates', False):
pass
elif getattr(current, 'alters_data', False):
current = context.template.engine.string_if_invalid
else:
try: # method call (assuming no args required)
current = current()
except TypeError:
try:
inspect.getcallargs(current)
except TypeError: # arguments *were* required
current = context.template.engine.string_if_invalid # invalid method call
else:
raise
except Exception as e:
template_name = getattr(context, 'template_name', None) or 'unknown'
logger.debug(
"Exception while resolving variable '%s' in template '%s'.",
bit,
template_name,
exc_info=True,
)
if getattr(e, 'silent_variable_failure', False):
current = context.template.engine.string_if_invalid
else:
raise
return current
class Node(object):
# Set this to True for nodes that must be first in the template (although
# they can be preceded by text nodes.
must_be_first = False
child_nodelists = ('nodelist',)
token = None
def render(self, context):
"""
Return the node rendered as a string.
"""
pass
def render_annotated(self, context):
"""
Render the node. If debug is True and an exception occurs during
rendering, the exception is annotated with contextual line information
where it occurred in the template. For internal usage this method is
preferred over using the render method directly.
"""
try:
return self.render(context)
except Exception as e:
if context.template.engine.debug and not hasattr(e, 'template_debug'):
e.template_debug = context.render_context.template.get_exception_info(e, self.token)
raise
def __iter__(self):
yield self
def get_nodes_by_type(self, nodetype):
"""
Return a list of all nodes (within this node and its nodelist)
of the given type
"""
nodes = []
if isinstance(self, nodetype):
nodes.append(self)
for attr in self.child_nodelists:
nodelist = getattr(self, attr, None)
if nodelist:
nodes.extend(nodelist.get_nodes_by_type(nodetype))
return nodes
class NodeList(list):
# Set to True the first time a non-TextNode is inserted by
# extend_nodelist().
contains_nontext = False
def render(self, context):
bits = []
for node in self:
if isinstance(node, Node):
bit = node.render_annotated(context)
else:
bit = node
bits.append(force_text(bit))
return mark_safe(''.join(bits))
def get_nodes_by_type(self, nodetype):
"Return a list of all nodes of the given type"
nodes = []
for node in self:
nodes.extend(node.get_nodes_by_type(nodetype))
return nodes
class TextNode(Node):
def __init__(self, s):
self.s = s
def __repr__(self):
rep = "<%s: %r>" % (self.__class__.__name__, self.s[:25])
return force_str(rep, 'ascii', errors='replace')
def render(self, context):
return self.s
def render_value_in_context(value, context):
"""
Converts any value to a string to become part of a rendered template. This
means escaping, if required, and conversion to a unicode object. If value
is a string, it is expected to have already been translated.
"""
value = template_localtime(value, use_tz=context.use_tz)
value = localize(value, use_l10n=context.use_l10n)
value = force_text(value)
if context.autoescape or isinstance(value, EscapeData):
return conditional_escape(value)
else:
return value
class VariableNode(Node):
def __init__(self, filter_expression):
self.filter_expression = filter_expression
def __repr__(self):
return "<Variable Node: %s>" % self.filter_expression
def render(self, context):
try:
output = self.filter_expression.resolve(context)
except UnicodeDecodeError:
# Unicode conversion can fail sometimes for reasons out of our
# control (e.g. exception rendering). In that case, we fail
# quietly.
return ''
return render_value_in_context(output, context)
# Regex for token keyword arguments
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
def token_kwargs(bits, parser, support_legacy=False):
"""
A utility method for parsing token keyword arguments.
:param bits: A list containing remainder of the token (split by spaces)
that is to be checked for arguments. Valid arguments will be removed
from this list.
:param support_legacy: If set to ``True``, the legacy format
``1 as foo`` will be accepted. Otherwise, only the standard ``foo=1``
format is allowed.
:returns: A dictionary of the arguments retrieved from the ``bits`` token
list.
There is no requirement for all remaining token ``bits`` to be keyword
arguments, so the dictionary will be returned as soon as an invalid
argument format is reached.
"""
if not bits:
return {}
match = kwarg_re.match(bits[0])
kwarg_format = match and match.group(1)
if not kwarg_format:
if not support_legacy:
return {}
if len(bits) < 3 or bits[1] != 'as':
return {}
kwargs = {}
while bits:
if kwarg_format:
match = kwarg_re.match(bits[0])
if not match or not match.group(1):
return kwargs
key, value = match.groups()
del bits[:1]
else:
if len(bits) < 3 or bits[1] != 'as':
return kwargs
key, value = bits[2], bits[0]
del bits[:3]
kwargs[key] = parser.compile_filter(value)
if bits and not kwarg_format:
if bits[0] != 'and':
return kwargs
del bits[:1]
return kwargs
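# Illustrative note (not part of the original module): a tag implementation would
# typically call token_kwargs() on the bits remaining after a keyword such as
# 'with', e.g. for {% include "a.html" with foo=bar|upper %} the bits
# ['foo=bar|upper'] produce a dict mapping 'foo' to a FilterExpression compiled
# from 'bar|upper', and the consumed bits are deleted from the list in place.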
|
bsd-3-clause
|
eriol/pywt
|
setup.py
|
1
|
9482
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import os
import sys
import subprocess
from functools import partial
from distutils.sysconfig import get_python_inc
import setuptools
from setuptools import setup, Extension
from numpy import get_include as get_numpy_include
try:
from Cython.Build import cythonize
USE_CYTHON = True
except ImportError:
USE_CYTHON = False
if not os.path.exists(os.path.join('pywt', '_extensions', '_pywt.c')):
msg = ("Cython must be installed when working with a development "
"version of PyWavelets")
raise RuntimeError(msg)
MAJOR = 1
MINOR = 0
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def get_version_info():
# Adding the git rev number needs to be done inside
# write_version_py(), otherwise the import of pywt.version messes
# up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('pywt/version.py'):
# must be a source distribution, use existing version file
# load it as a separate module to not load pywt/__init__.py
import imp
version = imp.load_source('pywt.version', 'pywt/version.py')
GIT_REVISION = version.git_revision
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
return FULLVERSION, GIT_REVISION
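# Illustrative note (not part of this setup script): with the constants above
# (MAJOR=1, MINOR=0, MICRO=0, ISRELEASED=False) and a git HEAD of, say,
# 'abc1234...', get_version_info() returns something like
# ('1.0.0.dev0+abc1234', 'abc1234...').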
def write_version_py(filename='pywt/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM PYWAVELETS SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
FULLVERSION, GIT_REVISION = get_version_info()
with open(filename, 'w') as a:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)})
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
if sys.platform == "darwin":
# Don't create resource files on OS X tar.
os.environ["COPY_EXTENDED_ATTRIBUTES_DISABLE"] = "true"
os.environ["COPYFILE_DISABLE"] = "true"
make_ext_path = partial(os.path.join, "pywt", "_extensions")
sources = ["c/common.c", "c/convolution.c", "c/wt.c", "c/wavelets.c", "c/cwt.c"]
sources = list(map(make_ext_path, sources))
source_templates = ["c/convolution.template.c", "c/wt.template.c", "c/cwt.template.c"]
source_templates = list(map(make_ext_path, source_templates))
headers = ["c/templating.h", "c/wavelets_coeffs.h",
"c/common.h", "c/convolution.h", "c/wt.h", "c/wavelets.h", "c/cwt.h"]
headers = list(map(make_ext_path, headers))
header_templates = ["c/convolution.template.h", "c/wt.template.h",
"c/wavelets_coeffs.template.h", "c/cwt.template.h"]
header_templates = list(map(make_ext_path, header_templates))
cython_modules = ['_pywt', '_dwt', '_swt', '_cwt']
cython_sources = [('{0}.pyx' if USE_CYTHON else '{0}.c').format(module)
for module in cython_modules]
c_macros = [("PY_EXTENSION", None)]
cython_macros = []
cythonize_opts = {}
if os.environ.get("CYTHON_TRACE"):
cythonize_opts['linetrace'] = True
cython_macros.append(("CYTHON_TRACE_NOGIL", 1))
# By default C object files are rebuilt for every extension
# C files must be built once only for coverage to work
c_lib = ('c_wt',{'sources': sources,
'depends': source_templates + header_templates + headers,
'include_dirs': [make_ext_path("c"), get_python_inc()],
'macros': c_macros,})
ext_modules = [
Extension('pywt._extensions.{0}'.format(module),
sources=[make_ext_path(source)],
# Doesn't automatically rebuild if library changes
depends=c_lib[1]['sources'] + c_lib[1]['depends'],
include_dirs=[make_ext_path("c"), get_numpy_include()],
define_macros=c_macros + cython_macros,
libraries=[c_lib[0]],)
for module, source, in zip(cython_modules, cython_sources)
]
from setuptools.command.develop import develop
class develop_build_clib(develop):
"""Ugly monkeypatching to get clib to build for development installs
See coverage comment above for why we don't just let libraries be built
via extensions.
All this is a copy of the relevant part of `install_for_development`
for current master (Sep 2016) of setuptools.
Note: if you want to build in-place with ``python setup.py build_ext``,
that will only work if you first do ``python setup.py build_clib``.
"""
def install_for_development(self):
self.run_command('egg_info')
# Build extensions in-place (the next 7 lines are the monkeypatch)
import glob
hitlist = glob.glob(os.path.join('build', '*', 'libc_wt.*'))
if hitlist:
# Remove existing clib - running build_clib twice in a row fails
os.remove(hitlist[0])
self.reinitialize_command('build_clib', inplace=1)
self.run_command('build_clib')
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
self.install_site_py() # ensure that target dir is site-safe
if setuptools.bootstrap_install_from:
self.easy_install(setuptools.bootstrap_install_from)
setuptools.bootstrap_install_from = None
# create an .egg-link in the installation dir, pointing to our egg
from distutils import log
log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
if not self.dry_run:
with open(self.egg_link, "w") as f:
f.write(self.egg_path + "\n" + self.setup_path)
# postprocess the installed distro, fixing up .pth, installing scripts,
# and handling requirements
self.process_distribution(None, self.dist, not self.no_deps)
if __name__ == '__main__':
# Rewrite the version file every time
write_version_py()
if USE_CYTHON:
ext_modules = cythonize(ext_modules, compiler_directives=cythonize_opts)
setup(
name="PyWavelets",
maintainer="The PyWavelets Developers",
maintainer_email="[email protected]",
url="https://github.com/PyWavelets/pywt",
download_url="https://github.com/PyWavelets/pywt/releases",
license="MIT",
description="PyWavelets, wavelet transform module",
long_description="""\
PyWavelets is a Python wavelet transforms module that includes:
* nD Forward and Inverse Discrete Wavelet Transform (DWT and IDWT)
* 1D and 2D Forward and Inverse Stationary Wavelet Transform (Undecimated Wavelet Transform)
* 1D and 2D Wavelet Packet decomposition and reconstruction
* 1D Continuous Wavelet Transform
* Computing Approximations of wavelet and scaling functions
* Over 100 built-in wavelet filters and support for custom wavelets
* Single and double precision calculations
* Real and complex calculations
* Results compatible with Matlab Wavelet Toolbox (TM)
""",
keywords=["wavelets", "wavelet transform", "DWT", "SWT", "CWT", "scientific"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: C",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Libraries :: Python Modules"
],
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
version=get_version_info()[0],
packages=['pywt', 'pywt._extensions', 'pywt.data'],
package_data={'pywt.data': ['*.npy', '*.npz']},
ext_modules=ext_modules,
libraries=[c_lib],
cmdclass={'develop': develop_build_clib},
test_suite='nose.collector',
# A function is imported in setup.py, so not really useful
install_requires=["numpy"],
)
|
mit
|
numenta-ci/nupic
|
examples/opf/experiments/anomaly/temporal/simple/description.py
|
3
|
14147
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/current/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step. 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'f': {
'clipInput': True,
'fieldname': u'f',
'n': 100,
'name': u'f',
'minval': 0,
'maxval': 5,
'type': 'ScalarEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys are any combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'spatialImp' : 'cpp',
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.05,
'synPermInactiveDec': 0.008,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states) allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
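# Illustrative note (not part of the original experiment description): if a
# sub-experiment set predictAheadTime to, say, two hours with a one-hour
# aggregation period, aggregationDivide() should yield 2 and 'steps' would
# become '2'; with predictAheadTime left as None, the default 'steps' of '1'
# defined above is kept.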
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : { u'info': u'cerebro_dummy',
u'streams': [ { u'columns': [u'*'],
u'info': u'hotGym.csv',
u'source': u'file://'+os.path.join(os.path.dirname(__file__), 'data.csv')}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'f', u'predictionSteps': [1]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'f', metric='passThruPrediction', inferenceElement='anomalyScore', params={'window': 1000}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
|
agpl-3.0
|
ddelemeny/calligra
|
3rdparty/google-breakpad/src/tools/gyp/pylib/gyp/common_test.py
|
21
|
1086
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
class TestTopologicallySorted(unittest.TestCase):
def test_Valid(self):
"""Test that sorting works on a valid graph with one possible order."""
graph = {
'a': ['b', 'c'],
'b': [],
'c': ['d'],
'd': ['b'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertEqual(
gyp.common.TopologicallySorted(graph.keys(), GetEdge),
['a', 'c', 'd', 'b'])
def test_Cycle(self):
"""Test that an exception is thrown on a cyclic graph."""
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertRaises(
gyp.common.CycleError, gyp.common.TopologicallySorted,
graph.keys(), GetEdge)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
pilou-/ansible
|
test/units/modules/network/edgeswitch/test_edgeswitch_vlan.py
|
29
|
5640
|
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.edgeswitch import edgeswitch_vlan
from ansible.modules.network.edgeswitch.edgeswitch_vlan import parse_vlan_brief, parse_interfaces_switchport
from units.modules.utils import set_module_args
from .edgeswitch_module import TestEdgeswitchModule, load_fixture
class TestEdgeswitchVlanModule(TestEdgeswitchModule):
module = edgeswitch_vlan
def setUp(self):
super(TestEdgeswitchVlanModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.edgeswitch.edgeswitch_vlan.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible.modules.network.edgeswitch.edgeswitch_vlan.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestEdgeswitchVlanModule, self).tearDown()
self.mock_run_commands.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for command in commands:
if command.startswith('vlan ') or command == 'exit':
output.append('')
else:
filename = str(command).split(' | ')[0].replace(' ', '_')
output.append(load_fixture('edgeswitch_vlan_%s' % filename))
return output
self.run_commands.side_effect = load_from_file
self.load_config.return_value = {}
def test_edgeswitch_vlan_create(self):
set_module_args({'vlan_id': '200', 'name': 'video', 'state': 'present'})
result = self.execute_module(changed=True)
expected_commands = [
'vlan database',
'vlan 200',
'vlan name 200 \"video\"',
'exit'
]
self.assertEqual(result['commands'], expected_commands)
def test_edgeswitch_vlan_id_startwith_100(self):
set_module_args({'vlan_id': '100', 'name': 'voice', 'state': 'present'})
result = self.execute_module(changed=False)
expected_commands = []
self.assertEqual(result['commands'], expected_commands)
def test_edgeswitch_vlan_rename(self):
set_module_args({'vlan_id': '100', 'name': 'video', 'state': 'present'})
result = self.execute_module(changed=True)
expected_commands = [
'vlan database',
'vlan name 100 \"video\"',
'exit'
]
self.assertEqual(result['commands'], expected_commands)
def test_edgeswitch_vlan_with_interfaces_range(self):
set_module_args({'vlan_id': '100', 'name': 'voice', 'state': 'present', 'tagged_interfaces': ['0/6-0/8']})
result = self.execute_module(changed=True)
expected_commands = [
'interface 0/6-0/8',
'vlan participation include 100',
'vlan tagging 100',
]
self.assertEqual(result['commands'], expected_commands)
def test_edgeswitch_vlan_with_interfaces_and_newvlan(self):
set_module_args({'vlan_id': '3', 'name': 'vlan3', 'state': 'present', 'untagged_interfaces': ['0/8', '0/7']})
result = self.execute_module(changed=True)
expected_commands = [
'vlan database',
'vlan 3',
'vlan name 3 \"vlan3\"',
'exit',
'interface 0/7-0/8',
'vlan participation include 3',
'vlan pvid 3',
]
self.assertEqual(result['commands'], expected_commands)
def test_parse_interfaces_switchport(self):
result = parse_interfaces_switchport(load_fixture('edgeswitch_vlan_show_interfaces_switchport'))
i1 = {
'interface': '0/1',
'pvid_mode': '1',
'untagged_vlans': ['1'],
'tagged_vlans': ['100'],
'forbidden_vlans': [''],
}
i3 = {
'interface': '0/3',
'pvid_mode': '1',
'untagged_vlans': [''],
'tagged_vlans': ['100'],
'forbidden_vlans': ['1'],
}
i5 = {
'interface': '0/5',
'pvid_mode': '100',
'untagged_vlans': ['100'],
'tagged_vlans': [''],
'forbidden_vlans': [''],
}
self.assertEqual(result['0/1'], i1)
self.assertEqual(result['0/3'], i3)
self.assertEqual(result['0/5'], i5)
def test_parse_vlan_brief(self):
result = parse_vlan_brief(load_fixture('edgeswitch_vlan_show_vlan_brief'))
obj = [
{
'vlan_id': '1',
'name': 'default'
},
{
'vlan_id': '100',
'name': 'voice'
}
]
self.assertEqual(result, obj)
|
gpl-3.0
|
maellak/invenio
|
modules/bibfield/lib/bibfield_regression_tests.py
|
14
|
15803
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibField module regression tests.
"""
__revision__ = "$Id$"
import timeit
from invenio.testutils import InvenioTestCase
from invenio.config import CFG_TMPDIR
from invenio.bibfield import get_record, create_record, create_records
from invenio.bibrecord import record_get_field_values
from invenio.dbquery import run_sql
from invenio.search_engine import get_record as search_engine_get_record
from invenio.testutils import make_test_suite, run_test_suite
class BibFieldRecordFieldValuesTest(InvenioTestCase):
"""
Check whether the values returned by BibField for record fields are consistent
"""
@classmethod
def setUpClass(cls):
from invenio.bibfield_config_engine import BibFieldParser
BibFieldParser.reparse()
def test_normal_fields_availability_and_values(self):
"""BibField - access to normal fields"""
record = get_record(12)
self.assertTrue(record.get('asdas') is None)
self.assertEqual(12, record['recid'])
self.assertTrue('recid' in record.get_persistent_identifiers())
self.assertEqual(record['recid'], record.get('recid'))
self.assertEqual('Physics at the front-end of a neutrino factory : a quantitative appraisal', record['title.title'])
self.assertEqual('Physics at the front-end of a neutrino factory : a quantitative appraisal', record['title']['title'])
self.assertFalse('title.subtitle' in record)
self.assertEqual('Physics at the front-end of a neutrino factory : a quantitative appraisal', record.get('title.title'))
self.assertEqual('Mangano', record['authors[0].last_name'])
self.assertEqual('M L', record['authors[0].first_name'])
self.assertEqual(19, len(record['authors']))
self.assertEqual(19, len(record['authors.last_name']))
def test_compare_field_values_with_bibrecord_values(self):
"""BibField - same value as in bibrecord"""
record = get_record(1)
bibrecord_value = record_get_field_values(search_engine_get_record(1), '245', ' ', ' ', 'a')[0]
self.assertEqual(bibrecord_value, record['title.title'])
def test_derived_fields_availability_and_values(self):
"""BibField - values of derived fields"""
record = get_record(12)
self.assertEqual(19, record['number_of_authors'])
def test_calculated_fields_availability_and_values(self):
"""BibField - values of calculated fields"""
record = get_record(31)
self.assertEqual(2, record['number_of_copies'])
run_sql("insert into crcITEM(barcode, id_bibrec) VALUES('test',31)")
self.assertEqual(3, record.get('number_of_copies', reset_cache=True))
run_sql("delete from crcITEM WHERE barcode='test'")
self.assertEqual(2, record.get('number_of_copies', reset_cache=True))
self.assertEqual(0, record['number_of_citations'])
record = get_record(81)
self.assertEqual(4, record['number_of_citations'])
def test_get_record_using_field_filter(self):
"""BibField - get record filtering fields"""
authors = get_record(12, fields=('authors',))
self.assertEquals(len(authors['authors']), 19)
mainauthor_title = get_record(12, fields=('authors[0]', 'title'))
self.assertTrue('authors[0].full_name' in mainauthor_title)
self.assertTrue('title' in mainauthor_title)
class BibFieldCreateRecordTests(InvenioTestCase):
"""
Bibfield - demo file parsing test
"""
@classmethod
def setUpClass(cls):
from invenio.bibfield_config_engine import BibFieldParser
BibFieldParser.reparse()
def setUp(self):
"""Initialize stuff"""
f = open(CFG_TMPDIR + '/demobibdata.xml', 'r')
blob = f.read()
f.close()
self.recs = [rec for rec in create_records(blob, master_format='marc', schema='xml')]
def test_records_created(self):
""" BibField - demo file how many records are created """
self.assertEqual(141, len(self.recs))
def test_create_record_with_collection_tag(self):
""" BibField - create_record() for single record in collection"""
blob = """
<collection>
<record>
<controlfield tag="001">33</controlfield>
<datafield tag="041" ind1=" " ind2=" ">
<subfield code="a">eng</subfield>
</datafield>
</record>
</collection>
"""
record = create_record(blob, master_format='marc', schema='xml')
record1 = create_records(blob, master_format='marc', schema='xml')[0]
self.assertEqual(record1.keys(), record.keys())
# def test_empty_collection(self):
# """bibfield - empty collection"""
# blob_error0 = """<collection></collection>"""
# rec = create_record(blob_error0, master_format='marc', schema='xml')
# self.assertTrue(rec.is_empty())
# records = create_records(blob_error0)
# self.assertEqual(len(records), 0)
def test_fft_url_tags(self):
"""BibField - FFT versus URL"""
marc_blob = """
<record>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">CERN-HI-6206002</subfield>
</datafield>
<datafield tag="041" ind1=" " ind2=" ">
<subfield code="a">eng</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">At CERN in 1962</subfield>
<subfield code="s">eight Nobel prizewinners</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">1962</subfield>
</datafield>
<datafield tag="506" ind1="1" ind2=" ">
<subfield code="a">jekyll_only</subfield>
</datafield>
<datafield tag="521" ind1=" " ind2=" ">
<subfield code="a">In 1962, CERN hosted the 11th International Conference on High Energy Physics. Among the distinguished visitors were eight Nobel prizewinners.Left to right: Cecil F. Powell, Isidor I. Rabi, Werner Heisenberg, Edwin M. McMillan, Emile Segre, Tsung Dao Lee, Chen Ning Yang and Robert Hofstadter.</subfield>
</datafield>
<datafield tag="590" ind1=" " ind2=" ">
<subfield code="a">En 1962, le CERN est l'hote de la onzieme Conference Internationale de Physique des Hautes Energies. Parmi les visiteurs eminents se trouvaient huit laureats du prix Nobel.De gauche a droite: Cecil F. Powell, Isidor I. Rabi, Werner Heisenberg, Edwin M. McMillan, Emile Segre, Tsung Dao Lee, Chen Ning Yang et Robert Hofstadter.</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">Press</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Personalities and History of CERN</subfield>
</datafield>
<datafield tag="653" ind1="1" ind2=" ">
<subfield code="a">Nobel laureate</subfield>
</datafield>
<datafield tag="FFT" ind1=" " ind2=" ">
<subfield code="a">http://invenio-software.org/download/invenio-demo-site-files/6206002.jpg</subfield>
<subfield code="x">http://invenio-software.org/download/invenio-demo-site-files/6206002.gif</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="o">0000736PHOPHO</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="y">1962</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="b">81</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="1">
<subfield code="c">1998-07-23</subfield>
<subfield code="l">50</subfield>
<subfield code="m">2002-07-15</subfield>
<subfield code="o">CM</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">http://www.nobel.se/physics/laureates/1950/index.html</subfield>
<subfield code="y">The Nobel Prize in Physics 1950 : Cecil Frank Powell</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">http://www.nobel.se/physics/laureates/1944/index.html</subfield>
<subfield code="y">The Nobel Prize in Physics 1944 : Isidor Isaac Rabi</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">http://www.nobel.se/physics/laureates/1932/index.html</subfield>
<subfield code="y">The Nobel Prize in Physics 1932 : Werner Karl Heisenberg</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">http://www.nobel.se/chemistry/laureates/1951/index.html</subfield>
<subfield code="y">The Nobel Prize in Chemistry 1951 : Edwin Mattison McMillan</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">http://www.nobel.se/physics/laureates/1959/index.html</subfield>
<subfield code="y">The Nobel Prize in Physics 1959 : Emilio Gino Segre</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">http://www.nobel.se/physics/laureates/1957/index.html</subfield>
<subfield code="y">The Nobel Prize in Physics 1957 : Chen Ning Yang and Tsung-Dao Lee</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">http://www.nobel.se/physics/laureates/1961/index.html</subfield>
<subfield code="y">The Nobel Prize in Physics 1961 : Robert Hofstadter</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="P">
<subfield code="s">6206002 (1962)</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="S">
<subfield code="s">n</subfield>
<subfield code="w">199830</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">PICTURE</subfield>
</datafield>
</record>"""
rec = create_record(marc_blob, master_format='marc', schema='xml')
self.assertTrue('fft' in rec)
self.assertTrue(len(rec['fft']) == 1)
self.assertTrue(rec['fft[0].path'] == "http://invenio-software.org/download/invenio-demo-site-files/6206002.jpg")
self.assertTrue('url' in rec)
self.assertTrue(len(rec['url']) == 7)
self.assertTrue(rec['url[0].url'] == "http://www.nobel.se/physics/laureates/1950/index.html")
def test_bibdoc_integration(self):
"""BibField - bibdoc integration"""
rec = get_record(7)
self.assertTrue('files' in rec)
self.assertEquals(len(rec['files']), 2)
image = rec['files'][1]
self.assertEquals(image['eformat'], '.jpeg')
self.assertEquals(image['name'], '9806033')
bibdoc = rec['bibdocs'].list_latest_files()[1]
self.assertEquals(image['name'], bibdoc.name)
class BibFieldLegacyTests(InvenioTestCase):
"""
Legacy functionality tests
"""
@classmethod
def setUpClass(cls):
from invenio.bibfield_config_engine import BibFieldParser
BibFieldParser.reparse()
def test_get_legacy_recstruct(self):
"""BibField - legacy functions"""
from invenio.search_engine import get_record as search_engine_get_record
from invenio.bibrecord import record_get_field_value
bibfield_recstruct = get_record(8).legacy_create_recstruct()
bibrecord = search_engine_get_record(8)
self.assertEqual(record_get_field_value(bibfield_recstruct, '100', code='a'),
record_get_field_value(bibrecord, '100', code='a'))
self.assertEqual(len(bibfield_recstruct['999']), len(bibrecord['999']))
class BibFieldProducerTests(InvenioTestCase):
"""
Low level output tests
"""
@classmethod
def setUpClass(cls):
from invenio.bibfield_config_engine import BibFieldParser
BibFieldParser.reparse()
def test_produce_json_for_marc(self):
"""BibField - produce json marc"""
record = get_record(1)
produced_marc = record.produce('json_for_marc')
self.assertTrue({'001': 1} in produced_marc)
# def test_produce_json_for_dublin_core(self):
# """bibfield - produce json dublin core"""
# record = get_record(1)
# date = record.get('version_id').strftime('%Y-%m-%dT%H:%M:%SZ')
# produced_dc = record.produce_json_for_dc()
# self.assertTrue({'dc:date': date} in produced_dc)
class BibFieldSpeedTests(InvenioTestCase):
"""
Ensures that BibField is not significantly slower than bibrecord (within a factor of two)
"""
@classmethod
def setUpClass(cls):
from invenio.bibfield_config_engine import BibFieldParser
BibFieldParser.reparse()
def test_speed_get_record(self):
"""BibField - speed test on get_record"""
time_bibfield = timeit.timeit('r = get_record(10)', setup='from invenio.bibfield import get_record', number=1000)
time_bibrecord = timeit.timeit('r = get_record(10)', setup='from invenio.search_engine import get_record', number=1000)
self.assertTrue(time_bibfield <= time_bibrecord*2)
def test_speed_get_field(self):
"""BibFIeld - speed test on get field"""
time_bibfield = timeit.timeit("x = r['authors.full_name']", setup='from invenio.bibfield import get_record; r=get_record(10)', number=1000)
time_bibrecord = timeit.timeit("x = record_get_field_values(r, '700', '', '', 'a') + record_get_field_values(r, '100', '', '', 'a')", \
setup='from invenio.bibrecord import record_get_field_values; from invenio.search_engine import get_record; r=get_record(10)', number=1000)
self.assertTrue(time_bibfield <= time_bibrecord*2)
TEST_SUITE = make_test_suite(BibFieldRecordFieldValuesTest,
BibFieldCreateRecordTests,
BibFieldLegacyTests,
BibFieldSpeedTests
)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
|
gpl-2.0
|
MiyamotoAkira/kivy
|
kivy/core/spelling/spelling_enchant.py
|
70
|
1329
|
'''
Enchant Spelling: Implements spelling backend based on enchant.
'''
import enchant
from kivy.core.spelling import SpellingBase, NoSuchLangError
from kivy.compat import PY2
class SpellingEnchant(SpellingBase):
'''
Spelling backend based on the enchant library.
'''
def __init__(self, language=None):
self._language = None
super(SpellingEnchant, self).__init__(language)
def select_language(self, language):
try:
self._language = enchant.Dict(language)
except enchant.DictNotFoundError:
err = 'Enchant Backend: No language for "%s"' % (language, )
raise NoSuchLangError(err)
def list_languages(self):
# Note: We do NOT return enchant.list_dicts because that also returns
# the enchant dict objects and not only the language identifiers.
return enchant.list_languages()
def check(self, word):
if not word:
return None
return self._language.check(word)
def suggest(self, fragment):
suggestions = self._language.suggest(fragment)
# Don't show suggestions that are invalid
suggestions = [s for s in suggestions if self.check(s)]
if PY2:
suggestions = [s.decode('utf-8') for s in suggestions]
return suggestions
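# --- Minimal usage sketch (editor's illustration, not part of the original
# module). It assumes the pyenchant "en_US" dictionary is installed; any
# identifier returned by list_languages() would work the same way.
if __name__ == '__main__':
    speller = SpellingEnchant('en_US')
    print(speller.check('helo'))     # False - not a known word
    print(speller.suggest('helo'))   # e.g. ['hole', 'help', 'hello', ...]
    print(speller.list_languages())  # identifiers of the installed dictionaries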
|
mit
|
ModdedPA/android_external_chromium_org
|
build/android/pylib/utils/reraiser_thread.py
|
36
|
3871
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Thread and ThreadGroup that reraise exceptions on the main thread."""
import logging
import sys
import threading
import time
import traceback
import watchdog_timer
class TimeoutError(Exception):
"""Module-specific timeout exception."""
pass
class ReraiserThread(threading.Thread):
"""Thread class that can reraise exceptions."""
def __init__(self, func, args=[], kwargs={}, name=None):
"""Initialize thread.
Args:
func: callable to call on a new thread.
args: list of positional arguments for callable, defaults to empty.
kwargs: dictionary of keyword arguments for callable, defaults to empty.
name: thread name, defaults to Thread-N.
"""
super(ReraiserThread, self).__init__(name=name)
self.daemon = True
self._func = func
self._args = args
self._kwargs = kwargs
self._exc_info = None
def ReraiseIfException(self):
"""Reraise exception if an exception was raised in the thread."""
if self._exc_info:
raise self._exc_info[0], self._exc_info[1], self._exc_info[2]
#override
def run(self):
"""Overrides Thread.run() to add support for reraising exceptions."""
try:
self._func(*self._args, **self._kwargs)
except:
self._exc_info = sys.exc_info()
raise
class ReraiserThreadGroup(object):
"""A group of ReraiserThread objects."""
def __init__(self, threads=[]):
"""Initialize thread group.
Args:
threads: a list of ReraiserThread objects; defaults to empty.
"""
self._threads = threads
def Add(self, thread):
"""Add a thread to the group.
Args:
thread: a ReraiserThread object.
"""
self._threads.append(thread)
def StartAll(self):
"""Start all threads."""
for thread in self._threads:
thread.start()
def _JoinAll(self, watcher=watchdog_timer.WatchdogTimer(None)):
"""Join all threads without stack dumps.
Reraises exceptions raised by the child threads and supports breaking
immediately on exceptions raised on the main thread.
Args:
watcher: Watchdog object providing timeout, by default waits forever.
"""
alive_threads = self._threads[:]
while alive_threads:
for thread in alive_threads[:]:
if watcher.IsTimedOut():
raise TimeoutError('Timed out waiting for %d of %d threads.' %
(len(alive_threads), len(self._threads)))
# Allow the main thread to periodically check for interrupts.
thread.join(0.1)
if not thread.isAlive():
alive_threads.remove(thread)
# All threads are allowed to complete before reraising exceptions.
for thread in self._threads:
thread.ReraiseIfException()
def JoinAll(self, watcher=watchdog_timer.WatchdogTimer(None)):
"""Join all threads.
Reraises exceptions raised by the child threads and supports breaking
immediately on exceptions raised on the main thread. Unfinished threads'
stacks will be logged on watchdog timeout.
Args:
watcher: Watchdog object providing timeout, by default waits forever.
"""
try:
self._JoinAll(watcher)
except TimeoutError:
for thread in (t for t in self._threads if t.isAlive()):
stack = sys._current_frames()[thread.ident]
logging.critical('*' * 80)
logging.critical('Stack dump for timed out thread \'%s\'', thread.name)
logging.critical('*' * 80)
for filename, lineno, name, line in traceback.extract_stack(stack):
logging.critical('File: "%s", line %d, in %s', filename, lineno, name)
if line:
logging.critical(' %s', line.strip())
logging.critical('*' * 80)
raise
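# --- Minimal usage sketch (editor's illustration, not part of the original
# module): run two functions on worker threads and let JoinAll() reraise any
# worker exception on the main thread. The _work() helper is hypothetical.
if __name__ == '__main__':
    def _work(label, seconds):
        time.sleep(seconds)
        logging.info('%s finished', label)

    group = ReraiserThreadGroup()
    group.Add(ReraiserThread(_work, args=['fast', 0.1]))
    group.Add(ReraiserThread(_work, args=['slow', 0.5]))
    group.StartAll()
    group.JoinAll()  # blocks until both finish, reraising any worker exception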
|
bsd-3-clause
|
ivh/VAMDC-VALD
|
other/legacy/valdxsams.py
|
4
|
3501
|
from vamdc.xmltools import *
import vamdc.db as db
t=db.tools
subel=e.SubElement
NS='http://www.w3.org/2001/XMLSchema-instance'
SCHEMA='http://www-amdis.iaea.org/xsams/schema/xsams-0.1.xsd'
header="""<?xml version="1.0" encoding="UTF-8"?>
<XSAMSData xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation="http://www-amdis.iaea.org/xsams/schema/xsams-0.1.xsd"/>"""
def xsams_root():
location_attribute = '{%s}noNamespaceSchemaLocation' % NS
return e.Element('XSAMSData',attrib={location_attribute:SCHEMA})
def write_state(atoms,data=None):
atom=subel(atoms,'Atom')
nuchar=subel(atom,'NuclearCharge')
nuchar.text='1'
isotope=subel(atom,'Isotope')
ionstate=subel(isotope,'IonState')
ioncharge=subel(ionstate,'IonCharge')
ioncharge.text='0'
atomstate=subel(ionstate,'AtomicState')
atomstate.set('stateID','bla')
descr=subel(atomstate,'Description')
descr.text='put term designation here'
numdata=subel(atomstate,'AtomicNumericalData')
energy=subel(numdata,'StateEnergy')
val=subel(energy,'Value')
val.text='5.6'
val.set('units','1/cm')
lande=subel(numdata,'LandeFactor')
qnum=subel(atomstate,'AtomicQuantumNumbers')
parit=subel(qnum,'Parity')
parit.text='even'
J=subel(qnum,'TotalAngularMomentum')
J.text='2.5'
atcomp=subel(atomstate,'AtomicComposition')
comp=subel(atcomp,'Component')
term=subel(comp,'Term')
ls=subel(term,'LS')
l=subel(ls,'L')
val=subel(l,'Value')
val.text='2.5'
s=subel(ls,'S')
val=subel(s,'Value')
val.text='2.5'
def write_source(sources,data):
pass
def write_transition(processes,data):
pass
def getdata(curs,query):
curs.execute(query)
return curs.fetchall()
def tostring(xml):
return e.tostring(xml,xml_declaration=True,encoding='UTF-8',pretty_print=True)
def run(curs, outname=None, query=None):
"""Build a skeleton XSAMS tree from the VALD transitions returned by `query`
and optionally write it, with an XSLT stylesheet header, to `outname`."""
if not query: query='select * from transitions where vacwave between 86 and 86.2'
root=xsams_root()
sources=subel(root,'Sources')
methods=subel(root,'Methods')
states=subel(root,'States')
atoms=subel(states,'Atoms')
processes=subel(root,'Processes')
data=getdata(curs,query)
for d in data:
print len(d)
id,vacwave,airwave,species,loggf,landeff,gammarad,gammastark,gammawaals,srctag,acflag,accur,comment,wave_ref,loggf_ref,lande_ref,gammarad_ref,gamastark_ref,gammawaals_ref,upstate,lostate=d
#UpStateID='%s-%s-%s'%(species,upcoupling,upterm)
#UpStateID=UpStateID.replace(' ','-')
#LoStateID='%s-%s-%s'%(species,locoupling,loterm)
#LoStateID=LoStateID.replace(' ','-')
#loqs=getdata(curs,'SELECT * from qnums WHERE idstring="%s"'%lostate)
#hiqs=getdata(curs,'SELECT * from qnums WHERE idstring="%s"'%upstate)
lostate=getdata(curs,'SELECT * from states WHERE id="%s"'%lostate)
write_state(atoms,lostate)
write_state(states,data)
write_transition(processes,data)
write_source(sources,data)
#print tostring(root)
#print xvald.xpath('//text()')
#for trans in xvald.iter('wavel'):
# print 'wavel: %s'%str(trans.text)
if outname:
f=open(outname,'w')
f.write("""<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="http://tmy.se/t/vald2xsams.xsl"?>
""")
f.write(e.tostring(root,pretty_print=True))
f.close()
return root
|
gpl-3.0
|
frodejohansen/codebrag
|
scripts/dist/lib/s3cmd-1.5.0-alpha1/S3/CloudFront.py
|
4
|
32530
|
## Amazon CloudFront support
## Author: Michal Ludvig <[email protected]>
## http://www.logix.cz/michal
## License: GPL Version 2
import sys
import time
import httplib
import random
from datetime import datetime
from logging import debug, info, warning, error
try:
import xml.etree.ElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
from Config import Config
from Exceptions import *
import Utils   # the module itself is needed for Utils.mktmpfile() in InvalidateObjects()
from Utils import getTreeFromXml, appendXmlTextNode, getDictFromTree, dateS3toPython, sign_string, getBucketFromHostname, getHostnameFromBucket
from S3Uri import S3Uri, S3UriS3
from FileLists import fetch_remote_list
cloudfront_api_version = "2010-11-01"
cloudfront_resource = "/%(api_ver)s/distribution" % { 'api_ver' : cloudfront_api_version }
def output(message):
sys.stdout.write(message + "\n")
def pretty_output(label, message):
#label = ("%s " % label).ljust(20, ".")
label = ("%s:" % label).ljust(15)
output("%s %s" % (label, message))
class DistributionSummary(object):
## Example:
##
## <DistributionSummary>
## <Id>1234567890ABC</Id>
## <Status>Deployed</Status>
## <LastModifiedTime>2009-01-16T11:49:02.189Z</LastModifiedTime>
## <DomainName>blahblahblah.cloudfront.net</DomainName>
## <S3Origin>
## <DNSName>example.bucket.s3.amazonaws.com</DNSName>
## </S3Origin>
## <CNAME>cdn.example.com</CNAME>
## <CNAME>img.example.com</CNAME>
## <Comment>What Ever</Comment>
## <Enabled>true</Enabled>
## </DistributionSummary>
def __init__(self, tree):
if tree.tag != "DistributionSummary":
raise ValueError("Expected <DistributionSummary /> xml, got: <%s />" % tree.tag)
self.parse(tree)
def parse(self, tree):
self.info = getDictFromTree(tree)
self.info['Enabled'] = (self.info['Enabled'].lower() == "true")
if self.info.has_key("CNAME") and type(self.info['CNAME']) != list:
self.info['CNAME'] = [self.info['CNAME']]
def uri(self):
return S3Uri("cf://%s" % self.info['Id'])
class DistributionList(object):
## Example:
##
## <DistributionList xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">
## <Marker />
## <MaxItems>100</MaxItems>
## <IsTruncated>false</IsTruncated>
## <DistributionSummary>
## ... handled by DistributionSummary() class ...
## </DistributionSummary>
## </DistributionList>
def __init__(self, xml):
tree = getTreeFromXml(xml)
if tree.tag != "DistributionList":
raise ValueError("Expected <DistributionList /> xml, got: <%s />" % tree.tag)
self.parse(tree)
def parse(self, tree):
self.info = getDictFromTree(tree)
## Normalise some items
self.info['IsTruncated'] = (self.info['IsTruncated'].lower() == "true")
self.dist_summs = []
for dist_summ in tree.findall(".//DistributionSummary"):
self.dist_summs.append(DistributionSummary(dist_summ))
class Distribution(object):
## Example:
##
## <Distribution xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">
## <Id>1234567890ABC</Id>
## <Status>InProgress</Status>
## <LastModifiedTime>2009-01-16T13:07:11.319Z</LastModifiedTime>
## <DomainName>blahblahblah.cloudfront.net</DomainName>
## <DistributionConfig>
## ... handled by DistributionConfig() class ...
## </DistributionConfig>
## </Distribution>
def __init__(self, xml):
tree = getTreeFromXml(xml)
if tree.tag != "Distribution":
raise ValueError("Expected <Distribution /> xml, got: <%s />" % tree.tag)
self.parse(tree)
def parse(self, tree):
self.info = getDictFromTree(tree)
## Normalise some items
self.info['LastModifiedTime'] = dateS3toPython(self.info['LastModifiedTime'])
self.info['DistributionConfig'] = DistributionConfig(tree = tree.find(".//DistributionConfig"))
def uri(self):
return S3Uri("cf://%s" % self.info['Id'])
class DistributionConfig(object):
## Example:
##
## <DistributionConfig>
## <Origin>somebucket.s3.amazonaws.com</Origin>
## <CallerReference>s3://somebucket/</CallerReference>
## <Comment>http://somebucket.s3.amazonaws.com/</Comment>
## <Enabled>true</Enabled>
## <Logging>
## <Bucket>bu.ck.et</Bucket>
## <Prefix>/cf-somebucket/</Prefix>
## </Logging>
## </DistributionConfig>
EMPTY_CONFIG = "<DistributionConfig><S3Origin><DNSName/></S3Origin><CallerReference/><Enabled>true</Enabled></DistributionConfig>"
xmlns = "http://cloudfront.amazonaws.com/doc/%(api_ver)s/" % { 'api_ver' : cloudfront_api_version }
def __init__(self, xml = None, tree = None):
if xml is None:
xml = DistributionConfig.EMPTY_CONFIG
if tree is None:
tree = getTreeFromXml(xml)
if tree.tag != "DistributionConfig":
raise ValueError("Expected <DistributionConfig /> xml, got: <%s />" % tree.tag)
self.parse(tree)
def parse(self, tree):
self.info = getDictFromTree(tree)
self.info['Enabled'] = (self.info['Enabled'].lower() == "true")
if not self.info.has_key("CNAME"):
self.info['CNAME'] = []
if type(self.info['CNAME']) != list:
self.info['CNAME'] = [self.info['CNAME']]
self.info['CNAME'] = [cname.lower() for cname in self.info['CNAME']]
if not self.info.has_key("Comment"):
self.info['Comment'] = ""
if not self.info.has_key("DefaultRootObject"):
self.info['DefaultRootObject'] = ""
## Figure out logging - complex node not parsed by getDictFromTree()
logging_nodes = tree.findall(".//Logging")
if logging_nodes:
logging_dict = getDictFromTree(logging_nodes[0])
logging_dict['Bucket'], success = getBucketFromHostname(logging_dict['Bucket'])
if not success:
warning("Logging to unparsable bucket name: %s" % logging_dict['Bucket'])
self.info['Logging'] = S3UriS3("s3://%(Bucket)s/%(Prefix)s" % logging_dict)
else:
self.info['Logging'] = None
def __str__(self):
tree = ET.Element("DistributionConfig")
tree.attrib['xmlns'] = DistributionConfig.xmlns
## Retain the order of the following calls!
s3org = appendXmlTextNode("S3Origin", '', tree)
appendXmlTextNode("DNSName", self.info['S3Origin']['DNSName'], s3org)
appendXmlTextNode("CallerReference", self.info['CallerReference'], tree)
for cname in self.info['CNAME']:
appendXmlTextNode("CNAME", cname.lower(), tree)
if self.info['Comment']:
appendXmlTextNode("Comment", self.info['Comment'], tree)
appendXmlTextNode("Enabled", str(self.info['Enabled']).lower(), tree)
# don't create an empty DefaultRootObject element as it would result in a MalformedXML error
if str(self.info['DefaultRootObject']):
appendXmlTextNode("DefaultRootObject", str(self.info['DefaultRootObject']), tree)
if self.info['Logging']:
logging_el = ET.Element("Logging")
appendXmlTextNode("Bucket", getHostnameFromBucket(self.info['Logging'].bucket()), logging_el)
appendXmlTextNode("Prefix", self.info['Logging'].object(), logging_el)
tree.append(logging_el)
return ET.tostring(tree)
class Invalidation(object):
## Example:
##
## <Invalidation xmlns="http://cloudfront.amazonaws.com/doc/2010-11-01/">
## <Id>id</Id>
## <Status>status</Status>
## <CreateTime>date</CreateTime>
## <InvalidationBatch>
## <Path>/image1.jpg</Path>
## <Path>/image2.jpg</Path>
## <Path>/videos/movie.flv</Path>
## <CallerReference>my-batch</CallerReference>
## </InvalidationBatch>
## </Invalidation>
def __init__(self, xml):
tree = getTreeFromXml(xml)
if tree.tag != "Invalidation":
raise ValueError("Expected <Invalidation /> xml, got: <%s />" % tree.tag)
self.parse(tree)
def parse(self, tree):
self.info = getDictFromTree(tree)
def __str__(self):
return str(self.info)
class InvalidationList(object):
## Example:
##
## <InvalidationList>
## <Marker/>
## <NextMarker>Invalidation ID</NextMarker>
## <MaxItems>2</MaxItems>
## <IsTruncated>true</IsTruncated>
## <InvalidationSummary>
## <Id>[Second Invalidation ID]</Id>
## <Status>Completed</Status>
## </InvalidationSummary>
## <InvalidationSummary>
## <Id>[First Invalidation ID]</Id>
## <Status>Completed</Status>
## </InvalidationSummary>
## </InvalidationList>
def __init__(self, xml):
tree = getTreeFromXml(xml)
if tree.tag != "InvalidationList":
raise ValueError("Expected <InvalidationList /> xml, got: <%s />" % tree.tag)
self.parse(tree)
def parse(self, tree):
self.info = getDictFromTree(tree)
def __str__(self):
return str(self.info)
class InvalidationBatch(object):
## Example:
##
## <InvalidationBatch>
## <Path>/image1.jpg</Path>
## <Path>/image2.jpg</Path>
## <Path>/videos/movie.flv</Path>
## <Path>/sound%20track.mp3</Path>
## <CallerReference>my-batch</CallerReference>
## </InvalidationBatch>
def __init__(self, reference = None, distribution = None, paths = []):
if reference:
self.reference = reference
else:
if not distribution:
distribution="0"
self.reference = "%s.%s.%s" % (distribution,
datetime.strftime(datetime.now(),"%Y%m%d%H%M%S"),
random.randint(1000,9999))
self.paths = []
self.add_objects(paths)
def add_objects(self, paths):
self.paths.extend(paths)
def get_reference(self):
return self.reference
def __str__(self):
tree = ET.Element("InvalidationBatch")
for path in self.paths:
if len(path) < 1 or path[0] != "/":
path = "/" + path
appendXmlTextNode("Path", path, tree)
appendXmlTextNode("CallerReference", self.reference, tree)
return ET.tostring(tree)
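# --- Editor's sketch (not part of the original module): serialising a batch
# produces the XML shown in the class comment above; paths without a leading
# slash get one prepended. The distribution id used here is a placeholder.
#
#   batch = InvalidationBatch(distribution='EXAMPLEID', paths=['image1.jpg', '/videos/movie.flv'])
#   print batch   # <InvalidationBatch><Path>/image1.jpg</Path>...</InvalidationBatch>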
class CloudFront(object):
operations = {
"CreateDist" : { 'method' : "POST", 'resource' : "" },
"DeleteDist" : { 'method' : "DELETE", 'resource' : "/%(dist_id)s" },
"GetList" : { 'method' : "GET", 'resource' : "" },
"GetDistInfo" : { 'method' : "GET", 'resource' : "/%(dist_id)s" },
"GetDistConfig" : { 'method' : "GET", 'resource' : "/%(dist_id)s/config" },
"SetDistConfig" : { 'method' : "PUT", 'resource' : "/%(dist_id)s/config" },
"Invalidate" : { 'method' : "POST", 'resource' : "/%(dist_id)s/invalidation" },
"GetInvalList" : { 'method' : "GET", 'resource' : "/%(dist_id)s/invalidation" },
"GetInvalInfo" : { 'method' : "GET", 'resource' : "/%(dist_id)s/invalidation/%(request_id)s" },
}
## Maximum attempts of re-issuing failed requests
_max_retries = 5
dist_list = None
def __init__(self, config):
self.config = config
## --------------------------------------------------
## Methods implementing CloudFront API
## --------------------------------------------------
def GetList(self):
response = self.send_request("GetList")
response['dist_list'] = DistributionList(response['data'])
if response['dist_list'].info['IsTruncated']:
raise NotImplementedError("List is truncated. Ask s3cmd author to add support.")
## TODO: handle Truncated
return response
def CreateDistribution(self, uri, cnames_add = [], comment = None, logging = None, default_root_object = None):
dist_config = DistributionConfig()
dist_config.info['Enabled'] = True
dist_config.info['S3Origin']['DNSName'] = uri.host_name()
dist_config.info['CallerReference'] = str(uri)
dist_config.info['DefaultRootObject'] = default_root_object
if comment == None:
dist_config.info['Comment'] = uri.public_url()
else:
dist_config.info['Comment'] = comment
for cname in cnames_add:
if dist_config.info['CNAME'].count(cname) == 0:
dist_config.info['CNAME'].append(cname)
if logging:
dist_config.info['Logging'] = S3UriS3(logging)
request_body = str(dist_config)
debug("CreateDistribution(): request_body: %s" % request_body)
response = self.send_request("CreateDist", body = request_body)
response['distribution'] = Distribution(response['data'])
return response
def ModifyDistribution(self, cfuri, cnames_add = [], cnames_remove = [],
comment = None, enabled = None, logging = None,
default_root_object = None):
if cfuri.type != "cf":
raise ValueError("Expected CFUri instead of: %s" % cfuri)
# Get current dist status (enabled/disabled) and Etag
info("Checking current status of %s" % cfuri)
response = self.GetDistConfig(cfuri)
dc = response['dist_config']
if enabled != None:
dc.info['Enabled'] = enabled
if comment != None:
dc.info['Comment'] = comment
if default_root_object != None:
dc.info['DefaultRootObject'] = default_root_object
for cname in cnames_add:
if dc.info['CNAME'].count(cname) == 0:
dc.info['CNAME'].append(cname)
for cname in cnames_remove:
while dc.info['CNAME'].count(cname) > 0:
dc.info['CNAME'].remove(cname)
if logging != None:
if logging == False:
dc.info['Logging'] = False
else:
dc.info['Logging'] = S3UriS3(logging)
response = self.SetDistConfig(cfuri, dc, response['headers']['etag'])
return response
def DeleteDistribution(self, cfuri):
if cfuri.type != "cf":
raise ValueError("Expected CFUri instead of: %s" % cfuri)
# Get current dist status (enabled/disabled) and Etag
info("Checking current status of %s" % cfuri)
response = self.GetDistConfig(cfuri)
if response['dist_config'].info['Enabled']:
info("Distribution is ENABLED. Disabling first.")
response['dist_config'].info['Enabled'] = False
response = self.SetDistConfig(cfuri, response['dist_config'],
response['headers']['etag'])
warning("Waiting for Distribution to become disabled.")
warning("This may take several minutes, please wait.")
while True:
response = self.GetDistInfo(cfuri)
d = response['distribution']
if d.info['Status'] == "Deployed" and d.info['Enabled'] == False:
info("Distribution is now disabled")
break
warning("Still waiting...")
time.sleep(10)
headers = {}
headers['if-match'] = response['headers']['etag']
response = self.send_request("DeleteDist", dist_id = cfuri.dist_id(),
headers = headers)
return response
def GetDistInfo(self, cfuri):
if cfuri.type != "cf":
raise ValueError("Expected CFUri instead of: %s" % cfuri)
response = self.send_request("GetDistInfo", dist_id = cfuri.dist_id())
response['distribution'] = Distribution(response['data'])
return response
def GetDistConfig(self, cfuri):
if cfuri.type != "cf":
raise ValueError("Expected CFUri instead of: %s" % cfuri)
response = self.send_request("GetDistConfig", dist_id = cfuri.dist_id())
response['dist_config'] = DistributionConfig(response['data'])
return response
def SetDistConfig(self, cfuri, dist_config, etag = None):
if etag == None:
debug("SetDistConfig(): Etag not set. Fetching it first.")
etag = self.GetDistConfig(cfuri)['headers']['etag']
debug("SetDistConfig(): Etag = %s" % etag)
request_body = str(dist_config)
debug("SetDistConfig(): request_body: %s" % request_body)
headers = {}
headers['if-match'] = etag
response = self.send_request("SetDistConfig", dist_id = cfuri.dist_id(),
body = request_body, headers = headers)
return response
def InvalidateObjects(self, uri, paths, default_index_file, invalidate_default_index_on_cf, invalidate_default_index_root_on_cf):
# joseprio: if the user doesn't want to invalidate the default index
# path, or if the user wants to invalidate the root of the default
# index, we need to process those paths
if default_index_file is not None and (not invalidate_default_index_on_cf or invalidate_default_index_root_on_cf):
new_paths = []
default_index_suffix = '/' + default_index_file
for path in paths:
if path.endswith(default_index_suffix) or path == default_index_file:
if invalidate_default_index_on_cf:
new_paths.append(path)
if invalidate_default_index_root_on_cf:
new_paths.append(path[:-len(default_index_file)])
else:
new_paths.append(path)
paths = new_paths
# uri could be either cf:// or s3:// uri
cfuri = self.get_dist_name_for_bucket(uri)
if len(paths) > 999:
try:
tmp_filename = Utils.mktmpfile()
f = open(tmp_filename, "w")
f.write("\n".join(paths)+"\n")
f.close()
warning("Request to invalidate %d paths (max 999 supported)" % len(paths))
warning("All the paths are now saved in: %s" % tmp_filename)
except:
pass
raise ParameterError("Too many paths to invalidate")
invalbatch = InvalidationBatch(distribution = cfuri.dist_id(), paths = paths)
debug("InvalidateObjects(): request_body: %s" % invalbatch)
response = self.send_request("Invalidate", dist_id = cfuri.dist_id(),
body = str(invalbatch))
response['dist_id'] = cfuri.dist_id()
if response['status'] == 201:
inval_info = Invalidation(response['data']).info
response['request_id'] = inval_info['Id']
debug("InvalidateObjects(): response: %s" % response)
return response
def GetInvalList(self, cfuri):
if cfuri.type != "cf":
raise ValueError("Expected CFUri instead of: %s" % cfuri)
response = self.send_request("GetInvalList", dist_id = cfuri.dist_id())
response['inval_list'] = InvalidationList(response['data'])
return response
def GetInvalInfo(self, cfuri):
if cfuri.type != "cf":
raise ValueError("Expected CFUri instead of: %s" % cfuri)
if cfuri.request_id() is None:
raise ValueError("Expected CFUri with Request ID")
response = self.send_request("GetInvalInfo", dist_id = cfuri.dist_id(), request_id = cfuri.request_id())
response['inval_status'] = Invalidation(response['data'])
return response
## --------------------------------------------------
## Low-level methods for handling CloudFront requests
## --------------------------------------------------
def send_request(self, op_name, dist_id = None, request_id = None, body = None, headers = {}, retries = _max_retries):
operation = self.operations[op_name]
if body:
headers['content-type'] = 'text/plain'
request = self.create_request(operation, dist_id, request_id, headers)
conn = self.get_connection()
debug("send_request(): %s %s" % (request['method'], request['resource']))
conn.request(request['method'], request['resource'], body, request['headers'])
http_response = conn.getresponse()
response = {}
response["status"] = http_response.status
response["reason"] = http_response.reason
response["headers"] = dict(http_response.getheaders())
response["data"] = http_response.read()
conn.close()
debug("CloudFront: response: %r" % response)
if response["status"] >= 500:
e = CloudFrontError(response)
if retries:
warning(u"Retrying failed request: %s" % op_name)
warning(unicode(e))
warning("Waiting %d sec..." % self._fail_wait(retries))
time.sleep(self._fail_wait(retries))
# pass body/retries by keyword so they do not shift into the wrong parameters
return self.send_request(op_name, dist_id, body = body, retries = retries - 1)
else:
raise e
if response["status"] < 200 or response["status"] > 299:
raise CloudFrontError(response)
return response
def create_request(self, operation, dist_id = None, request_id = None, headers = None):
resource = cloudfront_resource + (
operation['resource'] % { 'dist_id' : dist_id, 'request_id' : request_id })
if not headers:
headers = {}
if headers.has_key("date"):
if not headers.has_key("x-amz-date"):
headers["x-amz-date"] = headers["date"]
del(headers["date"])
if not headers.has_key("x-amz-date"):
headers["x-amz-date"] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
signature = self.sign_request(headers)
headers["Authorization"] = "AWS "+self.config.access_key+":"+signature
request = {}
request['resource'] = resource
request['headers'] = headers
request['method'] = operation['method']
return request
def sign_request(self, headers):
string_to_sign = headers['x-amz-date']
signature = sign_string(string_to_sign)
debug(u"CloudFront.sign_request('%s') = %s" % (string_to_sign, signature))
return signature
def get_connection(self):
if self.config.proxy_host != "":
raise ParameterError("CloudFront commands don't work from behind a HTTP proxy")
return httplib.HTTPSConnection(self.config.cloudfront_host)
def _fail_wait(self, retries):
# Wait a few seconds. The more it fails the more we wait.
return (self._max_retries - retries + 1) * 3
def get_dist_name_for_bucket(self, uri):
if (uri.type == "cf"):
return uri
if (uri.type != "s3"):
raise ParameterError("CloudFront or S3 URI required instead of: %s" % arg)
debug("_get_dist_name_for_bucket(%r)" % uri)
if CloudFront.dist_list is None:
response = self.GetList()
CloudFront.dist_list = {}
for d in response['dist_list'].dist_summs:
if d.info.has_key("S3Origin"):
CloudFront.dist_list[getBucketFromHostname(d.info['S3Origin']['DNSName'])[0]] = d.uri()
elif d.info.has_key("CustomOrigin"):
# Aral: This used to skip over distributions with CustomOrigin, however, we mustn't
# do this since S3 buckets that are set up as websites use custom origins.
# Thankfully, the custom origin URLs they use start with the URL of the
# S3 bucket. Here, we make use of this naming convention to support this use case.
distListIndex = getBucketFromHostname(d.info['CustomOrigin']['DNSName'])[0]
distListIndex = distListIndex[:len(uri.bucket())]
CloudFront.dist_list[distListIndex] = d.uri()
else:
# Aral: I'm not sure when this condition will be reached, but keeping it in there.
continue
debug("dist_list: %s" % CloudFront.dist_list)
try:
return CloudFront.dist_list[uri.bucket()]
except Exception, e:
debug(e)
raise ParameterError("Unable to translate S3 URI to CloudFront distribution name: %s" % arg)
class Cmd(object):
"""
Class that implements CloudFront commands
"""
class Options(object):
cf_cnames_add = []
cf_cnames_remove = []
cf_comment = None
cf_enable = None
cf_logging = None
cf_default_root_object = None
def option_list(self):
return [opt for opt in dir(self) if opt.startswith("cf_")]
def update_option(self, option, value):
setattr(Cmd.options, option, value)
options = Options()
@staticmethod
def _parse_args(args):
cf = CloudFront(Config())
cfuris = []
for arg in args:
uri = cf.get_dist_name_for_bucket(S3Uri(arg))
cfuris.append(uri)
return cfuris
@staticmethod
def info(args):
cf = CloudFront(Config())
if not args:
response = cf.GetList()
for d in response['dist_list'].dist_summs:
if d.info.has_key("S3Origin"):
origin = S3UriS3.httpurl_to_s3uri(d.info['S3Origin']['DNSName'])
elif d.info.has_key("CustomOrigin"):
origin = "http://%s/" % d.info['CustomOrigin']['DNSName']
else:
origin = "<unknown>"
pretty_output("Origin", origin)
pretty_output("DistId", d.uri())
pretty_output("DomainName", d.info['DomainName'])
if d.info.has_key("CNAME"):
pretty_output("CNAMEs", ", ".join(d.info['CNAME']))
pretty_output("Status", d.info['Status'])
pretty_output("Enabled", d.info['Enabled'])
output("")
else:
cfuris = Cmd._parse_args(args)
for cfuri in cfuris:
response = cf.GetDistInfo(cfuri)
d = response['distribution']
dc = d.info['DistributionConfig']
if dc.info.has_key("S3Origin"):
origin = S3UriS3.httpurl_to_s3uri(dc.info['S3Origin']['DNSName'])
elif dc.info.has_key("CustomOrigin"):
origin = "http://%s/" % dc.info['CustomOrigin']['DNSName']
else:
origin = "<unknown>"
pretty_output("Origin", origin)
pretty_output("DistId", d.uri())
pretty_output("DomainName", d.info['DomainName'])
if dc.info.has_key("CNAME"):
pretty_output("CNAMEs", ", ".join(dc.info['CNAME']))
pretty_output("Status", d.info['Status'])
pretty_output("Comment", dc.info['Comment'])
pretty_output("Enabled", dc.info['Enabled'])
pretty_output("DfltRootObject", dc.info['DefaultRootObject'])
pretty_output("Logging", dc.info['Logging'] or "Disabled")
pretty_output("Etag", response['headers']['etag'])
@staticmethod
def create(args):
cf = CloudFront(Config())
buckets = []
for arg in args:
uri = S3Uri(arg)
if uri.type != "s3":
raise ParameterError("Bucket can only be created from a s3:// URI instead of: %s" % arg)
if uri.object():
raise ParameterError("Use s3:// URI with a bucket name only instead of: %s" % arg)
if not uri.is_dns_compatible():
raise ParameterError("CloudFront can only handle lowercase-named buckets.")
buckets.append(uri)
if not buckets:
raise ParameterError("No valid bucket names found")
for uri in buckets:
info("Creating distribution from: %s" % uri)
response = cf.CreateDistribution(uri, cnames_add = Cmd.options.cf_cnames_add,
comment = Cmd.options.cf_comment,
logging = Cmd.options.cf_logging,
default_root_object = Cmd.options.cf_default_root_object)
d = response['distribution']
dc = d.info['DistributionConfig']
output("Distribution created:")
pretty_output("Origin", S3UriS3.httpurl_to_s3uri(dc.info['S3Origin']['DNSName']))
pretty_output("DistId", d.uri())
pretty_output("DomainName", d.info['DomainName'])
pretty_output("CNAMEs", ", ".join(dc.info['CNAME']))
pretty_output("Comment", dc.info['Comment'])
pretty_output("Status", d.info['Status'])
pretty_output("Enabled", dc.info['Enabled'])
pretty_output("DefaultRootObject", dc.info['DefaultRootObject'])
pretty_output("Etag", response['headers']['etag'])
@staticmethod
def delete(args):
cf = CloudFront(Config())
cfuris = Cmd._parse_args(args)
for cfuri in cfuris:
response = cf.DeleteDistribution(cfuri)
if response['status'] >= 400:
error("Distribution %s could not be deleted: %s" % (cfuri, response['reason']))
output("Distribution %s deleted" % cfuri)
@staticmethod
def modify(args):
cf = CloudFront(Config())
if len(args) > 1:
raise ParameterError("Too many parameters. Modify one Distribution at a time.")
try:
cfuri = Cmd._parse_args(args)[0]
except IndexError, e:
raise ParameterError("No valid Distribution URI found.")
response = cf.ModifyDistribution(cfuri,
cnames_add = Cmd.options.cf_cnames_add,
cnames_remove = Cmd.options.cf_cnames_remove,
comment = Cmd.options.cf_comment,
enabled = Cmd.options.cf_enable,
logging = Cmd.options.cf_logging,
default_root_object = Cmd.options.cf_default_root_object)
if response['status'] >= 400:
error("Distribution %s could not be modified: %s" % (cfuri, response['reason']))
output("Distribution modified: %s" % cfuri)
response = cf.GetDistInfo(cfuri)
d = response['distribution']
dc = d.info['DistributionConfig']
pretty_output("Origin", S3UriS3.httpurl_to_s3uri(dc.info['S3Origin']['DNSName']))
pretty_output("DistId", d.uri())
pretty_output("DomainName", d.info['DomainName'])
pretty_output("Status", d.info['Status'])
pretty_output("CNAMEs", ", ".join(dc.info['CNAME']))
pretty_output("Comment", dc.info['Comment'])
pretty_output("Enabled", dc.info['Enabled'])
pretty_output("DefaultRootObject", dc.info['DefaultRootObject'])
pretty_output("Etag", response['headers']['etag'])
@staticmethod
def invalinfo(args):
cf = CloudFront(Config())
cfuris = Cmd._parse_args(args)
requests = []
for cfuri in cfuris:
if cfuri.request_id():
requests.append(str(cfuri))
else:
inval_list = cf.GetInvalList(cfuri)
try:
for i in inval_list['inval_list'].info['InvalidationSummary']:
requests.append("/".join(["cf:/", cfuri.dist_id(), i["Id"]]))
except:
continue
for req in requests:
cfuri = S3Uri(req)
inval_info = cf.GetInvalInfo(cfuri)
st = inval_info['inval_status'].info
pretty_output("URI", str(cfuri))
pretty_output("Status", st['Status'])
pretty_output("Created", st['CreateTime'])
pretty_output("Nr of paths", len(st['InvalidationBatch']['Path']))
pretty_output("Reference", st['InvalidationBatch']['CallerReference'])
output("")
# vim:et:ts=4:sts=4:ai
|
agpl-3.0
|
jpshort/odoo
|
marcos_addons/marcos_ncf/models.py
|
3
|
1293
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Written by Eneldo Serrata ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields, expression
class product_category(osv.osv):
"""
Adds a helper field used when migrating categories from another source.
"""
_inherit = "product.category"
_columns = {
"oldref": fields.char("Ref", help="Helper on migration")
}
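# --- Editor's sketch (not part of the original module): during a migration the
# legacy reference can be used to locate the matching category. The cursor,
# uid and legacy_ref names below come from the surrounding OpenERP context.
#
#   category_obj = self.pool.get('product.category')
#   ids = category_obj.search(cr, uid, [('oldref', '=', legacy_ref)])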
|
agpl-3.0
|
bjowi/newsdiffs
|
website/settings_dev.py
|
3
|
2657
|
# Django settings for the newsdiffs project.
import os.path
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Eric Price', '[email protected]'),
('Jennifer 8. Lee', '[email protected]'),
('Gregory Price', '[email protected]'),
)
MANAGERS = ADMINS
WEBAPP_ROOT = os.path.dirname(os.path.abspath(__file__))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.dirname(WEBAPP_ROOT)+'/newsdiffs.db',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '%p^2v#afb+ew#3en+%r55^gm4av_=e+s7w6a5(#ky92yp*56+l'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.filesystem.load_template_source',
# 'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
)
ROOT_URLCONF = 'website.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'south',
'frontend',
)
|
mit
|
makermade/arm_android-19_arm-linux-androideabi-4.8
|
lib/python2.7/sunau.py
|
156
|
16537
|
"""Stuff to parse Sun and NeXT audio files.
An audio file consists of a header followed by the data. The structure
of the header is as follows.
+---------------+
| magic word |
+---------------+
| header size |
+---------------+
| data size |
+---------------+
| encoding |
+---------------+
| sample rate |
+---------------+
| # of channels |
+---------------+
| info |
| |
+---------------+
The magic word consists of the 4 characters '.snd'. Apart from the
info field, all header fields are 4 bytes in size. They are all
32-bit unsigned integers encoded in big-endian byte order.
The header size really gives the start of the data.
The data size is the physical size of the data. From the other
parameters the number of frames can be calculated.
The encoding gives the way in which audio samples are encoded.
Possible values are listed below.
The info field currently consists of an ASCII string giving a
human-readable description of the audio file. The info field is
padded with NUL bytes to the header size.
Usage.
Reading audio files:
f = sunau.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
When the setpos() and rewind() methods are not used, the seek()
method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' or 'ULAW')
getcompname() -- returns human-readable version of
compression type ('not compressed' matches 'NONE')
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- returns None (for compatibility with the
aifc module)
getmark(id) -- raises an error since the mark does not
exist (for compatibility with the aifc module)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell() and the position given to setpos()
are compatible and have nothing to do with the actual position in the
file.
The close() method is called automatically when the class instance
is destroyed.
Writing audio files:
f = sunau.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)-- set all parameters at once
tell() -- return current position in output file
writeframesraw(data)
-- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
The close() method is called automatically when the class instance
is destroyed.
"""
# from <multimedia/audio_filehdr.h>
AUDIO_FILE_MAGIC = 0x2e736e64
AUDIO_FILE_ENCODING_MULAW_8 = 1
AUDIO_FILE_ENCODING_LINEAR_8 = 2
AUDIO_FILE_ENCODING_LINEAR_16 = 3
AUDIO_FILE_ENCODING_LINEAR_24 = 4
AUDIO_FILE_ENCODING_LINEAR_32 = 5
AUDIO_FILE_ENCODING_FLOAT = 6
AUDIO_FILE_ENCODING_DOUBLE = 7
AUDIO_FILE_ENCODING_ADPCM_G721 = 23
AUDIO_FILE_ENCODING_ADPCM_G722 = 24
AUDIO_FILE_ENCODING_ADPCM_G723_3 = 25
AUDIO_FILE_ENCODING_ADPCM_G723_5 = 26
AUDIO_FILE_ENCODING_ALAW_8 = 27
# from <multimedia/audio_hdr.h>
AUDIO_UNKNOWN_SIZE = 0xFFFFFFFFL # ((unsigned)(~0))
_simple_encodings = [AUDIO_FILE_ENCODING_MULAW_8,
AUDIO_FILE_ENCODING_LINEAR_8,
AUDIO_FILE_ENCODING_LINEAR_16,
AUDIO_FILE_ENCODING_LINEAR_24,
AUDIO_FILE_ENCODING_LINEAR_32,
AUDIO_FILE_ENCODING_ALAW_8]
class Error(Exception):
pass
def _read_u32(file):
x = 0L
for i in range(4):
byte = file.read(1)
if byte == '':
raise EOFError
x = x*256 + ord(byte)
return x
def _write_u32(file, x):
data = []
for i in range(4):
d, m = divmod(x, 256)
data.insert(0, m)
x = d
for i in range(4):
file.write(chr(int(data[i])))
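# Editor's note (illustrative): these helpers implement the big-endian 32-bit
# encoding used by every header field; e.g. _write_u32(f, AUDIO_FILE_MAGIC)
# emits the four bytes '.', 's', 'n', 'd' (0x2e 0x73 0x6e 0x64).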
class Au_read:
def __init__(self, f):
if type(f) == type(''):
import __builtin__
f = __builtin__.open(f, 'rb')
self.initfp(f)
def __del__(self):
if self._file:
self.close()
def initfp(self, file):
self._file = file
self._soundpos = 0
magic = int(_read_u32(file))
if magic != AUDIO_FILE_MAGIC:
raise Error, 'bad magic number'
self._hdr_size = int(_read_u32(file))
if self._hdr_size < 24:
raise Error, 'header size too small'
if self._hdr_size > 100:
raise Error, 'header size ridiculously large'
self._data_size = _read_u32(file)
if self._data_size != AUDIO_UNKNOWN_SIZE:
self._data_size = int(self._data_size)
self._encoding = int(_read_u32(file))
if self._encoding not in _simple_encodings:
raise Error, 'encoding not (yet) supported'
if self._encoding in (AUDIO_FILE_ENCODING_MULAW_8,
AUDIO_FILE_ENCODING_ALAW_8):
self._sampwidth = 2
self._framesize = 1
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_8:
self._framesize = self._sampwidth = 1
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_16:
self._framesize = self._sampwidth = 2
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_24:
self._framesize = self._sampwidth = 3
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_32:
self._framesize = self._sampwidth = 4
else:
raise Error, 'unknown encoding'
self._framerate = int(_read_u32(file))
self._nchannels = int(_read_u32(file))
self._framesize = self._framesize * self._nchannels
if self._hdr_size > 24:
self._info = file.read(self._hdr_size - 24)
for i in range(len(self._info)):
if self._info[i] == '\0':
self._info = self._info[:i]
break
else:
self._info = ''
def getfp(self):
return self._file
def getnchannels(self):
return self._nchannels
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getnframes(self):
if self._data_size == AUDIO_UNKNOWN_SIZE:
return AUDIO_UNKNOWN_SIZE
if self._encoding in _simple_encodings:
return self._data_size / self._framesize
return 0 # XXX--must do some arithmetic here
def getcomptype(self):
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
return 'ULAW'
elif self._encoding == AUDIO_FILE_ENCODING_ALAW_8:
return 'ALAW'
else:
return 'NONE'
def getcompname(self):
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
return 'CCITT G.711 u-law'
elif self._encoding == AUDIO_FILE_ENCODING_ALAW_8:
return 'CCITT G.711 A-law'
else:
return 'not compressed'
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
return None
def getmark(self, id):
raise Error, 'no marks'
def readframes(self, nframes):
if self._encoding in _simple_encodings:
if nframes == AUDIO_UNKNOWN_SIZE:
data = self._file.read()
else:
# _framesize already includes the channel count (set in initfp), so do not
# multiply by _nchannels again.
data = self._file.read(nframes * self._framesize)
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
import audioop
data = audioop.ulaw2lin(data, self._sampwidth)
return data
return None # XXX--not implemented yet
def rewind(self):
self._soundpos = 0
self._file.seek(self._hdr_size)
def tell(self):
return self._soundpos
def setpos(self, pos):
if pos < 0 or pos > self.getnframes():
raise Error, 'position not in range'
self._file.seek(pos * self._framesize + self._hdr_size)
self._soundpos = pos
def close(self):
self._file = None
class Au_write:
def __init__(self, f):
if type(f) == type(''):
import __builtin__
f = __builtin__.open(f, 'wb')
self.initfp(f)
def __del__(self):
if self._file:
self.close()
def initfp(self, file):
self._file = file
self._framerate = 0
self._nchannels = 0
self._sampwidth = 0
self._framesize = 0
self._nframes = AUDIO_UNKNOWN_SIZE
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._info = ''
self._comptype = 'ULAW' # default is U-law
def setnchannels(self, nchannels):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if nchannels not in (1, 2, 4):
raise Error, 'only 1, 2, or 4 channels supported'
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error, 'number of channels not set'
return self._nchannels
def setsampwidth(self, sampwidth):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if sampwidth not in (1, 2, 4):
raise Error, 'bad sample width'
self._sampwidth = sampwidth
def getsampwidth(self):
        if not self._sampwidth:
raise Error, 'sample width not specified'
return self._sampwidth
def setframerate(self, framerate):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._framerate = framerate
def getframerate(self):
if not self._framerate:
raise Error, 'frame rate not set'
return self._framerate
def setnframes(self, nframes):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if nframes < 0:
raise Error, '# of frames cannot be negative'
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, type, name):
if type in ('NONE', 'ULAW'):
self._comptype = type
else:
raise Error, 'unknown compression type'
def getcomptype(self):
return self._comptype
def getcompname(self):
if self._comptype == 'ULAW':
return 'CCITT G.711 u-law'
elif self._comptype == 'ALAW':
return 'CCITT G.711 A-law'
else:
return 'not compressed'
def setparams(self, params):
nchannels, sampwidth, framerate, nframes, comptype, compname = params
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written()
        nframes = len(data) / (self._sampwidth * self._nchannels)
if self._comptype == 'ULAW':
import audioop
data = audioop.lin2ulaw(data, self._sampwidth)
self._file.write(data)
self._nframeswritten = self._nframeswritten + nframes
self._datawritten = self._datawritten + len(data)
def writeframes(self, data):
self.writeframesraw(data)
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
def close(self):
self._ensure_header_written()
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
self._file.flush()
self._file = None
#
# private methods
#
def _ensure_header_written(self):
if not self._nframeswritten:
if not self._nchannels:
raise Error, '# of channels not specified'
if not self._sampwidth:
raise Error, 'sample width not specified'
if not self._framerate:
raise Error, 'frame rate not specified'
self._write_header()
def _write_header(self):
if self._comptype == 'NONE':
if self._sampwidth == 1:
encoding = AUDIO_FILE_ENCODING_LINEAR_8
self._framesize = 1
elif self._sampwidth == 2:
encoding = AUDIO_FILE_ENCODING_LINEAR_16
self._framesize = 2
elif self._sampwidth == 4:
encoding = AUDIO_FILE_ENCODING_LINEAR_32
self._framesize = 4
else:
raise Error, 'internal error'
elif self._comptype == 'ULAW':
encoding = AUDIO_FILE_ENCODING_MULAW_8
self._framesize = 1
else:
raise Error, 'internal error'
self._framesize = self._framesize * self._nchannels
_write_u32(self._file, AUDIO_FILE_MAGIC)
header_size = 25 + len(self._info)
header_size = (header_size + 7) & ~7
_write_u32(self._file, header_size)
if self._nframes == AUDIO_UNKNOWN_SIZE:
length = AUDIO_UNKNOWN_SIZE
else:
length = self._nframes * self._framesize
_write_u32(self._file, length)
self._datalength = length
_write_u32(self._file, encoding)
_write_u32(self._file, self._framerate)
_write_u32(self._file, self._nchannels)
self._file.write(self._info)
self._file.write('\0'*(header_size - len(self._info) - 24))
def _patchheader(self):
self._file.seek(8)
_write_u32(self._file, self._datawritten)
self._datalength = self._datawritten
self._file.seek(0, 2)
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Au_read(f)
elif mode in ('w', 'wb'):
return Au_write(f)
else:
raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
openfp = open
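# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hypothetical example of reading an .au file with the classes
# defined above; the file name is made up and error handling is omitted.
#
#   reader = open('example.au', 'rb')      # returns an Au_read instance
#   nchannels, sampwidth, framerate, nframes, comptype, compname = reader.getparams()
#   frames = reader.readframes(nframes)    # u-law data is decoded to linear samples
#   reader.close()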
|
gpl-2.0
|
doismellburning/edx-platform
|
common/lib/capa/capa/tests/test_correctmap.py
|
107
|
7116
|
"""
Tests to verify that CorrectMap behaves correctly
"""
import unittest
from capa.correctmap import CorrectMap
import datetime
class CorrectMapTest(unittest.TestCase):
"""
Tests to verify that CorrectMap behaves correctly
"""
def setUp(self):
super(CorrectMapTest, self).setUp()
self.cmap = CorrectMap()
def test_set_input_properties(self):
# Set the correctmap properties for three inputs
self.cmap.set(
answer_id='1_2_1',
correctness='correct',
npoints=5,
msg='Test message',
hint='Test hint',
hintmode='always',
queuestate={
'key': 'secretstring',
'time': '20130228100026'
}
)
self.cmap.set(
answer_id='2_2_1',
correctness='incorrect',
npoints=None,
msg=None,
hint=None,
hintmode=None,
queuestate=None
)
self.cmap.set(
answer_id='3_2_1',
correctness='partially-correct',
npoints=3,
msg=None,
hint=None,
hintmode=None,
queuestate=None
)
# Assert that each input has the expected properties
self.assertTrue(self.cmap.is_correct('1_2_1'))
self.assertFalse(self.cmap.is_correct('2_2_1'))
self.assertTrue(self.cmap.is_correct('3_2_1'))
self.assertTrue(self.cmap.is_partially_correct('3_2_1'))
self.assertFalse(self.cmap.is_partially_correct('2_2_1'))
# Intentionally testing an item that's not in cmap.
self.assertFalse(self.cmap.is_partially_correct('9_2_1'))
self.assertEqual(self.cmap.get_correctness('1_2_1'), 'correct')
self.assertEqual(self.cmap.get_correctness('2_2_1'), 'incorrect')
self.assertEqual(self.cmap.get_correctness('3_2_1'), 'partially-correct')
self.assertEqual(self.cmap.get_npoints('1_2_1'), 5)
self.assertEqual(self.cmap.get_npoints('2_2_1'), 0)
self.assertEqual(self.cmap.get_npoints('3_2_1'), 3)
self.assertEqual(self.cmap.get_msg('1_2_1'), 'Test message')
self.assertEqual(self.cmap.get_msg('2_2_1'), None)
self.assertEqual(self.cmap.get_hint('1_2_1'), 'Test hint')
self.assertEqual(self.cmap.get_hint('2_2_1'), None)
self.assertEqual(self.cmap.get_hintmode('1_2_1'), 'always')
self.assertEqual(self.cmap.get_hintmode('2_2_1'), None)
self.assertTrue(self.cmap.is_queued('1_2_1'))
self.assertFalse(self.cmap.is_queued('2_2_1'))
self.assertEqual(self.cmap.get_queuetime_str('1_2_1'), '20130228100026')
self.assertEqual(self.cmap.get_queuetime_str('2_2_1'), None)
self.assertTrue(self.cmap.is_right_queuekey('1_2_1', 'secretstring'))
self.assertFalse(self.cmap.is_right_queuekey('1_2_1', 'invalidstr'))
self.assertFalse(self.cmap.is_right_queuekey('1_2_1', ''))
self.assertFalse(self.cmap.is_right_queuekey('1_2_1', None))
self.assertFalse(self.cmap.is_right_queuekey('2_2_1', 'secretstring'))
self.assertFalse(self.cmap.is_right_queuekey('2_2_1', 'invalidstr'))
self.assertFalse(self.cmap.is_right_queuekey('2_2_1', ''))
self.assertFalse(self.cmap.is_right_queuekey('2_2_1', None))
def test_get_npoints(self):
        # Set the correctmap properties for 7 inputs
        # 1) correct, 5.3 points
        # 2) correct, None points
        # 3) incorrect, 5 points
        # 4) incorrect, None points
        # 5) correct, 0 points
        # 6) partially correct, 2.5 points
        # 7) partially correct, None points
self.cmap.set(
answer_id='1_2_1',
correctness='correct',
npoints=5.3
)
self.cmap.set(
answer_id='2_2_1',
correctness='correct',
npoints=None
)
self.cmap.set(
answer_id='3_2_1',
correctness='incorrect',
npoints=5
)
self.cmap.set(
answer_id='4_2_1',
correctness='incorrect',
npoints=None
)
self.cmap.set(
answer_id='5_2_1',
correctness='correct',
npoints=0
)
self.cmap.set(
answer_id='6_2_1',
correctness='partially-correct',
npoints=2.5
)
self.cmap.set(
answer_id='7_2_1',
correctness='partially-correct',
npoints=None
)
# Assert that we get the expected points
# If points assigned --> npoints
# If no points assigned and correct --> 1 point
# If no points assigned and partially correct --> 1 point
# If no points assigned and incorrect --> 0 points
self.assertEqual(self.cmap.get_npoints('1_2_1'), 5.3)
self.assertEqual(self.cmap.get_npoints('2_2_1'), 1)
self.assertEqual(self.cmap.get_npoints('3_2_1'), 5)
self.assertEqual(self.cmap.get_npoints('4_2_1'), 0)
self.assertEqual(self.cmap.get_npoints('5_2_1'), 0)
self.assertEqual(self.cmap.get_npoints('6_2_1'), 2.5)
self.assertEqual(self.cmap.get_npoints('7_2_1'), 1)
def test_set_overall_message(self):
        # Default is an empty string
self.assertEqual(self.cmap.get_overall_message(), "")
# Set a message that applies to the whole question
self.cmap.set_overall_message("Test message")
# Retrieve the message
self.assertEqual(self.cmap.get_overall_message(), "Test message")
# Setting the message to None --> empty string
self.cmap.set_overall_message(None)
self.assertEqual(self.cmap.get_overall_message(), "")
def test_update_from_correctmap(self):
# Initialize a CorrectMap with some properties
self.cmap.set(
answer_id='1_2_1',
correctness='correct',
npoints=5,
msg='Test message',
hint='Test hint',
hintmode='always',
queuestate={
'key': 'secretstring',
'time': '20130228100026'
}
)
self.cmap.set_overall_message("Test message")
# Create a second cmap, then update it to have the same properties
# as the first cmap
other_cmap = CorrectMap()
other_cmap.update(self.cmap)
# Assert that it has all the same properties
self.assertEqual(
other_cmap.get_overall_message(),
self.cmap.get_overall_message()
)
self.assertEqual(
other_cmap.get_dict(),
self.cmap.get_dict()
)
def test_update_from_invalid(self):
# Should get an exception if we try to update() a CorrectMap
# with a non-CorrectMap value
invalid_list = [None, "string", 5, datetime.datetime.today()]
for invalid in invalid_list:
with self.assertRaises(Exception):
self.cmap.update(invalid)
|
agpl-3.0
|
apache/beam
|
sdks/python/apache_beam/runners/pipeline_context_test.py
|
5
|
3727
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the windowing classes."""
# pytype: skip-file
import unittest
from apache_beam import coders
from apache_beam.runners import pipeline_context
from apache_beam.transforms import environments
class PipelineContextTest(unittest.TestCase):
def test_deduplication(self):
context = pipeline_context.PipelineContext()
bytes_coder_ref = context.coders.get_id(coders.BytesCoder())
bytes_coder_ref2 = context.coders.get_id(coders.BytesCoder())
self.assertEqual(bytes_coder_ref, bytes_coder_ref2)
def test_deduplication_by_proto(self):
context = pipeline_context.PipelineContext()
env_proto = environments.SubprocessSDKEnvironment(
command_string="foo").to_runner_api(None)
env_ref_1 = context.environments.get_by_proto(env_proto)
env_ref_2 = context.environments.get_by_proto(env_proto, deduplicate=True)
self.assertEqual(env_ref_1, env_ref_2)
def test_equal_environments_are_deduplicated_when_fetched_by_obj_or_proto(
self):
context = pipeline_context.PipelineContext()
env = environments.SubprocessSDKEnvironment(command_string="foo")
env_proto = env.to_runner_api(None)
id_from_proto = context.environments.get_by_proto(env_proto)
id_from_obj = context.environments.get_id(env)
self.assertEqual(id_from_obj, id_from_proto)
self.assertEqual(
context.environments.get_by_id(id_from_obj).command_string, "foo")
env = environments.SubprocessSDKEnvironment(command_string="bar")
env_proto = env.to_runner_api(None)
id_from_obj = context.environments.get_id(env)
id_from_proto = context.environments.get_by_proto(
env_proto, deduplicate=True)
self.assertEqual(id_from_obj, id_from_proto)
self.assertEqual(
context.environments.get_by_id(id_from_obj).command_string, "bar")
def test_serialization(self):
context = pipeline_context.PipelineContext()
float_coder_ref = context.coders.get_id(coders.FloatCoder())
bytes_coder_ref = context.coders.get_id(coders.BytesCoder())
proto = context.to_runner_api()
context2 = pipeline_context.PipelineContext.from_runner_api(proto)
self.assertEqual(
coders.FloatCoder(), context2.coders.get_by_id(float_coder_ref))
self.assertEqual(
coders.BytesCoder(), context2.coders.get_by_id(bytes_coder_ref))
def test_common_id_assignment(self):
context = pipeline_context.PipelineContext()
float_coder_ref = context.coders.get_id(coders.FloatCoder())
bytes_coder_ref = context.coders.get_id(coders.BytesCoder())
context2 = pipeline_context.PipelineContext(
component_id_map=context.component_id_map)
bytes_coder_ref2 = context2.coders.get_id(coders.BytesCoder())
float_coder_ref2 = context2.coders.get_id(coders.FloatCoder())
self.assertEqual(bytes_coder_ref, bytes_coder_ref2)
self.assertEqual(float_coder_ref, float_coder_ref2)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
chirayudesai/linux-msm-fusion3
|
scripts/tracing/draw_functrace.py
|
14676
|
3560
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
		found among the parents, it is created as a new child of ROOT.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
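# Editor's note: a typical (hypothetical) ftrace line accepted by the regular
# expression above looks roughly like
#     bash-1234  [000]  10437.197639: mutex_unlock <-rb_simple_write
# which parseLine() returns as ('10437.197639', 'mutex_unlock', 'rb_simple_write'),
# i.e. (calltime, callee, caller).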
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
gpl-2.0
|
iconmix/skins-addons
|
script.iconmixtools/resources/lib/unidecode/x27.py
|
87
|
3785
|
data = (
'[?]', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'[?]', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
gpl-3.0
|
kennethgillen/ansible
|
lib/ansible/modules/cloud/cloudstack/cs_cluster.py
|
39
|
13114
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_cluster
short_description: Manages host clusters on Apache CloudStack based clouds.
description:
- Create, update and remove clusters.
version_added: "2.1"
author: "René Moser (@resmo)"
options:
name:
description:
- name of the cluster.
required: true
zone:
description:
- Name of the zone in which the cluster belongs to.
- If not set, default zone is used.
required: false
default: null
pod:
description:
- Name of the pod in which the cluster belongs to.
required: false
default: null
cluster_type:
description:
- Type of the cluster.
- Required if C(state=present)
required: false
default: null
choices: [ 'CloudManaged', 'ExternalManaged' ]
hypervisor:
description:
- Name the hypervisor to be used.
- Required if C(state=present).
required: false
default: none
choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ]
url:
description:
- URL for the cluster
required: false
default: null
username:
description:
- Username for the cluster.
required: false
default: null
password:
description:
- Password for the cluster.
required: false
default: null
guest_vswitch_name:
description:
- Name of virtual switch used for guest traffic in the cluster.
- This would override zone wide traffic label setting.
required: false
default: null
guest_vswitch_type:
description:
- Type of virtual switch used for guest traffic in the cluster.
      - Allowed values are vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch)
required: false
default: null
choices: [ 'vmwaresvs', 'vmwaredvs' ]
public_vswitch_name:
description:
- Name of virtual switch used for public traffic in the cluster.
- This would override zone wide traffic label setting.
required: false
default: null
public_vswitch_type:
description:
- Type of virtual switch used for public traffic in the cluster.
      - Allowed values are vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch)
required: false
default: null
choices: [ 'vmwaresvs', 'vmwaredvs' ]
vms_ip_address:
description:
- IP address of the VSM associated with this cluster.
required: false
default: null
vms_username:
description:
- Username for the VSM associated with this cluster.
required: false
default: null
vms_password:
description:
- Password for the VSM associated with this cluster.
required: false
default: null
ovm3_cluster:
description:
- Ovm3 native OCFS2 clustering enabled for cluster.
required: false
default: null
ovm3_pool:
description:
- Ovm3 native pooling enabled for cluster.
required: false
default: null
ovm3_vip:
description:
- Ovm3 vip to use for pool (and cluster).
required: false
default: null
state:
description:
- State of the cluster.
required: false
default: 'present'
choices: [ 'present', 'absent', 'disabled', 'enabled' ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure a cluster is present
- local_action:
module: cs_cluster
name: kvm-cluster-01
zone: ch-zrh-ix-01
hypervisor: KVM
cluster_type: CloudManaged
# Ensure a cluster is disabled
- local_action:
module: cs_cluster
name: kvm-cluster-01
zone: ch-zrh-ix-01
state: disabled
# Ensure a cluster is enabled
- local_action:
module: cs_cluster
name: kvm-cluster-01
zone: ch-zrh-ix-01
state: enabled
# Ensure a cluster is absent
- local_action:
module: cs_cluster
name: kvm-cluster-01
zone: ch-zrh-ix-01
state: absent
'''
RETURN = '''
---
id:
description: UUID of the cluster.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the cluster.
returned: success
type: string
sample: cluster01
allocation_state:
description: State of the cluster.
returned: success
type: string
sample: Enabled
cluster_type:
description: Type of the cluster.
returned: success
type: string
sample: ExternalManaged
cpu_overcommit_ratio:
description: The CPU overcommit ratio of the cluster.
returned: success
type: string
sample: 1.0
memory_overcommit_ratio:
description: The memory overcommit ratio of the cluster.
returned: success
type: string
sample: 1.0
managed_state:
description: Whether this cluster is managed by CloudStack.
returned: success
type: string
sample: Managed
ovm3_vip:
description: Ovm3 VIP to use for pooling and/or clustering
returned: success
type: string
sample: 10.10.10.101
hypervisor:
description: Hypervisor of the cluster
returned: success
type: string
sample: VMware
zone:
description: Name of zone the cluster is in.
returned: success
type: string
sample: ch-gva-2
pod:
description: Name of pod the cluster is in.
returned: success
type: string
sample: pod01
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackCluster(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackCluster, self).__init__(module)
self.returns = {
'allocationstate': 'allocation_state',
'hypervisortype': 'hypervisor',
'clustertype': 'cluster_type',
'podname': 'pod',
'managedstate': 'managed_state',
'memoryovercommitratio': 'memory_overcommit_ratio',
'cpuovercommitratio': 'cpu_overcommit_ratio',
'ovm3vip': 'ovm3_vip',
}
self.cluster = None
def _get_common_cluster_args(self):
args = {
'clustername': self.module.params.get('name'),
'hypervisor': self.module.params.get('hypervisor'),
'clustertype': self.module.params.get('cluster_type'),
}
state = self.module.params.get('state')
if state in ['enabled', 'disabled']:
args['allocationstate'] = state.capitalize()
return args
def get_pod(self, key=None):
args = {
'name': self.module.params.get('pod'),
'zoneid': self.get_zone(key='id'),
}
pods = self.cs.listPods(**args)
if pods:
return self._get_by_key(key, pods['pod'][0])
self.module.fail_json(msg="Pod %s not found in zone %s." % (self.module.params.get('pod'), self.get_zone(key='name')))
def get_cluster(self):
if not self.cluster:
args = {}
uuid = self.module.params.get('id')
if uuid:
args['id'] = uuid
clusters = self.cs.listClusters(**args)
if clusters:
self.cluster = clusters['cluster'][0]
return self.cluster
args['name'] = self.module.params.get('name')
clusters = self.cs.listClusters(**args)
if clusters:
self.cluster = clusters['cluster'][0]
                # fix different return from API than the request argument given
self.cluster['hypervisor'] = self.cluster['hypervisortype']
self.cluster['clustername'] = self.cluster['name']
return self.cluster
def present_cluster(self):
cluster = self.get_cluster()
if cluster:
cluster = self._update_cluster()
else:
cluster = self._create_cluster()
return cluster
def _create_cluster(self):
required_params = [
'cluster_type',
'hypervisor',
]
self.module.fail_on_missing_params(required_params=required_params)
args = self._get_common_cluster_args()
args['zoneid'] = self.get_zone(key='id')
args['podid'] = self.get_pod(key='id')
args['url'] = self.module.params.get('url')
args['username'] = self.module.params.get('username')
args['password'] = self.module.params.get('password')
args['guestvswitchname'] = self.module.params.get('guest_vswitch_name')
args['guestvswitchtype'] = self.module.params.get('guest_vswitch_type')
        args['publicvswitchname'] = self.module.params.get('public_vswitch_name')
args['publicvswitchtype'] = self.module.params.get('public_vswitch_type')
args['vsmipaddress'] = self.module.params.get('vms_ip_address')
args['vsmusername'] = self.module.params.get('vms_username')
        args['vsmpassword'] = self.module.params.get('vms_password')
args['ovm3cluster'] = self.module.params.get('ovm3_cluster')
args['ovm3pool'] = self.module.params.get('ovm3_pool')
args['ovm3vip'] = self.module.params.get('ovm3_vip')
self.result['changed'] = True
cluster = None
if not self.module.check_mode:
res = self.cs.addCluster(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
# API returns a list as result CLOUDSTACK-9205
if isinstance(res['cluster'], list):
cluster = res['cluster'][0]
else:
cluster = res['cluster']
return cluster
def _update_cluster(self):
cluster = self.get_cluster()
args = self._get_common_cluster_args()
args['id'] = cluster['id']
if self.has_changed(args, cluster):
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.updateCluster(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
cluster = res['cluster']
return cluster
def absent_cluster(self):
cluster = self.get_cluster()
if cluster:
self.result['changed'] = True
args = {
'id': cluster['id'],
}
if not self.module.check_mode:
res = self.cs.deleteCluster(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
return cluster
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
zone=dict(default=None),
pod=dict(default=None),
cluster_type=dict(choices=['CloudManaged', 'ExternalManaged'], default=None),
hypervisor=dict(choices=CS_HYPERVISORS, default=None),
state=dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'),
url=dict(default=None),
username=dict(default=None),
password=dict(default=None, no_log=True),
guest_vswitch_name=dict(default=None),
guest_vswitch_type=dict(choices=['vmwaresvs', 'vmwaredvs'], default=None),
public_vswitch_name=dict(default=None),
public_vswitch_type=dict(choices=['vmwaresvs', 'vmwaredvs'], default=None),
vms_ip_address=dict(default=None),
vms_username=dict(default=None),
vms_password=dict(default=None, no_log=True),
ovm3_cluster=dict(default=None),
ovm3_pool=dict(default=None),
ovm3_vip=dict(default=None),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_cluster = AnsibleCloudStackCluster(module)
state = module.params.get('state')
if state in ['absent']:
cluster = acs_cluster.absent_cluster()
else:
cluster = acs_cluster.present_cluster()
result = acs_cluster.get_result(cluster)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
qedsoftware/commcare-hq
|
custom/ilsgateway/tanzania/warehouse/updater.py
|
1
|
27752
|
from datetime import datetime, timedelta
import logging
import itertools
from celery.canvas import chain
from celery.task import task
from django.db import transaction, connection
from django.db.models import Q
from django.db.models.aggregates import Avg, Sum
from corehq.apps.locations.dbaccessors import get_users_by_location_id
from corehq.apps.products.models import SQLProduct
from corehq.apps.locations.models import Location, SQLLocation
from custom.ilsgateway.tanzania.warehouse import const
from custom.ilsgateway.tanzania.warehouse.alerts import populate_no_primary_alerts, \
populate_facility_stockout_alerts, create_alert
from dimagi.utils.chunked import chunked
from dimagi.utils.couch.bulk import get_docs
from dimagi.utils.dates import get_business_day_of_month, add_months, months_between
from casexml.apps.stock.models import StockReport
from custom.ilsgateway.models import SupplyPointStatus, SupplyPointStatusTypes, DeliveryGroups, \
OrganizationSummary, GroupSummary, SupplyPointStatusValues, Alert, ProductAvailabilityData, \
ILSGatewayConfig
"""
These functions and variables are ported from:
https://github.com/dimagi/logistics/blob/tz-master/logistics_project/apps/tanzania/reporting/run_reports.py
"""
def _is_valid_status(facility, date, status_type):
if status_type not in const.NEEDED_STATUS_TYPES:
return False
code = facility.metadata.get('group')
if not code:
return False
dg = DeliveryGroups(date.month)
if status_type == SupplyPointStatusTypes.R_AND_R_FACILITY:
return dg.current_submitting_group() == code
elif status_type == SupplyPointStatusTypes.DELIVERY_FACILITY:
return dg.current_delivering_group() == code
return True
def _get_window_date(status_type, date):
    # we need this method because the soh and supervision reports are
    # sometimes treated as reports for the _next_ month
if status_type == SupplyPointStatusTypes.SOH_FACILITY or \
status_type == SupplyPointStatusTypes.SUPERVISION_FACILITY:
# if the date is after the last business day of the month
# count it for the next month
if date.date() >= get_business_day_of_month(date.year, date.month, -1):
year, month = add_months(date.year, date.month, 1)
return datetime(year, month, 1)
return datetime(date.year, date.month, 1)
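# Editor's note: for example, a SOH status dated on or after the last business
# day of January is counted in February's window, while one dated earlier in
# January stays in January's window.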
def is_on_time(status_date, warehouse_date, status_type):
"""
on_time requirement
SOH report should be submitted before 6th business day of the month.
R & R report should be submitted before 13th business day of the month.
Otherwise reports are marked as late response.
"""
if status_type == SupplyPointStatusTypes.SOH_FACILITY:
if status_date.date() < get_business_day_of_month(warehouse_date.year, warehouse_date.month, 6):
return True
if status_type == SupplyPointStatusTypes.R_AND_R_FACILITY:
if status_date.date() < get_business_day_of_month(warehouse_date.year, warehouse_date.month, 13):
return True
return False
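# Editor's note: as a concrete (hypothetical) example, an SOH_FACILITY status
# submitted on the 3rd business day of the warehouse month is on time, while
# one submitted on the 7th business day is late; for R_AND_R_FACILITY the
# cut-off is the 13th business day instead of the 6th.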
def average_lead_time(facility_id, window_date):
    year, month = add_months(window_date.year, window_date.month, 1)
    end_date = datetime(year, month, 1)
received = SupplyPointStatus.objects.filter(
location_id=facility_id,
status_date__lt=end_date,
status_value=SupplyPointStatusValues.RECEIVED,
status_type=SupplyPointStatusTypes.DELIVERY_FACILITY).order_by('status_date')
total_time = timedelta(days=0)
count = 0
last_receipt = datetime(1900, 1, 1)
for receipt in received:
if receipt.status_date - last_receipt < timedelta(days=30):
last_receipt = receipt.status_date
continue
last_receipt = receipt.status_date
last_submitted = SupplyPointStatus.objects.filter(
location_id=facility_id,
status_date__lt=receipt.status_date,
status_value=SupplyPointStatusValues.SUBMITTED,
status_type=SupplyPointStatusTypes.R_AND_R_FACILITY).order_by('-status_date')
if last_submitted.count():
ltime = receipt.status_date - last_submitted[0].status_date
if timedelta(days=30) < ltime < timedelta(days=100):
total_time += ltime
count += 1
else:
continue
return total_time / count if count else None
def needed_status_types(org_summary):
facility = Location.get(org_summary.location_id)
return [status_type for status_type in const.NEEDED_STATUS_TYPES if _is_valid_status(facility,
org_summary.date, status_type)]
def not_responding_facility(org_summary):
for status_type in needed_status_types(org_summary):
group_summary, created = GroupSummary.objects.get_or_create(org_summary=org_summary,
title=status_type)
group_summary.total = 1
assert group_summary.responded in (0, 1)
if group_summary.title == SupplyPointStatusTypes.SOH_FACILITY and not group_summary.responded:
# TODO: this might not be right unless we also clear it
create_alert(org_summary.location_id, org_summary.date,
'soh_not_responding', {'number': 1})
elif group_summary.title == SupplyPointStatusTypes.R_AND_R_FACILITY and not group_summary.responded:
# TODO: this might not be right unless we also clear it
create_alert(org_summary.location_id, org_summary.date,
'rr_not_responded', {'number': 1})
elif group_summary.title == SupplyPointStatusTypes.DELIVERY_FACILITY and not group_summary.responded:
# TODO: this might not be right unless we also clear it
create_alert(org_summary.location_id, org_summary.date,
'delivery_not_responding', {'number': 1})
else:
# not an expected / needed group. ignore for now
pass
group_summary.save()
@transaction.atomic
def update_product_availability_facility_data(facility, products, start_date, end_date):
# product availability
existing_data = ProductAvailabilityData.objects.filter(
date__range=(
datetime(start_date.year, start_date.month, 1),
datetime(end_date.year, end_date.month, 1)
),
location_id=facility.get_id
)
product_data_dict = {
(pa.date, pa.location_id, pa.product): pa for pa in existing_data
}
product_data_list = []
previous_month = {}
for year, month in months_between(start_date, end_date):
window_date = datetime(year, month, 1)
for p in products:
now = datetime.utcnow()
if (window_date, facility.get_id, p.product_id) in product_data_dict:
previous_month[p.product_id] = product_data_dict[window_date, facility.get_id, p.product_id]
continue
else:
product_data = ProductAvailabilityData(
date=window_date,
location_id=facility.get_id,
product=p.product_id,
create_date=now,
update_date=now
)
# set defaults
product_data.total = 1
prev = None
if p.product_id in previous_month:
prev = previous_month[p.product_id]
if not prev:
previous_reports = ProductAvailabilityData.objects.filter(
product=p.product_id,
location_id=facility._id,
date__lt=window_date,
total=1
)
if previous_reports.count():
prev = previous_reports.latest('date')
if prev:
product_data.with_stock = prev.with_stock
product_data.without_stock = prev.without_stock
product_data.without_data = prev.without_data
else:
# otherwise we use the defaults
product_data.with_stock = 0
product_data.without_stock = 0
product_data.without_data = 1
if product_data.pk is not None:
product_data.save()
else:
product_data_list.append(product_data)
assert (product_data.with_stock + product_data.without_stock + product_data.without_data) == 1, \
"bad product data config for %s" % product_data
previous_month[p.product_id] = product_data
ProductAvailabilityData.objects.bulk_create(product_data_list)
def default_start_date():
return datetime(2012, 1, 1)
def _get_test_locations(domain):
"""
returns test region and all its children
"""
test_region = SQLLocation.objects.get(domain=domain, external_id=const.TEST_REGION_ID)
sql_locations = SQLLocation.objects.filter(
Q(domain=domain) & (Q(parent=test_region) | Q(parent__parent=test_region))
).exclude(is_archived=True).order_by('id').only('location_id')
return [sql_location.couch_location for sql_location in sql_locations] + \
[test_region.couch_location]
def populate_report_data(start_date, end_date, domain, runner, strict=True):
facilities = Location.filter_by_type(domain, 'FACILITY')
non_facilities = list(Location.filter_by_type(domain, 'DISTRICT'))
non_facilities += list(Location.filter_by_type(domain, 'REGION'))
non_facilities += list(Location.filter_by_type(domain, 'MSDZONE'))
non_facilities += list(Location.filter_by_type(domain, 'MOHSW'))
if runner.location:
if runner.location.location_type.name.upper() != 'FACILITY':
facilities = []
non_facilities = itertools.dropwhile(
lambda location: location.location_id != runner.location.location_id,
non_facilities
)
else:
facilities = itertools.dropwhile(
lambda location: location.location_id != runner.location.location_id,
facilities
)
facilities_chunked_list = chunked(facilities, 5)
for chunk in facilities_chunked_list:
res = chain(process_facility_warehouse_data.si(fac, start_date, end_date, runner) for fac in chunk)()
res.get()
non_facilities_chunked_list = chunked(non_facilities, 50)
# then populate everything above a facility off a warehouse table
for chunk in non_facilities_chunked_list:
res = chain(
process_non_facility_warehouse_data.si(org, start_date, end_date, runner, strict)
for org in chunk
)()
res.get()
runner.location = None
runner.save()
@task(queue='logistics_background_queue')
def process_facility_warehouse_data(facility, start_date, end_date, runner=None):
"""
process all the facility-level warehouse tables
"""
logging.info("processing facility %s (%s)" % (facility.name, str(facility._id)))
sql_location = facility.sql_location
if runner:
runner.location = sql_location
runner.save()
for alert_type in [const.SOH_NOT_RESPONDING, const.RR_NOT_RESPONDED, const.DELIVERY_NOT_RESPONDING]:
alert = Alert.objects.filter(location_id=facility._id, date__gte=start_date, date__lt=end_date,
type=alert_type)
alert.delete()
supply_point_id = sql_location.supply_point_id
location_id = facility._id
new_statuses = SupplyPointStatus.objects.filter(
location_id=facility._id,
status_date__gte=start_date,
status_date__lt=end_date
).order_by('status_date').iterator()
process_facility_statuses(location_id, new_statuses)
new_reports = StockReport.objects.filter(
stocktransaction__case_id=supply_point_id,
date__gte=start_date,
date__lt=end_date,
stocktransaction__type='stockonhand'
).distinct().order_by('date').iterator()
process_facility_product_reports(location_id, new_reports)
new_trans = get_latest_transaction_from_each_month(supply_point_id, start_date, end_date)
process_facility_transactions(location_id, new_trans, start_date, end_date)
products = SQLProduct.objects.filter(domain=facility.domain, is_archived=False)
users = get_users_by_location_id(facility.domain, facility.get_id)
# go through all the possible values in the date ranges
    # and make sure there are warehouse tables there
for year, month in months_between(start_date, end_date):
window_date = datetime(year, month, 1)
# create org_summary for every fac/date combo
org_summary, created = OrganizationSummary.objects.get_or_create(
location_id=facility._id,
date=window_date
)
org_summary.total_orgs = 1
alt = average_lead_time(facility._id, window_date)
if alt:
alt = alt.days
org_summary.average_lead_time_in_days = alt or 0
org_summary.save()
# create group_summary for every org_summary title combo
for title in const.NEEDED_STATUS_TYPES:
GroupSummary.objects.get_or_create(org_summary=org_summary,
title=title)
# update all the non-response data
not_responding_facility(org_summary)
# alerts
with transaction.atomic():
populate_no_primary_alerts(facility, window_date, users)
populate_facility_stockout_alerts(facility, window_date)
update_product_availability_facility_data(facility, products, start_date, end_date)
update_historical_data_for_location(facility)
@transaction.atomic
def process_facility_statuses(facility_id, statuses, alerts=True):
"""
For a given facility and list of statuses, update the appropriate
data warehouse tables. This should only be called on supply points
that are facilities.
"""
facility = Location.get(facility_id)
for status in statuses:
warehouse_date = _get_window_date(status.status_type, status.status_date)
if _is_valid_status(facility, status.status_date, status.status_type):
org_summary = OrganizationSummary.objects.get_or_create(
location_id=facility_id,
date=warehouse_date
)[0]
group_summary = GroupSummary.objects.get_or_create(
org_summary=org_summary,
title=status.status_type
)[0]
group_summary.total = 1
if status.status_value not in (SupplyPointStatusValues.REMINDER_SENT,
SupplyPointStatusValues.ALERT_SENT):
# we've responded to this query
group_summary.responded = 1
if status.status_value in [SupplyPointStatusValues.SUBMITTED,
SupplyPointStatusValues.RECEIVED]:
group_summary.complete = 1
else:
group_summary.complete = group_summary.complete or 0
if group_summary.complete:
if is_on_time(status.status_date, warehouse_date, status.status_type):
group_summary.on_time = 1
else:
group_summary.on_time = group_summary.on_time
else:
group_summary.on_time = 0
group_summary.save()
if alerts:
if status.status_value == SupplyPointStatusValues.NOT_SUBMITTED \
and status.status_type == SupplyPointStatusTypes.R_AND_R_FACILITY:
create_alert(facility_id, status.status_date, const.RR_NOT_SUBMITTED,
{'number': 1})
if status.status_value == SupplyPointStatusValues.NOT_RECEIVED \
and status.status_type == SupplyPointStatusTypes.DELIVERY_FACILITY:
create_alert(facility_id, status.status_date, const.DELIVERY_NOT_RECEIVED,
{'number': 1})
def process_facility_product_reports(facility_id, reports):
"""
For a given facility and list of ProductReports, update the appropriate
data warehouse tables. This should only be called on supply points
that are facilities. Currently this only affects stock on hand reporting
data. We need to use this method instead of the statuses because partial
stock on hand reports don't create valid status, but should be treated
like valid submissions in most of the rest of the site.
"""
months_updated = {}
for report in reports:
stock_transactions = report.stocktransaction_set.filter(type='stockonhand')
assert stock_transactions.count() > 0
warehouse_date = _get_window_date(SupplyPointStatusTypes.SOH_FACILITY, report.date)
if warehouse_date in months_updated:
# an optimization to avoid repeatedly doing this work for each
# product report for the entire month
continue
org_summary = OrganizationSummary.objects.get_or_create(location_id=facility_id, date=warehouse_date)[0]
group_summary = GroupSummary.objects.get_or_create(org_summary=org_summary,
title=SupplyPointStatusTypes.SOH_FACILITY)[0]
group_summary.total = 1
group_summary.responded = 1
group_summary.complete = 1
if is_on_time(report.date, warehouse_date, SupplyPointStatusTypes.SOH_FACILITY):
group_summary.on_time = 1
group_summary.save()
months_updated[warehouse_date] = None # update the cache of stuff we've dealt with
def get_latest_transaction_from_each_month(case_id, start_date, end_date):
query = '''
SELECT DISTINCT ON (year, month, st.product_id) date_part('year', sr.date) as year,
date_part('month', sr.date) as month, st.product_id, st.stock_on_hand
FROM stock_stocktransaction st JOIN stock_stockreport sr ON st.report_id=sr.id
WHERE case_id=%s AND sr.date BETWEEN %s AND %s ORDER BY year DESC,
month DESC, st.product_id, sr.date DESC;
'''
cursor = connection.cursor()
cursor.execute(query, [case_id, start_date, end_date])
return cursor.fetchall()
@transaction.atomic
def process_facility_transactions(facility_id, transactions, start_date, end_date):
"""
For a given facility and list of transactions, update the appropriate
data warehouse tables. This should only be called on supply points
that are facilities.
"""
existing_data = ProductAvailabilityData.objects.filter(
date__range=(
datetime(start_date.year, start_date.month, 1),
datetime(end_date.year, end_date.month, 1)
),
location_id=facility_id
)
product_data_dict = {
(pa.date, pa.location_id, pa.product): pa for pa in existing_data
}
for year, month, product_id, stock_on_hand in transactions:
date = datetime(int(year), int(month), 1)
if (date, facility_id, product_id) in product_data_dict:
product_data = product_data_dict[(date, facility_id, product_id)]
else:
product_data = ProductAvailabilityData(
product=product_id,
location_id=facility_id,
date=date
)
product_data.total = 1
product_data.without_data = 0
if stock_on_hand <= 0:
product_data.without_stock = 1
product_data.with_stock = 0
else:
product_data.without_stock = 0
product_data.with_stock = 1
product_data.save()
def get_non_archived_facilities_below(location):
return list(location.sql_location
.get_descendants(include_self=True)
.filter(is_archived=False,
location_type__name='FACILITY')
.couch_locations())
@task(queue='logistics_background_queue')
def process_non_facility_warehouse_data(location, start_date, end_date, runner=None, strict=True):
start_date = datetime(start_date.year, start_date.month, 1)
end_date = datetime(end_date.year, end_date.month, 1)
if runner:
runner.location = location.sql_location
runner.save()
facs = get_non_archived_facilities_below(location)
fac_ids = [f._id for f in facs]
logging.info("processing non-facility %s (%s), %s children"
% (location.name, str(location.location_id), len(facs)))
prods = SQLProduct.objects.filter(domain=location.domain, is_archived=False)
sub_summaries = OrganizationSummary.objects.filter(
location_id__in=fac_ids, date__range=(start_date, end_date), average_lead_time_in_days__gt=0
).values('date').annotate(average_time=Avg('average_lead_time_in_days'))
sub_summaries = {
(subsummary['date'].year, subsummary['date'].month): subsummary
for subsummary in sub_summaries
}
sub_prods = ProductAvailabilityData.objects.filter(
location_id__in=fac_ids, date__range=(start_date, end_date)
).values('product', 'date').annotate(
total_sum=Sum('total'),
with_stock_sum=Sum('with_stock'),
without_stock_sum=Sum('without_stock'),
)
sub_prods = {
((sub_prod['date'].year, sub_prod['date'].month), sub_prod['product']): sub_prod for sub_prod in sub_prods
}
sub_group_summaries = GroupSummary.objects.filter(
org_summary__location_id__in=fac_ids,
org_summary__date__range=(start_date, end_date)
).values('title', 'org_summary__date').annotate(
total_sum=Sum('total'),
responded_sum=Sum('responded'),
on_time_sum=Sum('on_time'),
complete_sum=Sum('complete')
)
sub_group_summaries = {
((sub_group_summary['org_summary__date'].year, sub_group_summary['org_summary__date'].month), sub_group_summary['title']): sub_group_summary
for sub_group_summary in sub_group_summaries
}
total_orgs = len(facs)
for year, month in months_between(start_date, end_date):
window_date = datetime(year, month, 1)
org_summary = OrganizationSummary.objects.get_or_create(
location_id=location.location_id, date=window_date
)[0]
org_summary.total_orgs = total_orgs
# lead times
if (year, month) in sub_summaries:
sub_summary = sub_summaries[year, month]
org_summary.average_lead_time_in_days = sub_summary['average_time']
else:
org_summary.average_lead_time_in_days = 0
org_summary.save()
# product availability
for p in prods:
product_data = ProductAvailabilityData.objects.get_or_create(product=p.product_id,
location_id=location.location_id,
date=window_date)[0]
sub_prod = sub_prods.get(((year, month), p.product_id), {})
product_data.total = sub_prod.get('total_sum', 0)
if strict:
assert product_data.total == total_orgs, \
"total should match number of sub facilities %s-%s" % (product_data.total, total_orgs)
product_data.with_stock = sub_prod.get('with_stock_sum', 0)
product_data.without_stock = sub_prod.get('without_stock_sum', 0)
product_data.without_data = product_data.total - product_data.with_stock - product_data.without_stock
product_data.save()
dg = DeliveryGroups(month=month, facs=facs)
for status_type in const.NEEDED_STATUS_TYPES:
gsum = GroupSummary.objects.get_or_create(org_summary=org_summary, title=status_type)[0]
sub_sum = sub_group_summaries.get(((year, month), status_type), {})
gsum.total = sub_sum.get('total_sum', 0)
gsum.responded = sub_sum.get('responded_sum', 0)
gsum.on_time = sub_sum.get('on_time_sum', 0)
gsum.complete = sub_sum.get('complete_sum', 0)
gsum.save()
if status_type == SupplyPointStatusTypes.DELIVERY_FACILITY:
expected = len(dg.delivering())
elif status_type == SupplyPointStatusTypes.R_AND_R_FACILITY:
expected = len(dg.submitting())
elif status_type == SupplyPointStatusTypes.SOH_FACILITY \
or status_type == SupplyPointStatusTypes.SUPERVISION_FACILITY:
expected = len(facs)
if gsum.total != expected:
logging.info("expected %s but was %s for %s" % (expected, gsum.total, gsum))
for alert_type in [const.RR_NOT_SUBMITTED, const.DELIVERY_NOT_RECEIVED,
const.SOH_NOT_RESPONDING, const.RR_NOT_RESPONDED, const.DELIVERY_NOT_RESPONDING]:
sub_alerts = Alert.objects.filter(location_id__in=fac_ids, date=window_date, type=alert_type)
aggregate_response_alerts(location.location_id, window_date, sub_alerts, alert_type)
update_historical_data_for_location(location)
def aggregate_response_alerts(location_id, date, alerts, alert_type):
total = sum([s.number for s in alerts])
if total > 0:
create_alert(location_id, date, alert_type, {'number': total})
def update_historical_data_for_location(loc):
"""
    Fill data with zeros for all months between the default start date and the date of the
    location's earliest summary. E.g. a location is created on 2016-02-10 and its earliest
    summary is from 2016-03-01, whereas the default start date is 2012-01-01, so we need to
    generate data for all months between 2012-01-01 and 2016-03-01.
    This function is important for all locations created after the initial run of the report runner.
"""
start_date = default_start_date()
try:
earliest_org_summary = OrganizationSummary.objects.filter(location_id=loc.location_id).earliest('date')
earliest_org_summary_date = earliest_org_summary.date
except OrganizationSummary.DoesNotExist:
earliest_org_summary_date = loc.sql_location.created_at
if start_date >= earliest_org_summary_date:
return
for year, month in months_between(start_date, earliest_org_summary_date):
window_date = datetime(year, month, 1)
for cls in [OrganizationSummary, ProductAvailabilityData, GroupSummary]:
_init_warehouse_model(cls, loc, window_date)
def _init_warehouse_model(cls, location, date):
if cls == OrganizationSummary:
_init_default(location, date)
elif cls == ProductAvailabilityData:
_init_with_product(location, date)
elif cls == GroupSummary:
_init_group_summary(location, date)
def _init_default(location, date):
OrganizationSummary.objects.get_or_create(location_id=location.location_id, date=date)
def _init_with_product(location, date):
for p in SQLProduct.objects.filter(domain=location.domain, is_archived=False):
ProductAvailabilityData.objects.get_or_create(location_id=location.location_id, date=date, product=p.product_id)
def _init_group_summary(location, date):
org_summary = OrganizationSummary.objects.get(location_id=location.location_id, date=date)
for title in const.NEEDED_STATUS_TYPES:
GroupSummary.objects.get_or_create(org_summary=org_summary,
title=title)
|
bsd-3-clause
|
pearfalse/disasterclass
|
src/data/data_processor.py
|
1
|
5844
|
#!/usr/bin/python
from __future__ import print_function
import os, sys, json
from math import log
from datetime import datetime
import re
def quoteAndEscape(s):
r"""Enquote s, escaping quote marks and backslashes. As a convenience, \n and \r are also stripped."""
return "\"%s\"" % s.replace("\n",'').replace("\r",'').replace("\\", "\\\\").replace("\"", "\\\"")
# print(sys.argv)
fpoutname = sys.argv[1]
if fpoutname == '-':
print("Printing to stdout", file=sys.stderr)
fpout = sys.stdout;
else:
print("Saving to %s"%fpoutname, file=sys.stderr)
fpout = open(fpoutname, 'wt')
if len(sys.argv) > 2:
jpath = sys.argv[2]
else:
jpath = "data.json"
jroot = json.load(open(jpath, "rt"))
Template = u"""/*
Disasterclass: data.d || Minecraft block and item data
Automatically generated on %(Date)s; DO NOT EDIT
Written in the D programming language.
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
module disasterclass.data;
import disasterclass.support;
import std.exception : assumeUnique;
import std.string : format;
public {
string blockOrItemName(BlockID block, BlockData data = 0)
{
auto firstMatchB = block in Blocks;
if (firstMatchB !is null) {
assert(data <= 15);
if (firstMatchB.subData is null) return firstMatchB.name;
return firstMatchB.subData.subName[data];
}
auto firstMatchI = block in Items;
if (firstMatchI is null || firstMatchI.name is null) return "[unknown item %%d]".format(block);
// todo: use damage for subitem
return firstMatchI.name;
}
}
package:
struct Block
{
string name;
BlockVariantNames* subData;
uint flags;
}
struct BlockVariantNames
{
string[16] subName;
}
struct Item
{
string name;
uint flags;
}
static __gshared immutable Block[BlockID] Blocks;
static __gshared immutable Item[BlockID] Items;
//SubitemData
//%(SubitemDeclarations)s
//;
enum BlockType : BlockID
{
%(BlockTypesEnum)s
}
enum ItemType : ItemID
{
%(ItemTypesEnum)s
}
%(FlagsEnum)s
shared static this()
{
Block[BlockID] b;
%(BlockDataDeclarations)s
b.rehash();
Blocks = b.assumeUnique();
Item [BlockID] i;
%(ItemDataDeclarations)s
i.rehash();
Items = i.assumeUnique();
}
"""
class Flags:
NotValidId = 0x80000000
Block = 0x40000000
UsesDataField = 0x20000000
TileEntity = 0x10000000
BlockLightLevel_M = 0x0f000000
Transparent = 0x00400000
Opaque = 0x00200000
Solid = 0x00100000
SensitiveToPlacement = 0x00040000
WallSupport = 0x00020000
FloorSupport = 0x00010000
BlockItemIdsDiffer = 0x00008000
UsesDamage = 0x00002000
OnlyCreativeLegit = 0x00001000
OnlySilkTouchLegit = 0x00000800
Tool = 0x00000010
Armour = 0x00000008
LogMaxStack_M = 0x00000007
BlockLightLevel_S = 24
LogMaxStack_S = 0
@classmethod
def asDCode(cls):
flagNames = [x for x in dir(cls) if not x.endswith('__') and type(cls.__dict__[x]) == int]
maxKeyLength = max(len(x) for x in flagNames)
flags = [(x, cls.__dict__[x]) for x in flagNames if x[-2:] != "_S"]
flags.sort(cmp = lambda a, b: cmp(b[1], a[1]))
flags += [(x, cls.__dict__[x]) for x in flagNames if x[-2:] == "_S"]
s = ",\n".join( "\t%s = 0x%s" % (x[0].ljust(maxKeyLength), hex(x[1])[2:].zfill(8)) for x in flags )
return "enum Flags {\n%s\n}" % s
def makeEnumName(s):
match = rMusicDisc.match(s)
if match:
# is a music disc
return rMusicDisc.sub("Music_Disc_\\1", s)
else:
# do standard substitutions
return s.replace("\x20", "_").replace("'", '').replace("(", '').replace(")", '').replace("-", '') # assuming no other problems
Fields = {}
Fields['Date'] = datetime.now().isoformat()
Fields['SubitemDeclarations'] = " <not implemented>"
Fields['ItemDataDeclarations'] = "\t// not implemented"
Fields['FlagsEnum'] = Flags.asDCode()
idd = []
maxBlockIdLen = max(len(str(x['Id'])) for x in jroot['Blocks'])
rMusicDisc = re.compile(r"^(.*) Disc$")
for blk in jroot['Blocks']:
blk['_IdPadded'] = str(blk["Id"]).rjust(maxBlockIdLen, "\x20")
blk['_NameQuoted'] = quoteAndEscape(blk['Name'])
blk['_EnumName'] = makeEnumName(blk['Name'])
flags = 0
logstack = int(log(blk['MaxStack'], 2))
assert(1<<logstack == blk['MaxStack'])
assert(logstack <= 7)
flags |= logstack
if "IsArmour" in blk['Flags']:
flags |= Flags.Armour
if "IsTool" in blk['Flags']:
flags |= Flags.Tool
if "IsBlock" in blk['Flags'] or blk['Id'] <= 255:
flags |= Flags.Block
if "UsesDamage" in blk['Flags']:
flags |= Flags.UsesDamage
if "DifferentItemId" in blk['Flags']:
flags |= Flags.BlockItemIdsDiffer
if "SensitiveToPlacement" in blk['Flags']:
flags |= Flags.SensitiveToPlacement
if "Light" in blk:
blockLight = blk['Light']
assert(blockLight <= 15)
assert(blockLight >= 0)
flags |= (blockLight << Flags.BlockLightLevel_S)
if "TileEntity" in blk['Flags']:
flags |= Flags.TileEntity
if "HasData" in blk['Flags']:
flags |= Flags.UsesDataField
if "Transparent" in blk['Flags']:
flags |= Flags.Transparent
if "Opaque" in blk['Flags']:
flags |= Flags.Opaque
if "Solid" in blk['Flags']:
flags |= Flags.Solid
blk['Flags'] = "%08x" % flags
idd.append("""\tb[%(_IdPadded)s] = Block(%(_NameQuoted)s, null, 0x%(Flags)s);""" % blk)
enumNames = [(b['_EnumName'], b['Id']) for b in jroot["Blocks"]]
enumNames.sort(lambda a, b: cmp(a[1], b[1]))
maxEnumLength = max(len(x[0]) for x in enumNames)
Fields['BlockTypesEnum'] = ",\n".join("\t%s = %d" % (x[0].ljust(maxEnumLength), x[1]) for x in enumNames)
Fields['ItemTypesEnum'] = 'None = 0 // currently unsupported'
Fields['BlockDataDeclarations'] = '\n'.join(idd)
print(Template % Fields, file=fpout)
|
mpl-2.0
|
pombredanne/blivet-1
|
blivet/deviceaction.py
|
1
|
21894
|
# deviceaction.py
# Device modification action classes for anaconda's storage configuration
# module.
#
# Copyright (C) 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Dave Lehman <[email protected]>
#
from udev import *
import math
from devices import StorageDevice
from devices import PartitionDevice
from devices import LVMLogicalVolumeDevice
from formats import getFormat
from errors import *
from parted import partitionFlag, PARTITION_LBA
from flags import flags
import gettext
_ = lambda x: gettext.ldgettext("blivet", x)
import logging
log = logging.getLogger("blivet")
from contextlib import contextmanager
@contextmanager
def progress_report_stub(message):
yield
try:
from pyanaconda.progress import progress_report
except ImportError:
progress_report = progress_report_stub
# The values are just hints as to the ordering.
# Eg: fsmod and devmod ordering depends on the mod (shrink -v- grow)
ACTION_TYPE_NONE = 0
ACTION_TYPE_DESTROY = 1000
ACTION_TYPE_RESIZE = 500
ACTION_TYPE_CREATE = 100
action_strings = {ACTION_TYPE_NONE: "None",
ACTION_TYPE_DESTROY: "Destroy",
ACTION_TYPE_RESIZE: "Resize",
ACTION_TYPE_CREATE: "Create"}
ACTION_OBJECT_NONE = 0
ACTION_OBJECT_FORMAT = 1
ACTION_OBJECT_DEVICE = 2
object_strings = {ACTION_OBJECT_NONE: "None",
ACTION_OBJECT_FORMAT: "Format",
ACTION_OBJECT_DEVICE: "Device"}
RESIZE_SHRINK = 88
RESIZE_GROW = 89
resize_strings = {RESIZE_SHRINK: "Shrink",
RESIZE_GROW: "Grow"}
def action_type_from_string(type_string):
if type_string is None:
return None
for (k,v) in action_strings.items():
if v.lower() == type_string.lower():
return k
return resize_type_from_string(type_string)
def action_object_from_string(type_string):
if type_string is None:
return None
for (k,v) in object_strings.items():
if v.lower() == type_string.lower():
return k
def resize_type_from_string(type_string):
if type_string is None:
return None
for (k,v) in resize_strings.items():
if v.lower() == type_string.lower():
return k
class DeviceAction(object):
""" An action that will be carried out in the future on a Device.
These classes represent actions to be performed on devices or
filesystems.
The operand Device instance will be modified according to the
action, but no changes will be made to the underlying device or
filesystem until the DeviceAction instance's execute method is
called. The DeviceAction instance's cancel method should reverse
any modifications made to the Device instance's attributes.
If the Device instance represents a pre-existing device, the
constructor should call any methods or set any attributes that the
action will eventually change. Device/DeviceFormat classes should verify
that the requested modifications are reasonable and raise an
exception if not.
Only one action of any given type/object pair can exist for any
given device at any given time. This is enforced by the
DeviceTree.
Basic usage:
a = DeviceAction(dev)
a.execute()
OR
a = DeviceAction(dev)
a.cancel()
XXX should we back up the device with a deep copy for forcibly
cancelling actions?
The downside is that we lose any checking or verification that
would get done when resetting the Device instance's attributes to
their original values.
The upside is that we would be guaranteed to achieve a total
reversal. No chance of, eg: resizes ending up altering Device
size due to rounding or other miscalculation.
"""
type = ACTION_TYPE_NONE
obj = ACTION_OBJECT_NONE
typeDesc = ""
_id = 0
def __init__(self, device):
if not isinstance(device, StorageDevice):
raise ValueError("arg 1 must be a StorageDevice instance")
self.device = device
# Establish a unique id for each action instance. Making shallow or
# deep copies of DeviceAction instances will require __copy__ and
# __deepcopy__ methods to handle incrementing the id in the copy
self.id = DeviceAction._id
DeviceAction._id += 1
def execute(self):
""" perform the action """
pass
def cancel(self):
""" cancel the action """
pass
@property
def isDestroy(self):
return self.type == ACTION_TYPE_DESTROY
@property
def isCreate(self):
return self.type == ACTION_TYPE_CREATE
@property
def isResize(self):
return self.type == ACTION_TYPE_RESIZE
@property
def isShrink(self):
return (self.type == ACTION_TYPE_RESIZE and self.dir == RESIZE_SHRINK)
@property
def isGrow(self):
return (self.type == ACTION_TYPE_RESIZE and self.dir == RESIZE_GROW)
@property
def isDevice(self):
return self.obj == ACTION_OBJECT_DEVICE
@property
def isFormat(self):
return self.obj == ACTION_OBJECT_FORMAT
@property
def format(self):
return self.device.format
@property
def typeString(self):
""" String indicating if this action is a create, destroy or resize. """
return action_strings[self.type]
@property
def objectString(self):
""" String indicating if this action's operand is device or format. """
return object_strings[self.obj]
@property
def resizeString(self):
""" String representing the direction of a resize action. """
s = ""
if self.isResize:
s = resize_strings[self.dir]
return s
@property
def objectTypeString(self):
""" String representing the type of the operand device or format. """
if self.isFormat:
s = self.format.name
else:
s = self.device.type
return s
def __str__(self):
s = "[%d] %s %s" % (self.id, self.typeString, self.objectString)
if self.isResize:
s += " (%s)" % self.resizeString
if self.isFormat:
s += " %s on" % self.format.desc
s += " %s %s (id %d)" % (self.device.type, self.device.name,
self.device.id)
return s
def requires(self, action):
""" Return True if self requires action. """
return False
def obsoletes(self, action):
""" Return True is self obsoletes action.
DeviceAction instances obsolete other DeviceAction instances with
lower id and same device.
"""
return (self.device.id == action.device.id and
self.type == action.type and
self.obj == action.obj and
self.id > action.id)
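# Illustrative sketch (assumed usage, not part of blivet): given two queued
# actions on the same hypothetical StorageDevice `dev`, the later (higher-id)
# action obsoletes the earlier one because device id, type and object all match:
#   a1 = DeviceAction(dev); a2 = DeviceAction(dev)
#   a2.obsoletes(a1)  # True
#   a1.obsoletes(a2)  # False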
class ActionCreateDevice(DeviceAction):
""" Action representing the creation of a new device. """
type = ACTION_TYPE_CREATE
obj = ACTION_OBJECT_DEVICE
typeDesc = _("create device")
def __init__(self, device):
if device.exists:
raise ValueError("device already exists")
# FIXME: assert device.fs is None
DeviceAction.__init__(self, device)
def execute(self):
self.device.create()
def requires(self, action):
""" Return True if self requires action.
Device create actions require other actions when either of the
following is true:
- this action's device depends on the other action's device
- both actions are partition create actions on the same disk
and this partition has a higher number
"""
rc = False
if self.device.dependsOn(action.device):
rc = True
elif (action.isCreate and action.isDevice and
isinstance(self.device, PartitionDevice) and
isinstance(action.device, PartitionDevice) and
self.device.disk == action.device.disk):
# create partitions in ascending numerical order
selfNum = self.device.partedPartition.number
otherNum = action.device.partedPartition.number
if selfNum > otherNum:
rc = True
elif (action.isCreate and action.isDevice and
isinstance(self.device, LVMLogicalVolumeDevice) and
isinstance(action.device, LVMLogicalVolumeDevice) and
self.device.vg == action.device.vg and
action.device.singlePV and not self.device.singlePV):
rc = True
return rc
class ActionDestroyDevice(DeviceAction):
""" An action representing the deletion of an existing device. """
type = ACTION_TYPE_DESTROY
obj = ACTION_OBJECT_DEVICE
typeDesc = _("destroy device")
def __init__(self, device):
# XXX should we insist that device.fs be None?
DeviceAction.__init__(self, device)
if device.exists:
device.teardown()
def execute(self):
self.device.destroy()
# Make sure libparted does not keep cached info for this device
# and returns it when we create a new device with the same name
if self.device.partedDevice:
try:
self.device.partedDevice.removeFromCache()
except Exception:
pass
def requires(self, action):
""" Return True if self requires action.
Device destroy actions require other actions when either of the
following is true:
- the other action's device depends on this action's device
- both actions are partition create actions on the same disk
and this partition has a lower number
"""
rc = False
if action.device.dependsOn(self.device) and action.isDestroy:
rc = True
elif (action.isDestroy and action.isDevice and
isinstance(self.device, PartitionDevice) and
isinstance(action.device, PartitionDevice) and
self.device.disk == action.device.disk):
# remove partitions in descending numerical order
selfNum = self.device.partedPartition.number
otherNum = action.device.partedPartition.number
if selfNum < otherNum:
rc = True
elif (action.isDestroy and action.isFormat and
action.device.id == self.device.id):
# device destruction comes after destruction of device's format
rc = True
return rc
def obsoletes(self, action):
""" Return True if self obsoletes action.
- obsoletes all actions w/ lower id that act on the same device,
including self, if device does not exist
- obsoletes all but ActionDestroyFormat actions w/ lower id on the
same device if device exists
"""
rc = False
if action.device.id == self.device.id:
if self.id >= action.id and not self.device.exists:
rc = True
elif self.id > action.id and \
self.device.exists and \
not (action.isDestroy and action.isFormat):
rc = True
return rc
class ActionResizeDevice(DeviceAction):
""" An action representing the resizing of an existing device. """
type = ACTION_TYPE_RESIZE
obj = ACTION_OBJECT_DEVICE
typeDesc = _("resize device")
def __init__(self, device, newsize):
if not device.resizable:
raise ValueError("device is not resizable")
if long(math.floor(device.currentSize)) == newsize:
raise ValueError("new size same as old size")
DeviceAction.__init__(self, device)
if newsize > long(math.floor(device.currentSize)):
self.dir = RESIZE_GROW
else:
self.dir = RESIZE_SHRINK
if device.targetSize > 0:
self.origsize = device.targetSize
else:
self.origsize = device.size
self.device.targetSize = newsize
def execute(self):
self.device.resize()
def cancel(self):
self.device.targetSize = self.origsize
def requires(self, action):
""" Return True if self requires action.
A device resize action requires another action if:
- the other action is a format resize on the same device and
both are shrink operations
- the other action grows a device (or format it contains) that
this action's device depends on
- the other action shrinks a device (or format it contains)
that depends on this action's device
"""
retval = False
if action.isResize:
if self.device.id == action.device.id and \
self.dir == action.dir and \
action.isFormat and self.isShrink:
retval = True
elif action.isGrow and self.device.dependsOn(action.device):
retval = True
elif action.isShrink and action.device.dependsOn(self.device):
retval = True
return retval
class ActionCreateFormat(DeviceAction):
""" An action representing creation of a new filesystem. """
type = ACTION_TYPE_CREATE
obj = ACTION_OBJECT_FORMAT
typeDesc = _("create format")
def __init__(self, device, format=None):
DeviceAction.__init__(self, device)
if format:
self.origFormat = device.format
if self.device.format.exists:
self.device.format.teardown()
self.device.format = format
else:
self.origFormat = getFormat(None)
def execute(self):
msg = _("Creating %(type)s on %(device)s") % {"type": self.device.format.type, "device": self.device.path}
with progress_report(msg):
self.device.setup()
if isinstance(self.device, PartitionDevice):
for flag in partitionFlag.keys():
# Keep the LBA flag on pre-existing partitions
if flag in [ PARTITION_LBA, self.format.partedFlag ]:
continue
self.device.unsetFlag(flag)
if self.format.partedFlag is not None:
self.device.setFlag(self.format.partedFlag)
if self.format.partedSystem is not None:
self.device.partedPartition.system = self.format.partedSystem
self.device.disk.format.commitToDisk()
self.device.format.create(device=self.device.path,
options=self.device.formatArgs)
# Get the UUID now that the format is created
udev_settle()
self.device.updateSysfsPath()
info = udev_get_block_device(self.device.sysfsPath)
# only do this if the format has a device known to udev
# (the format might not have a normal device at all)
if info:
self.device.format.uuid = udev_device_get_uuid(info)
self.device.deviceLinks = udev_device_get_symlinks(info)
elif self.device.format.type != "tmpfs":
# udev lookup failing is a serious issue for anything other than tmpfs
log.error("udev lookup failed for device: %s" % self.device)
def cancel(self):
self.device.format = self.origFormat
def requires(self, action):
""" Return True if self requires action.
Format create action can require another action if:
- this action's device depends on the other action's device
and the other action is not a device destroy action
- the other action is a create or resize of this action's
device
"""
return ((self.device.dependsOn(action.device) and
not (action.isDestroy and action.isDevice)) or
(action.isDevice and (action.isCreate or action.isResize) and
self.device.id == action.device.id))
def obsoletes(self, action):
""" Return True if this action obsoletes action.
Format create actions obsolete the following actions:
- format actions w/ lower id on this action's device, other
than those that destroy existing formats
"""
return (self.device.id == action.device.id and
self.obj == action.obj and
not (action.isDestroy and action.format.exists) and
self.id > action.id)
class ActionDestroyFormat(DeviceAction):
""" An action representing the removal of an existing filesystem. """
type = ACTION_TYPE_DESTROY
obj = ACTION_OBJECT_FORMAT
typeDesc = _("destroy format")
def __init__(self, device):
DeviceAction.__init__(self, device)
self.origFormat = self.device.format
if device.format.exists:
device.format.teardown()
self.device.format = None
def execute(self):
""" wipe the filesystem signature from the device """
status = self.device.status
self.device.setup(orig=True)
self.format.destroy()
udev_settle()
if flags.installer_mode or not status:
self.device.teardown()
def cancel(self):
self.device.format = self.origFormat
@property
def format(self):
return self.origFormat
def requires(self, action):
""" Return True if self requires action.
Format destroy actions require other actions when:
- the other action's device depends on this action's device
and the other action is a destroy action
"""
return action.device.dependsOn(self.device) and action.isDestroy
def obsoletes(self, action):
""" Return True if this action obsoletes action.
Format destroy actions obsolete the following actions:
- format actions w/ lower id on same device, including self if
format does not exist
- format destroy action on a non-existent format shouldn't
obsolete a format destroy action on an existing one
"""
return (self.device.id == action.device.id and
self.obj == action.obj and
(self.id > action.id or
(self.id == action.id and not self.format.exists)) and
not (action.format.exists and not self.format.exists))
class ActionResizeFormat(DeviceAction):
""" An action representing the resizing of an existing filesystem.
XXX Do we even want to support resizing of a filesystem without
also resizing the device it resides on?
"""
type = ACTION_TYPE_RESIZE
obj = ACTION_OBJECT_FORMAT
typeDesc = _("resize format")
def __init__(self, device, newsize):
if not device.format.resizable:
raise ValueError("format is not resizable")
if long(math.floor(device.format.currentSize)) == newsize:
raise ValueError("new size same as old size")
DeviceAction.__init__(self, device)
if newsize > long(math.floor(device.format.currentSize)):
self.dir = RESIZE_GROW
else:
self.dir = RESIZE_SHRINK
self.origSize = self.device.format.targetSize
self.device.format.targetSize = newsize
def execute(self):
msg = _("Resizing filesystem on %(device)s") % {"device": self.device.path}
with progress_report(msg):
self.device.setup(orig=True)
self.device.format.doResize()
def cancel(self):
self.device.format.targetSize = self.origSize
def requires(self, action):
""" Return True if self requires action.
A format resize action requires another action if:
- the other action is a device resize on the same device and
both are grow operations
- the other action shrinks a device (or format it contains)
that depends on this action's device
- the other action grows a device (or format) that this
action's device depends on
"""
retval = False
if action.isResize:
if self.device.id == action.device.id and \
self.dir == action.dir and \
action.isDevice and self.isGrow:
retval = True
elif action.isShrink and action.device.dependsOn(self.device):
retval = True
elif action.isGrow and self.device.dependsOn(action.device):
retval = True
return retval
|
gpl-2.0
|
julianwang/cinder
|
cinder/tests/unit/api/contrib/test_volume_manage.py
|
4
|
8420
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
import webob
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
def app():
# no auth, just let environ['cinder.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
def db_service_get_by_host_and_topic(context, host, topic):
"""Replacement for db.service_get_by_host_and_topic.
We stub the db.service_get_by_host_and_topic method to return something
for a specific host, and raise an exception for anything else. We don't
use the returned data (the code under test just uses the call to check for
existence of a host, so the content returned doesn't matter).
"""
if host == 'host_ok':
return {}
raise exception.ServiceNotFound(service_id=host)
# Some of the tests check that volume types are correctly validated during a
# volume manage operation. This data structure represents an existing volume
# type.
fake_vt = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'name': 'good_fakevt'}
def vt_get_volume_type_by_name(context, name):
"""Replacement for cinder.volume.volume_types.get_volume_type_by_name.
Overrides cinder.volume.volume_types.get_volume_type_by_name to return
the volume type based on inspection of our fake structure, rather than
going to the Cinder DB.
"""
if name == fake_vt['name']:
return fake_vt
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
def vt_get_volume_type(context, vt_id):
"""Replacement for cinder.volume.volume_types.get_volume_type.
Overrides cinder.volume.volume_types.get_volume_type to return the
volume type based on inspection of our fake structure, rather than going
to the Cinder DB.
"""
if vt_id == fake_vt['id']:
return fake_vt
raise exception.VolumeTypeNotFound(volume_type_id=vt_id)
def api_manage(*args, **kwargs):
"""Replacement for cinder.volume.api.API.manage_existing.
Overrides cinder.volume.api.API.manage_existing to return some fake volume
data structure, rather than initiating a real volume managing.
Note that we don't try to replicate any passed-in information (e.g. name,
volume type) in the returned structure.
"""
vol = {
'status': 'creating',
'display_name': 'fake_name',
'availability_zone': 'nova',
'tenant_id': 'fake',
'created_at': 'DONTCARE',
'id': 'ffffffff-0000-ffff-0000-ffffffffffff',
'volume_type': None,
'snapshot_id': None,
'user_id': 'fake',
'launched_at': 'DONTCARE',
'size': 0,
'attach_status': 'detached',
'volume_type_id': None}
return vol
@mock.patch('cinder.db.service_get_by_host_and_topic',
db_service_get_by_host_and_topic)
@mock.patch('cinder.volume.volume_types.get_volume_type_by_name',
vt_get_volume_type_by_name)
@mock.patch('cinder.volume.volume_types.get_volume_type',
vt_get_volume_type)
class VolumeManageTest(test.TestCase):
"""Test cases for cinder/api/contrib/volume_manage.py
The API extension adds a POST /os-volume-manage API that is passed a cinder
host name, and a driver-specific reference parameter. If everything
is passed correctly, then the cinder.volume.api.API.manage_existing method
is invoked to manage an existing storage object on the host.
In this set of test cases, we are ensuring that the code correctly parses
the request structure and raises the correct exceptions when things are not
right, and calls down into cinder.volume.api.API.manage_existing with the
correct arguments.
"""
def setUp(self):
super(VolumeManageTest, self).setUp()
def _get_resp(self, body):
"""Helper to execute an os-volume-manage API call."""
req = webob.Request.blank('/v2/fake/os-volume-manage')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.environ['cinder.context'] = context.RequestContext('admin',
'fake',
True)
req.body = jsonutils.dumps(body)
res = req.get_response(app())
return res
@mock.patch('cinder.volume.api.API.manage_existing', wraps=api_manage)
def test_manage_volume_ok(self, mock_api_manage):
"""Test successful manage volume execution.
Tests for correct operation when valid arguments are passed in the
request body. We ensure that cinder.volume.api.API.manage_existing got
called with the correct arguments, and that we return the correct HTTP
code to the caller.
"""
body = {'volume': {'host': 'host_ok',
'ref': 'fake_ref'}}
res = self._get_resp(body)
self.assertEqual(res.status_int, 202, res)
# Check that the manage API was called with the correct arguments.
self.assertEqual(mock_api_manage.call_count, 1)
args = mock_api_manage.call_args[0]
self.assertEqual(args[1], body['volume']['host'])
self.assertEqual(args[2], body['volume']['ref'])
def test_manage_volume_missing_host(self):
"""Test correct failure when host is not specified."""
body = {'volume': {'ref': 'fake_ref'}}
res = self._get_resp(body)
self.assertEqual(res.status_int, 400)
def test_manage_volume_missing_ref(self):
"""Test correct failure when the ref is not specified."""
body = {'volume': {'host': 'host_ok'}}
res = self._get_resp(body)
self.assertEqual(res.status_int, 400)
pass
@mock.patch('cinder.volume.api.API.manage_existing', api_manage)
def test_manage_volume_volume_type_by_uuid(self):
"""Tests for correct operation when a volume type is specified by ID.
We wrap cinder.volume.api.API.manage_existing so that managing is not
actually attempted.
"""
body = {'volume': {'host': 'host_ok',
'ref': 'fake_ref',
'volume_type':
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}}
res = self._get_resp(body)
self.assertEqual(res.status_int, 202, res)
pass
@mock.patch('cinder.volume.api.API.manage_existing', api_manage)
def test_manage_volume_volume_type_by_name(self):
"""Tests for correct operation when a volume type is specified by name.
We wrap cinder.volume.api.API.manage_existing so that managing is not
actually attempted.
"""
body = {'volume': {'host': 'host_ok',
'ref': 'fake_ref',
'volume_type': 'good_fakevt'}}
res = self._get_resp(body)
self.assertEqual(res.status_int, 202, res)
pass
def test_manage_volume_bad_volume_type_by_uuid(self):
"""Test failure on nonexistent volume type specified by ID."""
body = {'volume': {'host': 'host_ok',
'ref': 'fake_ref',
'volume_type':
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'}}
res = self._get_resp(body)
self.assertEqual(res.status_int, 404, res)
pass
def test_manage_volume_bad_volume_type_by_name(self):
"""Test failure on nonexistent volume type specified by name."""
body = {'volume': {'host': 'host_ok',
'ref': 'fake_ref',
'volume_type': 'bad_fakevt'}}
res = self._get_resp(body)
self.assertEqual(res.status_int, 404, res)
pass
|
apache-2.0
|
kkuunnddaannkk/vispy
|
vispy/gloo/buffer.py
|
21
|
16293
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from os import path as op
from traceback import extract_stack, format_list
import weakref
from . globject import GLObject
from ..util import logger
from ..ext.six import string_types
# ------------------------------------------------------------ Buffer class ---
class Buffer(GLObject):
""" Generic GPU buffer.
A generic buffer is an interface used to upload data to a GPU array buffer
(ARRAY_BUFFER or ELEMENT_ARRAY_BUFFER). It keeps track of
buffer size but does not have any CPU storage. You can consider it as
write-only.
The `set_data` method is a deferred operation: you can call it even if an OpenGL
context is not available. The `update` function is responsible to upload
pending data to GPU memory and requires an active GL context.
The Buffer class only deals with data in terms of bytes; it is not
aware of data type or element size.
Parameters
----------
data : ndarray | None
Buffer data.
nbytes : int | None
Buffer byte size.
"""
def __init__(self, data=None, nbytes=None):
GLObject.__init__(self)
self._views = [] # Views on this buffer (stored using weakrefs)
self._valid = True # To invalidate buffer views
self._nbytes = 0 # Buffer size in bytes, set in resize_bytes()
# Set data
if data is not None:
if nbytes is not None:
raise ValueError("Cannot specify both data and nbytes.")
self.set_data(data, copy=False)
elif nbytes is not None:
self.resize_bytes(nbytes)
@property
def nbytes(self):
""" Buffer size in bytes """
return self._nbytes
def set_subdata(self, data, offset=0, copy=False):
""" Set a sub-region of the buffer (deferred operation).
Parameters
----------
data : ndarray
Data to be uploaded
offset: int
Offset in buffer where to start copying data (in bytes)
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
"""
data = np.array(data, copy=copy)
nbytes = data.nbytes
if offset < 0:
raise ValueError("Offset must be positive")
elif (offset + nbytes) > self._nbytes:
raise ValueError("Data does not fit into buffer")
# If the whole buffer is to be written, we clear any pending data
# (because they will be overwritten anyway)
if nbytes == self._nbytes and offset == 0:
self._glir.command('SIZE', self._id, nbytes)
self._glir.command('DATA', self._id, offset, data)
def set_data(self, data, copy=False):
""" Set data in the buffer (deferred operation).
This completely resets the size and contents of the buffer.
Parameters
----------
data : ndarray
Data to be uploaded
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
"""
data = np.array(data, copy=copy)
nbytes = data.nbytes
if nbytes != self._nbytes:
self.resize_bytes(nbytes)
else:
# Use SIZE to discard any previous data setting
self._glir.command('SIZE', self._id, nbytes)
if nbytes: # Only set data if there *is* data
self._glir.command('DATA', self._id, 0, data)
def resize_bytes(self, size):
""" Resize this buffer (deferred operation).
Parameters
----------
size : int
New buffer size in bytes.
"""
self._nbytes = size
self._glir.command('SIZE', self._id, size)
# Invalidate any view on this buffer
for view in self._views:
if view() is not None:
view()._valid = False
self._views = []
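# Illustrative sketch (assumed usage, not from the vispy docs): because every
# operation is deferred, a Buffer can be populated before a GL context exists;
# the queued GLIR 'SIZE'/'DATA' commands are only flushed once a context is
# available, e.g.
#   buf = Buffer(nbytes=12)                  # queues a 'SIZE' command
#   buf.set_data(np.zeros(3, np.float32))    # queues a 12-byte 'DATA' upload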
# -------------------------------------------------------- DataBuffer class ---
class DataBuffer(Buffer):
""" GPU data buffer that is aware of data type and elements size
Parameters
----------
data : ndarray | None
Buffer data.
"""
def __init__(self, data=None):
self._size = 0 # number of elements in buffer, set in resize_bytes()
self._dtype = None
self._stride = 0
self._itemsize = 0
self._last_dim = None
Buffer.__init__(self, data)
def _prepare_data(self, data):
# Can be overridden by subclasses
if not isinstance(data, np.ndarray):
raise TypeError("DataBuffer data must be numpy array.")
return data
def set_subdata(self, data, offset=0, copy=False, **kwargs):
""" Set a sub-region of the buffer (deferred operation).
Parameters
----------
data : ndarray
Data to be uploaded
offset: int
Offset in buffer where to start copying data (in bytes)
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
**kwargs : dict
Additional keyword arguments.
"""
data = self._prepare_data(data, **kwargs)
offset = offset * self.itemsize
Buffer.set_subdata(self, data=data, offset=offset, copy=copy)
def set_data(self, data, copy=False, **kwargs):
""" Set data (deferred operation)
Parameters
----------
data : ndarray
Data to be uploaded
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
**kwargs : dict
Additional arguments.
"""
data = self._prepare_data(data, **kwargs)
self._dtype = data.dtype
self._stride = data.strides[-1]
self._itemsize = self._dtype.itemsize
Buffer.set_data(self, data=data, copy=copy)
@property
def dtype(self):
""" Buffer dtype """
return self._dtype
@property
def offset(self):
""" Buffer offset (in bytes) relative to base """
return 0
@property
def stride(self):
""" Stride of data in memory """
return self._stride
@property
def size(self):
""" Number of elements in the buffer """
return self._size
@property
def itemsize(self):
""" The total number of bytes required to store the array data """
return self._itemsize
@property
def glsl_type(self):
""" GLSL declaration strings required for a variable to hold this data.
"""
if self.dtype is None:
return None
dtshape = self.dtype[0].shape
n = dtshape[0] if dtshape else 1
if n > 1:
dtype = 'vec%d' % n
else:
dtype = 'float' if 'f' in self.dtype[0].base.kind else 'int'
return 'attribute', dtype
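# Illustrative note (assumed from the logic above): for data prepared as a
# one-field structured view such as [('f0', np.float32, 3)] (e.g. an (N, 3)
# position array), glsl_type returns ('attribute', 'vec3'); a scalar float
# field yields ('attribute', 'float').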
def resize_bytes(self, size):
""" Resize the buffer (in-place, deferred operation)
Parameters
----------
size : integer
New buffer size in bytes
Notes
-----
This clears any pending operations.
"""
Buffer.resize_bytes(self, size)
self._size = size // self.itemsize
def __getitem__(self, key):
""" Create a view on this buffer. """
view = DataBufferView(self, key)
self._views.append(weakref.ref(view))
return view
def __setitem__(self, key, data):
""" Set data (deferred operation) """
# Setting a whole field of the buffer: only allowed if we have CPU
# storage. Note this case (key is string) only happens with the base buffer
if isinstance(key, string_types):
raise ValueError("Cannot set non-contiguous data on buffer")
# Setting one or several elements
elif isinstance(key, int):
if key < 0:
key += self.size
if key < 0 or key > self.size:
raise IndexError("Buffer assignment index out of range")
start, stop, step = key, key + 1, 1
elif isinstance(key, slice):
start, stop, step = key.indices(self.size)
if stop < start:
start, stop = stop, start
elif key == Ellipsis:
start, stop, step = 0, self.size, 1
else:
raise TypeError("Buffer indices must be integers or strings")
# Contiguous update?
if step != 1:
raise ValueError("Cannot set non-contiguous data on buffer")
# Make sure data is an array
if not isinstance(data, np.ndarray):
data = np.array(data, dtype=self.dtype, copy=False)
# Make sure data is big enough
if data.size < stop - start:
data = np.resize(data, stop - start)
elif data.size > stop - start:
raise ValueError('Data too big to fit GPU data.')
# Set data
offset = start # * self.itemsize
self.set_subdata(data=data, offset=offset, copy=True)
def __repr__(self):
return ("<%s size=%s last_dim=%s>" %
(self.__class__.__name__, self.size, self._last_dim))
class DataBufferView(DataBuffer):
""" View on a sub-region of a DataBuffer.
Parameters
----------
base : DataBuffer
The buffer accessed by this view.
key : str, int, slice, or Ellipsis
The index into the base buffer that defines a sub-region of the buffer
to view. String arguments select a single field from multi-field
dtypes, and other allowed types select a subset of rows.
Notes
-----
It is generally not necessary to instantiate this class manually; use
``base_buffer[key]`` instead.
"""
# Note that this class is a bit evil: it is a subclass of GLObject,
# Buffer and DataBuffer, but none of these __init__'s are called ...
def __init__(self, base, key):
# Note how this never runs the super's __init__,
# all attributes must thus be set here ...
self._base = base
self._key = key
self._stride = base.stride
if isinstance(key, string_types):
self._dtype = base.dtype[key]
self._offset = base.dtype.fields[key][1]
self._nbytes = base.size * self._dtype.itemsize
self._size = base.size
self._itemsize = self._dtype.itemsize
return
if isinstance(key, int):
if key < 0:
key += base.size
if key < 0 or key > base.size:
raise IndexError("Buffer assignment index out of range")
start, stop, step = key, key + 1, 1
elif isinstance(key, slice):
start, stop, step = key.indices(base.size)
if stop < start:
start, stop = stop, start
elif key == Ellipsis:
start, stop, step = 0, base.size, 1
else:
raise TypeError("Buffer indices must be integers or strings")
if step != 1:
raise ValueError("Cannot access non-contiguous data")
self._itemsize = base.itemsize
self._offset = start * self.itemsize
self._size = stop - start
self._dtype = base.dtype
self._nbytes = self.size * self.itemsize
@property
def glir(self):
return self._base.glir
@property
def id(self):
return self._base.id
@property
def _last_dim(self):
return self._base._last_dim
def set_subdata(self, data, offset=0, copy=False, **kwargs):
raise RuntimeError("Cannot set data on buffer view.")
def set_data(self, data, copy=False, **kwargs):
raise RuntimeError("Cannot set data on buffer view.")
@property
def offset(self):
""" Buffer offset (in bytes) relative to base """
return self._offset
@property
def base(self):
"""Buffer base if this buffer is a view on another buffer. """
return self._base
def resize_bytes(self, size):
raise RuntimeError("Cannot resize buffer view.")
def __getitem__(self, key):
raise RuntimeError("Can only access data from a base buffer")
def __setitem__(self, key, data):
raise RuntimeError("Cannot set data on Buffer view")
def __repr__(self):
return ("<DataBufferView on %r at offset=%d size=%d>" %
(self.base, self.offset, self.size))
# ------------------------------------------------------ VertexBuffer class ---
class VertexBuffer(DataBuffer):
""" Buffer for vertex attribute data
Parameters
----------
data : ndarray
Buffer data (optional)
"""
_GLIR_TYPE = 'VertexBuffer'
def _prepare_data(self, data, convert=False):
# Build a structured view of the data if:
# -> it is not already a structured array
# -> shape if 1-D or last dimension is 1,2,3 or 4
if isinstance(data, list):
data = np.array(data, dtype=np.float32)
if not isinstance(data, np.ndarray):
raise ValueError('Data must be a ndarray (got %s)' % type(data))
if data.dtype.isbuiltin:
if convert is True:
data = data.astype(np.float32)
if data.dtype in (np.float64, np.int64):
raise TypeError('data must be 32-bit not %s'
% data.dtype)
c = data.shape[-1] if data.ndim > 1 else 1
if c in [2, 3, 4]:
if not data.flags['C_CONTIGUOUS']:
logger.warning('Copying discontiguous data for struct '
'dtype:\n%s' % _last_stack_str())
data = data.copy()
else:
c = 1
if self._last_dim and c != self._last_dim:
raise ValueError('Last dimension should be %s not %s'
% (self._last_dim, c))
data = data.view(dtype=[('f0', data.dtype.base, c)])
self._last_dim = c
return data
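# Illustrative sketch (hypothetical data): an (N, 3) float32 array is wrapped in
# a one-field structured view so the GLSL side sees a vec3 attribute, e.g.
#   vbo = VertexBuffer(np.zeros((10, 3), np.float32))
#   vbo.dtype      # dtype([('f0', '<f4', (3,))]) (exact repr depends on numpy)
#   vbo.glsl_type  # ('attribute', 'vec3')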
def _last_stack_str():
"""Print stack trace from call that didn't originate from here"""
stack = extract_stack()
for s in stack[::-1]:
if op.join('vispy', 'gloo', 'buffer.py') not in s[0]:
break
return format_list([s])[0]
# ------------------------------------------------------- IndexBuffer class ---
class IndexBuffer(DataBuffer):
""" Buffer for index data
Parameters
----------
data : ndarray | None
Buffer data.
"""
_GLIR_TYPE = 'IndexBuffer'
def __init__(self, data=None):
DataBuffer.__init__(self, data)
self._last_dim = 1
def _prepare_data(self, data, convert=False):
if isinstance(data, list):
data = np.array(data, dtype=np.uint32)
if not isinstance(data, np.ndarray):
raise ValueError('Data must be a ndarray (got %s)' % type(data))
if not data.dtype.isbuiltin:
raise TypeError("Element buffer dtype cannot be structured")
else:
if convert:
if data.dtype != np.uint32:
data = data.astype(np.uint32)
else:
if data.dtype not in [np.uint32, np.uint16, np.uint8]:
raise TypeError("Invalid dtype for IndexBuffer: %r" %
data.dtype)
return data
|
bsd-3-clause
|
ahewer/ematoblender
|
ematoblender/scripts/ema_blender/ema_bpy/bpy_static_video.py
|
3
|
3208
|
__author__ = 'Kristy'
'''Create the context (UV texture of an arbitrary image on a plane) so that a video texture can be displayed in BGE.
This texture uses Alpha Add, so that the black of the ultrasound is not shown.
Inspired by:
http://www.tutorialsforblender3d.com/Blender_GameEngine/Alpha/Alpha_GLSL_3.html
http://pymove3d.sudile.com/stations/blender-basics/h_material/hi_material_uv_mapping.html
https://www.youtube.com/watch?v=jgAippm3QXw
'''
import bpy
import os
from .bpy_workspace import set_workspace_properties, postfn_gamemaster_reset_decorator
from ematoblender.scripts.ema_shared import properties as pps
@postfn_gamemaster_reset_decorator
def create_video_plane(alpha='ADD', planename='UltrasoundPlane', placeholderfile='./images/black.jpg'):
"""Create a plane at the cursor location that nominally shows the placeholderfile, but really is a video updated by name.
Alpha controls whether black parts are shown as transparent (ADD makes black transparent, so the plane is effectively invisible when no video is playing if a black placeholder image is used).
Set alpha as 'OPAQUE' to show black images/video as such in the GE."""
print('ADDING VIDEO PLANE WITH INITIAL IMAGE: {}'.format(placeholderfile))
set_workspace_properties() # put into game mode
# add a plane called UltrasoundPlane
bpy.ops.mesh.primitive_plane_add()
plane = bpy.context.object
plane.name = planename
# add a material to the plane called 'Screen'
bpy.ops.material.new()
screenmat = bpy.data.materials[-1]
screenmat.name = 'Screen'
screenmat.use_shadeless = True
# remove all shadow behaviour
screenmat.use_shadows = False
screenmat.use_cast_shadows = False
screenmat.use_cast_buffer_shadows = False
# apply the material to the plane
plane.data.materials.append(screenmat)
# game settings
screenmat.game_settings.use_backface_culling = True
screenmat.game_settings.alpha_blend = alpha
# if an abspath to the placeholder file is not given, look for it relative to this blend file
if not os.path.isabs(placeholderfile):
fp = os.path.normpath(bpy.path.abspath('//'+placeholderfile))
else:
fp = placeholderfile
_, placeholdername = os.path.split(fp)
print('Initialising a blank video texture called: {}\n'.format(fp))
bpy.ops.image.open(filepath=fp)
# Add any texture
mytex = bpy.data.textures.new('holdertex', type="IMAGE")
image = bpy.data.images[placeholdername]
mytex.image = image
# connect texture and material
slot = screenmat.texture_slots.add()
screenmat.active_texture = mytex
slot.texture_coords = 'UV'
slot.mapping = "FLAT"
# put the plane in edit mode, project the image onto the plane (UV coords)
bpy.ops.object.mode_set(mode='EDIT')
first_context = bpy.context.area.type
bpy.context.area.type = 'IMAGE_EDITOR'
for area in bpy.context.screen.areas:
if area.type == 'IMAGE_EDITOR':
bpy.ops.image.open(filepath=fp)
bpy.context.area.type = first_context
bpy.ops.object.mode_set(mode='OBJECT')
if __name__ == "__main__":
create_video_plane(alpha='ADD', planename='UltrasoundPlane', placeholderfile='./images/black.jpg')
|
gpl-3.0
|
saurabh6790/pow-lib
|
wnf.py
|
22
|
25832
|
#!/usr/bin/env python2.7
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import sys
if __name__=="__main__":
sys.path = [".", "lib", "app"] + sys.path
import webnotes
def main():
parsed_args = webnotes._dict(vars(setup_parser()))
fn = get_function(parsed_args)
if parsed_args.get("site")=="all":
for site in get_sites():
args = parsed_args.copy()
args["site"] = site
run(fn, args)
else:
run(fn, parsed_args)
def cmd(fn):
def new_fn(*args, **kwargs):
import inspect
fnargs, varargs, varkw, defaults = inspect.getargspec(fn)
new_kwargs = {}
for a in fnargs:
if a in kwargs:
new_kwargs[a] = kwargs.get(a)
return fn(*args, **new_kwargs)
return new_fn
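# Illustrative note (assumed behaviour, hypothetical invocation): @cmd lets run()
# pass the whole parsed-args dict to any command function; new_fn silently drops
# every key the target does not accept. For a hypothetical `wnf.py --backup
# --site mysite`, run() ends up calling backup() with only the keywords its
# signature declares (site, with_files, verbose, ...) and the unrelated argparse
# entries are discarded.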
def run(fn, args):
if isinstance(args.get(fn), (list, tuple)):
out = globals().get(fn)(*args.get(fn), **args)
else:
out = globals().get(fn)(**args)
return out
def get_function(args):
for fn, val in args.items():
if (val or isinstance(val, list)) and globals().get(fn):
return fn
def get_sites():
import os
import conf
return [site for site in os.listdir(conf.sites_dir)
if not os.path.islink(os.path.join(conf.sites_dir, site))
and os.path.isdir(os.path.join(conf.sites_dir, site))]
def setup_parser():
import argparse
parser = argparse.ArgumentParser(description="Run webnotes utility functions")
setup_install(parser)
setup_utilities(parser)
setup_translation(parser)
setup_git(parser)
# common
parser.add_argument("-f", "--force", default=False, action="store_true",
help="Force execution where applicable (look for [-f] in help)")
parser.add_argument("--quiet", default=True, action="store_false", dest="verbose",
help="Don't show verbose output where applicable")
parser.add_argument("--site", nargs="?", metavar="SITE-NAME or all",
help="Run for a particular site")
parser.add_argument("--plugin", nargs="?", metavar="PLUGIN-NAME",
help="Run for a particular plugin")
return parser.parse_args()
def setup_install(parser):
parser.add_argument("--install", metavar="DB-NAME", nargs=1,
help="Install a new app")
parser.add_argument("--root-password", nargs=1,
help="Root password for new app")
parser.add_argument("--reinstall", default=False, action="store_true",
help="Install a fresh app in db_name specified in conf.py")
parser.add_argument("--restore", metavar=("DB-NAME", "SQL-FILE"), nargs=2,
help="Restore from an sql file")
parser.add_argument("--install_fixtures", default=False, action="store_true",
help="(Re)Install install-fixtures from app/startup/install_fixtures")
parser.add_argument("--make_demo", default=False, action="store_true",
help="Install demo in demo_db_name specified in conf.py")
parser.add_argument("--make_demo_fresh", default=False, action="store_true",
help="(Re)Install demo in demo_db_name specified in conf.py")
parser.add_argument("--add_system_manager", nargs="+",
metavar=("EMAIL", "[FIRST-NAME] [LAST-NAME]"), help="Add a user with all roles")
def setup_utilities(parser):
# update
parser.add_argument("-u", "--update", nargs="*", metavar=("REMOTE", "BRANCH"),
help="Perform git pull, run patches, sync schema and rebuild files/translations")
parser.add_argument("--reload_gunicorn", default=False, action="store_true", help="reload gunicorn on update")
parser.add_argument("--patch", nargs=1, metavar="PATCH-MODULE",
help="Run a particular patch [-f]")
parser.add_argument("-l", "--latest", default=False, action="store_true",
help="Run patches, sync schema and rebuild files/translations")
parser.add_argument("--sync_all", default=False, action="store_true",
help="Reload all doctypes, pages, etc. using txt files [-f]")
parser.add_argument("--update_all_sites", nargs="*", metavar=("REMOTE", "BRANCH"),
help="Perform git pull, run patches, sync schema and rebuild files/translations")
parser.add_argument("--reload_doc", nargs=3,
metavar=('"MODULE"', '"DOCTYPE"', '"DOCNAME"'))
# build
parser.add_argument("-b", "--build", default=False, action="store_true",
help="Minify + concatenate JS and CSS files, build translations")
parser.add_argument("-w", "--watch", default=False, action="store_true",
help="Watch and concatenate JS and CSS files as and when they change")
# misc
parser.add_argument("--backup", default=False, action="store_true",
help="Take backup of database in backup folder [--with_files]")
parser.add_argument("--move", default=False, action="store_true",
help="Move site to different directory defined by --dest_dir")
parser.add_argument("--dest_dir", nargs=1, metavar="DEST-DIR",
help="Move site to different directory")
parser.add_argument("--with_files", default=False, action="store_true",
help="Also take backup of files")
parser.add_argument("--domain", nargs="*",
help="Get or set domain in Website Settings")
parser.add_argument("--make_conf", nargs="*", metavar=("DB-NAME", "DB-PASSWORD"),
help="Create new conf.py file")
parser.add_argument("--make_custom_server_script", nargs=1, metavar="DOCTYPE",
help="Create new conf.py file")
parser.add_argument("--set_admin_password", metavar='ADMIN-PASSWORD', nargs=1,
help="Set administrator password")
parser.add_argument("--mysql", action="store_true", help="get mysql shell for a site")
parser.add_argument("--serve", action="store_true", help="Run development server")
parser.add_argument("--profile", action="store_true", help="enable profiling in development server")
parser.add_argument("--smtp", action="store_true", help="Run smtp debug server",
dest="smtp_debug_server")
parser.add_argument("--python", action="store_true", help="get python shell for a site")
parser.add_argument("--ipython", action="store_true", help="get ipython shell for a site")
parser.add_argument("--get_site_status", action="store_true", help="Get site details")
parser.add_argument("--update_site_config", nargs=1,
metavar="SITE-CONFIG-JSON",
help="Update site_config.json for a given --site")
parser.add_argument("--port", default=8000, type=int, help="port for development server")
# clear
parser.add_argument("--clear_web", default=False, action="store_true",
help="Clear website cache")
parser.add_argument("--build_sitemap", default=False, action="store_true",
help="Build Website Sitemap")
parser.add_argument("--rebuild_sitemap", default=False, action="store_true",
help="Rebuild Website Sitemap")
parser.add_argument("--clear_cache", default=False, action="store_true",
help="Clear cache, doctype cache and defaults")
parser.add_argument("--reset_perms", default=False, action="store_true",
help="Reset permissions for all doctypes")
# scheduler
parser.add_argument("--run_scheduler", default=False, action="store_true",
help="Trigger scheduler")
parser.add_argument("--run_scheduler_event", nargs=1,
metavar="all | daily | weekly | monthly",
help="Run a scheduler event")
# replace
parser.add_argument("--replace", nargs=3,
metavar=("SEARCH-REGEX", "REPLACE-BY", "FILE-EXTN"),
help="Multi-file search-replace [-f]")
# import/export
parser.add_argument("--export_doc", nargs=2, metavar=('"DOCTYPE"', '"DOCNAME"'))
parser.add_argument("--export_doclist", nargs=3, metavar=("DOCTYPE", "NAME", "PATH"),
help="""Export doclist as json to the given path, use '-' as name for Singles.""")
parser.add_argument("--export_csv", nargs=2, metavar=("DOCTYPE", "PATH"),
help="""Dump DocType as csv""")
parser.add_argument("--import_doclist", nargs=1, metavar="PATH",
help="""Import (insert/update) doclist. If the argument is a directory, all files ending with .json are imported""")
def setup_git(parser):
parser.add_argument("--pull", nargs="*", metavar=("REMOTE", "BRANCH"),
help="Run git pull for both repositories")
parser.add_argument("-p", "--push", nargs="*", metavar=("REMOTE", "BRANCH"),
help="Run git push for both repositories")
parser.add_argument("--status", default=False, action="store_true",
help="Run git status for both repositories")
parser.add_argument("--commit", nargs=1, metavar="COMMIT-MSG",
help="Run git commit COMMIT-MSG for both repositories")
parser.add_argument("--checkout", nargs=1, metavar="BRANCH",
help="Run git checkout BRANCH for both repositories")
parser.add_argument("--git", nargs="*", metavar="OPTIONS",
help="Run git command for both repositories")
parser.add_argument("--bump", metavar=("REPO", "VERSION-TYPE"), nargs=2,
help="Bump project version")
def setup_translation(parser):
parser.add_argument("--build_message_files", default=False, action="store_true",
help="Build message files for translation")
parser.add_argument("--export_messages", nargs=2, metavar=("LANG-CODE", "FILENAME"),
help="""Export all messages for a language to translation in a csv file.
Example, lib/wnf.py --export_messages hi hindi.csv""")
parser.add_argument("--import_messages", nargs=2, metavar=("LANG-CODE", "FILENAME"),
help="""Import messages for a language and make language files.
Example, lib/wnf.py --import_messages hi hindi.csv""")
parser.add_argument("--google_translate", nargs=3,
metavar=("LANG-CODE", "INFILE", "OUTFILE"),
help="Auto translate using Google Translate API")
parser.add_argument("--translate", nargs=1, metavar="LANG-CODE",
help="""Rebuild translation for the given langauge and
use Google Translate to tranlate untranslated messages. use "all" """)
# methods
# install
@cmd
def install(db_name, source_sql=None, site=None, verbose=True, force=False, root_password=None, site_config=None, admin_password='admin'):
from webnotes.install_lib.install import Installer
inst = Installer('root', db_name=db_name, site=site, root_password=root_password, site_config=site_config)
inst.install(db_name, source_sql=source_sql, verbose=verbose, force=force, admin_password=admin_password)
webnotes.destroy()
@cmd
def reinstall(site=None, verbose=True):
webnotes.init(site=site)
install(webnotes.conf.db_name, site=site, verbose=verbose, force=True)
@cmd
def restore(db_name, source_sql, site=None, verbose=True, force=False):
install(db_name, source_sql, site=site, verbose=verbose, force=force)
@cmd
def install_fixtures(site=None):
webnotes.init(site=site)
from webnotes.install_lib.install import install_fixtures
install_fixtures()
webnotes.destroy()
@cmd
def add_system_manager(email, first_name=None, last_name=None, site=None):
webnotes.connect(site=site)
webnotes.profile.add_system_manager(email, first_name, last_name)
webnotes.conn.commit()
webnotes.destroy()
@cmd
def make_demo(site=None):
import utilities.demo.make_demo
webnotes.init(site=site)
utilities.demo.make_demo.make()
webnotes.destroy()
@cmd
def make_demo_fresh(site=None):
import utilities.demo.make_demo
webnotes.init(site=site)
utilities.demo.make_demo.make(reset=True)
webnotes.destroy()
# utilities
@cmd
def update(remote=None, branch=None, site=None, reload_gunicorn=False):
pull(remote=remote, branch=branch, site=site)
# maybe there are new framework changes, any consequences?
reload(webnotes)
if not site: build()
latest(site=site)
if reload_gunicorn:
import subprocess
subprocess.check_output("killall -HUP gunicorn".split())
@cmd
def latest(site=None, verbose=True):
import webnotes.modules.patch_handler
import webnotes.model.sync
import webnotes.plugins
from website.doctype.website_sitemap_config.website_sitemap_config import build_website_sitemap_config
webnotes.connect(site=site)
try:
# run patches
webnotes.local.patch_log_list = []
webnotes.modules.patch_handler.run_all()
if verbose:
print "\n".join(webnotes.local.patch_log_list)
# sync
webnotes.model.sync.sync_all()
# remove __init__.py from plugins
webnotes.plugins.remove_init_files()
# build website config if any changes in templates etc.
build_website_sitemap_config()
except webnotes.modules.patch_handler.PatchError, e:
print "\n".join(webnotes.local.patch_log_list)
raise
finally:
webnotes.destroy()
@cmd
def sync_all(site=None, force=False):
import webnotes.model.sync
webnotes.connect(site=site)
webnotes.model.sync.sync_all(force=force)
webnotes.destroy()
@cmd
def patch(patch_module, site=None, force=False):
import webnotes.modules.patch_handler
webnotes.connect(site=site)
webnotes.local.patch_log_list = []
webnotes.modules.patch_handler.run_single(patch_module, force=force)
print "\n".join(webnotes.local.patch_log_list)
webnotes.destroy()
@cmd
def update_all_sites(remote=None, branch=None, verbose=True):
pull(remote, branch)
# maybe there are new framework changes, any consequences?
reload(webnotes)
build()
for site in get_sites():
latest(site=site, verbose=verbose)
@cmd
def reload_doc(module, doctype, docname, plugin=None, site=None, force=False):
webnotes.connect(site=site)
webnotes.reload_doc(module, doctype, docname, plugin=plugin, force=force)
webnotes.conn.commit()
webnotes.destroy()
@cmd
def build():
import webnotes.build
webnotes.build.bundle(False)
@cmd
def watch():
import webnotes.build
webnotes.build.watch(True)
@cmd
def backup(site=None, with_files=False, verbose=True, backup_path_db=None, backup_path_files=None):
from webnotes.utils.backups import scheduled_backup
webnotes.connect(site=site)
odb = scheduled_backup(ignore_files=not with_files, backup_path_db=backup_path_db, backup_path_files=backup_path_files)
if verbose:
from webnotes.utils import now
print "database backup taken -", odb.backup_path_db, "- on", now()
if with_files:
print "files backup taken -", odb.backup_path_files, "- on", now()
webnotes.destroy()
return odb
@cmd
def move(site=None, dest_dir=None):
import os
if not dest_dir:
raise Exception, "--dest_dir is required for --move"
if not os.path.isdir(dest_dir):
raise Exception, "destination is not a directory or does not exist"
webnotes.init(site=site)
old_path = webnotes.utils.get_site_path()
new_path = os.path.join(dest_dir, site)
# check if site dump of same name already exists
site_dump_exists = True
count = 0
while site_dump_exists:
final_new_path = new_path + (count and str(count) or "")
site_dump_exists = os.path.exists(final_new_path)
count = int(count or 0) + 1
os.rename(old_path, final_new_path)
webnotes.destroy()
return os.path.basename(final_new_path)
@cmd
def domain(host_url=None, site=None):
webnotes.connect(site=site)
if host_url:
webnotes.conn.set_value("Website Settings", None, "subdomain", host_url)
webnotes.conn.commit()
else:
print webnotes.conn.get_value("Website Settings", None, "subdomain")
webnotes.destroy()
@cmd
def make_conf(db_name=None, db_password=None, site=None, site_config=None):
from webnotes.install_lib.install import make_conf
make_conf(db_name=db_name, db_password=db_password, site=site, site_config=site_config)
@cmd
def make_custom_server_script(doctype, site=None):
from core.doctype.custom_script.custom_script import make_custom_server_script_file
webnotes.connect(site=site)
make_custom_server_script_file(doctype)
webnotes.destroy()
# clear
@cmd
def clear_cache(site=None):
import webnotes.sessions
webnotes.connect(site=site)
webnotes.sessions.clear_cache()
webnotes.destroy()
@cmd
def clear_web(site=None):
import webnotes.webutils
webnotes.connect(site=site)
from website.doctype.website_sitemap_config.website_sitemap_config import build_website_sitemap_config
build_website_sitemap_config()
webnotes.webutils.clear_cache()
webnotes.destroy()
@cmd
def build_sitemap(site=None):
from website.doctype.website_sitemap_config.website_sitemap_config import build_website_sitemap_config
webnotes.connect(site=site)
build_website_sitemap_config()
webnotes.destroy()
@cmd
def rebuild_sitemap(site=None):
from website.doctype.website_sitemap_config.website_sitemap_config import rebuild_website_sitemap_config
webnotes.connect(site=site)
rebuild_website_sitemap_config()
webnotes.destroy()
@cmd
def reset_perms(site=None):
webnotes.connect(site=site)
for d in webnotes.conn.sql_list("""select name from `tabDocType`
where ifnull(istable, 0)=0 and ifnull(custom, 0)=0"""):
webnotes.clear_cache(doctype=d)
webnotes.reset_perms(d)
webnotes.destroy()
# scheduler
@cmd
def run_scheduler(site=None):
from webnotes.utils.file_lock import create_lock, delete_lock
import webnotes.utils.scheduler
webnotes.init(site=site)
if create_lock('scheduler'):
webnotes.connect(site=site)
print webnotes.utils.scheduler.execute()
delete_lock('scheduler')
webnotes.destroy()
@cmd
def run_scheduler_event(event, site=None):
import webnotes.utils.scheduler
webnotes.connect(site=site)
print webnotes.utils.scheduler.trigger("execute_" + event)
webnotes.destroy()
# replace
@cmd
def replace(search_regex, replacement, extn, force=False):
print search_regex, replacement, extn
replace_code('.', search_regex, replacement, extn, force=force)
# import/export
@cmd
def export_doc(doctype, docname, site=None):
import webnotes.modules
webnotes.connect(site=site)
webnotes.modules.export_doc(doctype, docname)
webnotes.destroy()
@cmd
def export_doclist(doctype, name, path, site=None):
from core.page.data_import_tool import data_import_tool
webnotes.connect(site=site)
data_import_tool.export_json(doctype, name, path)
webnotes.destroy()
@cmd
def export_csv(doctype, path, site=None):
from core.page.data_import_tool import data_import_tool
webnotes.connect(site=site)
data_import_tool.export_csv(doctype, path)
webnotes.destroy()
@cmd
def import_doclist(path, site=None, force=False):
from core.page.data_import_tool import data_import_tool
webnotes.connect(site=site)
data_import_tool.import_doclist(path, overwrite=force)
webnotes.destroy()
# translation
@cmd
def build_message_files(site=None):
import webnotes.translate
webnotes.connect(site=site)
webnotes.translate.build_message_files()
webnotes.destroy()
@cmd
def export_messages(lang, outfile, site=None):
import webnotes.translate
webnotes.connect(site=site)
webnotes.translate.export_messages(lang, outfile)
webnotes.destroy()
@cmd
def import_messages(lang, infile, site=None):
import webnotes.translate
webnotes.connect(site=site)
webnotes.translate.import_messages(lang, infile)
webnotes.destroy()
@cmd
def google_translate(lang, infile, outfile, site=None):
import webnotes.translate
webnotes.connect(site=site)
webnotes.translate.google_translate(lang, infile, outfile)
webnotes.destroy()
@cmd
def translate(lang, site=None):
import webnotes.translate
webnotes.connect(site=site)
webnotes.translate.translate(lang)
webnotes.destroy()
# git
@cmd
def git(param):
if isinstance(param, (list, tuple)):
param = " ".join(param)
import os
os.system("""cd lib && git %s""" % param)
os.system("""cd app && git %s""" % param)
def get_remote_and_branch(remote=None, branch=None):
if not (remote and branch):
webnotes.init()
if not webnotes.conf.branch:
raise Exception("Please specify remote and branch")
remote = remote or "origin"
branch = branch or webnotes.conf.branch
webnotes.destroy()
return remote, branch
@cmd
def pull(remote=None, branch=None):
remote, branch = get_remote_and_branch(remote, branch)
git(("pull", remote, branch))
@cmd
def push(remote=None, branch=None):
remote, branch = get_remote_and_branch(remote, branch)
git(("push", remote, branch))
@cmd
def status():
git("status")
@cmd
def commit(message):
git("""commit -a -m "%s" """ % message.replace('"', '\"'))
@cmd
def checkout(branch):
git(("checkout", branch))
@cmd
def set_admin_password(admin_password, site=None):
import webnotes
webnotes.connect(site=site)
webnotes.conn.sql("""update __Auth set `password`=password(%s)
where user='Administrator'""", (admin_password,))
webnotes.conn.commit()
webnotes.destroy()
@cmd
def mysql(site=None):
import webnotes
import commands, os
msq = commands.getoutput('which mysql')
webnotes.init(site=site)
os.execv(msq, [msq, '-u', webnotes.conf.db_name, '-p'+webnotes.conf.db_password, webnotes.conf.db_name, '-h', webnotes.conf.db_host or "localhost", "-A"])
webnotes.destroy()
@cmd
def python(site=None):
import webnotes
import commands, os
python = commands.getoutput('which python')
webnotes.init(site=site)
if site:
os.environ["site"] = site
os.environ["PYTHONSTARTUP"] = os.path.join(os.path.dirname(__file__), "pythonrc.py")
os.execv(python, [python])
webnotes.destroy()
@cmd
def ipython(site=None):
import webnotes
webnotes.connect(site=site)
import IPython
IPython.embed()
@cmd
def smtp_debug_server():
import commands, os
python = commands.getoutput('which python')
os.execv(python, [python, '-m', "smtpd", "-n", "-c", "DebuggingServer", "localhost:25"])
@cmd
def serve(port=8000, profile=False):
import webnotes.app
webnotes.app.serve(port=port, profile=profile)
def replace_code(start, txt1, txt2, extn, search=None, force=False):
"""replace all txt1 by txt2 in files with extension (extn)"""
import webnotes.utils
import os, re
esc = webnotes.utils.make_esc('[]')
if not search: search = esc(txt1)
for wt in os.walk(start, followlinks=1):
for fn in wt[2]:
if fn.split('.')[-1]==extn:
fpath = os.path.join(wt[0], fn)
with open(fpath, 'r') as f:
content = f.read()
if re.search(search, content):
res = search_replace_with_prompt(fpath, txt1, txt2, force)
if res == 'skip':
return 'skip'
def search_replace_with_prompt(fpath, txt1, txt2, force=False):
""" Search and replace all txt1 by txt2 in the file with confirmation"""
from termcolor import colored
with open(fpath, 'r') as f:
content = f.readlines()
tmp = []
for c in content:
if c.find(txt1) != -1:
print fpath
print colored(txt1, 'red').join(c[:-1].split(txt1))
a = ''
if force:
c = c.replace(txt1, txt2)
else:
while a.lower() not in ['y', 'n', 'skip']:
a = raw_input('Do you want to Change [y/n/skip]?')
if a.lower() == 'y':
c = c.replace(txt1, txt2)
elif a.lower() == 'skip':
return 'skip'
tmp.append(c)
with open(fpath, 'w') as f:
f.write(''.join(tmp))
print colored('Updated', 'green')
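# A hedged usage illustration (not part of the original file): calling, e.g.,
# replace_code(".", "webnotes.conn", "frappe.db", "py") would walk every *.py
# file under the current directory and prompt y/n/skip for each matching line,
# while force=True applies the replacement without prompting. The argument
# values shown here are purely hypothetical.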
@cmd
def get_site_status(site=None, verbose=False):
import webnotes
import webnotes.utils
from webnotes.profile import get_system_managers
from core.doctype.profile.profile import get_total_users, get_active_users, \
get_website_users, get_active_website_users
import json
webnotes.connect(site=site)
ret = {
'last_backup_on': webnotes.local.conf.last_backup_on,
'active_users': get_active_users(),
'total_users': get_total_users(),
'active_website_users': get_active_website_users(),
'website_users': get_website_users(),
'system_managers': "\n".join(get_system_managers()),
'default_company': webnotes.conn.get_default("company"),
'disk_usage': webnotes.utils.get_disk_usage(),
'working_directory': webnotes.utils.get_base_path()
}
# country, timezone, industry
control_panel_details = webnotes.conn.get_value("Control Panel", "Control Panel",
["country", "time_zone", "industry"], as_dict=True)
if control_panel_details:
ret.update(control_panel_details)
# basic usage/progress analytics
for doctype in ("Company", "Customer", "Item", "Quotation", "Sales Invoice",
"Journal Voucher", "Stock Ledger Entry"):
key = doctype.lower().replace(" ", "_") + "_exists"
ret[key] = 1 if webnotes.conn.count(doctype) else 0
webnotes.destroy()
if verbose:
print json.dumps(ret, indent=1, sort_keys=True)
return ret
@cmd
def update_site_config(site_config, site, verbose=False):
import json
if isinstance(site_config, basestring):
site_config = json.loads(site_config)
webnotes.init(site=site)
webnotes.conf.site_config.update(site_config)
site_config_path = webnotes.get_conf_path(webnotes.conf.sites_dir, site)
with open(site_config_path, "w") as f:
json.dump(webnotes.conf.site_config, f, indent=1, sort_keys=True)
webnotes.destroy()
@cmd
def bump(repo, bump_type):
import json
assert repo in ['lib', 'app']
assert bump_type in ['minor', 'major', 'patch']
def validate(repo_path):
import git
repo = git.Repo(repo_path)
if repo.active_branch != 'master':
raise Exception, "Current branch not master in {}".format(repo_path)
def bump_version(version, version_type):
import semantic_version
v = semantic_version.Version(version)
if version_type == 'minor':
v.minor += 1
elif version_type == 'major':
v.major += 1
elif version_type == 'patch':
v.patch += 1
return unicode(v)
def add_tag(repo_path, version):
import git
repo = git.Repo(repo_path)
repo.index.add(['config.json'])
repo.index.commit('bumped to version {}'.format(version))
repo.create_tag('v' + version, repo.head)
def update_framework_requirement(version):
with open('app/config.json') as f:
config = json.load(f)
config['requires_framework_version'] = '==' + version
with open('app/config.json', 'w') as f:
json.dump(config, f, indent=1, sort_keys=True)
validate('lib/')
validate('app/')
if repo == 'app':
with open('app/config.json') as f:
config = json.load(f)
new_version = bump_version(config['app_version'], bump_type)
config['app_version'] = new_version
with open('app/config.json', 'w') as f:
json.dump(config, f, indent=1, sort_keys=True)
add_tag('app/', new_version)
elif repo == 'lib':
with open('lib/config.json') as f:
config = json.load(f)
new_version = bump_version(config['framework_version'], bump_type)
config['framework_version'] = new_version
with open('lib/config.json', 'w') as f:
json.dump(config, f, indent=1, sort_keys=True)
add_tag('lib/', new_version)
update_framework_requirement(new_version)
bump('app', bump_type)
if __name__=="__main__":
main()
|
mit
|
Mazecreator/tensorflow
|
tensorflow/python/kernel_tests/reduce_join_op_test.py
|
116
|
14445
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ReduceJoin op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
def _input_array(num_dims):
"""Creates an ndarray where each element is the binary of its linear index.
Args:
num_dims: The number of dimensions to create.
Returns:
An ndarray of shape [2] * num_dims.
"""
formatter = "{:0%db}" % num_dims
strings = [formatter.format(i) for i in xrange(2**num_dims)]
return np.array(strings, dtype="S%d" % num_dims).reshape([2] * num_dims)
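# Hedged illustration (not part of the original test): for num_dims=2 the
# helper above would return the 2x2 array [["00", "01"], ["10", "11"]],
# i.e. each element is the zero-padded binary form of its linear index.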
def _joined_array(num_dims, reduce_dim):
"""Creates an ndarray with the result from reduce_join on input_array.
Args:
num_dims: The number of dimensions of the original input array.
reduce_dim: The dimension to reduce.
Returns:
An ndarray of shape [2] * (num_dims - 1).
"""
formatter = "{:0%db}" % (num_dims - 1)
result = np.zeros(shape=[2] * (num_dims - 1), dtype="S%d" % (2 * num_dims))
flat = result.ravel()
for i in xrange(2**(num_dims - 1)):
dims = formatter.format(i)
flat[i] = "".join([(dims[:reduce_dim] + "%d" + dims[reduce_dim:]) % j
for j in xrange(2)])
return result
class UnicodeTestCase(test.TestCase):
"""Test case with Python3-compatible string comparator."""
def assertAllEqualUnicode(self, truth, actual):
self.assertAllEqual(
np.array(truth).astype("U"), np.array(actual).astype("U"))
class ReduceJoinTestHelperTest(UnicodeTestCase):
"""Tests for helper functions."""
def testInputArray(self):
num_dims = 3
truth = ["{:03b}".format(i) for i in xrange(2**num_dims)]
output_array = _input_array(num_dims).reshape([-1])
self.assertAllEqualUnicode(truth, output_array)
def testJoinedArray(self):
num_dims = 3
truth_dim_zero = [["000100", "001101"], ["010110", "011111"]]
truth_dim_one = [["000010", "001011"], ["100110", "101111"]]
truth_dim_two = [["000001", "010011"], ["100101", "110111"]]
output_array_dim_zero = _joined_array(num_dims, reduce_dim=0)
output_array_dim_one = _joined_array(num_dims, reduce_dim=1)
output_array_dim_two = _joined_array(num_dims, reduce_dim=2)
self.assertAllEqualUnicode(truth_dim_zero, output_array_dim_zero)
self.assertAllEqualUnicode(truth_dim_one, output_array_dim_one)
self.assertAllEqualUnicode(truth_dim_two, output_array_dim_two)
class ReduceJoinTest(UnicodeTestCase):
def _testReduceJoin(self,
input_array,
truth,
truth_shape,
reduction_indices,
keep_dims=False,
separator=""):
"""Compares the output of reduce_join to an expected result.
Args:
input_array: The string input to be joined.
truth: An array or np.array of the expected result.
truth_shape: An array or np.array of the expected shape.
reduction_indices: The indices to reduce over.
keep_dims: Whether or not to retain reduced dimensions.
separator: The separator to use for joining.
"""
with self.test_session():
output = string_ops.reduce_join(
inputs=input_array,
reduction_indices=reduction_indices,
keep_dims=keep_dims,
separator=separator)
output_array = output.eval()
self.assertAllEqualUnicode(truth, output_array)
self.assertAllEqual(truth_shape, output.get_shape())
def _testMultipleReduceJoin(self,
input_array,
reduction_indices,
separator=" "):
"""Tests reduce_join for one input and multiple reduction_indices.
Does so by comparing the output to that from nested reduce_string_joins.
The correctness of single-dimension reduce_join is verified by other
tests below using _testReduceJoin.
Args:
input_array: The input to test.
reduction_indices: The indices to reduce.
separator: The separator to use when joining.
"""
with self.test_session():
output = string_ops.reduce_join(
inputs=input_array,
reduction_indices=reduction_indices,
keep_dims=False,
separator=separator)
output_keep_dims = string_ops.reduce_join(
inputs=input_array,
reduction_indices=reduction_indices,
keep_dims=True,
separator=separator)
truth = input_array
for index in reduction_indices:
truth = string_ops.reduce_join(
inputs=truth,
reduction_indices=index,
keep_dims=True,
separator=separator)
if not reduction_indices:
truth = constant_op.constant(truth)
truth_squeezed = array_ops.squeeze(truth, squeeze_dims=reduction_indices)
output_array = output.eval()
output_keep_dims_array = output_keep_dims.eval()
truth_array = truth.eval()
truth_squeezed_array = truth_squeezed.eval()
self.assertAllEqualUnicode(truth_array, output_keep_dims_array)
self.assertAllEqualUnicode(truth_squeezed_array, output_array)
self.assertAllEqual(truth.get_shape(), output_keep_dims.get_shape())
self.assertAllEqual(truth_squeezed.get_shape(), output.get_shape())
def testRankOne(self):
input_array = ["this", "is", "a", "test"]
truth = "thisisatest"
truth_shape = []
self._testReduceJoin(input_array, truth, truth_shape, reduction_indices=0)
def testRankTwo(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = ["thisplease", "isdo", "anot", "testpanic"]
truth_shape_dim_zero = [4]
truth_dim_one = ["thisisatest", "pleasedonotpanic"]
truth_shape_dim_one = [2]
self._testReduceJoin(
input_array, truth_dim_zero, truth_shape_dim_zero, reduction_indices=0)
self._testReduceJoin(
input_array, truth_dim_one, truth_shape_dim_one, reduction_indices=1)
expected_val = "thisisatestpleasedonotpanic"
expected_shape = None
self._testReduceJoin(
input_array, expected_val, expected_shape, reduction_indices=None)
# When using Tensor for input with reduction_indices=None, shape is known.
expected_val = "thisisatestpleasedonotpanic"
expected_shape = []
self._testReduceJoin(
constant_op.constant(input_array), expected_val,
expected_shape, reduction_indices=None)
# Using [] reduction_indices is a no-op.
expected_val = input_array
expected_shape = [2, 4]
self._testReduceJoin(
input_array, expected_val, expected_shape, reduction_indices=[])
def testRankFive(self):
input_array = _input_array(num_dims=5)
truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
truth_shape = [2] * 4
for i in xrange(5):
self._testReduceJoin(
input_array, truths[i], truth_shape, reduction_indices=i)
def testNegative(self):
input_array = _input_array(num_dims=5)
truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
truth_shape = [2] * 4
for i in xrange(5):
self._testReduceJoin(
input_array, truths[i], truth_shape, reduction_indices=i - 5)
def testSingletonDimension(self):
input_arrays = [
_input_array(num_dims=5).reshape([2] * i + [1] + [2] * (5 - i))
for i in xrange(6)
]
truth = _input_array(num_dims=5)
truth_shape = [2] * 5
for i in xrange(6):
self._testReduceJoin(
input_arrays[i], truth, truth_shape, reduction_indices=i)
def testSeparator(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = ["this please", "is do", "a not", "test panic"]
truth_shape_dim_zero = [4]
truth_dim_one = ["this is a test", "please do not panic"]
truth_shape_dim_one = [2]
self._testReduceJoin(
input_array,
truth_dim_zero,
truth_shape_dim_zero,
reduction_indices=0,
separator=" ")
self._testReduceJoin(
input_array,
truth_dim_one,
truth_shape_dim_one,
reduction_indices=1,
separator=" ")
def testUnknownShape(self):
input_array = [["a"], ["b"]]
truth = ["ab"]
truth_shape = None
with self.test_session():
placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
reduced = string_ops.reduce_join(placeholder, reduction_indices=0)
output_array = reduced.eval(feed_dict={placeholder.name: input_array})
self.assertAllEqualUnicode(truth, output_array)
self.assertAllEqual(truth_shape, reduced.get_shape())
def testUnknownIndices(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = ["thisplease", "isdo", "anot", "testpanic"]
truth_dim_one = ["thisisatest", "pleasedonotpanic"]
truth_shape = None
with self.test_session():
placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
reduced = string_ops.reduce_join(
input_array, reduction_indices=placeholder)
output_array_dim_zero = reduced.eval(feed_dict={placeholder.name: [0]})
output_array_dim_one = reduced.eval(feed_dict={placeholder.name: [1]})
self.assertAllEqualUnicode(truth_dim_zero, output_array_dim_zero)
self.assertAllEqualUnicode(truth_dim_one, output_array_dim_one)
self.assertAllEqual(truth_shape, reduced.get_shape())
def testKeepDims(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = [["thisplease", "isdo", "anot", "testpanic"]]
truth_shape_dim_zero = [1, 4]
truth_dim_one = [["thisisatest"], ["pleasedonotpanic"]]
truth_shape_dim_one = [2, 1]
self._testReduceJoin(
input_array,
truth_dim_zero,
truth_shape_dim_zero,
reduction_indices=0,
keep_dims=True)
self._testReduceJoin(
input_array,
truth_dim_one,
truth_shape_dim_one,
reduction_indices=1,
keep_dims=True)
expected_val = [["thisisatestpleasedonotpanic"]]
expected_shape = [1, 1]
self._testReduceJoin(
constant_op.constant(input_array), expected_val, expected_shape,
keep_dims=True, reduction_indices=None)
# Using [] reduction_indices is a no-op.
expected_val = input_array
expected_shape = [2, 4]
self._testReduceJoin(
input_array, expected_val, expected_shape,
keep_dims=True, reduction_indices=[])
def testMultiIndex(self):
num_dims = 3
input_array = _input_array(num_dims=num_dims)
# Also tests [].
for i in xrange(num_dims + 1):
for permutation in itertools.permutations(xrange(num_dims), i):
self._testMultipleReduceJoin(input_array, reduction_indices=permutation)
def testInvalidReductionIndices(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError, "Invalid reduction dim"):
string_ops.reduce_join(inputs="", reduction_indices=0)
with self.assertRaisesRegexp(ValueError,
"Invalid reduction dimension -3"):
string_ops.reduce_join(inputs=[[""]], reduction_indices=-3)
with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
string_ops.reduce_join(inputs=[[""]], reduction_indices=2)
with self.assertRaisesRegexp(ValueError,
"Invalid reduction dimension -3"):
string_ops.reduce_join(inputs=[[""]], reduction_indices=[0, -3])
with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
string_ops.reduce_join(inputs=[[""]], reduction_indices=[0, 2])
def testZeroDims(self):
with self.test_session():
inputs = np.zeros([0, 1], dtype=str)
# Reduction that drops the dim of size 0.
output = string_ops.reduce_join(inputs=inputs, reduction_indices=0)
self.assertAllEqualUnicode([""], output.eval())
# Reduction that keeps the dim of size 0.
output = string_ops.reduce_join(inputs=inputs, reduction_indices=1)
output_shape = output.eval().shape
self.assertAllEqual([0], output_shape)
def testInvalidArgsUnknownShape(self):
with self.test_session():
placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
index_too_high = string_ops.reduce_join(placeholder, reduction_indices=1)
duplicate_index = string_ops.reduce_join(
placeholder, reduction_indices=[-1, 1])
with self.assertRaisesOpError("Invalid reduction dimension 1"):
index_too_high.eval(feed_dict={placeholder.name: [""]})
with self.assertRaisesOpError("Duplicate reduction dimension 1"):
duplicate_index.eval(feed_dict={placeholder.name: [[""]]})
def testInvalidArgsUnknownIndices(self):
with self.test_session():
placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
reduced = string_ops.reduce_join(
["test", "test2"], reduction_indices=placeholder)
with self.assertRaisesOpError("reduction dimension -2"):
reduced.eval(feed_dict={placeholder.name: -2})
with self.assertRaisesOpError("reduction dimension 2"):
reduced.eval(feed_dict={placeholder.name: 2})
if __name__ == "__main__":
test.main()
|
apache-2.0
|
rajsadho/django
|
django/conf/locale/en_AU/formats.py
|
504
|
2117
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j M Y' # '25 Oct 2006'
TIME_FORMAT = 'P' # '2:30 p.m.'
DATETIME_FORMAT = 'j M Y, P' # '25 Oct 2006, 2:30 p.m.'
YEAR_MONTH_FORMAT = 'F Y' # 'October 2006'
MONTH_DAY_FORMAT = 'j F' # '25 October'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 p.m.'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
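# Hedged usage sketch (not part of this locale file): with this locale active
# and localization enabled, django.utils.formats.date_format(
# datetime.date(2006, 10, 25), "SHORT_DATE_FORMAT") would render "25/10/2006",
# matching SHORT_DATE_FORMAT above.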
|
bsd-3-clause
|
mKeRix/home-assistant
|
tests/components/fan/test_device_action.py
|
15
|
3216
|
"""The tests for Fan device actions."""
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.fan import DOMAIN
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
async def test_get_actions(hass, device_reg, entity_reg):
"""Test we get the expected actions from a fan."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_actions = [
{
"domain": DOMAIN,
"type": "turn_on",
"device_id": device_entry.id,
"entity_id": "fan.test_5678",
},
{
"domain": DOMAIN,
"type": "turn_off",
"device_id": device_entry.id,
"entity_id": "fan.test_5678",
},
]
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert_lists_same(actions, expected_actions)
async def test_action(hass):
"""Test for turn_on and turn_off actions."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "event",
"event_type": "test_event_turn_off",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "fan.entity",
"type": "turn_off",
},
},
{
"trigger": {
"platform": "event",
"event_type": "test_event_turn_on",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "fan.entity",
"type": "turn_on",
},
},
]
},
)
turn_off_calls = async_mock_service(hass, "fan", "turn_off")
turn_on_calls = async_mock_service(hass, "fan", "turn_on")
hass.bus.async_fire("test_event_turn_off")
await hass.async_block_till_done()
assert len(turn_off_calls) == 1
assert len(turn_on_calls) == 0
hass.bus.async_fire("test_event_turn_on")
await hass.async_block_till_done()
assert len(turn_off_calls) == 1
assert len(turn_on_calls) == 1
|
mit
|
dileep-p/ansible-modules-core
|
database/postgresql/postgresql_user.py
|
89
|
25160
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: postgresql_user
short_description: Adds or removes a user (role) from a PostgreSQL database.
description:
- Add or remove PostgreSQL users (roles) from a remote host and, optionally,
grant the users access to an existing database or tables.
- The fundamental function of the module is to create, or delete, roles from
a PostgreSQL cluster. Privilege assignment, or removal, is an optional
step, which works on one database at a time. This allows the module to
be called several times in the same playbook to modify the permissions on
different databases, or to grant permissions to already existing users.
- A user cannot be removed until all of its privileges have been stripped.
In that situation, if the module tries to remove the user it will fail.
To avoid this, the fail_on_user option tells the module to try to remove
the user but, if that is not possible, to keep going; the module will
report whether changes happened and, separately, whether the user was
removed or not.
version_added: "0.6"
options:
name:
description:
- name of the user (role) to add or remove
required: true
default: null
password:
description:
- set the user's password, before 1.4 this was required.
- "When passing an encrypted password, the encrypted parameter must also be true, and it must be generated with the format C('str[\\"md5\\"] + md5[ password + username ]'), resulting in a total of 35 characters. An easy way to do this is: C(echo \\"md5`echo -n \\"verysecretpasswordJOE\\" | md5`\\"). Note that if encrypted is set, the stored password will be hashed whether or not it is pre-encrypted."
required: false
default: null
db:
description:
- name of database where permissions will be granted
required: false
default: null
fail_on_user:
description:
- if C(yes), fail when user can't be removed. Otherwise just log and continue
required: false
default: 'yes'
choices: [ "yes", "no" ]
port:
description:
- Database port to connect to.
required: false
default: 5432
login_user:
description:
- User (role) used to authenticate with PostgreSQL
required: false
default: postgres
login_password:
description:
- Password used to authenticate with PostgreSQL
required: false
default: null
login_host:
description:
- Host running PostgreSQL.
required: false
default: localhost
login_unix_socket:
description:
- Path to a Unix domain socket for local connections
required: false
default: null
priv:
description:
- "PostgreSQL privileges string in the format: C(table:priv1,priv2)"
required: false
default: null
role_attr_flags:
description:
- "PostgreSQL role attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER"
required: false
default: ""
choices: [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB",
"[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ]
state:
description:
- The user (role) state
required: false
default: present
choices: [ "present", "absent" ]
encrypted:
description:
- whether the password is stored hashed in the database. boolean. Passwords can be passed already hashed or unhashed, and postgresql ensures the stored password is hashed when encrypted is set.
required: false
default: false
version_added: '1.4'
expires:
description:
- sets the user's password expiration.
required: false
default: null
version_added: '1.4'
no_password_changes:
description:
- if C(yes), don't inspect database for password changes. Effective when C(pg_authid) is not accessible (such as AWS RDS). Otherwise, make password changes as necessary.
required: false
default: 'no'
choices: [ "yes", "no" ]
version_added: '2.0'
notes:
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module. If
the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host. For Ubuntu-based
systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
- If the passlib library is installed, then passwords that are encrypted
in the DB but passed unencrypted as arguments can be checked for changes.
If the passlib library is not installed, an unencrypted password passed as
an argument cannot be compared with the encrypted value stored in the DB
and will be assumed to have changed.
- If you specify PUBLIC as the user, then the privilege changes will apply
to all users. You may not specify password or role_attr_flags when the
PUBLIC user is specified.
requirements: [ psycopg2 ]
author: "Lorin Hochstein (@lorin)"
'''
EXAMPLES = '''
# Create django user and grant access to database and products table
- postgresql_user: db=acme name=django password=ceec4eif7ya priv=CONNECT/products:ALL
# Create rails user, grant privilege to create other databases and demote rails from super user status
- postgresql_user: name=rails password=secret role_attr_flags=CREATEDB,NOSUPERUSER
# Remove test user privileges from acme
- postgresql_user: db=acme name=test priv=ALL/products:ALL state=absent fail_on_user=no
# Remove test user from test database and the cluster
- postgresql_user: db=test name=test priv=ALL state=absent
# Example privileges string format
INSERT,UPDATE/table:SELECT/anothertable:ALL
# Remove an existing user's password
- postgresql_user: db=test user=test password=NULL
'''
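# Hedged sketch (not part of the original module): the `password`/`encrypted`
# options documented above expect pre-hashed values of the form
# "md5" + md5(password + username). A minimal, purely illustrative helper
# (the name is hypothetical and nothing in the module calls it):
def _example_md5_password(password, username):
    """Return a PostgreSQL-style md5 hash, e.g. for ("verysecretpassword", "JOE")."""
    import hashlib
    return "md5" + hashlib.md5(password + username).hexdigest()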
import re
import itertools
try:
import psycopg2
import psycopg2.extras
except ImportError:
postgresqldb_found = False
else:
postgresqldb_found = True
_flags = ('SUPERUSER', 'CREATEROLE', 'CREATEUSER', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
VALID_FLAGS = frozenset(itertools.chain(_flags, ('NO%s' % f for f in _flags)))
VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
database=frozenset(('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
)
# map to cope with idiosyncrasies of SUPERUSER and LOGIN
PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
CREATEUSER='rolcreateuser', CREATEDB='rolcreatedb',
INHERIT='rolinherit', LOGIN='rolcanlogin',
REPLICATION='rolreplication')
class InvalidFlagsError(Exception):
pass
class InvalidPrivsError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def user_exists(cursor, user):
# The PUBLIC user is a special case that is always there
if user == 'PUBLIC':
return True
query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s"
cursor.execute(query, {'user': user})
return cursor.rowcount > 0
def user_add(cursor, user, password, role_attr_flags, encrypted, expires):
"""Create a new database user (role)."""
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal
query_password_data = dict(password=password, expires=expires)
query = ['CREATE USER %(user)s' % { "user": pg_quote_identifier(user, 'role')}]
if password is not None:
query.append("WITH %(crypt)s" % { "crypt": encrypted })
query.append("PASSWORD %(password)s")
if expires is not None:
query.append("VALID UNTIL %(expires)s")
query.append(role_attr_flags)
query = ' '.join(query)
cursor.execute(query, query_password_data)
return True
def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires, no_password_changes):
"""Change user password and/or attributes. Return True if changed, False otherwise."""
changed = False
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal
if user == 'PUBLIC':
if password is not None:
module.fail_json(msg="cannot change the password for PUBLIC user")
elif role_attr_flags != '':
module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
else:
return False
# Handle passwords.
if not no_password_changes and (password is not None or role_attr_flags != ''):
# Select password and all flag-like columns in order to verify changes.
query_password_data = dict(password=password, expires=expires)
select = "SELECT * FROM pg_authid where rolname=%(user)s"
cursor.execute(select, {"user": user})
# Grab current role attributes.
current_role_attrs = cursor.fetchone()
# Do we actually need to do anything?
pwchanging = False
if password is not None:
if encrypted:
if password.startswith('md5'):
if password != current_role_attrs['rolpassword']:
pwchanging = True
else:
try:
from passlib.hash import postgres_md5 as pm
if pm.encrypt(password, user) != current_role_attrs['rolpassword']:
pwchanging = True
except ImportError:
# Cannot check if passlib is not installed, so assume password is different
pwchanging = True
else:
if password != current_role_attrs['rolpassword']:
pwchanging = True
role_attr_flags_changing = False
if role_attr_flags:
role_attr_flags_dict = {}
for r in role_attr_flags.split(' '):
if r.startswith('NO'):
role_attr_flags_dict[r.replace('NO', '', 1)] = False
else:
role_attr_flags_dict[r] = True
for role_attr_name, role_attr_value in role_attr_flags_dict.items():
if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
role_attr_flags_changing = True
expires_changing = (expires is not None and expires != current_role_attrs['rolvaliduntil'])
if not pwchanging and not role_attr_flags_changing and not expires_changing:
return False
alter = ['ALTER USER %(user)s' % {"user": pg_quote_identifier(user, 'role')}]
if pwchanging:
alter.append("WITH %(crypt)s" % {"crypt": encrypted})
alter.append("PASSWORD %(password)s")
alter.append(role_attr_flags)
elif role_attr_flags:
alter.append('WITH %s' % role_attr_flags)
if expires is not None:
alter.append("VALID UNTIL %(expires)s")
try:
cursor.execute(' '.join(alter), query_password_data)
except psycopg2.InternalError, e:
if e.pgcode == '25006':
# Handle errors due to read-only transactions indicated by pgcode 25006
# ERROR: cannot execute ALTER ROLE in a read-only transaction
changed = False
module.fail_json(msg=e.pgerror)
return changed
else:
raise psycopg2.InternalError, e
# Grab new role attributes.
cursor.execute(select, {"user": user})
new_role_attrs = cursor.fetchone()
# Detect any differences between current_ and new_role_attrs.
for i in range(len(current_role_attrs)):
if current_role_attrs[i] != new_role_attrs[i]:
changed = True
return changed
def user_delete(cursor, user):
"""Try to remove a user. Returns True if successful otherwise False"""
cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
try:
cursor.execute("DROP USER %s" % pg_quote_identifier(user, 'role'))
except:
cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
return False
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
return True
def has_table_privileges(cursor, user, table, privs):
"""
Return the difference between the privileges that a user already has and
the privileges that they desire to have.
:returns: tuple of:
* privileges that they have and were requested
* privileges they currently hold but were not requested
* privileges requested that they do not hold
"""
cur_privs = get_table_privileges(cursor, user, table)
have_currently = cur_privs.intersection(privs)
other_current = cur_privs.difference(privs)
desired = privs.difference(cur_privs)
return (have_currently, other_current, desired)
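# Hedged illustration (not part of the original module): if the user currently
# holds {SELECT, INSERT} on the table and {SELECT, UPDATE} is requested, the
# tuple above would be ({SELECT}, {INSERT}, {UPDATE}) -- i.e. already granted,
# held but not requested, and still to be granted.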
def get_table_privileges(cursor, user, table):
if '.' in table:
schema, table = table.split('.', 1)
else:
schema = 'public'
query = '''SELECT privilege_type FROM information_schema.role_table_grants
WHERE grantee=%s AND table_name=%s AND table_schema=%s'''
cursor.execute(query, (user, table, schema))
return frozenset([x[0] for x in cursor.fetchall()])
def grant_table_privileges(cursor, user, table, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
query = 'GRANT %s ON TABLE %s TO %s' % (
privs, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
cursor.execute(query)
def revoke_table_privileges(cursor, user, table, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
query = 'REVOKE %s ON TABLE %s FROM %s' % (
privs, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
cursor.execute(query)
def get_database_privileges(cursor, user, db):
priv_map = {
'C':'CREATE',
'T':'TEMPORARY',
'c':'CONNECT',
}
query = 'SELECT datacl FROM pg_database WHERE datname = %s'
cursor.execute(query, (db,))
datacl = cursor.fetchone()[0]
if datacl is None:
return set()
r = re.search('%s=(C?T?c?)/[a-z]+\,?' % user, datacl)
if r is None:
return set()
o = set()
for v in r.group(1):
o.add(priv_map[v])
return normalize_privileges(o, 'database')
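# Hedged illustration (not part of the original module): pg_database.datacl
# entries look roughly like "{postgres=CTc/postgres,joe=c/postgres}", so for
# user "joe" the regex above would capture "c" and the function would return
# set(['CONNECT']) after normalization.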
def has_database_privileges(cursor, user, db, privs):
"""
Return the difference between the privileges that a user already has and
the privileges that they desire to have.
:returns: tuple of:
* privileges that they have and were requested
* privileges they currently hold but were not requested
* privileges requested that they do not hold
"""
cur_privs = get_database_privileges(cursor, user, db)
have_currently = cur_privs.intersection(privs)
other_current = cur_privs.difference(privs)
desired = privs.difference(cur_privs)
return (have_currently, other_current, desired)
def grant_database_privileges(cursor, user, db, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
if user == "PUBLIC":
query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
privs, pg_quote_identifier(db, 'database'))
else:
query = 'GRANT %s ON DATABASE %s TO %s' % (
privs, pg_quote_identifier(db, 'database'),
pg_quote_identifier(user, 'role'))
cursor.execute(query)
def revoke_database_privileges(cursor, user, db, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
if user == "PUBLIC":
query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
privs, pg_quote_identifier(db, 'database'))
else:
query = 'REVOKE %s ON DATABASE %s FROM %s' % (
privs, pg_quote_identifier(db, 'database'),
pg_quote_identifier(user, 'role'))
cursor.execute(query)
def revoke_privileges(cursor, user, privs):
if privs is None:
return False
revoke_funcs = dict(table=revoke_table_privileges, database=revoke_database_privileges)
check_funcs = dict(table=has_table_privileges, database=has_database_privileges)
changed = False
for type_ in privs:
for name, privileges in privs[type_].iteritems():
# Check that any of the privileges requested to be removed are
# currently granted to the user
differences = check_funcs[type_](cursor, user, name, privileges)
if differences[0]:
revoke_funcs[type_](cursor, user, name, privileges)
changed = True
return changed
def grant_privileges(cursor, user, privs):
if privs is None:
return False
grant_funcs = dict(table=grant_table_privileges, database=grant_database_privileges)
check_funcs = dict(table=has_table_privileges, database=has_database_privileges)
changed = False
for type_ in privs:
for name, privileges in privs[type_].iteritems():
# Check that any of the privileges requested for the user are
# currently missing
differences = check_funcs[type_](cursor, user, name, privileges)
if differences[2]:
grant_funcs[type_](cursor, user, name, privileges)
changed = True
return changed
def parse_role_attrs(role_attr_flags):
"""
Parse role attributes string for user creation.
Format:
attributes[,attributes,...]
Where:
attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
[ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB",
"[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ]
"""
if ',' in role_attr_flags:
flag_set = frozenset(r.upper() for r in role_attr_flags.split(","))
elif role_attr_flags:
flag_set = frozenset((role_attr_flags.upper(),))
else:
flag_set = frozenset()
if not flag_set.issubset(VALID_FLAGS):
raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
' '.join(flag_set.difference(VALID_FLAGS)))
o_flags = ' '.join(flag_set)
return o_flags
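# Hedged illustration (not part of the original module):
# parse_role_attrs("createdb,nosuperuser") returns an upper-cased, space-joined
# flag string such as "CREATEDB NOSUPERUSER" (flag order is not guaranteed,
# since a frozenset is used internally), while an unknown flag raises
# InvalidFlagsError.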
def normalize_privileges(privs, type_):
new_privs = set(privs)
if 'ALL' in new_privs:
new_privs.update(VALID_PRIVS[type_])
new_privs.remove('ALL')
if 'TEMP' in new_privs:
new_privs.add('TEMPORARY')
new_privs.remove('TEMP')
return new_privs
def parse_privs(privs, db):
"""
Parse privilege string to determine permissions for database db.
Format:
privileges[/privileges/...]
Where:
privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]
"""
if privs is None:
return privs
o_privs = {
'database':{},
'table':{}
}
for token in privs.split('/'):
if ':' not in token:
type_ = 'database'
name = db
priv_set = frozenset(x.strip().upper() for x in token.split(',') if x.strip())
else:
type_ = 'table'
name, privileges = token.split(':', 1)
priv_set = frozenset(x.strip().upper() for x in privileges.split(',') if x.strip())
if not priv_set.issubset(VALID_PRIVS[type_]):
raise InvalidPrivsError('Invalid privs specified for %s: %s' %
(type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
priv_set = normalize_privileges(priv_set, type_)
o_privs[type_][name] = priv_set
return o_privs
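# Hedged illustration (not part of the original module):
# parse_privs("CONNECT/products:SELECT,INSERT", "acme") returns
# {'database': {'acme': set(['CONNECT'])},
#  'table': {'products': set(['SELECT', 'INSERT'])}}
# matching the privileges string format documented above.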
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
login_password=dict(default=""),
login_host=dict(default=""),
login_unix_socket=dict(default=""),
user=dict(required=True, aliases=['name']),
password=dict(default=None),
state=dict(default="present", choices=["absent", "present"]),
priv=dict(default=None),
db=dict(default=''),
port=dict(default='5432'),
fail_on_user=dict(type='bool', default='yes'),
role_attr_flags=dict(default=''),
encrypted=dict(type='bool', default='no'),
no_password_changes=dict(type='bool', default='no'),
expires=dict(default=None)
),
supports_check_mode = True
)
user = module.params["user"]
password = module.params["password"]
state = module.params["state"]
fail_on_user = module.params["fail_on_user"]
db = module.params["db"]
if db == '' and module.params["priv"] is not None:
module.fail_json(msg="privileges require a database to be specified")
privs = parse_privs(module.params["priv"], db)
port = module.params["port"]
no_password_changes = module.params["no_password_changes"]
try:
role_attr_flags = parse_role_attrs(module.params["role_attr_flags"])
except InvalidFlagsError, e:
module.fail_json(msg=str(e))
if module.params["encrypted"]:
encrypted = "ENCRYPTED"
else:
encrypted = "UNENCRYPTED"
expires = module.params["expires"]
if not postgresqldb_found:
module.fail_json(msg="the python psycopg2 module is required")
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host":"host",
"login_user":"user",
"login_password":"password",
"port":"port",
"db":"database"
}
kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
if k in params_map and v != "" )
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
try:
db_connection = psycopg2.connect(**kw)
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
except Exception, e:
module.fail_json(msg="unable to connect to database: %s" % e)
kw = dict(user=user)
changed = False
user_removed = False
if state == "present":
if user_exists(cursor, user):
try:
changed = user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires, no_password_changes)
except SQLParseError, e:
module.fail_json(msg=str(e))
else:
try:
changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires)
except SQLParseError, e:
module.fail_json(msg=str(e))
try:
changed = grant_privileges(cursor, user, privs) or changed
except SQLParseError, e:
module.fail_json(msg=str(e))
else:
if user_exists(cursor, user):
if module.check_mode:
changed = True
kw['user_removed'] = True
else:
try:
changed = revoke_privileges(cursor, user, privs)
user_removed = user_delete(cursor, user)
except SQLParseError, e:
module.fail_json(msg=str(e))
changed = changed or user_removed
if fail_on_user and not user_removed:
msg = "unable to remove user"
module.fail_json(msg=msg)
kw['user_removed'] = user_removed
if changed:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
kw['changed'] = changed
module.exit_json(**kw)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
main()
|
gpl-3.0
|
facebookexperimental/eden
|
eden/hg-server/tests/test-revert-t.py
|
2
|
50393
|
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) Mercurial Contributors.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import os
import generateworkingcopystates
from testutil.autofix import eq
from testutil.dott import feature, sh, testtmp # noqa: F401
def dircontent():
# generate a simple text view of the directory for easy comparison
files = os.listdir(".")
files.sort()
output = []
for filename in files:
if os.path.isdir(filename):
continue
content = open(filename).read()
output.append("%-6s %s" % (content.strip(), filename))
return "\n".join(output)
sh % "hg init repo"
sh % "cd repo"
sh % "echo 123" > "a"
sh % "echo 123" > "c"
sh % "echo 123" > "e"
sh % "hg add a c e"
sh % "hg commit -m first a c e"
# nothing changed
sh % "hg revert" == r"""
abort: no files or directories specified
(use --all to revert all files)
[255]"""
sh % "hg revert --all"
# Introduce some changes and revert them
# --------------------------------------
sh % "echo 123" > "b"
sh % "hg status" == "? b"
sh % "echo 12" > "c"
sh % "hg status" == r"""
M c
? b"""
sh % "hg add b"
sh % "hg status" == r"""
M c
A b"""
sh % "hg rm a"
sh % "hg status" == r"""
M c
A b
R a"""
# revert removal of a file
sh % "hg revert a"
sh % "hg status" == r"""
M c
A b"""
# revert addition of a file
sh % "hg revert b"
sh % "hg status" == r"""
M c
? b"""
# revert modification of a file (--no-backup)
sh % "hg revert --no-backup c"
sh % "hg status" == "? b"
# revert deletion (! status) of an added file
# ------------------------------------------
sh % "hg add b"
sh % "hg status b" == "A b"
sh % "rm b"
sh % "hg status b" == "! b"
sh % "hg revert -v b" == "forgetting b"
sh % "hg status b" == "b: * (glob)"
sh % "ls" == r"""
a
c
e"""
# Test creation of backup (.orig) files
# -------------------------------------
sh % "echo z" > "e"
sh % "hg revert --all -v" == r"""
saving current version of e as e.orig
reverting e"""
# Test creation of backup (.orig) file in configured file location
# ----------------------------------------------------------------
sh % "echo z" > "e"
sh % "hg revert --all -v --config 'ui.origbackuppath=.hg/origbackups'" == r"""
creating directory: $TESTTMP/repo/.hg/origbackups
saving current version of e as $TESTTMP/repo/.hg/origbackups/e
reverting e"""
sh % "rm -rf .hg/origbackups"
# revert on clean file (no change)
# --------------------------------
sh % "hg revert a" == "no changes needed to a"
# revert on an untracked file
# ---------------------------
sh % "echo q" > "q"
sh % "hg revert q" == "file not managed: q"
sh % "rm q"
# revert on a file that does not exist
# -----------------------------------
sh % "hg revert notfound" == "notfound: no such file in rev 334a9e57682c"
sh % "touch d"
sh % "hg add d"
sh % "hg rm a"
sh % "hg commit -m second"
sh % "echo z" > "z"
sh % "hg add z"
sh % "hg st" == r"""
A z
? e.orig"""
# revert to another revision (--rev)
# ----------------------------------
sh % "hg revert --all -r0" == r"""
adding a
removing d
forgetting z"""
# revert explicitly to parent (--rev)
# -----------------------------------
sh % "hg revert --all -rtip" == r"""
forgetting a
undeleting d"""
sh % "rm a *.orig"
# revert to another revision (--rev) and exact match
# --------------------------------------------------
# exact match are more silent
sh % "hg revert -r0 a"
sh % "hg st a" == "A a"
sh % "hg rm d"
sh % "hg st d" == "R d"
# should keep d removed
sh % "hg revert -r0 d" == "no changes needed to d"
sh % "hg st d" == "R d"
sh % "hg update -C" == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"
# revert of exec bit
# ------------------
if feature.check(["execbit"]):
sh % "chmod +x c"
sh % "hg revert --all" == "reverting c"
sh % "test -x c" == "[1]"
sh % "chmod +x c"
sh % "hg commit -m exe"
sh % "chmod -x c"
sh % "hg revert --all" == "reverting c"
sh % "test -x c"
sh % "echo executable" == "executable"
# Test that files reverted to other than the parent are treated as
# "modified", even if mode, size and timestamp are all unchanged on the
# filesystem (see also issue4583).
sh % "echo 321" > "e"
sh % "hg diff --git" == r"""
diff --git a/e b/e
--- a/e
+++ b/e
@@ -1,1 +1,1 @@
-123
+321"""
sh % "hg commit -m 'ambiguity from size'"
sh % "cat e" == "321"
sh % "touch -t 200001010000 e"
sh % "hg debugrebuildstate"
(
sh % "cat"
<< r"""
[fakedirstatewritetime]
# emulate invoking dirstate.write() via repo.status()
# at 2000-01-01 00:00
fakenow = 2000-01-01 00:00:00
[extensions]
fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py
"""
>> ".hg/hgrc"
)
sh % "hg revert -r 0 e"
(
sh % "cat"
<< r"""
[extensions]
fakedirstatewritetime = !
"""
>> ".hg/hgrc"
)
sh % "cat e" == "123"
sh % "touch -t 200001010000 e"
sh % "hg status -A e" == "M e"
sh % "cd .."
# Issue241: update and revert produces inconsistent repositories
# --------------------------------------------------------------
sh % "hg init a"
sh % "cd a"
sh % "echo a" >> "a"
sh % "hg commit -A -d '1 0' -m a" == "adding a"
sh % "echo a" >> "a"
sh % "hg commit -d '2 0' -m a"
sh % "hg update 0" == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"
sh % "mkdir b"
sh % "echo b" > "b/b"
# call `hg revert` with no file specified
# ---------------------------------------
sh % "hg revert -rtip" == r"""
abort: no files or directories specified
(use --all to revert all files, or 'hg update 1' to update)
[255]"""
# call `hg revert` with -I
# ---------------------------
sh % "echo a" >> "a"
sh % "hg revert -I a" == "reverting a"
# call `hg revert` with -X
# ---------------------------
sh % "echo a" >> "a"
sh % "hg revert -X d" == "reverting a"
# call `hg revert` with --all
# ---------------------------
sh % "hg revert --all -rtip" == "reverting a"
sh % "rm 'a.orig'"
# Issue332: confusing message when reverting directory
# ----------------------------------------------------
sh % "hg ci -A -m b" == "adding b/b"
sh % "echo foobar" > "b/b"
sh % "mkdir newdir"
sh % "echo foo" > "newdir/newfile"
sh % "hg add newdir/newfile"
sh % "hg revert b newdir" == r"""
reverting b/b
forgetting newdir/newfile"""
sh % "echo foobar" > "b/b"
sh % "hg revert ." == "reverting b/b"
# reverting a rename target should revert the source
# --------------------------------------------------
sh % "hg mv a newa"
sh % "hg revert newa"
sh % "hg st a newa" == "? newa"
# Also true for move overwriting an existing file
sh % "hg mv --force a b/b"
sh % "hg revert b/b"
sh % "hg status a b/b"
sh % "cd .."
sh % "hg init ignored"
sh % "cd ignored"
sh % "echo ignored" > ".gitignore"
sh % "echo ignoreddir" >> ".gitignore"
sh % "echo removed" >> ".gitignore"
sh % "mkdir ignoreddir"
sh % "touch ignoreddir/file"
sh % "touch ignoreddir/removed"
sh % "touch ignored"
sh % "touch removed"
# 4 ignored files (we will add/commit everything)
sh % "hg st -A -X .gitignore" == r"""
I ignored
I ignoreddir/file
I ignoreddir/removed
I removed"""
sh % "hg ci -qAm 'add files' ignored ignoreddir/file ignoreddir/removed removed"
sh % "echo" >> "ignored"
sh % "echo" >> "ignoreddir/file"
sh % "hg rm removed ignoreddir/removed"
# should revert ignored* and undelete *removed
# --------------------------------------------
sh % "hg revert -a --no-backup" == r"""
reverting ignored
reverting ignoreddir/file
undeleting ignoreddir/removed
undeleting removed"""
sh % "hg st -mardi"
sh % "hg up -qC"
sh % "echo" >> "ignored"
sh % "hg rm removed"
# should silently revert the named files
# --------------------------------------
sh % "hg revert --no-backup ignored removed"
sh % "hg st -mardi"
# Reverting copy (issue3920)
# --------------------------
# someone set up us the copies
sh % "rm .gitignore"
sh % "hg update -C" == "0 files updated, 0 files merged, 0 files removed, 0 files unresolved"
sh % "hg mv ignored allyour"
sh % "hg copy removed base"
sh % "hg commit -m rename"
# copies and renames, you have no chance to survive make your time (issue3920)
sh % "hg update '.^'" == "1 files updated, 0 files merged, 2 files removed, 0 files unresolved"
sh % "hg revert -rtip -a" == r"""
adding allyour
adding base
removing ignored"""
sh % "hg status -C" == r"""
A allyour
ignored
A base
removed
R ignored"""
# Test revert of a file added by one side of the merge
# ====================================================
# remove any pending change
sh % "hg revert --all" == r"""
forgetting allyour
forgetting base
undeleting ignored"""
sh % "hg purge --all"
# Adds a new commit
sh % "echo foo" > "newadd"
sh % "hg add newadd"
sh % "hg commit -m 'other adds'"
# merge it with the other head
sh % "hg merge" == r"""
2 files updated, 0 files merged, 1 files removed, 0 files unresolved
(branch merge, don't forget to commit)"""
sh % "hg summary" == r"""
parent: b8ec310b2d4e
other adds
parent: f6180deb8fbe
rename
commit: 2 modified, 1 removed (merge)
phases: 3 draft"""
# clarifies who added what
sh % "hg status" == r"""
M allyour
M base
R ignored"""
sh % "hg status --change 'p1()'" == "A newadd"
sh % "hg status --change 'p2()'" == r"""
A allyour
A base
R ignored"""
# revert file added by p1() to p1() state
# -----------------------------------------
sh % "hg revert -r 'p1()' 'glob:newad?'"
sh % "hg status" == r"""
M allyour
M base
R ignored"""
# revert file added by p1() to p2() state
# ------------------------------------------
sh % "hg revert -r 'p2()' 'glob:newad?'" == "removing newadd"
sh % "hg status" == r"""
M allyour
M base
R ignored
R newadd"""
# revert file added by p2() to p2() state
# ------------------------------------------
sh % "hg revert -r 'p2()' 'glob:allyou?'"
sh % "hg status" == r"""
M allyour
M base
R ignored
R newadd"""
# revert file added by p2() to p1() state
# ------------------------------------------
sh % "hg revert -r 'p1()' 'glob:allyou?'" == "removing allyour"
sh % "hg status" == r"""
M base
R allyour
R ignored
R newadd"""
# Systematic behavior validation of most possible cases
# =====================================================
# This section tests most of the possible combinations of revision states and
# working directory states. The number of possible cases is significant, but they
# all have slightly different handling. So this section commits to generating
# and testing all of them to allow safe refactoring of the revert code.
# A python script is used to generate a file history for each combination of
# states, on one side the content (or lack thereof) in two revisions, and
# on the other side, the content and "tracked-ness" of the working directory. The
# three states generated are:
# - a "base" revision
# - a "parent" revision
# - the working directory (based on "parent")
# The files generated have names of the form:
# <rev1-content>_<rev2-content>_<working-copy-content>-<tracked-ness>
# Not all known states are tested yet. See inline documentation for details.
# Special cases from merge and rename are not tested by this section.
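# (Not part of the original test: a rough, illustrative sketch of how names of
# the form <rev1-content>_<rev2-content>_<working-copy-content>-<tracked-ness>
# could be enumerated. The authoritative list comes from
# generateworkingcopystates below, whose implementation may differ.)
def _sketch_statenames():
    pairs = [
        ("content1", "content1"),
        ("content1", "content2"),
        ("content1", "missing"),
        ("missing", "content2"),
        ("missing", "missing"),
    ]
    names = []
    for base, parent in pairs:
        # working-copy content is one of: a revision's content, a new
        # "content3", or a missing file
        wcchoices = [c for c in (base, parent, "content3") if c != "missing"]
        wcchoices = sorted(set(wcchoices), key=wcchoices.index) + ["missing"]
        for wc in wcchoices:
            for tracked in ("tracked", "untracked"):
                names.append("%s_%s_%s-%s" % (base, parent, wc, tracked))
    return names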
# Write the python script to disk
# -------------------------------
# check list of planned files
eq(
generateworkingcopystates.main("filelist", 2),
r"""
content1_content1_content1-tracked
content1_content1_content1-untracked
content1_content1_content3-tracked
content1_content1_content3-untracked
content1_content1_missing-tracked
content1_content1_missing-untracked
content1_content2_content1-tracked
content1_content2_content1-untracked
content1_content2_content2-tracked
content1_content2_content2-untracked
content1_content2_content3-tracked
content1_content2_content3-untracked
content1_content2_missing-tracked
content1_content2_missing-untracked
content1_missing_content1-tracked
content1_missing_content1-untracked
content1_missing_content3-tracked
content1_missing_content3-untracked
content1_missing_missing-tracked
content1_missing_missing-untracked
missing_content2_content2-tracked
missing_content2_content2-untracked
missing_content2_content3-tracked
missing_content2_content3-untracked
missing_content2_missing-tracked
missing_content2_missing-untracked
missing_missing_content3-tracked
missing_missing_content3-untracked
missing_missing_missing-tracked
missing_missing_missing-untracked""",
)
# Script to make a simple text version of the content
# ---------------------------------------------------
# Generate appropriate repo state
# -------------------------------
sh % "hg init revert-ref"
sh % "cd revert-ref"
# Generate base changeset
generateworkingcopystates.main("state", 2, 1)
sh % "hg addremove --similarity 0" == r"""
adding content1_content1_content1-tracked
adding content1_content1_content1-untracked
adding content1_content1_content3-tracked
adding content1_content1_content3-untracked
adding content1_content1_missing-tracked
adding content1_content1_missing-untracked
adding content1_content2_content1-tracked
adding content1_content2_content1-untracked
adding content1_content2_content2-tracked
adding content1_content2_content2-untracked
adding content1_content2_content3-tracked
adding content1_content2_content3-untracked
adding content1_content2_missing-tracked
adding content1_content2_missing-untracked
adding content1_missing_content1-tracked
adding content1_missing_content1-untracked
adding content1_missing_content3-tracked
adding content1_missing_content3-untracked
adding content1_missing_missing-tracked
adding content1_missing_missing-untracked"""
sh % "hg status" == r"""
A content1_content1_content1-tracked
A content1_content1_content1-untracked
A content1_content1_content3-tracked
A content1_content1_content3-untracked
A content1_content1_missing-tracked
A content1_content1_missing-untracked
A content1_content2_content1-tracked
A content1_content2_content1-untracked
A content1_content2_content2-tracked
A content1_content2_content2-untracked
A content1_content2_content3-tracked
A content1_content2_content3-untracked
A content1_content2_missing-tracked
A content1_content2_missing-untracked
A content1_missing_content1-tracked
A content1_missing_content1-untracked
A content1_missing_content3-tracked
A content1_missing_content3-untracked
A content1_missing_missing-tracked
A content1_missing_missing-untracked"""
sh % "hg commit -m base"
# (create a simple text version of the content)
eq(
dircontent(),
r"""
content1 content1_content1_content1-tracked
content1 content1_content1_content1-untracked
content1 content1_content1_content3-tracked
content1 content1_content1_content3-untracked
content1 content1_content1_missing-tracked
content1 content1_content1_missing-untracked
content1 content1_content2_content1-tracked
content1 content1_content2_content1-untracked
content1 content1_content2_content2-tracked
content1 content1_content2_content2-untracked
content1 content1_content2_content3-tracked
content1 content1_content2_content3-untracked
content1 content1_content2_missing-tracked
content1 content1_content2_missing-untracked
content1 content1_missing_content1-tracked
content1 content1_missing_content1-untracked
content1 content1_missing_content3-tracked
content1 content1_missing_content3-untracked
content1 content1_missing_missing-tracked
content1 content1_missing_missing-untracked""",
)
# Create parent changeset
generateworkingcopystates.main("state", 2, 2)
sh % "hg addremove --similarity 0" == r"""
removing content1_missing_content1-tracked
removing content1_missing_content1-untracked
removing content1_missing_content3-tracked
removing content1_missing_content3-untracked
removing content1_missing_missing-tracked
removing content1_missing_missing-untracked
adding missing_content2_content2-tracked
adding missing_content2_content2-untracked
adding missing_content2_content3-tracked
adding missing_content2_content3-untracked
adding missing_content2_missing-tracked
adding missing_content2_missing-untracked"""
sh % "hg status" == r"""
M content1_content2_content1-tracked
M content1_content2_content1-untracked
M content1_content2_content2-tracked
M content1_content2_content2-untracked
M content1_content2_content3-tracked
M content1_content2_content3-untracked
M content1_content2_missing-tracked
M content1_content2_missing-untracked
A missing_content2_content2-tracked
A missing_content2_content2-untracked
A missing_content2_content3-tracked
A missing_content2_content3-untracked
A missing_content2_missing-tracked
A missing_content2_missing-untracked
R content1_missing_content1-tracked
R content1_missing_content1-untracked
R content1_missing_content3-tracked
R content1_missing_content3-untracked
R content1_missing_missing-tracked
R content1_missing_missing-untracked"""
sh % "hg commit -m parent"
# (create a simple text version of the content)
eq(
dircontent(),
r"""
content1 content1_content1_content1-tracked
content1 content1_content1_content1-untracked
content1 content1_content1_content3-tracked
content1 content1_content1_content3-untracked
content1 content1_content1_missing-tracked
content1 content1_content1_missing-untracked
content2 content1_content2_content1-tracked
content2 content1_content2_content1-untracked
content2 content1_content2_content2-tracked
content2 content1_content2_content2-untracked
content2 content1_content2_content3-tracked
content2 content1_content2_content3-untracked
content2 content1_content2_missing-tracked
content2 content1_content2_missing-untracked
content2 missing_content2_content2-tracked
content2 missing_content2_content2-untracked
content2 missing_content2_content3-tracked
content2 missing_content2_content3-untracked
content2 missing_content2_missing-tracked
content2 missing_content2_missing-untracked""",
)
# Setup working directory
generateworkingcopystates.main("state", 2, "wc")
sh % "hg addremove --similarity 0" == r"""
adding content1_missing_content1-tracked
adding content1_missing_content1-untracked
adding content1_missing_content3-tracked
adding content1_missing_content3-untracked
adding content1_missing_missing-tracked
adding content1_missing_missing-untracked
adding missing_missing_content3-tracked
adding missing_missing_content3-untracked
adding missing_missing_missing-tracked
adding missing_missing_missing-untracked"""
sh % "hg forget *_*_*-untracked"
sh % "rm *_*_missing-*"
sh % "hg status" == r"""
M content1_content1_content3-tracked
M content1_content2_content1-tracked
M content1_content2_content3-tracked
M missing_content2_content3-tracked
A content1_missing_content1-tracked
A content1_missing_content3-tracked
A missing_missing_content3-tracked
R content1_content1_content1-untracked
R content1_content1_content3-untracked
R content1_content1_missing-untracked
R content1_content2_content1-untracked
R content1_content2_content2-untracked
R content1_content2_content3-untracked
R content1_content2_missing-untracked
R missing_content2_content2-untracked
R missing_content2_content3-untracked
R missing_content2_missing-untracked
! content1_content1_missing-tracked
! content1_content2_missing-tracked
! content1_missing_missing-tracked
! missing_content2_missing-tracked
! missing_missing_missing-tracked
? content1_missing_content1-untracked
? content1_missing_content3-untracked
? missing_missing_content3-untracked"""
sh % "hg status --rev 'desc(\"base\")'" == r"""
M content1_content1_content3-tracked
M content1_content2_content2-tracked
M content1_content2_content3-tracked
M content1_missing_content3-tracked
A missing_content2_content2-tracked
A missing_content2_content3-tracked
A missing_missing_content3-tracked
R content1_content1_content1-untracked
R content1_content1_content3-untracked
R content1_content1_missing-untracked
R content1_content2_content1-untracked
R content1_content2_content2-untracked
R content1_content2_content3-untracked
R content1_content2_missing-untracked
R content1_missing_content1-untracked
R content1_missing_content3-untracked
R content1_missing_missing-untracked
! content1_content1_missing-tracked
! content1_content2_missing-tracked
! content1_missing_missing-tracked
! missing_content2_missing-tracked
! missing_missing_missing-tracked
? missing_missing_content3-untracked"""
# (create a simple text version of the content)
eq(
dircontent(),
r"""
content1 content1_content1_content1-tracked
content1 content1_content1_content1-untracked
content3 content1_content1_content3-tracked
content3 content1_content1_content3-untracked
content1 content1_content2_content1-tracked
content1 content1_content2_content1-untracked
content2 content1_content2_content2-tracked
content2 content1_content2_content2-untracked
content3 content1_content2_content3-tracked
content3 content1_content2_content3-untracked
content1 content1_missing_content1-tracked
content1 content1_missing_content1-untracked
content3 content1_missing_content3-tracked
content3 content1_missing_content3-untracked
content2 missing_content2_content2-tracked
content2 missing_content2_content2-untracked
content3 missing_content2_content3-tracked
content3 missing_content2_content3-untracked
content3 missing_missing_content3-tracked
content3 missing_missing_content3-untracked""",
)
sh % "cd .."
# Test revert --all to parent content
# -----------------------------------
# (setup from reference repo)
sh % "cp -R revert-ref revert-parent-all"
sh % "cd revert-parent-all"
# check revert output
sh % "hg revert --all" == r"""
undeleting content1_content1_content1-untracked
reverting content1_content1_content3-tracked
undeleting content1_content1_content3-untracked
reverting content1_content1_missing-tracked
undeleting content1_content1_missing-untracked
reverting content1_content2_content1-tracked
undeleting content1_content2_content1-untracked
undeleting content1_content2_content2-untracked
reverting content1_content2_content3-tracked
undeleting content1_content2_content3-untracked
reverting content1_content2_missing-tracked
undeleting content1_content2_missing-untracked
forgetting content1_missing_content1-tracked
forgetting content1_missing_content3-tracked
forgetting content1_missing_missing-tracked
undeleting missing_content2_content2-untracked
reverting missing_content2_content3-tracked
undeleting missing_content2_content3-untracked
reverting missing_content2_missing-tracked
undeleting missing_content2_missing-untracked
forgetting missing_missing_content3-tracked
forgetting missing_missing_missing-tracked"""
# Compare resulting directory with revert target.
# The diff is filtered to include changes only. The only expected difference is an
# additional `.orig` backup file where applicable.
eq(
dircontent(),
r"""
content1 content1_content1_content1-tracked
content1 content1_content1_content1-untracked
content1 content1_content1_content3-tracked
content3 content1_content1_content3-tracked.orig
content1 content1_content1_content3-untracked
content3 content1_content1_content3-untracked.orig
content1 content1_content1_missing-tracked
content1 content1_content1_missing-untracked
content2 content1_content2_content1-tracked
content1 content1_content2_content1-tracked.orig
content2 content1_content2_content1-untracked
content1 content1_content2_content1-untracked.orig
content2 content1_content2_content2-tracked
content2 content1_content2_content2-untracked
content2 content1_content2_content3-tracked
content3 content1_content2_content3-tracked.orig
content2 content1_content2_content3-untracked
content3 content1_content2_content3-untracked.orig
content2 content1_content2_missing-tracked
content2 content1_content2_missing-untracked
content1 content1_missing_content1-tracked
content1 content1_missing_content1-untracked
content3 content1_missing_content3-tracked
content3 content1_missing_content3-untracked
content2 missing_content2_content2-tracked
content2 missing_content2_content2-untracked
content2 missing_content2_content3-tracked
content3 missing_content2_content3-tracked.orig
content2 missing_content2_content3-untracked
content3 missing_content2_content3-untracked.orig
content2 missing_content2_missing-tracked
content2 missing_content2_missing-untracked
content3 missing_missing_content3-tracked
content3 missing_missing_content3-untracked""",
)
sh % "cd .."
# Test revert --all to "base" content
# -----------------------------------
# (setup from reference repo)
sh % "cp -R revert-ref revert-base-all"
sh % "cd revert-base-all"
# check revert output
sh % "hg revert --all --rev 'desc(base)'" == r"""
undeleting content1_content1_content1-untracked
reverting content1_content1_content3-tracked
undeleting content1_content1_content3-untracked
reverting content1_content1_missing-tracked
undeleting content1_content1_missing-untracked
undeleting content1_content2_content1-untracked
reverting content1_content2_content2-tracked
undeleting content1_content2_content2-untracked
reverting content1_content2_content3-tracked
undeleting content1_content2_content3-untracked
reverting content1_content2_missing-tracked
undeleting content1_content2_missing-untracked
adding content1_missing_content1-untracked
reverting content1_missing_content3-tracked
adding content1_missing_content3-untracked
reverting content1_missing_missing-tracked
adding content1_missing_missing-untracked
removing missing_content2_content2-tracked
removing missing_content2_content3-tracked
removing missing_content2_missing-tracked
forgetting missing_missing_content3-tracked
forgetting missing_missing_missing-tracked"""
# Compare resulting directory with revert target.
# The diff is filtered to include changes only. The only expected difference is an
# additional `.orig` backup file where applicable.
eq(
dircontent(),
r"""
content1 content1_content1_content1-tracked
content1 content1_content1_content1-untracked
content1 content1_content1_content3-tracked
content3 content1_content1_content3-tracked.orig
content1 content1_content1_content3-untracked
content3 content1_content1_content3-untracked.orig
content1 content1_content1_missing-tracked
content1 content1_content1_missing-untracked
content1 content1_content2_content1-tracked
content1 content1_content2_content1-untracked
content1 content1_content2_content2-tracked
content1 content1_content2_content2-untracked
content2 content1_content2_content2-untracked.orig
content1 content1_content2_content3-tracked
content3 content1_content2_content3-tracked.orig
content1 content1_content2_content3-untracked
content3 content1_content2_content3-untracked.orig
content1 content1_content2_missing-tracked
content1 content1_content2_missing-untracked
content1 content1_missing_content1-tracked
content1 content1_missing_content1-untracked
content1 content1_missing_content3-tracked
content3 content1_missing_content3-tracked.orig
content1 content1_missing_content3-untracked
content3 content1_missing_content3-untracked.orig
content1 content1_missing_missing-tracked
content1 content1_missing_missing-untracked
content2 missing_content2_content2-untracked
content3 missing_content2_content3-tracked.orig
content3 missing_content2_content3-untracked
content3 missing_missing_content3-tracked
content3 missing_missing_content3-untracked""",
)
sh % "cd .."
# Test revert to parent content with explicit file name
# -----------------------------------------------------
# (setup from reference repo)
sh % "cp -R revert-ref revert-parent-explicit"
sh % "cd revert-parent-explicit"
# revert all files individually and check the output
# (output is expected to be different than in the --all case)
files = generateworkingcopystates.main("filelist", 2)
output = []
for myfile in files.split("\n"):
output.append("### revert for: {}".format(myfile))
output.append((sh % "hg revert {}".format(myfile)).output)
eq(
"\n".join(output),
r"""
### revert for: content1_content1_content1-tracked
no changes needed to content1_content1_content1-tracked
### revert for: content1_content1_content1-untracked
### revert for: content1_content1_content3-tracked
### revert for: content1_content1_content3-untracked
### revert for: content1_content1_missing-tracked
### revert for: content1_content1_missing-untracked
### revert for: content1_content2_content1-tracked
### revert for: content1_content2_content1-untracked
### revert for: content1_content2_content2-tracked
no changes needed to content1_content2_content2-tracked
### revert for: content1_content2_content2-untracked
### revert for: content1_content2_content3-tracked
### revert for: content1_content2_content3-untracked
### revert for: content1_content2_missing-tracked
### revert for: content1_content2_missing-untracked
### revert for: content1_missing_content1-tracked
### revert for: content1_missing_content1-untracked
file not managed: content1_missing_content1-untracked
### revert for: content1_missing_content3-tracked
### revert for: content1_missing_content3-untracked
file not managed: content1_missing_content3-untracked
### revert for: content1_missing_missing-tracked
### revert for: content1_missing_missing-untracked
content1_missing_missing-untracked: no such file in rev cbcb7147d2a0
### revert for: missing_content2_content2-tracked
no changes needed to missing_content2_content2-tracked
### revert for: missing_content2_content2-untracked
### revert for: missing_content2_content3-tracked
### revert for: missing_content2_content3-untracked
### revert for: missing_content2_missing-tracked
### revert for: missing_content2_missing-untracked
### revert for: missing_missing_content3-tracked
### revert for: missing_missing_content3-untracked
file not managed: missing_missing_content3-untracked
### revert for: missing_missing_missing-tracked
### revert for: missing_missing_missing-untracked
missing_missing_missing-untracked: no such file in rev cbcb7147d2a0""",
)
# check resulting directory against the --all run
# (There should be no difference)
eq(
dircontent(),
r"""
content1 content1_content1_content1-tracked
content1 content1_content1_content1-untracked
content1 content1_content1_content3-tracked
content3 content1_content1_content3-tracked.orig
content1 content1_content1_content3-untracked
content3 content1_content1_content3-untracked.orig
content1 content1_content1_missing-tracked
content1 content1_content1_missing-untracked
content2 content1_content2_content1-tracked
content1 content1_content2_content1-tracked.orig
content2 content1_content2_content1-untracked
content1 content1_content2_content1-untracked.orig
content2 content1_content2_content2-tracked
content2 content1_content2_content2-untracked
content2 content1_content2_content3-tracked
content3 content1_content2_content3-tracked.orig
content2 content1_content2_content3-untracked
content3 content1_content2_content3-untracked.orig
content2 content1_content2_missing-tracked
content2 content1_content2_missing-untracked
content1 content1_missing_content1-tracked
content1 content1_missing_content1-untracked
content3 content1_missing_content3-tracked
content3 content1_missing_content3-untracked
content2 missing_content2_content2-tracked
content2 missing_content2_content2-untracked
content2 missing_content2_content3-tracked
content3 missing_content2_content3-tracked.orig
content2 missing_content2_content3-untracked
content3 missing_content2_content3-untracked.orig
content2 missing_content2_missing-tracked
content2 missing_content2_missing-untracked
content3 missing_missing_content3-tracked
content3 missing_missing_content3-untracked""",
)
sh % "cd .."
# Test revert to "base" content with explicit file name
# -----------------------------------------------------
# (setup from reference repo)
sh % "cp -R revert-ref revert-base-explicit"
sh % "cd revert-base-explicit"
# revert all files individually and check the output
# (output is expected to be different than in the --all case)
files = generateworkingcopystates.main("filelist", 2)
output = []
for myfile in files.split("\n"):
output.append("### revert for: {}".format(myfile))
output.append((sh % "hg revert {}".format(myfile)).output)
eq(
"\n".join(output),
r"""
### revert for: content1_content1_content1-tracked
no changes needed to content1_content1_content1-tracked
### revert for: content1_content1_content1-untracked
### revert for: content1_content1_content3-tracked
### revert for: content1_content1_content3-untracked
### revert for: content1_content1_missing-tracked
### revert for: content1_content1_missing-untracked
### revert for: content1_content2_content1-tracked
### revert for: content1_content2_content1-untracked
### revert for: content1_content2_content2-tracked
no changes needed to content1_content2_content2-tracked
### revert for: content1_content2_content2-untracked
### revert for: content1_content2_content3-tracked
### revert for: content1_content2_content3-untracked
### revert for: content1_content2_missing-tracked
### revert for: content1_content2_missing-untracked
### revert for: content1_missing_content1-tracked
### revert for: content1_missing_content1-untracked
file not managed: content1_missing_content1-untracked
### revert for: content1_missing_content3-tracked
### revert for: content1_missing_content3-untracked
file not managed: content1_missing_content3-untracked
### revert for: content1_missing_missing-tracked
### revert for: content1_missing_missing-untracked
content1_missing_missing-untracked: no such file in rev cbcb7147d2a0
### revert for: missing_content2_content2-tracked
no changes needed to missing_content2_content2-tracked
### revert for: missing_content2_content2-untracked
### revert for: missing_content2_content3-tracked
### revert for: missing_content2_content3-untracked
### revert for: missing_content2_missing-tracked
### revert for: missing_content2_missing-untracked
### revert for: missing_missing_content3-tracked
### revert for: missing_missing_content3-untracked
file not managed: missing_missing_content3-untracked
### revert for: missing_missing_missing-tracked
### revert for: missing_missing_missing-untracked
missing_missing_missing-untracked: no such file in rev cbcb7147d2a0""",
)
# check resulting directory against the --all run
# (There should be no difference)
eq(
dircontent(),
r"""
content1 content1_content1_content1-tracked
content1 content1_content1_content1-untracked
content1 content1_content1_content3-tracked
content3 content1_content1_content3-tracked.orig
content1 content1_content1_content3-untracked
content3 content1_content1_content3-untracked.orig
content1 content1_content1_missing-tracked
content1 content1_content1_missing-untracked
content2 content1_content2_content1-tracked
content1 content1_content2_content1-tracked.orig
content2 content1_content2_content1-untracked
content1 content1_content2_content1-untracked.orig
content2 content1_content2_content2-tracked
content2 content1_content2_content2-untracked
content2 content1_content2_content3-tracked
content3 content1_content2_content3-tracked.orig
content2 content1_content2_content3-untracked
content3 content1_content2_content3-untracked.orig
content2 content1_content2_missing-tracked
content2 content1_content2_missing-untracked
content1 content1_missing_content1-tracked
content1 content1_missing_content1-untracked
content3 content1_missing_content3-tracked
content3 content1_missing_content3-untracked
content2 missing_content2_content2-tracked
content2 missing_content2_content2-untracked
content2 missing_content2_content3-tracked
content3 missing_content2_content3-tracked.orig
content2 missing_content2_content3-untracked
content3 missing_content2_content3-untracked.orig
content2 missing_content2_missing-tracked
content2 missing_content2_missing-untracked
content3 missing_missing_content3-tracked
content3 missing_missing_content3-untracked""",
)
sh % "cd .."
# Test revert to parent content with explicit file name but ignored files
# -----------------------------------------------------------------------
# (setup from reference repo)
sh % "cp -R revert-ref revert-parent-explicit-ignored"
sh % "cd revert-parent-explicit-ignored"
sh % "echo *" > ".gitignore"
# revert all files individually and check the output
# (output is expected to be different than in the --all case)
files = generateworkingcopystates.main("filelist", 2)
output = []
for myfile in files.split("\n"):
output.append("### revert for: {}".format(myfile))
output.append((sh % "hg revert {}".format(myfile)).output)
eq(
"\n".join(output),
r"""
### revert for: content1_content1_content1-tracked
no changes needed to content1_content1_content1-tracked
### revert for: content1_content1_content1-untracked
### revert for: content1_content1_content3-tracked
### revert for: content1_content1_content3-untracked
### revert for: content1_content1_missing-tracked
### revert for: content1_content1_missing-untracked
### revert for: content1_content2_content1-tracked
### revert for: content1_content2_content1-untracked
### revert for: content1_content2_content2-tracked
no changes needed to content1_content2_content2-tracked
### revert for: content1_content2_content2-untracked
### revert for: content1_content2_content3-tracked
### revert for: content1_content2_content3-untracked
### revert for: content1_content2_missing-tracked
### revert for: content1_content2_missing-untracked
### revert for: content1_missing_content1-tracked
### revert for: content1_missing_content1-untracked
file not managed: content1_missing_content1-untracked
### revert for: content1_missing_content3-tracked
### revert for: content1_missing_content3-untracked
file not managed: content1_missing_content3-untracked
### revert for: content1_missing_missing-tracked
### revert for: content1_missing_missing-untracked
content1_missing_missing-untracked: no such file in rev cbcb7147d2a0
### revert for: missing_content2_content2-tracked
no changes needed to missing_content2_content2-tracked
### revert for: missing_content2_content2-untracked
### revert for: missing_content2_content3-tracked
### revert for: missing_content2_content3-untracked
### revert for: missing_content2_missing-tracked
### revert for: missing_content2_missing-untracked
### revert for: missing_missing_content3-tracked
### revert for: missing_missing_content3-untracked
file not managed: missing_missing_content3-untracked
### revert for: missing_missing_missing-tracked
### revert for: missing_missing_missing-untracked
missing_missing_missing-untracked: no such file in rev cbcb7147d2a0""",
)
# check resulting directory against the --all run
# (There should be no difference)
eq(
dircontent(),
r"""
content1_content1_content1-tracked content1_content1_content1-untracked content1_content1_content3-tracked content1_content1_content3-untracked content1_content2_content1-tracked content1_content2_content1-untracked content1_content2_content2-tracked content1_content2_content2-untracked content1_content2_content3-tracked content1_content2_content3-untracked content1_missing_content1-tracked content1_missing_content1-untracked content1_missing_content3-tracked content1_missing_content3-untracked missing_content2_content2-tracked missing_content2_content2-untracked missing_content2_content3-tracked missing_content2_content3-untracked missing_missing_content3-tracked missing_missing_content3-untracked .gitignore
content1 content1_content1_content1-tracked
content1 content1_content1_content1-untracked
content1 content1_content1_content3-tracked
content3 content1_content1_content3-tracked.orig
content1 content1_content1_content3-untracked
content3 content1_content1_content3-untracked.orig
content1 content1_content1_missing-tracked
content1 content1_content1_missing-untracked
content2 content1_content2_content1-tracked
content1 content1_content2_content1-tracked.orig
content2 content1_content2_content1-untracked
content1 content1_content2_content1-untracked.orig
content2 content1_content2_content2-tracked
content2 content1_content2_content2-untracked
content2 content1_content2_content3-tracked
content3 content1_content2_content3-tracked.orig
content2 content1_content2_content3-untracked
content3 content1_content2_content3-untracked.orig
content2 content1_content2_missing-tracked
content2 content1_content2_missing-untracked
content1 content1_missing_content1-tracked
content1 content1_missing_content1-untracked
content3 content1_missing_content3-tracked
content3 content1_missing_content3-untracked
content2 missing_content2_content2-tracked
content2 missing_content2_content2-untracked
content2 missing_content2_content3-tracked
content3 missing_content2_content3-tracked.orig
content2 missing_content2_content3-untracked
content3 missing_content2_content3-untracked.orig
content2 missing_content2_missing-tracked
content2 missing_content2_missing-untracked
content3 missing_missing_content3-tracked
content3 missing_missing_content3-untracked""",
)
sh % "cd .."
# Test revert to "base" content with explicit file name
# -----------------------------------------------------
# (setup from reference repo)
sh % "cp -R revert-ref revert-base-explicit-ignored"
sh % "cd revert-base-explicit-ignored"
sh % "echo *" > ".gitignore"
# revert all files individually and check the output
# (output is expected to be different than in the --all case)
files = generateworkingcopystates.main("filelist", 2)
output = []
for myfile in files.split("\n"):
output.append("### revert for: {}".format(myfile))
output.append((sh % "hg revert {}".format(myfile)).output)
eq(
"\n".join(output),
r"""
### revert for: content1_content1_content1-tracked
no changes needed to content1_content1_content1-tracked
### revert for: content1_content1_content1-untracked
### revert for: content1_content1_content3-tracked
### revert for: content1_content1_content3-untracked
### revert for: content1_content1_missing-tracked
### revert for: content1_content1_missing-untracked
### revert for: content1_content2_content1-tracked
### revert for: content1_content2_content1-untracked
### revert for: content1_content2_content2-tracked
no changes needed to content1_content2_content2-tracked
### revert for: content1_content2_content2-untracked
### revert for: content1_content2_content3-tracked
### revert for: content1_content2_content3-untracked
### revert for: content1_content2_missing-tracked
### revert for: content1_content2_missing-untracked
### revert for: content1_missing_content1-tracked
### revert for: content1_missing_content1-untracked
file not managed: content1_missing_content1-untracked
### revert for: content1_missing_content3-tracked
### revert for: content1_missing_content3-untracked
file not managed: content1_missing_content3-untracked
### revert for: content1_missing_missing-tracked
### revert for: content1_missing_missing-untracked
content1_missing_missing-untracked: no such file in rev cbcb7147d2a0
### revert for: missing_content2_content2-tracked
no changes needed to missing_content2_content2-tracked
### revert for: missing_content2_content2-untracked
### revert for: missing_content2_content3-tracked
### revert for: missing_content2_content3-untracked
### revert for: missing_content2_missing-tracked
### revert for: missing_content2_missing-untracked
### revert for: missing_missing_content3-tracked
### revert for: missing_missing_content3-untracked
file not managed: missing_missing_content3-untracked
### revert for: missing_missing_missing-tracked
### revert for: missing_missing_missing-untracked
missing_missing_missing-untracked: no such file in rev cbcb7147d2a0""",
)
# check resulting directory against the --all run
# (There should be no difference)
eq(
dircontent(),
r"""
content1_content1_content1-tracked content1_content1_content1-untracked content1_content1_content3-tracked content1_content1_content3-untracked content1_content2_content1-tracked content1_content2_content1-untracked content1_content2_content2-tracked content1_content2_content2-untracked content1_content2_content3-tracked content1_content2_content3-untracked content1_missing_content1-tracked content1_missing_content1-untracked content1_missing_content3-tracked content1_missing_content3-untracked missing_content2_content2-tracked missing_content2_content2-untracked missing_content2_content3-tracked missing_content2_content3-untracked missing_missing_content3-tracked missing_missing_content3-untracked .gitignore
content1 content1_content1_content1-tracked
content1 content1_content1_content1-untracked
content1 content1_content1_content3-tracked
content3 content1_content1_content3-tracked.orig
content1 content1_content1_content3-untracked
content3 content1_content1_content3-untracked.orig
content1 content1_content1_missing-tracked
content1 content1_content1_missing-untracked
content2 content1_content2_content1-tracked
content1 content1_content2_content1-tracked.orig
content2 content1_content2_content1-untracked
content1 content1_content2_content1-untracked.orig
content2 content1_content2_content2-tracked
content2 content1_content2_content2-untracked
content2 content1_content2_content3-tracked
content3 content1_content2_content3-tracked.orig
content2 content1_content2_content3-untracked
content3 content1_content2_content3-untracked.orig
content2 content1_content2_missing-tracked
content2 content1_content2_missing-untracked
content1 content1_missing_content1-tracked
content1 content1_missing_content1-untracked
content3 content1_missing_content3-tracked
content3 content1_missing_content3-untracked
content2 missing_content2_content2-tracked
content2 missing_content2_content2-untracked
content2 missing_content2_content3-tracked
content3 missing_content2_content3-tracked.orig
content2 missing_content2_content3-untracked
content3 missing_content2_content3-untracked.orig
content2 missing_content2_missing-tracked
content2 missing_content2_missing-untracked
content3 missing_missing_content3-tracked
content3 missing_missing_content3-untracked""",
)
sh % "cd .."
# Revert to an ancestor of P2 during a merge (issue5052)
# -----------------------------------------------------
# (prepare the repository)
sh % "hg init issue5052"
sh % "cd issue5052"
sh % "echo '*\\.orig'" > ".gitignore"
sh % "echo 0" > "root"
sh % "hg ci -qAm C0"
sh % "echo 0" > "A"
sh % "hg ci -qAm C1"
sh % "echo 1" >> "A"
sh % "hg ci -qm C2"
sh % "hg up -q 0"
sh % "echo 1" > "B"
sh % "hg ci -qAm C3"
sh % "hg status --rev 'ancestor(.,2)' --rev 2" == "A A"
sh % "hg log -G -T '{rev} ({files})\\n'" == r"""
@ 3 (B)
│
│ o 2 (A)
│ │
│ o 1 (A)
├─╯
o 0 (.gitignore root)"""
# actual tests: reverting to something else than a merge parent
sh % "hg merge" == r"""
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)"""
sh % "hg status --rev 'p1()'" == "M A"
sh % "hg status --rev 'p2()'" == "A B"
sh % "hg status --rev 1" == r"""
M A
A B"""
sh % "hg revert --rev 1 --all" == r"""
reverting A
removing B"""
sh % "hg status --rev 1"
# From the other parent
sh % "hg up -C 'p2()'" == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"
sh % "hg merge" == r"""
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)"""
sh % "hg status --rev 'p1()'" == "M B"
sh % "hg status --rev 'p2()'" == "A A"
sh % "hg status --rev 1" == r"""
M A
A B"""
sh % "hg revert --rev 1 --all" == r"""
reverting A
removing B"""
sh % "hg status --rev 1"
sh % "cd .."
|
gpl-2.0
|
freedomDR/shiny-robot
|
src/GrayScaleTransformation/GrayLevelStratification.py
|
1
|
1106
|
import numpy as np
import matplotlib.pyplot as plt
import cv2
# Figure 1: two piecewise gray-level slicing transforms - highlight one intensity
# band and flatten everything else (left) vs. keep the other levels (right).
plt.figure(1)
plt.subplot(1,2,1)
x = [0,2,2,3.5,3.5,5]
y = [0.5,0.5,3,3,0.5,0.5]
plt.plot(x,y,'-')
plt.axis([0,5,0,5])
plt.gca().set_aspect(1)
plt.subplot(1,2,2)
x = [0,1.5,1.5,2.5,2.5,5]
y = [0,1.5,4,4,2.5,5]
plt.plot(x,y,'-')
plt.axis([0,5,0,5])
plt.gca().set_aspect(1)
plt.figure(2)
img = cv2.imread('../../ImageMaterial/DIP3/Fig0312(a)(kidney).tif',cv2.IMREAD_GRAYSCALE)
plt.subplot(1,3,1)
plt.imshow(img,cmap='Greys_r')
# Highlight an intensity band [left, right] centred on the image mean
# (gray-level slicing); getValue maps in-band pixels to 1 and the rest to 255.
mean = np.mean(img)
a,b = 3.9,4
left,right = mean-(np.max(img)-np.min(img))/a, mean+(np.max(img)-np.min(img))/b
def getValue(value):
if value >= left and value <= right:
return 1
else:
return 255
plt.subplot(1,3,2)
img1 = [[1*getValue(img[x][y])for y in range(img.shape[1])]for x in range(img.shape[0])]
plt.imshow(img1,cmap='Greys_r')
a,b = 5,5
left,right = mean-(np.max(img)-np.min(img))/a, mean+(np.max(img)-np.min(img))/b
plt.subplot(1,3,3)
img2 = [[img[x][y]*getValue(img[x][y])for y in range(img.shape[1])]for x in range(img.shape[0])]
plt.imshow(img2,cmap='Greys_r')
plt.show()
|
gpl-3.0
|
tcpcloud/openvstorage
|
ovs/dal/lists/brandinglist.py
|
1
|
1275
|
# Copyright 2014 Open vStorage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
BrandingList module
"""
from ovs.dal.datalist import DataList
from ovs.dal.dataobject import DataObjectList
from ovs.dal.hybrids.branding import Branding
class BrandingList(object):
"""
This BrandingList class contains various lists related to the Branding class
"""
@staticmethod
def get_brandings():
"""
Returns a list of all brandings
"""
brandings = DataList({'object': Branding,
'data': DataList.select.GUIDS,
'query': {'type': DataList.where_operator.AND,
'items': []}}).data
return DataObjectList(brandings, Branding)
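# (Illustrative usage only, not part of the original module; it assumes a
# configured Open vStorage DAL backend and that Branding hybrids expose a
# `guid` attribute, as other DAL objects do.)
if __name__ == '__main__':
    for branding in BrandingList.get_brandings():
        print(branding.guid)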
|
apache-2.0
|
zmeeey5/test_proj
|
node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py
|
1534
|
3426
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_filelist_paths = {
}
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
toplevel = params['options'].toplevel_dir
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, generator_dir, output_dir, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def GenerateOutput(target_list, target_dicts, data, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get('dependencies', []):
edges[target].append(dep)
targets_to_visit.append(dep)
try:
filepath = params['generator_flags']['output_dir']
except KeyError:
filepath = '.'
filename = os.path.join(filepath, 'dump.json')
f = open(filename, 'w')
json.dump(edges, f)
f.close()
print 'Wrote json to %s.' % filename
|
gpl-2.0
|
Allow2CEO/browser-ios
|
brave/node_modules/hashset-cpp/vendor/depot_tools/third_party/gsutil/gslib/addlhelp/prod.py
|
51
|
8580
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gslib.help_provider import HELP_NAME
from gslib.help_provider import HELP_NAME_ALIASES
from gslib.help_provider import HELP_ONE_LINE_SUMMARY
from gslib.help_provider import HelpProvider
from gslib.help_provider import HELP_TEXT
from gslib.help_provider import HelpType
from gslib.help_provider import HELP_TYPE
_detailed_help_text = ("""
<B>OVERVIEW</B>
If you use gsutil in large production tasks (such as uploading or
downloading many GB of data each night), there are a number of things
you can do to help ensure success. Specifically, this section discusses
how to script large production tasks around gsutil's resumable transfer
mechanism.
<B>BACKGROUND ON RESUMABLE TRANSFERS</B>
First, it's helpful to understand gsutil's resumable transfer mechanism,
and how your script needs to be implemented around this mechanism to work
reliably. gsutil uses the resumable transfer support in the boto library
when you attempt to upload or download a file larger than a configurable
threshold (by default, this threshold is 1MB). When a transfer fails
partway through (e.g., because of an intermittent network problem),
boto uses a randomized binary exponential backoff-and-retry strategy:
wait a random period between [0..1] seconds and retry; if that fails,
wait a random period between [0..2] seconds and retry; and if that
fails, wait a random period between [0..4] seconds, and so on, up to a
configurable number of times (the default is 6 times). Thus, the retry
actually spans a randomized period up to 1+2+4+8+16+32=63 seconds.
If the transfer fails each of these attempts with no intervening
progress, gsutil gives up on the transfer, but keeps a "tracker" file
for it in a configurable location (the default location is ~/.gsutil/,
in a file named by a combination of the SHA1 hash of the name of the
bucket and object being transferred and the last 16 characters of the
file name). When transfers fail in this fashion, you can rerun gsutil
at some later time (e.g., after the networking problem has been
resolved), and the resumable transfer picks up where it left off.
<B>SCRIPTING DATA TRANSFER TASKS</B>
To script large production data transfer tasks around this mechanism,
you can implement a script that runs periodically, determines which file
transfers have not yet succeeded, and runs gsutil to copy them. Below,
we offer a number of suggestions about how this type of scripting should
be implemented:
1. When resumable transfers fail without any progress 6 times in a row
over the course of up to 63 seconds, it probably won't work to simply
retry the transfer immediately. A more successful strategy would be to
have a cron job that runs every 30 minutes, determines which transfers
need to be run, and runs them. If the network experiences intermittent
problems, the script picks up where it left off and will eventually
succeed (once the network problem has been resolved).
2. If your business depends on timely data transfer, you should consider
implementing some network monitoring. For example, you can implement
a task that attempts a small download every few minutes and raises an
alert if the attempt fails for several attempts in a row (or more or less
frequently depending on your requirements), so that your IT staff can
investigate problems promptly. As usual with monitoring implementations,
you should experiment with the alerting thresholds, to avoid false
positive alerts that cause your staff to begin ignoring the alerts.
3. There are a variety of ways you can determine what files remain to be
transferred. We recommend that you avoid attempting to get a complete
listing of a bucket containing many objects (e.g., tens of thousands
or more). One strategy is to structure your object names in a way that
represents your transfer process, and use gsutil prefix wildcards to
request partial bucket listings. For example, if your periodic process
involves downloading the current day's objects, you could name objects
using a year-month-day-object-ID format and then find today's objects by
using a command like gsutil ls gs://bucket/2011-09-27-*. Note that it
is more efficient to have a non-wildcard prefix like this than to use
something like gsutil ls gs://bucket/*-2011-09-27. The latter command
actually requests a complete bucket listing and then filters in gsutil,
while the former asks Google Storage to return the subset of objects
whose names start with everything up to the *.
For data uploads, another technique would be to move local files from a "to
be processed" area to a "done" area as your script successfully copies files
to the cloud. You can do this in parallel batches by using a command like:
gsutil -m cp -R to_upload/subdir_$i gs://bucket/subdir_$i
where i is a shell loop variable. Make sure to check the shell $status
variable is 0 after each gsutil cp command, to detect if some of the copies
failed, and rerun the affected copies.
With this strategy, the file system keeps track of all remaining work to
be done.
4. If you have really large numbers of objects in a single bucket
(say hundreds of thousands or more), you should consider tracking your
objects in a database instead of using bucket listings to enumerate
the objects. For example this database could track the state of your
downloads, so you can determine what objects need to be downloaded by
your periodic download script by querying the database locally instead
of performing a bucket listing.
5. Make sure you don't delete partially downloaded files after a transfer
fails: gsutil picks up where it left off (and performs an MD5 check of
the final downloaded content to ensure data integrity), so deleting
partially transferred files will cause you to lose progress and make
more wasteful use of your network. You should also make sure whatever
process is waiting to consume the downloaded data doesn't get pointed
at the partially downloaded files. One way to do this is to download
into a staging directory and then move successfully downloaded files to
a directory where consumer processes will read them.
6. If you have a fast network connection, you can speed up the transfer of
large numbers of files by using the gsutil -m (multi-threading /
multi-processing) option. Be aware, however, that gsutil doesn't attempt to
keep track of which files were downloaded successfully in cases where some
files failed to download. For example, if you use multi-threaded transfers
to download 100 files and 3 failed to download, it is up to your scripting
process to determine which transfers didn't succeed, and retry them. A
periodic check-and-run approach like outlined earlier would handle this case.
If you use parallel transfers (gsutil -m) you might want to experiment with
the number of threads being used (via the parallel_thread_count setting
in the .boto config file). By default, gsutil uses 24 threads. Depending
on your network speed, available memory, CPU load, and other conditions,
this may or may not be optimal. Try experimenting with higher or lower
numbers of threads, to find the best number of threads for your environment.
""")
class CommandOptions(HelpProvider):
"""Additional help about using gsutil for production tasks."""
help_spec = {
# Name of command or auxiliary help info for which this help applies.
HELP_NAME : 'prod',
# List of help name aliases.
HELP_NAME_ALIASES : ['production', 'resumable', 'resumable upload',
'resumable transfer', 'resumable download',
'scripts', 'scripting'],
# Type of help:
HELP_TYPE : HelpType.ADDITIONAL_HELP,
# One line summary of this help.
HELP_ONE_LINE_SUMMARY : 'Scripting production data transfers with gsutil',
# The full help text.
HELP_TEXT : _detailed_help_text,
}
|
mpl-2.0
|
xcat2/confluent
|
confluent_server/confluent/netutil.py
|
2
|
12433
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this will implement noderange grammar
import confluent.exceptions as exc
import codecs
import netifaces
import struct
import eventlet.green.socket as socket
import eventlet.support.greendns
import os
getaddrinfo = eventlet.support.greendns.getaddrinfo
def mask_to_cidr(mask):
maskn = socket.inet_pton(socket.AF_INET, mask)
maskn = struct.unpack('!I', maskn)[0]
cidr = 32
while maskn & 0b1 == 0 and cidr > 0:
cidr -= 1
maskn >>= 1
return cidr
def cidr_to_mask(cidr):
return socket.inet_ntop(
socket.AF_INET, struct.pack('!I', (2**32 - 1) ^ (2**(32 - cidr) - 1)))
def ip_on_same_subnet(first, second, prefix):
if first.startswith('::ffff:') and '.' in first:
first = first.replace('::ffff:', '')
if second.startswith('::ffff:') and '.' in second:
second = second.replace('::ffff:', '')
addrinf = socket.getaddrinfo(first, None, 0, socket.SOCK_STREAM)[0]
fam = addrinf[0]
ip = socket.inet_pton(fam, addrinf[-1][0])
ip = int(codecs.encode(bytes(ip), 'hex'), 16)
addrinf = socket.getaddrinfo(second, None, 0, socket.SOCK_STREAM)[0]
if fam != addrinf[0]:
return False
txtaddr = addrinf[-1][0].split('%')[0]
oip = socket.inet_pton(fam, txtaddr)
oip = int(codecs.encode(bytes(oip), 'hex'), 16)
if fam == socket.AF_INET:
addrlen = 32
elif fam == socket.AF_INET6:
addrlen = 128
else:
raise Exception("Unknown address family {0}".format(fam))
mask = 2 ** prefix - 1 << (addrlen - prefix)
return ip & mask == oip & mask
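# (Example only, not part of the original module: a tiny self-check of the
# helpers above using a made-up /24 network.)
def _demo_netutil_helpers():
    assert mask_to_cidr('255.255.255.0') == 24
    assert cidr_to_mask(24) == '255.255.255.0'
    assert ip_on_same_subnet('192.168.1.10', '192.168.1.200', 24)
    assert not ip_on_same_subnet('192.168.1.10', '192.168.2.10', 24)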
def address_is_local(address):
for iface in netifaces.interfaces():
for i4 in netifaces.ifaddresses(iface).get(2, []):
cidr = mask_to_cidr(i4['netmask'])
if ip_on_same_subnet(i4['addr'], address, cidr):
return True
for i6 in netifaces.ifaddresses(iface).get(10, []):
cidr = int(i6['netmask'].split('/')[1])
laddr = i6['addr'].split('%')[0]
if ip_on_same_subnet(laddr, address, cidr):
return True
return False
_idxtoifnamemap = {}
def _rebuildidxmap():
_idxtoifnamemap.clear()
for iname in os.listdir('/sys/class/net'):
try:
ci = int(open('/sys/class/net/{0}/ifindex'.format(iname)).read())
_idxtoifnamemap[ci] = iname
except Exception: # there may be non-interface entries in /sys/class/net
pass
def myiptonets(svrip):
fam = netifaces.AF_INET
if ':' in svrip:
fam = netifaces.AF_INET6
relevantnic = None
for iface in netifaces.interfaces():
for addr in netifaces.ifaddresses(iface).get(fam, []):
addr = addr.get('addr', '')
addr = addr.split('%')[0]
if addresses_match(addr, svrip):
relevantnic = iface
break
else:
continue
break
return inametonets(relevantnic)
def _iftonets(ifidx):
if isinstance(ifidx, int):
_rebuildidxmap()
ifidx = _idxtoifnamemap.get(ifidx, None)
return inametonets(ifidx)
def inametonets(iname):
addrs = netifaces.ifaddresses(iname)
try:
addrs = addrs[netifaces.AF_INET]
except KeyError:
return
for addr in addrs:
ip = struct.unpack('!I', socket.inet_aton(addr['addr']))[0]
mask = struct.unpack('!I', socket.inet_aton(addr['netmask']))[0]
net = ip & mask
net = socket.inet_ntoa(struct.pack('!I', net))
yield (net, mask_to_cidr(addr['netmask']), addr['addr'])
# TODO(jjohnson2): have a method to arbitrate setting methods, to aid
# in correct matching of net.* based on parameters, mainly for pxe
# The scheme for pxe:
# For one: the candidate net.* should have pxe set to true, to help
# disambiguate from interfaces meant for bmc access
# bmc relies upon hardwaremanagement.manager, plus we don't collect
# that mac address
# the ip as reported by recvmsg to match the subnet of that net.* interface
# if switch and port available, that should match.
def get_nic_config(configmanager, node, ip=None, mac=None, ifidx=None,
serverip=None):
"""Fetch network configuration parameters for a nic
For a given node and interface, find and retrieve the pertinent network
configuration data. The desired configuration can be searched
either by ip or by mac.
:param configmanager: The relevant confluent.config.ConfigManager
instance.
:param node: The name of the node
:param ip: An IP address on the intended subnet
:param mac: The mac address of the interface
:param ifidx: The local index relevant to the network.
:returns: A dict of parameters, 'ipv4_gateway', ....
"""
# ip parameter *could* be the result of recvmsg with cmsg to tell
# pxe *our* ip address, or it could be the desired ip address
#TODO(jjohnson2): ip address, prefix length, mac address,
# join a bond/bridge, vlan configs, etc.
# also other nic criteria, physical location, driver and index...
nodenetattribs = configmanager.get_node_attributes(
node, 'net*').get(node, {})
cfgbyname = {}
for attrib in nodenetattribs:
segs = attrib.split('.')
if len(segs) == 2:
name = None
else:
name = segs[1]
if name not in cfgbyname:
cfgbyname[name] = {}
cfgbyname[name][segs[-1]] = nodenetattribs[attrib].get('value',
None)
cfgdata = {
'ipv4_gateway': None,
'ipv4_address': None,
'ipv4_method': None,
'prefix': None,
}
nets = None
needsvrip = False
if ifidx is not None:
dhcprequested = False
nets = list(_iftonets(ifidx))
if not nets:
cfgdata['ipv4_broken'] = True
if serverip is not None:
needsvrip = True
dhcprequested = False
nets = list(myiptonets(serverip))
genericmethod = 'static'
ipbynodename = None
try:
ipbynodename = socket.getaddrinfo(
node, 0, socket.AF_INET, socket.SOCK_DGRAM)[0][-1][0]
except Exception:
ipbynodename = None
if nets is not None:
candgws = []
candsrvs = []
for net in nets:
net, prefix, svrip = net
candsrvs.append(svrip)
cfgdata['deploy_server'] = svrip
for candidate in cfgbyname:
ipmethod = cfgbyname[candidate].get('ipv4_method', 'static')
if ipmethod == 'dhcp':
dhcprequested = True
continue
if ipmethod == 'firmwaredhcp':
genericmethod = ipmethod
candip = cfgbyname[candidate].get('ipv4_address', None)
if candip and '/' in candip:
candip, candprefix = candip.split('/')
if int(candprefix) != prefix:
continue
candgw = cfgbyname[candidate].get('ipv4_gateway', None)
if candip:
try:
if ip_on_same_subnet(net, candip, prefix):
cfgdata['ipv4_address'] = candip
cfgdata['ipv4_method'] = ipmethod
cfgdata['ipv4_gateway'] = cfgbyname[candidate].get(
'ipv4_gateway', None)
cfgdata['prefix'] = prefix
if ipbynodename and ipbynodename == candip:
cfgdata['matchesnodename'] = True
return cfgdata
except Exception as e:
cfgdata['error_msg'] = "Error trying to evaluate net.*ipv4_address attribute value '{0}' on {1}: {2}".format(candip, node, str(e))
elif candgw:
if ip_on_same_subnet(net, candgw, prefix):
candgws.append(candgw)
if dhcprequested:
if not cfgdata.get('ipv4_method', None):
cfgdata['ipv4_method'] = 'dhcp'
return cfgdata
        if ipbynodename is None:
return cfgdata
for net in nets:
net, prefix, svrip = net
if ip_on_same_subnet(net, ipbynodename, prefix):
cfgdata['matchesnodename'] = True
cfgdata['ipv4_address'] = ipbynodename
cfgdata['ipv4_method'] = genericmethod
cfgdata['prefix'] = prefix
break
for svr in candsrvs:
if ip_on_same_subnet(svr, ipbynodename, prefix):
cfgdata['deploy_server'] = svr
break
for gw in candgws:
if ip_on_same_subnet(gw, ipbynodename, prefix):
cfgdata['ipv4_gateway'] = gw
break
return cfgdata
if ip is not None:
prefixlen = get_prefix_len_for_ip(ip)
cfgdata['prefix'] = prefixlen
for setting in nodenetattribs:
if 'ipv4_gateway' not in setting:
continue
gw = nodenetattribs[setting].get('value', None)
if gw is None or not gw:
continue
if ip_on_same_subnet(ip, gw, prefixlen):
cfgdata['ipv4_gateway'] = gw
break
return cfgdata
def get_prefix_len_for_ip(ip):
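    """Determine the prefix length for an address by consulting the local routing table."""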
# for now, we'll use the system route table
# later may provide for configuration lookup to override the route
# table
ip = getaddrinfo(ip, 0, socket.AF_INET)[0][-1][0]
try:
ipn = socket.inet_aton(ip)
except socket.error: # For now, assume 64 for ipv6
return 64
# It comes out big endian, regardless of host arch
ipn = struct.unpack('>I', ipn)[0]
rf = open('/proc/net/route')
ri = rf.read()
rf.close()
ri = ri.split('\n')[1:]
for rl in ri:
if not rl:
continue
rd = rl.split('\t')
if rd[1] == '00000000': # default gateway, not useful for this
continue
# don't have big endian to look at, assume that it is host endian
maskn = struct.unpack('I', struct.pack('>I', int(rd[7], 16)))[0]
netn = struct.unpack('I', struct.pack('>I', int(rd[1], 16)))[0]
if ipn & maskn == netn:
nbits = 0
while maskn:
nbits += 1
maskn = maskn << 1 & 0xffffffff
return nbits
raise exc.NotImplementedException("Non local addresses not supported")
def addresses_match(addr1, addr2):
"""Check two network addresses for similarity
    Is it zero padded in one place, not zero padded in another? Is one place by name and another by IP?
    Is one context getting a normal IPv4 address and another getting IPv4 in IPv6 notation?
    This function examines the two given addresses, performing the required conversions to compare them for equivalency.
:param addr1:
:param addr2:
:return: True if the given addresses refer to the same thing
"""
for addrinfo in socket.getaddrinfo(addr1, 0, 0, socket.SOCK_STREAM):
rootaddr1 = socket.inet_pton(addrinfo[0], addrinfo[4][0])
if addrinfo[0] == socket.AF_INET6 and rootaddr1[:12] == b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff':
# normalize to standard IPv4
rootaddr1 = rootaddr1[-4:]
for otherinfo in socket.getaddrinfo(addr2, 0, 0, socket.SOCK_STREAM):
otheraddr = socket.inet_pton(otherinfo[0], otherinfo[4][0])
if otherinfo[0] == socket.AF_INET6 and otheraddr[:12] == b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff':
otheraddr = otheraddr[-4:]
if otheraddr == rootaddr1:
return True
return False
|
apache-2.0
|
thnkloud9/Tandem
|
server/scheduler/dialer.py
|
1
|
1504
|
import logging
from bson.objectid import ObjectId
from twilio.rest import TwilioRestClient
from pymongo import MongoClient
ACCOUNT_SID = "ACd2d64bee9b5d72522c0fa5e33c7aa2fc"
AUTH_TOKEN = "8f54d45265a4fe99157003841e499388"
client = MongoClient('localhost', 27017)
db = client['tandem']
client = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN)
def getMobile(user_id):
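    """Return the user's mobile number from MongoDB, or None if the user is not found."""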
users = db['users']
lookup = {'_id': ObjectId(user_id)}
user = users.find_one(lookup)
logging.info('looking up {0}'.format(user_id))
if not user:
return None
return str(user['mobile'])
def verify(user_id):
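    """Place a Twilio verification call to the user's mobile and return the call SID (None if no mobile)."""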
mobile = getMobile(user_id)
if not mobile:
return None
call = client.calls.create(
to='+' + mobile,
from_="+4923198928606",
url="https://tandem.marklewis.me/mobile/verify",
)
return call.sid
def startPracticeSession(practice_session_id):
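    """Call the practice session owner via Twilio to play the practice set; returns the call SID."""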
practice_sessions = db['practice_sessions']
lookup = {'_id': ObjectId(practice_session_id)}
practice_session = practice_sessions.find_one(lookup)
mobile = getMobile(practice_session['submitted_by'])
logging.info('playing practice_set {0} for user {1} using {2}'.format(practice_session_id, practice_session['submitted_by'], mobile))
call = client.calls.create(
to='+' + mobile,
from_="+4923198928606",
url="https://tandem.marklewis.me/mobile/voice?practice_session={0}".format(practice_session_id),
)
return call.sid
|
gpl-2.0
|
40223139/203739test
|
static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/base.py
|
603
|
4652
|
#!/usr/bin/env python
## https://bitbucket.org/pygame/pygame/raw/2383b8ab0e2273bc83c545ab9c18fee1f3459c64/pygame/base.py
'''Pygame core routines
Contains the core routines that are used by the rest of the
pygame modules. Its routines are merged directly into the pygame
namespace. This mainly includes the auto-initialization `init` and
`quit` routines.
There is a small module named `locals` that also gets merged into
this namespace. This contains all the constants needed by pygame.
Object constructors also get placed into this namespace, you can
call functions like `Rect` and `Surface` to create objects of
that type. As a convenience, you can import the members of
pygame.locals directly into your module's namespace with::
from pygame.locals import *
Most of the pygame examples do this if you'd like to take a look.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import atexit
import sys
#import SDL
_quitfunctions = []
class error(RuntimeError):
pass
def init():
'''Autoinitialize all imported pygame modules.
Initialize all imported pygame modules. Includes pygame modules
that are not part of the base modules (like font and image).
It does not raise exceptions, but instead silently counts which
modules have failed to init. The return argument contains a count
of the number of modules initialized, and the number of modules
that failed to initialize.
You can always initialize the modules you want by hand. The
modules that need it have an `init` and `quit` routine built in,
which you can call directly. They also have a `get_init` routine
which you can use to doublecheck the initialization. Note that
the manual `init` routines will raise an exception on error. Be
aware that most platforms require the display module to be
initialized before others. This `init` will handle that for you,
but if you initialize by hand, be aware of this constraint.
    As with the manual `init` routines, it is safe to call this
`init` as often as you like.
:rtype: int, int
:return: (count_passed, count_failed)
'''
success = 0
fail = 0
#SDL.SDL_Init(SDL.SDL_INIT_EVENTTHREAD | SDL.SDL_INIT_TIMER)
if _video_autoinit():
success += 1
else:
fail += 1
for mod in sys.modules.values():
if hasattr(mod, '__PYGAMEinit__') and callable(mod.__PYGAMEinit__):
try:
mod.__PYGAMEinit__()
success += 1
except:
fail += 1
return success, fail
def register_quit(func):
'''Routine to call when pygame quits.
The given callback routine will be called when pygame is
quitting. Quit callbacks are served on a 'last in, first out'
basis.
'''
_quitfunctions.append(func)
def _video_autoquit():
    # SDL is not imported in this port (the import above is commented out),
    # so the shutdown calls are stubbed out to avoid a NameError at exit.
    #if SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
    #    SDL.SDL_QuitSubSystem(SDL.SDL_INIT_VIDEO)
    pass
def _video_autoinit():
return 1
#if not SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
# SDL.SDL_InitSubSystem(SDL.SDL_INIT_VIDEO)
# SDL.SDL_EnableUNICODE(1)
#return 1
def _atexit_quit():
while _quitfunctions:
func = _quitfunctions.pop()
func()
_video_autoquit()
#SDL.SDL_Quit()
def get_sdl_version():
'''Get the version of the linked SDL runtime.
:rtype: int, int, int
:return: major, minor, patch
'''
#v = SDL.SDL_Linked_Version()
#return v.major, v.minor, v.patch
return None, None, None
def quit():
'''Uninitialize all pygame modules.
Uninitialize all pygame modules that have been initialized. Even
if you initialized the module by hand, this `quit` will
uninitialize it for you.
All the pygame modules are uninitialized automatically when your
    program exits, so you will usually not need this routine. If your
    program plans to keep running after it is done with pygame, then this
    would be a good time to make this call.
'''
_atexit_quit()
def get_error():
'''Get current error message.
SDL maintains an internal current error message. This message is
usually given to you when an SDL related exception occurs, but
sometimes you may want to call this directly yourself.
:rtype: str
'''
#return SDL.SDL_GetError()
return ''
def _rgba_from_obj(obj):
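    '''Normalize a tuple or list color value to a 4-tuple (r, g, b, a), returning None if it cannot be interpreted.'''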
if not type(obj) in (tuple, list):
return None
if len(obj) == 1:
return _rgba_from_obj(obj[0])
elif len(obj) == 3:
return (int(obj[0]), int(obj[1]), int(obj[2]), 255)
elif len(obj) == 4:
return obj
else:
return None
atexit.register(_atexit_quit)
|
gpl-3.0
|
BernhardPosselt/fish
|
listentothefish/listentothefish/homepage/models.py
|
2
|
1084
|
from django.db import models
from django.utils.translation import ugettext as _
from django.template.defaultfilters import date as _date
class Event(models.Model):
date = models.DateTimeField(_('Datum'))
title = models.CharField(_('Titel'), max_length=250)
place = models.CharField(_('Ort'), max_length=250)
short_description = models.CharField(_('Kurzbeschreibung'), max_length=50)
description = models.TextField(_('Beschreibung'))
last_modified = models.DateTimeField(_('Letzte Bearbeitung'), auto_now=True)
image = models.ImageField(_('Bild'), upload_to='events', blank=True)
type = models.ForeignKey('EventType')
def __str__(self):
return self.title
def formatted_date(self):
return _date(self.date, 'D')
class EventType(models.Model):
name = models.CharField(_('Vortragstyp'), max_length=25)
color = models.CharField(_('Farbe (zB. #225566 oder red)'), max_length=25)
def __str__(self):
return self.name
class Meta:
verbose_name = 'Eventart'
verbose_name_plural = 'Eventarten'
|
gpl-3.0
|
tommilligan/pyqubes
|
setup.py
|
1
|
1849
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='pyqubes',
version='0.0.3',
license='Apache License 2.0',
url='https://github.com/tommilligan/pyqubes/',
author='Tom Milligan',
author_email='[email protected]',
description="QubesOS dom0 automation in Python",
keywords='qubes qubesos QubesOS wrapper recipe dom0 vm templatevm appvm',
packages=find_packages(exclude=['tests']),
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Natural Language :: English',
'Topic :: Desktop Environment',
'Topic :: System :: Installation/Setup'
],
zip_safe=False,
platforms='any',
install_requires=[
'six >= 1.10.0'
],
tests_require=['nose2 >= 0.6.5'],
test_suite='nose2.collector.collector',
    # Install these with "pip install -e pyqubes[dev]"
extras_require={
'dev': [
'sphinx >= 1.5.3',
'sphinx-argparse >= 0.1.17',
'sphinx_rtd_theme >= 0.1.9',
'codeclimate-test-reporter >= 0.2.1',
'cov-core >= 1.15.0',
'nose2 >= 0.6.5',
'mock >= 2.0.0'
]
},
entry_points={
'console_scripts': [
]
},
)
|
apache-2.0
|
hpsilva/profitpy
|
profit/lib/widgets/plot.py
|
18
|
41789
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase <[email protected]>
# Distributed under the terms of the GNU General Public License v2
##
#
# This module defines the Plot class for display of plots and
# associated controls.
#
##
from PyQt4.QtCore import QRectF, QString, QTimer, QVariant
from PyQt4.QtCore import Qt, pyqtSignature
from PyQt4.QtGui import QBrush, QColor, QColorDialog, QFont, QFontDialog
from PyQt4.QtGui import QStandardItem, QStandardItemModel, QMenu, QPen, QFrame
from PyQt4.Qwt5 import QwtLegend, QwtPicker, QwtPlot, QwtPlotCurve
from PyQt4.Qwt5 import QwtPlotGrid, QwtPlotPicker, QwtPlotZoomer, QwtPainter
from PyQt4.Qwt5 import QwtPlotMarker, QwtPlotPanner, QwtSymbol, QwtText
from ib.ext.TickType import TickType
from profit.lib import Settings, Signals, defaults
from profit.lib.gui import ValueColorItem, colorIcon, complementColor
from profit.lib.widgets.plotdatadialog import PlotDataDialog
from profit.lib.widgets.plotitemdialog import PlotItemDialog
from profit.lib.widgets.ui_plot import Ui_Plot
allAxes = \
xBottom, xTop, yRight, yLeft = \
QwtPlot.xBottom, QwtPlot.xTop, QwtPlot.yRight, QwtPlot.yLeft
def changePen(getr, setr, parent):
""" Allow the user to change a pen with a PlotItemDialog.
@param getr callable that returns current pen
@param setr callable to set selected pen if dialog is accepted
@param parent ancestor of dialog
@return new pen if dialog is accepted, otherwise None
"""
oldpen = getr()
dlg = PlotItemDialog(oldpen, parent)
if dlg.exec_() == dlg.Accepted:
newpen = QPen(dlg.selectedPen)
setr(newpen)
return newpen
def changeColor(getr, setr, parent):
""" Allow the user to change a color with a QColorDialog.
@param getr callable that returns current color
@param setr callable to set selected color if dialog is accepted
@param parent ancestor of dialog
@return new color if dialog is accepted, otherwise None
"""
oldcolor = QColor(getr())
newcolor = QColorDialog.getColor(oldcolor, parent)
if newcolor.isValid():
setr(newcolor)
return newcolor
class PlotCurve(QwtPlotCurve):
""" Specialized plot curve.
"""
dataMarker = None
settingsLoaded = False
def updateLegend(self, legend, enable=False):
""" Framework hook to update plot legend with this curve.
@param legend QwtLegend instance
@param enable=False must be true to include this curve in legend
@return None
"""
if self.isVisible() and enable:
QwtPlotCurve.updateLegend(self, legend)
class PlotGrid(QwtPlotGrid):
""" Specalized plot grid.
QwtPlotGrid instances will not draw their minor grids if the major
grid is also not enabled. This class reimplements 'draw' and
'drawLines' to overcome this limitation. Code for both was taken
from the Qwt sources.
"""
def __init__(self):
""" Constructor.
"""
QwtPlotGrid.__init__(self)
self.enableX(False)
self.enableY(False)
self.enableXMin(False)
self.enableYMin(False)
def draw(self, painter, mx, my, rect):
""" Draws minor and major grids.
@param painter QPainter instance
@param mx QwtScaleMap instance
@param my QwtScaleMap instance
@param rect QRect instance
@return None
"""
painter.setPen(self.minPen())
sdx = self.xScaleDiv()
sdy = self.yScaleDiv()
if self.xMinEnabled():
self.drawLines(
painter, rect, Qt.Vertical, mx, sdx.ticks(sdx.MinorTick))
self.drawLines(
painter, rect, Qt.Vertical, mx, sdx.ticks(sdx.MediumTick))
if self.yMinEnabled():
self.drawLines(
painter, rect, Qt.Horizontal, my, sdy.ticks(sdy.MinorTick))
self.drawLines(
painter, rect, Qt.Horizontal, my, sdy.ticks(sdy.MediumTick))
painter.setPen(self.majPen())
if self.xEnabled():
self.drawLines(
painter, rect, Qt.Vertical, mx, sdx.ticks(sdx.MajorTick))
if self.yEnabled():
self.drawLines(
painter, rect, Qt.Horizontal, my, sdy.ticks(sdy.MajorTick))
def drawLines(self, painter, rect, orientation, scalemap, values):
""" Draws specified lines.
@param painter QPainter instance
@param rect QRect instance
@param orientation Qt.Horizontal or Qt.Vertical
@param scalemap QwtScaleMap instance
@param values list of x or y values for line drawing
@return None
"""
x1 = rect.left()
x2 = rect.right()
y1 = rect.top()
y2 = rect.bottom()
for v in values:
value = scalemap.transform(v)
if orientation == Qt.Horizontal:
if ((value >= y1) and (value <= y2)):
QwtPainter.drawLine(painter, x1, value, x2, value)
else:
if ((value >= x1) and (value <= x2)):
QwtPainter.drawLine(painter, value, y1, value, y2)
class PlotDataMarker(QwtPlotMarker):
""" Specialized plot data marker.
"""
def __init__(self):
QwtPlotMarker.__init__(self)
def cloneFromValue(self, curve, x, y):
""" Creates and returns new plot marker similar to this one.
@param curve QwtPlotCurve instance
@param x marker x value
@param y marker y value
@return new PlotDataMarker instance
"""
clone = type(self)()
clone.setLineStyle(self.lineStyle())
clone.setLinePen(self.linePen())
clone.setSymbol(self.symbol())
clone.setAxis(curve.xAxis(), curve.yAxis())
clone.setValue(x, y)
return clone
def restyleFrom(self, other):
""" Matches the style of this instance given an example.
@param other QwtPlotMarker instance
@return None
"""
self.setLineStyle(other.lineStyle())
self.setLinePen(other.linePen())
self.setSymbol(other.symbol())
class PlotPanner(QwtPlotPanner):
""" Stub for future implementation.
"""
def __init__(self, canvas):
QwtPlotPanner.__init__(self, canvas)
self.setMouseButton(Qt.MidButton)
class PlotPicker(QwtPlotPicker):
""" Stub for future implementation.
"""
def __init__(self, canvas):
QwtPlotPicker.__init__(
self, xBottom, yRight, self.NoSelection, self.CrossRubberBand,
self.AlwaysOn, canvas)
def trackerText(self, pos):
pos = self.invTransform(pos)
band = self.rubberBand()
if band == self.HLineRubberBand:
label = '%.3f' % pos.y()
elif band == self.VLineRubberBand:
label = '%.3f' % pos.x()
else:
label = '%i, %.3f' % (pos.x(), pos.y(), )
return QwtText(label)
class PlotZoomer(QwtPlotZoomer):
""" Stub for future implementation.
"""
def __init__(self, canvas):
QwtPlotZoomer.__init__(
self, xBottom, yRight, self.DragSelection, self.AlwaysOff, canvas)
class Legend(QwtLegend):
""" Stub for future implementation.
"""
class ControlTreeValueItem(QStandardItem, ValueColorItem):
def __init__(self, text):
QStandardItem.__init__(self, text)
self.setEditable(False)
self.setTextAlignment(Qt.AlignVCenter|Qt.AlignRight)
def setText(self, text):
try:
v = float(self.text())
c = float(text)
except (ValueError, ):
pass
else:
if c != v: # explicitly ignore unchanged values
self.setForeground(self.compMap[cmp(c, v)])
QStandardItem.setText(self, text)
class ControlTreeItem(QStandardItem):
""" Self-configuring control tree item.
"""
def __init__(self, text, data, key, checkable=True):
""" Constructor.
@param text value for this item
@param data reference to data series for this item
"""
QStandardItem.__init__(self, text)
if checkable:
self.setCheckable(True)
self.setCheckState(Qt.Unchecked)
self.setEditable(False)
self.curve = PlotCurve(text)
self.curve.setYAxis(yRight)
self.curve.setVisible(False)
self.data = data
self.key = key
def isChecked(self):
""" True if this item is checked.
"""
return self.checkState() == Qt.Checked
def name(self):
""" Name of item including parent names if any.
"""
names = []
while self:
names.append(getattr(self, 'key', str(self.text())))
self = self.parent()
return str.join('/', reversed(names))
def setColor(self, color):
""" Sets the icon and color for this item.
@param color QColor instance
@return None
"""
self.color = color
self.setIcon(colorIcon(color))
class Plot(QFrame, Ui_Plot):
""" Plot container.
"""
def __init__(self, parent=None):
""" Initializer.
@param parent ancestor of this widget
"""
QFrame.__init__(self, parent)
self.setupUi(self)
self.settings = Settings()
self.settings.beginGroup(self.settings.keys.plots)
self.setupOptionsMenu()
self.setupPlotsMenu()
self.setupPlot()
def setupOptionsMenu(self):
""" Configure the options button menu.
@return None
"""
self.dataDialog = None
optionsButton = self.optionsButton
pop = QMenu(optionsButton)
optionsButton.setMenu(pop)
pop.addAction(self.actionDrawMajorX)
pop.addAction(self.actionDrawMajorY)
pop.addAction(self.actionChangeMajorGridStyle)
pop.addSeparator()
pop.addAction(self.actionDrawMinorX)
pop.addAction(self.actionDrawMinorY)
pop.addAction(self.actionChangeMinorGridStyle)
pop.addSeparator()
pop.addAction(self.actionShowDataDialog)
pop.addAction(self.actionDrawLegend)
pop.addAction(self.actionChangeCanvasColor)
def setupPlotsMenu(self):
""" Configure the plots button menu.
@return None
"""
plotButton = self.plotButton
pop = QMenu(plotButton)
plotButton.setMenu(pop)
pop.addAction(self.actionNewPlot)
pop.addAction(self.actionClosePlot)
pop.addSeparator()
pop.addAction(self.actionSyncWithData)
def setupPlot(self):
""" Configure the plot widget.
@return None
"""
pen = QPen(Qt.black)
plot = self.plot
plot.setFrameStyle(plot.NoFrame|plot.Plain)
plot.insertLegend(Legend(), plot.LeftLegend)
canvas = plot.canvas()
canvas.setFrameStyle(plot.NoFrame|plot.Plain)
layout = plot.plotLayout()
layout.setCanvasMargin(0)
layout.setAlignCanvasToScales(True)
self.grid = PlotGrid()
self.grid.attach(plot)
self.panner = PlotPanner(canvas)
self.zoomer = PlotZoomer(canvas)
self.zoomer.setRubberBandPen(pen)
self.picker = PlotPicker(canvas)
self.picker.setTrackerPen(pen)
self.connect(self.zoomer, Signals.zoomed, self.on_zoomer_zoomed)
self.enableAutoScale()
def setSessionPlot(self, session, collection, key, *indexes):
""" Associate a session with this instance.
@param session Session instance
@param key id of ticker as integer
@param *indexes unused
@return None
"""
self.controlsTreeItems = []
self.highlightMarkers = []
self.session = session
self.collection = collection
self.key = key
settings = self.settings
name = self.plotName()
statekey = '%s/%s' % (name, settings.keys.splitstate)
state = settings.value(statekey, defaults.rightSplitterState())
self.plotSplitter.restoreState(state.toByteArray())
self.setupTree()
self.loadGrids()
self.loadSelections()
self.loadCanvasColor()
self.loadLegend()
self.updateAxis()
scaler = self.plot.axisScaleEngine(xBottom)
scaler.setMargins(0.0, 0.05)
axisactions = [self.actionChangeAxesFont, self.actionChangeAxesColor]
for widget in self.axisWidgets():
widget.addActions(axisactions)
widget.setContextMenuPolicy(Qt.ActionsContextMenu)
color = settings.value('%s/axiscolor' % name)
if color.isValid():
self.setAxisColor(QColor(color))
font = settings.value('%s/axisfont' % name)
if font.isValid():
self.setAxisFont(QFont(font))
self.plot.replot()
if settings.value('%s/datadialog' % name).toBool():
## tab might not be available
QTimer.singleShot(500, self.actionShowDataDialog.trigger)
session.registerMeta(self)
def setupTree(self):
""" Configure the model and initial items for this instance.
@return None
"""
tree = self.controlsTree
self.controlsTreeModel = model = QStandardItemModel(self)
tree.setModel(model)
model.setHorizontalHeaderLabels(['Line', 'Value'])
tree.sortByColumn(0, Qt.AscendingOrder)
try:
ticker = self.collection[self.key]
except (KeyError, TypeError, ):
pass
else:
for field, series in ticker.series.items():
self.addSeries(TickType.getField(field), series)
self.connect(model, Signals.standardItemChanged,
self.on_controlsTree_itemChanged)
for col in range(model.columnCount()):
tree.resizeColumnToContents(col)
tree.addActions(
[self.actionChangeCurveStyle,
self.actionChangeDataMarker,
self.actionChangeCurveAxisX,
self.actionChangeCurveAxisY,])
tree.expandAll()
def addSeries(self, name, series, parent=None, items=[], checkable=True):
""" Creates new controls and curve for an individual series.
@param name series key
@return None
"""
try:
name + ()
except (TypeError, ):
key = name
else:
key = '/'.join(name)
name = name[0]
if parent is None:
parent = self.controlsTreeModel.invisibleRootItem()
item = ControlTreeItem(name, series, key, checkable=checkable)
self.controlsTreeItems.append(item)
if not items:
items = [ControlTreeValueItem(''), ]
parent.appendRow([item, ] + items)
if checkable:
item.setColor(self.loadItemPen(item).color())
for index in getattr(series, 'indexes', []):
self.addSeries(index.key, index, parent=item)
self.loadSelection(item)
return item
def anyCheckedItems(self):
""" True if any control is checked.
"""
return bool(self.checkedItems())
def axisWidgets(self):
""" Yields each plot axis widget.
"""
for axis in allAxes:
yield self.plot.axisWidget(axis)
def checkedItems(self):
""" Sequence of checked controls.
"""
return [item for item in self.controlsTreeItems if item.isChecked()]
def checkedNames(self):
""" Sequence of checked control names.
"""
return [self.itemName(item) for item in self.checkedItems()]
def on_zoomer_zoomed(self, rect):
""" Sets autoscaling mode when plot is zoomed to its base.
@param rect ignored
@return None
"""
if not self.zoomer.zoomRectIndex():
self.enableAutoScale()
def enableAutoScale(self):
""" Sets autoscaling mode on all four axes.
@return None
"""
for axis in allAxes:
self.plot.setAxisAutoScale(axis)
def enableCurve(self, item, enable=True):
""" Sets the visibility and style of a plot curve.
@param item tree widget item
        @param enable sets curve visible if True, otherwise invisible
@return None
"""
curve = item.curve
curve.hide()
plot = self.plot
legend = plot.legend()
drawLegend = self.actionDrawLegend
if enable:
if not curve.settingsLoaded:
self.loadCurve(self.itemName(item), curve)
curve.setData(item.data.x, item.data.y)
curve.attach(plot)
if self.actionDrawLegend.isChecked():
curve.updateLegend(legend, True)
curve.show()
else:
legend.remove(curve)
curve.detach()
self.emit(Signals.enableCurve, item, enable)
checked = self.anyCheckedItems()
self.actionDrawLegend.setEnabled(checked)
if not checked:
legend.clear()
legend.hide()
plot.updateAxes()
plot.replot()
def getAxisColor(self):
""" Returns the foreground color of the axis widgets.
@return QColor instance
"""
widget = self.referenceAxisWidget()
palette = widget.palette()
return palette.color(palette.WindowText)
def itemName(self, item):
""" Name for given item, including name of this plot.
@param item ControlTreeItem instance
@return name full item name including plot name
"""
return '%s/%s' % (self.plotName(), item.name())
def loadCanvasColor(self):
""" Reads and sets the canvas color from saved settings.
@return None
"""
color = self.settings.value(
'%s/canvascolor' % self.plotName(), defaults.canvasColor())
self.plot.setCanvasBackground(QColor(color))
def loadCurve(self, name, curve):
""" Reads and configures a plot curve.
@param name of curve
@param curve QwtPlotCurve instance
@return None
"""
getv = self.settings.value
curve.setBrush(QBrush(getv('%s/brush' % name, QBrush())))
curve.setPen(QPen(getv('%s/pen' % name, QPen())))
curve.setStyle(curve.CurveStyle(
getv('%s/style' % name, QVariant(curve.Lines)).toInt()[0]))
curve.setBaseline(
getv('%s/baseline' % name, QVariant(0.0)).toDouble()[0])
curve.setCurveAttribute(
curve.Inverted, getv('%s/inverted' % name).toBool())
curve.setCurveAttribute(
curve.Fitted, getv('%s/fitted' % name).toBool())
curve.setPaintAttribute(
curve.PaintFiltered, getv('%s/filtered' % name).toBool())
curve.setPaintAttribute(
curve.ClipPolygons, getv('%s/clippoly' % name).toBool())
curve.setXAxis(
QwtPlot.Axis(getv('%s/xaxis' % name, xBottom).toInt()[0]))
curve.setYAxis(
QwtPlot.Axis(getv('%s/yaxis' % name, yRight).toInt()[0]))
def applySymbol(symname, symobj):
symobj.setBrush(QBrush(getv('%s/brush' % symname, QBrush())))
symobj.setPen(QPen(getv('%s/pen' % symname, QPen())))
style = getv('%s/style' % symname, QVariant(symobj.NoSymbol))
symobj.setStyle(symobj.Style(style.toInt()[0]))
symobj.setSize(getv('%s/size' % symname).toSize())
applySymbol('%s/symbol' % name, curve.symbol())
curve.dataMarker = marker = PlotDataMarker()
marksym = QwtSymbol()
applySymbol('%s/dataselect/symbol' % name, marksym)
marker.setSymbol(marksym)
markstyle = getv('%s/dataselect/style' % name, PlotDataMarker.VLine)
marker.setLineStyle(marker.LineStyle(markstyle.toInt()[0]))
marker.setLinePen(QPen(getv('%s/dataselect/pen' % name, Qt.red)))
curve.settingsLoaded = True
def loadGrids(self):
""" Reads and sets the major and minor grid pens and visibility.
@return None
"""
name = self.plotName()
grid = self.grid
getv = self.settings.value
pen = getv('%s/major/pen' % name, defaults.majorGridPen())
grid.setMajPen(QPen(pen))
pen = getv('%s/minor/pen' % name, defaults.minorGridPen())
grid.setMinPen(QPen(pen))
items = [('%s/major/x/enabled', self.actionDrawMajorX),
('%s/major/y/enabled', self.actionDrawMajorY),
('%s/minor/x/enabled', self.actionDrawMinorX),
('%s/minor/y/enabled', self.actionDrawMinorY)]
for key, action in items:
v = getv(key % name)
if not v.isValid() or v.toBool():
action.trigger()
def loadItemPen(self, item):
""" Creates a pen from saved settings.
@param item ControlTreeItem instance
@return QPen instance
"""
pen = self.settings.value('%s/pen' % self.itemName(item))
if pen.isValid():
pen = QPen(pen)
else:
pen = defaults.itemPen(item.name())
return pen
def loadLegend(self):
""" Restores the plot legend visibility from saved settings.
"""
key = '%s/legend/enabled' % self.plotName()
if self.settings.value(key).toBool():
self.actionDrawLegend.trigger()
def loadSelection(self, item):
""" Restores an item check state and pen from saved settings.
"""
key = '%s/checkeditems' % self.plotName()
if self.itemName(item) in self.settings.valueLoad(key, ''):
item.setCheckState(Qt.Checked)
item.setColor(self.loadItemPen(item).color())
def loadSelections(self):
""" Restores each control tree item check state and pen.
"""
for item in self.controlsTreeItems:
self.loadSelection(item)
def saveSelections(self):
""" Saves the selected control item names.
"""
key = '%s/checkeditems' % self.plotName()
names = self.checkedNames()
if names:
# don't save an empty list because the user might be
# closing an empty plot that really does have selections
# saved in the settings.
self.settings.setValueDump(key, names)
def plotName(self):
""" The name of this plot.
"""
try:
return '%s/%s' % (self.key, self.objectName())
except (AttributeError, ):
return 'noname/%s' % (self.objectName(), )
def referenceAxisWidget(self):
""" Returns a referece axis widget.
"""
return self.plot.axisWidget(xBottom)
def saveCanvasColor(self):
""" Saves the canvas background color to user settings.
@return None
"""
prefix = self.plotName()
self.settings.setValue(
'%s/canvascolor' % prefix, self.plot.canvasBackground())
def saveCurve(self, name, curve):
""" Saves visual settings of a curve.
@param name curve name, used as settings key
@param curve QwtPlotCurve instance
@return None
"""
setv = self.settings.setValue
setv('%s/brush' % name, curve.brush())
setv('%s/pen' % name, curve.pen())
setv('%s/style' % name, curve.style())
setv('%s/baseline' % name, curve.baseline())
setv('%s/inverted' % name,
curve.testCurveAttribute(curve.Inverted))
setv('%s/fitted' % name,
curve.testCurveAttribute(curve.Fitted))
setv('%s/filtered' % name,
curve.testPaintAttribute(curve.PaintFiltered))
setv('%s/clippoly' % name,
curve.testPaintAttribute(curve.ClipPolygons))
setv('%s/xaxis' % name, curve.xAxis())
setv('%s/yaxis' % name, curve.yAxis())
name = '%s/symbol' % name
symbol = curve.symbol()
setv('%s/brush' % name, symbol.brush())
setv('%s/pen' % name, symbol.pen())
setv('%s/style' % name, symbol.style())
setv('%s/size' % name, symbol.size())
def saveMarker(self, name, marker):
""" Saves visual settings of a marker.
@param name curve name, used as settings key
@param curve QwtPlotMarker instance
@return None
"""
setv = self.settings.setValue
setv('%s/dataselect/style' % name, marker.lineStyle())
setv('%s/dataselect/pen' % name, marker.linePen())
symname = '%s/dataselect/symbol' % name
symbol = marker.symbol()
setv('%s/brush' % symname, symbol.brush())
setv('%s/pen' % symname, symbol.pen())
setv('%s/style' % symname, symbol.style())
setv('%s/size' % symname, symbol.size())
def saveLegend(self):
""" Saves the visibility of the plot legend to user settings.
@return None
"""
key = '%s/legend/enabled' % self.plotName()
self.settings.setValue(key, self.actionDrawLegend.isChecked())
def saveMajorX(self):
""" Saves the state and pen of the major grid x axis.
@return None
"""
name = self.plotName()
setv = self.settings.setValue
setv('%s/major/x/enabled' % name,
self.actionDrawMajorX.isChecked())
setv('%s/major/pen' % name, self.grid.majPen())
def saveMajorY(self):
""" Saves the state and pen of the major grid y axis.
@return None
"""
name = self.plotName()
setv = self.settings.setValue
setv('%s/major/y/enabled' % name,
self.actionDrawMajorY.isChecked())
setv('%s/major/pen' % name, self.grid.majPen())
def saveMinorX(self):
""" Saves the state and pen of the minor grid x axis.
@return None
"""
name = self.plotName()
setv = self.settings.setValue
setv('%s/minor/x/enabled' % name,
self.actionDrawMinorX.isChecked())
setv('%s/minor/pen' % name, self.grid.minPen())
def saveMinorY(self):
""" Saves the state and pen of the minor grid y axis.
@return None
"""
name = self.plotName()
setv = self.settings.setValue
setv('%s/minor/y/enabled' % name,
self.actionDrawMinorY.isChecked())
setv('%s/minor/pen' % name, self.grid.minPen())
def setAxisColor(self, color):
""" Sets the axis widgets foreground and text color.
@param color QColor instance
@return None
"""
for widget in self.axisWidgets():
palette = widget.palette()
palette.setColor(palette.WindowText, color)
palette.setColor(palette.Text, color)
widget.setPalette(palette)
def setAxisFont(self, font):
""" Sets the axis widgets font.
@param font QFont instance
@return None
"""
for widget in self.axisWidgets():
widget.setFont(font)
def updateAxis(self):
""" Enables each y axis if there are curves attached to it.
@return None
"""
enable = self.plot.enableAxis
items = self.checkedItems()
for pair, pred in [
([yRight, yLeft], lambda i, a:i.curve.yAxis()==a),
([xTop, xBottom], lambda i, a:i.curve.xAxis()==a)]:
for axis in pair:
enable(axis, any(item for item in items if pred(item, axis)))
## session signal handlers
def on_session_createdSeries(self, key, field):
""" Signal handler called when new Series objects are created.
@param key id of ticker with new series
@param field series field
"""
if key != self.key:
return
series = self.collection[self.key].series[field]
self.addSeries(TickType.getField(field), series)
self.controlsTree.sortByColumn(0, Qt.AscendingOrder)
def setItemValue(self, item):
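        """ Updates the value column for an item from its series data, then recurses into child items.
        @param item ControlTreeItem instance
        @return None
        """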
idx = self.controlsTreeModel.indexFromItem(item)
parent = item.parent()
if parent:
getc = parent.child
else:
getc = self.controlsTreeModel.item
next = getc(item.row(), item.column()+1)
try:
next.setText('%.2f' % item.data[-1])
except (AttributeError, IndexError, TypeError, ):
pass
else:
for c in [item.child(r, 0) for r in range(item.rowCount())]:
self.setItemValue(c)
def on_session_TickPrice_TickSize(self, message):
""" Signal handler for TickPrice and TickSize session messages.
@param message Message instance
@return None
"""
if message.tickerId != self.key:
return
for item in self.controlsTreeItems:
self.setItemValue(item)
items = [i for i in self.controlsTreeItems if i.curve.isVisible()]
for item in items:
item.curve.setData(item.data.x, item.data.y)
if items:
self.plot.replot()
self.on_zoomer_zoomed(None)
def on_session_UpdateAccountValue(self, message):
if self.key != 'account':
return
items = [i for i in self.controlsTreeItems if i.curve.isVisible()]
for item in items:
item.curve.setData(item.data.x, item.data.y)
if items:
self.plot.replot()
self.on_zoomer_zoomed(None)
## action signal handlers
@pyqtSignature('')
def on_actionChangeCurveStyle_triggered(self):
""" Signal handler called to edit a curve.
@return None
"""
pos = self.sender().data().toPoint()
index = self.controlsTree.indexAt(pos)
if index.isValid():
item = self.controlsTreeModel.itemFromIndex(index)
indexZero = self.controlsTreeModel.sibling(index.row(), 0, index)
first = self.controlsTreeModel.itemFromIndex(indexZero)
try:
curve = first.curve
color = first.color
except (AttributeError, ):
return
else:
item = first
if not curve.settingsLoaded:
self.loadCurve(self.itemName(item), curve)
cplot = curve.plot()
if cplot is None:
curve.attach(self.plot)
dlg = PlotItemDialog(curve, self)
if dlg.exec_() == dlg.Accepted:
dlg.applyToCurve(curve)
item.setColor(curve.pen().color())
self.saveCurve(self.itemName(item), curve)
self.enableCurve(item, enable=item.checkState()==Qt.Checked)
if cplot is None:
curve.detach()
@pyqtSignature('')
def on_actionChangeCurveAxisX_triggered(self):
""" Signal handler called to toggle the x axis of a curve.
"""
pos = self.sender().data().toPoint()
index = self.controlsTree.indexAt(pos)
if index.isValid():
item = self.controlsTreeModel.itemFromIndex(index)
curve = item.curve
if curve.xAxis() == xTop:
curve.setXAxis(xBottom)
else:
curve.setXAxis(xTop)
self.updateAxis()
self.saveCurve(self.itemName(item), curve)
self.plot.replot()
@pyqtSignature('')
def on_actionChangeCurveAxisY_triggered(self):
""" Signal handler called to toggle the y axis of a curve.
@return None
"""
pos = self.sender().data().toPoint()
index = self.controlsTree.indexAt(pos)
if index.isValid():
item = self.controlsTreeModel.itemFromIndex(index)
curve = item.curve
if curve.yAxis() == yLeft:
curve.setYAxis(yRight)
else:
curve.setYAxis(yLeft)
self.updateAxis()
self.saveCurve(self.itemName(item), curve)
self.plot.replot()
@pyqtSignature('')
def on_actionChangeDataMarker_triggered(self):
""" Signal handler called to edit data marker.
@return None
"""
pos = self.sender().data().toPoint()
index = self.controlsTree.indexAt(pos)
if index.isValid():
item = self.controlsTreeModel.itemFromIndex(index)
curve = item.curve
if not curve.settingsLoaded:
self.loadCurve(self.itemName(item), curve)
cplot = curve.plot()
if cplot is None:
curve.attach(self.plot)
dlg = PlotItemDialog(curve, marker=curve.dataMarker, parent=self)
if dlg.exec_() == dlg.Accepted:
dlg.applyToMarker(curve.dataMarker)
self.saveMarker(self.itemName(item), curve.dataMarker)
for marker in self.highlightMarkers:
marker.restyleFrom(curve.dataMarker)
self.plot.replot()
if cplot is None:
curve.detach()
@pyqtSignature('bool')
def on_actionDrawLegend_triggered(self, enable):
""" Signal handler called to toggle the plot legend visibility.
@param enable if True, legend is enabled
        @return None
"""
legend = self.plot.legend()
legend.setVisible(enable)
if enable:
items = self.checkedItems()
if items:
for item in items:
item.curve.updateLegend(legend, True)
else:
self.actionDrawLegend.setChecked(False)
else:
legend.clear()
self.saveLegend()
@pyqtSignature('bool')
def on_actionDrawMajorX_triggered(self, enable):
""" Signal handler called to toggle visiblity of major grid x axis.
@param enable if True, grid axis is enabled
@return None
"""
self.grid.enableX(enable)
self.plot.replot()
self.saveMajorX()
@pyqtSignature('bool')
def on_actionDrawMajorY_triggered(self, enable):
""" Signal handler called to toggle visiblity of major grid y axis.
@param enable if True, grid axis is enabled
@return None
"""
self.grid.enableY(enable)
self.plot.replot()
self.saveMajorY()
@pyqtSignature('bool')
def on_actionDrawMinorX_triggered(self, enable):
""" Signal handler called to toggle visiblity of minor grid x axis.
@param enable if True, grid axis is enabled
@return None
"""
self.grid.enableXMin(enable)
self.plot.replot()
self.saveMinorX()
@pyqtSignature('bool')
def on_actionDrawMinorY_triggered(self, enable):
""" Signal handler called to toggle visiblity of minor grid y axis.
@param enable if True, grid axis is enabled
@return None
"""
self.grid.enableYMin(enable)
self.plot.replot()
self.saveMinorY()
@pyqtSignature('')
def on_actionChangeMajorGridStyle_triggered(self):
""" Signal handler called to edit the major grid pen.
@return None
"""
pen = changePen(self.grid.majPen, self.grid.setMajPen, self)
if pen:
self.plot.replot()
self.saveMajorX()
self.saveMajorY()
@pyqtSignature('')
def on_actionChangeMinorGridStyle_triggered(self):
""" Signal handler called to edit the minor grid pen.
@return None
"""
pen = changePen(self.grid.minPen, self.grid.setMinPen, self)
if pen:
self.plot.replot()
self.saveMinorX()
self.saveMinorY()
@pyqtSignature('')
def on_actionChangeCanvasColor_triggered(self):
""" Signal handler called to edit the plot canvas background.
@return None
"""
plot = self.plot
color = changeColor(
plot.canvasBackground, plot.setCanvasBackground, self)
if color:
pen = QPen(complementColor(color))
self.zoomer.setRubberBandPen(pen)
self.picker.setTrackerPen(pen)
plot.replot()
self.saveCanvasColor()
@pyqtSignature('')
def on_actionChangeAxesFont_triggered(self):
""" Signal handler called to edit the axes font.
@return None
"""
widget = self.referenceAxisWidget()
default = widget.font()
font, okay = QFontDialog.getFont(default, self, 'Select Axis Font')
if okay:
self.setAxisFont(font)
self.settings.setValue(
'%s/axisfont' % self.plotName(), font)
@pyqtSignature('')
def on_actionChangeAxesColor_triggered(self):
""" Signal handler called to edit the axes color.
@return None
"""
color = changeColor(self.getAxisColor, self.setAxisColor, self)
if color:
self.settings.setValue('%s/axiscolor' % self.plotName(), color)
@pyqtSignature('bool')
def on_actionShowDataDialog_triggered(self, enable):
""" Signal handler called to show or hide the data dialog.
@return None
"""
if enable:
dlg = self.dataDialog = PlotDataDialog(self)
try:
tabs = self.window().centralTabs
except (AttributeError, ):
pass
else:
name = tabs.tabText(tabs.currentIndex())
dlg.setWindowTitle(str(dlg.windowTitle()) % name)
dlg.setWindowIcon(tabs.tabIcon(tabs.currentIndex()))
self.connect(
dlg, Signals.dialogFinished, self.on_dataDialog_finished)
self.connect(
dlg, Signals.highlightSelections, self.on_dataDialog_selected)
dlg.show()
elif self.dataDialog:
self.dataDialog.close()
self.dataDialog = None
self.settings.setValue('%s/datadialog' % self.plotName(), enable)
## controls tree signal handlers
def on_controlsTree_doubleClicked(self, index):
""" Signal handler for control tree double click.
@param index QModelIndex instance
@return None
"""
tree = self.controlsTree
if index.isValid():
pos = tree.visualRect(index).center()
actions = tree.actions()
for action in actions:
action.setData(QVariant(pos))
self.actionChangeCurveStyle.trigger()
def on_controlsTree_itemChanged(self, item):
""" Signal handler for all changes to control tree items.
@param item changed tree widget item
@return None
"""
try:
curve = item.curve
except (AttributeError, ):
pass
else:
self.enableCurve(item, enable=item.checkState()==Qt.Checked)
self.updateAxis()
self.saveSelections()
def on_controlsTree_customContextMenuRequested(self, pos):
""" Signal handler for context menu request over control tree.
@param pos QPoint of mouse click
@return None
"""
tree = self.controlsTree
index = tree.indexAt(pos)
if index.isValid():
item = self.controlsTreeModel.itemFromIndex(index)
if not hasattr(item, 'curve'):
return
if item.curve.yAxis() == yRight:
self.actionChangeCurveAxisY.setText('Move to Left Axis')
else:
self.actionChangeCurveAxisY.setText('Move to Right Axis')
if item.curve.xAxis() == xTop:
self.actionChangeCurveAxisX.setText('Move to Bottom Axis')
else:
self.actionChangeCurveAxisX.setText('Move to Top Axis')
actions = tree.actions()
for action in actions:
action.setData(QVariant(pos))
QMenu.exec_(actions, tree.mapToGlobal(pos))
def on_dataDialog_finished(self, result):
""" Signal handler for data dialog finish.
        Sets and saves state of data dialog display after it is closed.
@param result ignored
@return None
"""
self.actionShowDataDialog.setChecked(False)
self.dataDialog = None
self.on_dataDialog_selected([])
self.settings.setValue('%s/datadialog' % self.plotName(), False)
def on_dataDialog_selected(self, items):
""" Signal handler for data dialog selection changes.
        @param items list of (index, item) two-tuples
@return None
"""
for marker in self.highlightMarkers:
marker.detach()
self.highlightMarkers = markers = []
for index, item in items:
try:
x, y = index.row(), item.data[index.row()]
except (IndexError, ):
continue
if x is None or y is None:
continue
curve = item.curve
marker = curve.dataMarker.cloneFromValue(curve, x, y)
markers.append(marker)
marker.attach(self.plot)
self.plot.replot()
def on_plotSplitter_splitterMoved(self, pos, index):
""" Signal handler for splitter move; saves state to user settings.
@param pos ignored
@param index ignored
@return None
"""
settings = self.settings
statekey = '%s/%s' % (self.plotName(), settings.keys.splitstate)
settings.setValue(statekey, self.plotSplitter.saveState())
def syncPlot(self, sync=None):
print '## sync?', sync
session = self.session
(session.registerMeta if sync else session.deregisterMeta)(self)
|
gpl-2.0
|
waseem18/oh-mainline
|
vendor/packages/Django/tests/regressiontests/utils/timezone.py
|
52
|
2381
|
import copy
import datetime
import pickle
from django.test.utils import override_settings
from django.utils import timezone
from django.utils.tzinfo import FixedOffset
from django.utils import unittest
EAT = FixedOffset(180) # Africa/Nairobi
ICT = FixedOffset(420) # Asia/Bangkok
class TimezoneTests(unittest.TestCase):
def test_localtime(self):
now = datetime.datetime.utcnow().replace(tzinfo=timezone.utc)
local_tz = timezone.LocalTimezone()
local_now = timezone.localtime(now, local_tz)
self.assertEqual(local_now.tzinfo, local_tz)
def test_now(self):
with override_settings(USE_TZ=True):
self.assertTrue(timezone.is_aware(timezone.now()))
with override_settings(USE_TZ=False):
self.assertTrue(timezone.is_naive(timezone.now()))
def test_override(self):
default = timezone.get_default_timezone()
try:
timezone.activate(ICT)
with timezone.override(EAT):
self.assertIs(EAT, timezone.get_current_timezone())
self.assertIs(ICT, timezone.get_current_timezone())
with timezone.override(None):
self.assertIs(default, timezone.get_current_timezone())
self.assertIs(ICT, timezone.get_current_timezone())
timezone.deactivate()
with timezone.override(EAT):
self.assertIs(EAT, timezone.get_current_timezone())
self.assertIs(default, timezone.get_current_timezone())
with timezone.override(None):
self.assertIs(default, timezone.get_current_timezone())
self.assertIs(default, timezone.get_current_timezone())
finally:
timezone.deactivate()
def test_copy(self):
self.assertIsInstance(copy.copy(timezone.UTC()), timezone.UTC)
self.assertIsInstance(copy.copy(timezone.LocalTimezone()), timezone.LocalTimezone)
def test_deepcopy(self):
self.assertIsInstance(copy.deepcopy(timezone.UTC()), timezone.UTC)
self.assertIsInstance(copy.deepcopy(timezone.LocalTimezone()), timezone.LocalTimezone)
def test_pickling_unpickling(self):
self.assertIsInstance(pickle.loads(pickle.dumps(timezone.UTC())), timezone.UTC)
self.assertIsInstance(pickle.loads(pickle.dumps(timezone.LocalTimezone())), timezone.LocalTimezone)
|
agpl-3.0
|
santiycr/selenium-firefox-support-matrix
|
test_the_firefoxen.py
|
1
|
5063
|
#!/usr/bin/env python
# encoding: utf-8
import os
import sys
import new
import json
import unittest
from time import sleep
import argparse
import random
from selenium import webdriver
import nose
from nose.plugins.multiprocess import MultiProcess
from lib.parse_version import parse_version
USER = os.environ['SAUCE_USER']
KEY = os.environ['SAUCE_KEY']
HOST = 'ondemand.saucelabs.com'
PORT = 80
JAR_URL = "https://sauce-bundles.s3.amazonaws.com/selenium/selenium-server-%s%s.jar"
class FirefoxSupportTest(unittest.TestCase):
__test__ = False
def setUp(self):
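        # Build capabilities that pin a specific selenium-server jar for this run,
        # then retry the remote connection up to three times before giving up.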
parsed_version = parse_version(self.sel_version)
jar_addition = ''
if parsed_version <= (2, 19, 0):
jar_addition += '-newcert'
if (2, 14, 0) <= parsed_version <= (2, 25, 0):
jar_addition += '-dnsfix'
dc = {'platform': self.platform,
'browserName': self.br,
'version': self.version,
'selenium-version': JAR_URL % (self.sel_version, jar_addition),
'name': self.name,
'prevent-requeue': True,
}
self.native = False
for i in range(3):
try:
self.driver = webdriver.Remote(desired_capabilities=dc,
command_executor="http://%s:%s@%s:%s/wd/hub"
% (USER, KEY, HOST, PORT))
except Exception:
self.driver = None
else:
self.native = self.driver.capabilities['nativeEvents']
break
def test_browser_works(self):
if not self.driver:
self.fail("Failed to launch browser")
url = random.choice(['https://saucelabs.com/login',
'https://google.com', 'https://www.bing.com',
'https://yahoo.com', 'https://www.facebook.com',
'https://instagram.com'])
self.driver.get(url)
for i in range(30):
if self.driver.title:
break
sleep(0.5)
else:
self.fail("title never showed")
def tearDown(self):
if self.driver:
self.driver.quit()
with open(os.path.join(self.platform, '%s_%s_results.json' % (self.version,
self.sel_version)),
'w') as results_file:
results = {self.version: {
self.sel_version: {
'worked': sys.exc_info() == (None, None, None),
'native': self.native}}}
results_file.write(json.dumps(results))
parser = argparse.ArgumentParser(
description='Collect Firefox vs Selenium version support matrix')
parser.add_argument('--firefox', '-f', metavar='FF_VER',
nargs='*', help='Specific versions of Firefox to test')
parser.add_argument('--selenium', '-s', metavar='SE_VER',
nargs='*', help='Specific versions of Selenium to test')
parser.add_argument('--platform', '-p', type=str, default="Windows 2003",
help='The OS to run the tests on.')
parser.add_argument('--threads', '-t', type=int, default=10,
help='Amount of threads to run tests in parallel on.')
args = parser.parse_args()
jars_to_test = args.selenium if args.selenium else [
'2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0',
'2.8.0', '2.9.0', '2.10.0', '2.11.0', '2.12.0', '2.13.0', '2.14.0',
'2.14.1', '2.15.0', '2.16.0', '2.16.1', '2.17.0', '2.18.0', '2.19.0',
'2.20.0', '2.21.0', '2.22.0', '2.23.0', '2.23.1', '2.24.0', '2.24.1',
'2.25.0', '2.26.0', '2.27.0', '2.28.0', '2.29.0', '2.30.0', '2.31.0',
'2.32.0', '2.33.0', '2.34.0', '2.35.0', '2.36.0', '2.37.0', '2.38.0',
'2.39.0', '2.40.0', '2.41.0', '2.42.2', '2.43.1', '2.44.0', '2.45.0',
]
firefoxes_to_test = args.firefox if args.firefox else range(3, 38)
classes = {}
for jar_version in jars_to_test:
for ff_version in firefoxes_to_test:
name = "%s_%s_%s" % (FirefoxSupportTest.__name__, jar_version, ff_version)
name = name.encode('ascii')
if name.endswith("."):
name = name[:-1]
for x in ". ":
name = name.replace(x, "")
d = dict(FirefoxSupportTest.__dict__)
d.update({'__test__': True,
'__name__': name,
'name': name,
'platform': args.platform,
'br': 'firefox',
'version': ff_version,
'sel_version': jar_version,
})
classes[name] = new.classobj(name, (FirefoxSupportTest,), d)
globals().update(classes)
if __name__ == "__main__":
if not os.path.isdir(args.platform):
os.mkdir(args.platform)
nose.core.run(argv=['--nocapture', "-v", "--processes", args.threads,
"--process-timeout", "1800", __file__],
plugins=[MultiProcess()])
|
gpl-2.0
|
yspanchal/docli
|
docli/main.py
|
1
|
3046
|
# -*- coding: utf-8 -*-
import click
from commands.account import account_group
from commands.actions import actions_group
from commands.domain import domain_group
from commands.records import record_group
from commands.droplet import droplet_group
from commands.droplet_actions import droplet_actions_group
from commands.images import images_group
from commands.image_actions import image_actions_group
from commands.ssh_keys import ssh_keys_group
from commands.region_size import region_size_group
from commands.base_request import CONTEXT_SETTINGS
import os
sources_list = [account_group, actions_group, domain_group, record_group,
droplet_group, droplet_actions_group, images_group,
image_actions_group, ssh_keys_group, region_size_group]
def config_file(file, reconfigure=False):
"""
create configuration file
"""
value = click.prompt('Enter digital ocean access token', type=str)
f = open(file, 'w')
f.write('[docli]\nauth_token='+value)
f.close()
if not reconfigure:
bashrc = os.path.expanduser('~/.bashrc')
bash_profile = os.path.expanduser('~/.bash_profile')
profile = os.path.expanduser('~/.profile')
if os.path.isfile(bashrc):
bashrc_file = open(bashrc, 'a')
bashrc_file.write('eval "$(_DOCLI_COMPLETE=source docli)"')
bashrc_file.close()
click.echo("apply changes by running 'source ~/.bashrc' without ''")
elif os.path.isfile(bash_profile):
bash_profile_file = open(bash_profile, 'a')
bash_profile_file.write('eval "$(_DOCLI_COMPLETE=source docli)"')
bash_profile_file.close()
click.echo("apply changes by running 'source ~/.bash_profile' without ''")
elif os.path.isfile(profile):
profile_file = open(profile, 'a')
profile_file.write('eval "$(_DOCLI_COMPLETE=source docli)"')
profile_file.close()
click.echo("apply changes by running 'source ~/.profile' without ''")
else:
msg = 'Add following line to your bashrc.\n eval "$(_DOCLI_COMPLETE=source docli)"'
click.echo(msg)
click.echo()
click.echo("configuration completed.")
@click.command(cls=click.CommandCollection, sources=sources_list, context_settings=CONTEXT_SETTINGS, invoke_without_command=True, no_args_is_help=True)
@click.option('-c', '--configure', is_flag=True, help='configure digital ocean access token')
@click.version_option(version=1.0, message=('Digital Ocean command line interface. \n%(prog)s, version %(version)s, by Yogesh panchal, [email protected]'))
def docli(configure):
"""
	'docli' is a Digital Ocean command line interface
# To configure docli
>>> docli --configure
# To get list available commands
>>> docli --help
# How to generate access token
https://goo.gl/hPkQG7
"""
if configure:
file = os.path.expanduser('~/.do.cfg')
if not os.path.isfile(file):
config_file(file)
else:
value = click.prompt('Do you want to reconfigure docli [y/n] ?', type=str, default='n')
if value.lower() == 'y':
reconfigure = True
config_file(file, reconfigure)
else:
click.echo()
click.echo('done..!!!')
if __name__ == '__main__':
docli()
|
apache-2.0
|
informaticameg/Posta
|
storm/references.py
|
3
|
37211
|
#
# Copyright (c) 2006, 2007 Canonical
#
# Written by Gustavo Niemeyer <[email protected]>
#
# This file is part of Storm Object Relational Mapper.
#
# Storm is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# Storm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import weakref
from storm.exceptions import (
ClassInfoError, FeatureError, NoStoreError, WrongStoreError)
from storm.store import Store, get_where_for_args, LostObjectError
from storm.variables import LazyValue
from storm.expr import (
Select, Column, Exists, ComparableExpr, LeftJoin, Not, SQLRaw,
compare_columns, compile)
from storm.info import get_cls_info, get_obj_info
__all__ = ["Reference", "ReferenceSet", "Proxy"]
class LazyAttribute(object):
"""
This descriptor will call the named attribute builder to
initialize the given attribute on first access. It avoids
having a test at every single place where the attribute is
touched when lazy initialization is wanted, and prevents
paying the price of a normal property when classes are
    seldom instantiated (the case of references).
"""
def __init__(self, attr, attr_builder):
self._attr = attr
self._attr_builder = attr_builder
def __get__(self, obj, cls=None):
getattr(obj, self._attr_builder)()
return getattr(obj, self._attr)
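# Illustration only (not part of the Storm API): a minimal class using the
# LazyAttribute descriptor above.  The first access to _data calls _build_data,
# which stores a plain instance attribute, so later accesses bypass the
# descriptor entirely.
class _LazyAttributeExample(object):
    _data = LazyAttribute("_data", "_build_data")
    def _build_data(self):
        # A real builder would do the expensive work here (compare Reference
        # below, which builds its Relation on first use).
        self._data = 42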
class PendingReferenceValue(LazyValue):
"""Lazy value to be used as a marker for unflushed foreign keys.
When a reference is set to an object which is still unflushed,
the foreign key in the local object remains set to this value
until the object is flushed.
"""
PendingReferenceValue = PendingReferenceValue()
class Reference(object):
"""Descriptor for one-to-one relationships.
This is typically used when the class that it is being defined on
has a foreign key onto another table::
class OtherGuy(object):
...
id = Int()
class MyGuy(object):
...
other_guy_id = Int()
other_guy = Reference(other_guy_id, OtherGuy.id)
but can also be used for backwards references, where OtherGuy's
table has a foreign key onto the class that you want this property
on::
class OtherGuy(object):
...
my_guy_id = Int() # in the database, a foreign key to my_guy.id
class MyGuy(object):
...
id = Int()
other_guy = Reference(id, OtherGuy.my_guy_id, on_remote=True)
In both cases, C{MyGuy().other_guy} will resolve to the
C{OtherGuy} instance which is linked to it. In the first case, it
will be the C{OtherGuy} instance whose C{id} is equivalent to the
C{MyGuy}'s C{other_guy_id}; in the second, it'll be the
C{OtherGuy} instance whose C{my_guy_id} is equivalent to the
C{MyGuy}'s C{id}.
Assigning to the property, for example with C{MyGuy().other_guy =
OtherGuy()}, will link the objects and update either
C{MyGuy.other_guy_id} or C{OtherGuy.my_guy_id} accordingly.
"""
# Must initialize _relation later because we don't want to resolve
    # string references at definition time, since classes referred to might
# not be available yet. Notice that this attribute is "public" to the
    # Proxy class and the SQLObject wrapper. It keeps the leading underscore
    # because it's *NOT* part of the public API of Storm (we'll modify it
    # without warnings!).
_relation = LazyAttribute("_relation", "_build_relation")
def __init__(self, local_key, remote_key, on_remote=False):
"""
Create a Reference property.
@param local_key: The sibling column which is the foreign key
            onto C{remote_key} (unless C{on_remote} is passed; see
below).
@param remote_key: The column on the referred-to object which
will have the same value as that for C{local_key} when
resolved on an instance.
@param on_remote: If specified, then the reference is
backwards: It is the C{remote_key} which is a foreign key
onto C{local_key}.
"""
self._local_key = local_key
self._remote_key = remote_key
self._on_remote = on_remote
self._cls = None
def __get__(self, local, cls=None):
if local is not None:
# Don't use local here, as it might be security proxied.
local = get_obj_info(local).get_obj()
if self._cls is None:
self._cls = _find_descriptor_class(cls or local.__class__, self)
if local is None:
return self
remote = self._relation.get_remote(local)
if remote is not None:
return remote
if self._relation.local_variables_are_none(local):
return None
store = Store.of(local)
if store is None:
return None
if self._relation.remote_key_is_primary:
remote = store.get(self._relation.remote_cls,
self._relation.get_local_variables(local))
else:
where = self._relation.get_where_for_remote(local)
result = store.find(self._relation.remote_cls, where)
remote = result.one()
if remote is not None:
self._relation.link(local, remote)
return remote
def __set__(self, local, remote):
# Don't use local here, as it might be security proxied or something.
local = get_obj_info(local).get_obj()
if self._cls is None:
self._cls = _find_descriptor_class(local.__class__, self)
if remote is None:
if self._on_remote:
remote = self.__get__(local)
if remote is None:
return
else:
remote = self._relation.get_remote(local)
if remote is None:
remote_info = None
else:
remote_info = get_obj_info(remote)
self._relation.unlink(get_obj_info(local), remote_info, True)
else:
# Don't use remote here, as it might be
# security proxied or something.
try:
remote = get_obj_info(remote).get_obj()
except ClassInfoError:
pass # It might fail when remote is a tuple or a raw value.
self._relation.link(local, remote, True)
def _build_relation(self):
resolver = PropertyResolver(self, self._cls)
self._local_key = resolver.resolve(self._local_key)
self._remote_key = resolver.resolve(self._remote_key)
self._relation = Relation(self._local_key, self._remote_key,
False, self._on_remote)
def __eq__(self, other):
return self._relation.get_where_for_local(other)
def __ne__(self, other):
return Not(self == other)
class ReferenceSet(object):
# Must initialize later because we don't want to resolve string
    # references at definition time, since classes referred to might
# not be available yet.
_relation1 = LazyAttribute("_relation1", "_build_relations")
_relation2 = LazyAttribute("_relation2", "_build_relations")
_order_by = LazyAttribute("_order_by", "_build_relations")
def __init__(self, local_key1, remote_key1,
remote_key2=None, local_key2=None, order_by=None):
self._local_key1 = local_key1
self._remote_key1 = remote_key1
self._remote_key2 = remote_key2
self._local_key2 = local_key2
self._default_order_by = order_by
self._cls = None
def __get__(self, local, cls=None):
if local is not None:
# Don't use local here, as it might be security proxied.
local = get_obj_info(local).get_obj()
if self._cls is None:
self._cls = _find_descriptor_class(cls or local.__class__, self)
if local is None:
return self
#store = Store.of(local)
#if store is None:
# return None
if self._relation2 is None:
return BoundReferenceSet(self._relation1, local, self._order_by)
else:
return BoundIndirectReferenceSet(self._relation1,
self._relation2, local,
self._order_by)
def __set__(self, local, value):
raise FeatureError("Assigning to ResultSets not supported")
def _build_relations(self):
resolver = PropertyResolver(self, self._cls)
if self._default_order_by is not None:
self._order_by = resolver.resolve(self._default_order_by)
else:
self._order_by = None
self._local_key1 = resolver.resolve(self._local_key1)
self._remote_key1 = resolver.resolve(self._remote_key1)
self._relation1 = Relation(self._local_key1, self._remote_key1,
True, True)
if self._local_key2 and self._remote_key2:
self._local_key2 = resolver.resolve(self._local_key2)
self._remote_key2 = resolver.resolve(self._remote_key2)
self._relation2 = Relation(self._local_key2, self._remote_key2,
True, True)
else:
self._relation2 = None
class BoundReferenceSetBase(object):
def find(self, *args, **kwargs):
store = Store.of(self._local)
if store is None:
raise NoStoreError("Can't perform operation without a store")
where = self._get_where_clause()
result = store.find(self._target_cls, where, *args, **kwargs)
if self._order_by is not None:
result.order_by(self._order_by)
return result
def __iter__(self):
return self.find().__iter__()
def __contains__(self, item):
return item in self.find()
def first(self, *args, **kwargs):
return self.find(*args, **kwargs).first()
def last(self, *args, **kwargs):
return self.find(*args, **kwargs).last()
def any(self, *args, **kwargs):
return self.find(*args, **kwargs).any()
def one(self, *args, **kwargs):
return self.find(*args, **kwargs).one()
def values(self, *columns):
return self.find().values(*columns)
def order_by(self, *args):
return self.find().order_by(*args)
def count(self):
return self.find().count()
class BoundReferenceSet(BoundReferenceSetBase):
def __init__(self, relation, local, order_by):
self._relation = relation
self._local = local
self._target_cls = self._relation.remote_cls
self._order_by = order_by
def _get_where_clause(self):
return self._relation.get_where_for_remote(self._local)
def clear(self, *args, **kwargs):
set_kwargs = {}
for remote_column in self._relation.remote_key:
set_kwargs[remote_column.name] = None
store = Store.of(self._local)
if store is None:
raise NoStoreError("Can't perform operation without a store")
where = self._relation.get_where_for_remote(self._local)
store.find(self._target_cls, where, *args, **kwargs).set(**set_kwargs)
def add(self, remote):
self._relation.link(self._local, remote, True)
def remove(self, remote):
self._relation.unlink(get_obj_info(self._local),
get_obj_info(remote), True)
class BoundIndirectReferenceSet(BoundReferenceSetBase):
def __init__(self, relation1, relation2, local, order_by):
self._relation1 = relation1
self._relation2 = relation2
self._local = local
self._order_by = order_by
self._target_cls = relation2.local_cls
self._link_cls = relation1.remote_cls
def _get_where_clause(self):
return (self._relation1.get_where_for_remote(self._local) &
self._relation2.get_where_for_join())
def clear(self, *args, **kwargs):
store = Store.of(self._local)
if store is None:
raise NoStoreError("Can't perform operation without a store")
where = self._relation1.get_where_for_remote(self._local)
if args or kwargs:
filter = get_where_for_args(args, kwargs, self._target_cls)
join = self._relation2.get_where_for_join()
table = get_cls_info(self._target_cls).table
where &= Exists(Select(SQLRaw("*"), join & filter, tables=table))
store.find(self._link_cls, where).remove()
def add(self, remote):
link = self._link_cls()
self._relation1.link(self._local, link, True)
# Don't use remote here, as it might be security proxied or something.
remote = get_obj_info(remote).get_obj()
self._relation2.link(remote, link, True)
def remove(self, remote):
store = Store.of(self._local)
if store is None:
raise NoStoreError("Can't perform operation without a store")
# Don't use remote here, as it might be security proxied or something.
remote = get_obj_info(remote).get_obj()
where = (self._relation1.get_where_for_remote(self._local) &
self._relation2.get_where_for_remote(remote))
store.find(self._link_cls, where).remove()
class Proxy(ComparableExpr):
"""Proxy exposes a referred object's column as a local column.
For example::
class Foo(object):
bar_id = Int()
bar = Reference(bar_id, Bar.id)
bar_title = Proxy(bar, Bar.title)
For most uses, Foo.bar_title should behave as if it were
a native property of Foo.
"""
class RemoteProp(object):
"""
This descriptor will resolve and set the _remote_prop attribute
when it's first used. It avoids having a test at every single
place where the attribute is touched.
"""
def __get__(self, obj, cls=None):
resolver = PropertyResolver(obj, obj._cls)
obj._remote_prop = resolver.resolve_one(obj._unresolved_prop)
return obj._remote_prop
_remote_prop = RemoteProp()
def __init__(self, reference, remote_prop):
self._reference = reference
self._unresolved_prop = remote_prop
self._cls = None
def __get__(self, obj, cls=None):
if self._cls is None:
self._cls = _find_descriptor_class(cls, self)
if obj is None:
return self
# Have you counted how many descriptors we're dealing with here? ;-)
return self._remote_prop.__get__(self._reference.__get__(obj))
def __set__(self, obj, value):
return self._remote_prop.__set__(self._reference.__get__(obj), value)
@property
def variable_factory(self):
return self._remote_prop.variable_factory
@compile.when(Proxy)
def compile_proxy(compile, proxy, state):
# Inject the join between the table of the class holding the proxy
# and the table of the class which is the target of the reference.
left_join = LeftJoin(proxy._reference._relation.local_cls,
proxy._remote_prop.table,
proxy._reference._relation.get_where_for_join())
state.auto_tables.append(left_join)
# And compile the remote property normally.
return compile(proxy._remote_prop, state)
class Relation(object):
def __init__(self, local_key, remote_key, many, on_remote):
assert type(local_key) is tuple and type(remote_key) is tuple
self.local_key = local_key
self.remote_key = remote_key
self.local_cls = getattr(self.local_key[0], "cls", None)
self.remote_cls = self.remote_key[0].cls
self.remote_key_is_primary = False
primary_key = get_cls_info(self.remote_cls).primary_key
if len(primary_key) == len(self.remote_key):
for column1, column2 in zip(self.remote_key, primary_key):
if column1.name != column2.name:
break
else:
self.remote_key_is_primary = True
self.many = many
self.on_remote = on_remote
# XXX These should probably be weak dictionaries.
self._local_columns = {}
self._remote_columns = {}
self._l_to_r = {}
self._r_to_l = {}
def get_remote(self, local):
"""Return the remote object for this relation, using the local cache.
If the object in the cache is invalidated, we validate it again to
check if it's still in the database.
"""
local_info = get_obj_info(local)
try:
obj = local_info[self]["remote"]
except KeyError:
return None
remote_info = get_obj_info(obj)
if remote_info.get("invalidated"):
try:
Store.of(obj)._validate_alive(remote_info)
except LostObjectError:
return None
return obj
def get_where_for_remote(self, local):
"""Generate a column comparison expression for reference properties.
The returned expression may be used to find objects of the I{remote}
type referring to C{local}.
"""
local_variables = self.get_local_variables(local)
for variable in local_variables:
if not variable.is_defined():
Store.of(local).flush()
break
return compare_columns(self.remote_key, local_variables)
def get_where_for_local(self, other):
"""Generate a column comparison expression for reference properties.
The returned expression may be used to find objects of the I{local}
type referring to C{other}.
It handles the following cases::
Class.reference == obj
Class.reference == obj.id
Class.reference == (obj.id1, obj.id2)
Where the right-hand side is the C{other} object given.
"""
try:
obj_info = get_obj_info(other)
except ClassInfoError:
if type(other) is not tuple:
remote_variables = (other,)
else:
remote_variables = other
else:
# Don't use other here, as it might be
# security proxied or something.
other = get_obj_info(other).get_obj()
remote_variables = self.get_remote_variables(other)
return compare_columns(self.local_key, remote_variables)
def get_where_for_join(self):
return compare_columns(self.local_key, self.remote_key)
def get_local_variables(self, local):
local_info = get_obj_info(local)
return tuple(local_info.variables[column]
for column in self._get_local_columns(local.__class__))
def local_variables_are_none(self, local):
"""Return true if all variables of the local key have None values."""
local_info = get_obj_info(local)
for column in self._get_local_columns(local.__class__):
if local_info.variables[column].get() is not None:
return False
return True
def get_remote_variables(self, remote):
remote_info = get_obj_info(remote)
return tuple(remote_info.variables[column]
for column in self._get_remote_columns(remote.__class__))
def link(self, local, remote, setting=False):
"""Link objects to represent their relation.
@param local: Object representing the I{local} side of the reference.
@param remote: Object representing the I{remote} side of the reference,
or the actual value to be set as the local key.
@param setting: Pass true when the relationship is being newly created.
"""
local_info = get_obj_info(local)
try:
remote_info = get_obj_info(remote)
except ClassInfoError:
# Must be a plain key. Just set it.
# XXX I guess this is broken if self.on_remote is True.
local_variables = self.get_local_variables(local)
if type(remote) is not tuple:
remote = (remote,)
assert len(remote) == len(local_variables)
for variable, value in zip(local_variables, remote):
variable.set(value)
return
local_store = Store.of(local)
remote_store = Store.of(remote)
if setting:
if local_store is None:
if remote_store is None:
local_info.event.hook("added", self._add_all, local_info)
remote_info.event.hook("added", self._add_all, local_info)
else:
remote_store.add(local)
local_store = remote_store
elif remote_store is None:
local_store.add(remote)
elif local_store is not remote_store:
raise WrongStoreError("%r and %r cannot be linked because they "
"are in different stores." %
(local, remote))
# In cases below, we maintain a reference to the remote object
# to make sure it won't get deallocated while the link is active.
relation_data = local_info.get(self)
if self.many:
if relation_data is None:
relation_data = local_info[self] = {"remote":
{remote_info: remote}}
else:
relation_data["remote"][remote_info] = remote
else:
if relation_data is None:
relation_data = local_info[self] = {"remote": remote}
else:
old_remote = relation_data.get("remote")
if old_remote is not None:
self.unlink(local_info, get_obj_info(old_remote))
relation_data["remote"] = remote
if setting:
local_vars = local_info.variables
remote_vars = remote_info.variables
pairs = zip(self._get_local_columns(local.__class__),
self.remote_key)
if self.on_remote:
local_has_changed = False
for local_column, remote_column in pairs:
local_var = local_vars[local_column]
if not local_var.is_defined():
remote_vars[remote_column].set(PendingReferenceValue)
else:
remote_vars[remote_column].set(local_var.get())
if local_var.has_changed():
local_has_changed = True
if local_has_changed:
self._add_flush_order(local_info, remote_info)
local_info.event.hook("changed", self._track_local_changes,
remote_info)
local_info.event.hook("flushed", self._break_on_local_flushed,
remote_info)
#local_info.event.hook("removed", self._break_on_local_removed,
# remote_info)
remote_info.event.hook("removed", self._break_on_remote_removed,
weakref.ref(local_info))
else:
remote_has_changed = False
for local_column, remote_column in pairs:
remote_var = remote_vars[remote_column]
if not remote_var.is_defined():
local_vars[local_column].set(PendingReferenceValue)
else:
local_vars[local_column].set(remote_var.get())
if remote_var.has_changed():
remote_has_changed = True
if remote_has_changed:
self._add_flush_order(local_info, remote_info,
remote_first=True)
remote_info.event.hook("changed", self._track_remote_changes,
local_info)
remote_info.event.hook("flushed", self._break_on_remote_flushed,
local_info)
#local_info.event.hook("removed", self._break_on_remote_removed,
# local_info)
local_info.event.hook("changed", self._break_on_local_diverged,
remote_info)
else:
local_info.event.hook("changed", self._break_on_local_diverged,
remote_info)
remote_info.event.hook("changed", self._break_on_remote_diverged,
weakref.ref(local_info))
if self.on_remote:
remote_info.event.hook("removed", self._break_on_remote_removed,
weakref.ref(local_info))
def unlink(self, local_info, remote_info, setting=False):
"""Break the relation between the local and remote objects.
@param setting: If true objects will be changed to persist breakage.
"""
unhook = False
relation_data = local_info.get(self)
if relation_data is not None:
if self.many:
remote_infos = relation_data["remote"]
if remote_info in remote_infos:
remote_infos.pop(remote_info, None)
unhook = True
else:
if relation_data.pop("remote", None) is not None:
unhook = True
if unhook:
local_store = Store.of(local_info)
local_info.event.unhook("changed", self._track_local_changes,
remote_info)
local_info.event.unhook("changed", self._break_on_local_diverged,
remote_info)
local_info.event.unhook("flushed", self._break_on_local_flushed,
remote_info)
remote_info.event.unhook("changed", self._track_remote_changes,
local_info)
remote_info.event.unhook("changed", self._break_on_remote_diverged,
weakref.ref(local_info))
remote_info.event.unhook("flushed", self._break_on_remote_flushed,
local_info)
remote_info.event.unhook("removed", self._break_on_remote_removed,
weakref.ref(local_info))
if local_store is None:
if not self.many or not remote_infos:
local_info.event.unhook("added", self._add_all, local_info)
remote_info.event.unhook("added", self._add_all, local_info)
else:
flush_order = relation_data.get("flush_order")
if flush_order is not None and remote_info in flush_order:
if self.on_remote:
local_store.remove_flush_order(local_info, remote_info)
else:
local_store.remove_flush_order(remote_info, local_info)
flush_order.remove(remote_info)
if setting:
if self.on_remote:
remote_vars = remote_info.variables
for remote_column in self.remote_key:
remote_vars[remote_column].set(None)
else:
local_vars = local_info.variables
local_cols = self._get_local_columns(local_info.cls_info.cls)
for local_column in local_cols:
local_vars[local_column].set(None)
def _add_flush_order(self, local_info, remote_info, remote_first=False):
"""Tell the Store to flush objects in the specified order.
We need to conditionally remove the flush order in unlink() only
if we added it here. Note that we can't just check if the Store
has ordering on the (local, remote) pair, since it may have more
than one request for ordering it, from different relations.
@param local_info: The object info for the local object.
@param remote_info: The object info for the remote object.
@param remote_first: If True, remote_info will be flushed
before local_info.
"""
local_store = Store.of(local_info)
if local_store is not None:
flush_order = local_info[self].setdefault("flush_order", set())
if remote_info not in flush_order:
flush_order.add(remote_info)
if remote_first:
local_store.add_flush_order(remote_info, local_info)
else:
local_store.add_flush_order(local_info, remote_info)
def _track_local_changes(self, local_info, local_variable,
old_value, new_value, fromdb, remote_info):
"""Deliver changes in local to remote.
This hook ensures that the remote object will keep track of
changes done in the local object, either manually or at
flushing time.
"""
remote_column = self._get_remote_column(local_info.cls_info.cls,
local_variable.column)
if remote_column is not None:
remote_info.variables[remote_column].set(new_value)
self._add_flush_order(local_info, remote_info)
def _track_remote_changes(self, remote_info, remote_variable,
old_value, new_value, fromdb, local_info):
"""Deliver changes in remote to local.
This hook ensures that the local object will keep track of
changes done in the remote object, either manually or at
flushing time.
"""
local_column = self._get_local_column(local_info.cls_info.cls,
remote_variable.column)
if local_column is not None:
local_info.variables[local_column].set(new_value)
self._add_flush_order(local_info, remote_info, remote_first=True)
def _break_on_local_diverged(self, local_info, local_variable,
old_value, new_value, fromdb, remote_info):
"""Break the remote/local relationship on diverging changes.
This hook ensures that if the local object has an attribute
changed by hand in a way that diverges from the remote object,
it stops tracking changes.
"""
remote_column = self._get_remote_column(local_info.cls_info.cls,
local_variable.column)
if remote_column is not None:
variable = remote_info.variables[remote_column]
if variable.get_lazy() is None and variable.get() != new_value:
self.unlink(local_info, remote_info)
def _break_on_remote_diverged(self, remote_info, remote_variable,
old_value, new_value, fromdb, local_info_ref):
"""Break the remote/local relationship on diverging changes.
This hook ensures that if the remote object has an attribute
changed by hand in a way that diverges from the local object,
the relationship is undone.
"""
local_info = local_info_ref()
if local_info is None:
return
local_column = self._get_local_column(local_info.cls_info.cls,
remote_variable.column)
if local_column is not None:
local_value = local_info.variables[local_column].get()
if local_value != new_value:
self.unlink(local_info, remote_info)
def _break_on_local_flushed(self, local_info, remote_info):
"""Break the remote/local relationship on flush."""
self.unlink(local_info, remote_info)
def _break_on_remote_flushed(self, remote_info, local_info):
"""Break the remote/local relationship on flush."""
self.unlink(local_info, remote_info)
def _break_on_remote_removed(self, remote_info, local_info_ref):
"""Break the remote relationship when the remote object is removed."""
local_info = local_info_ref()
if local_info is not None:
self.unlink(local_info, remote_info)
def _add_all(self, obj_info, local_info):
store = Store.of(obj_info)
store.add(local_info)
local_info.event.unhook("added", self._add_all, local_info)
def add(remote_info):
remote_info.event.unhook("added", self._add_all, local_info)
store.add(remote_info)
self._add_flush_order(local_info, remote_info,
remote_first=(not self.on_remote))
if self.many:
for remote_info in local_info[self]["remote"]:
add(remote_info)
else:
add(get_obj_info(local_info[self]["remote"]))
def _get_remote_columns(self, remote_cls):
try:
return self._remote_columns[remote_cls]
except KeyError:
columns = tuple(prop.__get__(None, remote_cls)
for prop in self.remote_key)
self._remote_columns[remote_cls] = columns
return columns
def _get_local_columns(self, local_cls):
try:
return self._local_columns[local_cls]
except KeyError:
columns = tuple(prop.__get__(None, local_cls)
for prop in self.local_key)
self._local_columns[local_cls] = columns
return columns
def _get_remote_column(self, local_cls, local_column):
try:
return self._l_to_r[local_cls].get(local_column)
except KeyError:
map = {}
for local_prop, _remote_column in zip(self.local_key,
self.remote_key):
map[local_prop.__get__(None, local_cls)] = _remote_column
return self._l_to_r.setdefault(local_cls, map).get(local_column)
def _get_local_column(self, local_cls, remote_column):
try:
return self._r_to_l[local_cls].get(remote_column)
except KeyError:
map = {}
for local_prop, _remote_column in zip(self.local_key,
self.remote_key):
map[_remote_column] = local_prop.__get__(None, local_cls)
return self._r_to_l.setdefault(local_cls, map).get(remote_column)
class PropertyResolver(object):
"""Transform strings and pure properties (non-columns) into columns."""
def __init__(self, reference, used_cls):
self._reference = reference
self._used_cls = used_cls
self._registry = None
self._namespace = None
def resolve(self, properties):
if not type(properties) is tuple:
return (self.resolve_one(properties),)
return tuple(self.resolve_one(property) for property in properties)
def resolve_one(self, property):
if type(property) is tuple:
return self.resolve(property)
elif isinstance(property, basestring):
return self._resolve_string(property)
elif not isinstance(property, Column):
return _find_descriptor_obj(self._used_cls, property)
return property
def _resolve_string(self, property_path):
if self._registry is None:
try:
self._registry = self._used_cls._storm_property_registry
except AttributeError:
raise RuntimeError("When using strings on references, "
"classes involved must be subclasses "
"of 'Storm'")
cls = _find_descriptor_class(self._used_cls, self._reference)
self._namespace = "%s.%s" % (cls.__module__, cls.__name__)
return self._registry.get(property_path, self._namespace)
def _find_descriptor_class(used_cls, descr):
for cls in used_cls.__mro__:
for attr, _descr in cls.__dict__.iteritems():
if _descr is descr:
return cls
raise RuntimeError("Reference used in an unknown class")
def _find_descriptor_obj(used_cls, descr):
for cls in used_cls.__mro__:
for attr, _descr in cls.__dict__.iteritems():
if _descr is descr:
return getattr(cls, attr)
raise RuntimeError("Reference used in an unknown class")
|
gpl-3.0
|
pombreda/git-git.code.sf.net-p-rpmlint-code
|
LSBCheck.py
|
3
|
1846
|
# -*- coding: utf-8 -*-
#############################################################################
# Project : Mandriva Linux
# Module : rpmlint
# File : LSBCheck.py
# Author : Frederic Lepied
# Created On : Tue Jan 30 14:44:37 2001
# Purpose : LSB non compliance checks
#############################################################################
import re
import rpm
from Filter import addDetails, printError
import AbstractCheck
version_regex = re.compile('^[a-zA-Z0-9.+]+$')
name_regex = re.compile('^[a-z0-9.+-]+$')
class LSBCheck(AbstractCheck.AbstractCheck):
def __init__(self):
AbstractCheck.AbstractCheck.__init__(self, "LSBCheck")
def check(self, pkg):
name = pkg.name
if name and not name_regex.search(name):
printError(pkg, 'non-lsb-compliant-package-name', name)
version = pkg[rpm.RPMTAG_VERSION]
if version and not version_regex.search(version):
printError(pkg, 'non-lsb-compliant-version', version)
release = pkg[rpm.RPMTAG_RELEASE]
if release and not version_regex.search(release):
printError(pkg, 'non-lsb-compliant-release', release)
# Create an object to enable the auto registration of the test
check = LSBCheck()
addDetails(
'non-lsb-compliant-package-name',
"""Your package name contains an illegal character. Use only
alphanumeric symbols in your package name.""",
'non-lsb-compliant-version',
"""Your version number contains an illegal character. Use only
lowercase letters and/or numbers.""",
'non-lsb-compliant-release',
"""Your version number contains an illegal character. Use only
lowercase letters and/or numbers.""",
)
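# Illustration only (not part of rpmlint): what the regexes above accept and
# reject; these assertions hold for the patterns as defined.
assert name_regex.search("libfoo2") and not name_regex.search("Foo_Bar")
assert version_regex.search("1.2.3") and not version_regex.search("1.2-rc1")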
# LSBCheck.py ends here
# Local variables:
# indent-tabs-mode: nil
# py-indent-offset: 4
# End:
# ex: ts=4 sw=4 et
|
gpl-2.0
|
matvey00z/histfetch
|
config.py
|
1
|
2851
|
import sys
import os
import re
import json
import shutil
import errno
config_directory_path = os.path.expanduser("~/.config/histfetch")
config_path = "/".join([config_directory_path, "config.json"])
default_config_path = "default_config.json"
last_config_path = "/".join([config_directory_path, "last_used_config.json"])
def create_default_config():
try:
os.makedirs(config_directory_path)
except OSError as exception:
if exception.errno != errno.EEXIST:
print("Can't create default config:", sys.exc_info(),
file=sys.stderr)
exit(1)
shutil.copyfile(default_config_path, config_path)
def compile_address_patterns(config):
address_patterns = []
for pattern_pair in config["patterns"].items():
address_patterns.append(
(
re.compile(pattern_pair[0]),
[re.compile(word_pattern)
for word_pattern in pattern_pair[1]]
)
)
return address_patterns
def read_config_file(config_path):
with open(config_path, "rt") as config_file:
config = json.load(config_file)
return config
def get_config_update(old_config, new_config):
old_patterns = old_config["patterns"]
new_patterns = new_config["patterns"]
patterns_update = {}
for new_pattern_pair in new_patterns.items():
dictionary_address = new_pattern_pair[0]
old_word_patterns = old_patterns.get(dictionary_address, None)
if not old_word_patterns:
patterns_update[new_pattern_pair[0]] = new_pattern_pair[1]
continue
new_word_patterns = new_pattern_pair[1]
word_update = [pattern for pattern in new_word_patterns
if pattern not in old_word_patterns]
if word_update:
patterns_update[dictionary_address] = word_update
return compile_address_patterns({"patterns" : patterns_update})
class ConfigParser:
def __init__(self):
if not os.path.isfile(config_path):
print("No config fine found; create default", file=sys.stderr)
create_default_config()
try:
self.config = read_config_file(config_path)
self.address_patterns = compile_address_patterns(self.config)
except:
print("Can't read config file:", sys.exc_info(), file=sys.stderr)
exit(1)
self.config_update = []
self.last_time = 0
if os.path.isfile(last_config_path):
self.last_config = read_config_file(last_config_path)
self.config_update = get_config_update(self.last_config, self.config)
self.last_time = self.last_config["last_time"]
def write_last_config(self):
with open(last_config_path, "wt") as last_config_file:
json.dump(self.config, last_config_file)
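# Minimal usage sketch (illustration only; histfetch's real entry point is not
# part of this module).  ConfigParser loads the merged configuration, exposes
# the patterns added since the previous run, and write_last_config() persists
# the current configuration for the next comparison.
if __name__ == "__main__":
    parser = ConfigParser()
    print("compiled address patterns:", len(parser.address_patterns))
    print("patterns added since last run:", len(parser.config_update))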
|
unlicense
|