import logging.handlers
import sqlalchemy as tsa
from sqlalchemy import bindparam
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import util
from sqlalchemy.sql import util as sql_util
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assert_raises_return
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import eq_regex
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import mock
from sqlalchemy.testing.util import lazy_gc
class LogParamsTest(fixtures.TestBase):
__only_on__ = "sqlite"
__requires__ = ("ad_hoc_engines",)
def setup(self):
self.eng = engines.testing_engine(options={"echo": True})
self.no_param_engine = engines.testing_engine(
options={"echo": True, "hide_parameters": True}
)
self.eng.execute("create table if not exists foo (data string)")
self.no_param_engine.execute(
"create table if not exists foo (data string)"
)
self.buf = logging.handlers.BufferingHandler(100)
for log in [logging.getLogger("sqlalchemy.engine")]:
log.addHandler(self.buf)
def teardown(self):
self.eng.execute("drop table if exists foo")
for log in [logging.getLogger("sqlalchemy.engine")]:
log.removeHandler(self.buf)
def test_log_large_list_of_dict(self):
self.eng.execute(
"INSERT INTO foo (data) values (:data)",
[{"data": str(i)} for i in range(100)],
)
eq_(
self.buf.buffer[1].message,
"[{'data': '0'}, {'data': '1'}, {'data': '2'}, {'data': '3'}, "
"{'data': '4'}, {'data': '5'}, {'data': '6'}, {'data': '7'}"
" ... displaying 10 of 100 total bound "
"parameter sets ... {'data': '98'}, {'data': '99'}]",
)
def test_repr_params_large_list_of_dict(self):
eq_(
repr(
sql_util._repr_params(
[{"data": str(i)} for i in range(100)],
batches=10,
ismulti=True,
)
),
"[{'data': '0'}, {'data': '1'}, {'data': '2'}, {'data': '3'}, "
"{'data': '4'}, {'data': '5'}, {'data': '6'}, {'data': '7'}"
" ... displaying 10 of 100 total bound "
"parameter sets ... {'data': '98'}, {'data': '99'}]",
)
def test_log_no_parameters(self):
self.no_param_engine.execute(
"INSERT INTO foo (data) values (:data)",
[{"data": str(i)} for i in range(100)],
)
eq_(
self.buf.buffer[1].message,
"[SQL parameters hidden due to hide_parameters=True]",
)
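    # For reference outside the test suite: the behaviour exercised above is
    # enabled on a real Engine with documented create_engine() flags; a
    # minimal sketch (the sqlite URL is illustrative only):
    #
    #     from sqlalchemy import create_engine
    #     engine = create_engine("sqlite://", echo=True, hide_parameters=True)
    #     engine.execute("select 1")  # SQL is echoed, parameters are not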
def test_log_large_list_of_tuple(self):
self.eng.execute(
"INSERT INTO foo (data) values (?)",
[(str(i),) for i in range(100)],
)
eq_(
self.buf.buffer[1].message,
"[('0',), ('1',), ('2',), ('3',), ('4',), ('5',), "
"('6',), ('7',) ... displaying 10 of 100 total "
"bound parameter sets ... ('98',), ('99',)]",
)
def test_log_positional_array(self):
with self.eng.connect() as conn:
exc_info = assert_raises_return(
tsa.exc.DBAPIError,
conn.execute,
tsa.text("SELECT * FROM foo WHERE id IN :foo AND bar=:bar"),
{"foo": [1, 2, 3], "bar": "hi"},
)
assert (
"[SQL: SELECT * FROM foo WHERE id IN ? AND bar=?]\n"
"[parameters: ([1, 2, 3], 'hi')]\n" in str(exc_info)
)
eq_(self.buf.buffer[1].message, "([1, 2, 3], 'hi')")
def test_repr_params_positional_array(self):
eq_(
repr(
sql_util._repr_params(
[[1, 2, 3], 5], batches=10, ismulti=False
)
),
"[[1, 2, 3], 5]",
)
def test_repr_params_unknown_list(self):
# not known if given multiparams or not. repr params with
# straight truncation
eq_(
repr(
sql_util._repr_params(
[[i for i in range(300)], 5], batches=10, max_chars=80
)
),
"[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, ... "
"(1315 characters truncated) ... , 293, 294, 295, 296, "
"297, 298, 299], 5]",
)
def test_repr_params_positional_list(self):
# given non-multi-params in a list. repr params with
# per-element truncation, mostly does the exact same thing
eq_(
repr(
sql_util._repr_params(
[[i for i in range(300)], 5],
batches=10,
max_chars=80,
ismulti=False,
)
),
"[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 1 ... "
"(1310 characters truncated) ... "
"292, 293, 294, 295, 296, 297, 298, 299], 5]",
)
def test_repr_params_named_dict(self):
        # given non-multi params as a dict; the repr is returned as-is
        # since it fits within max_chars
params = {"key_%s" % i: i for i in range(10)}
eq_(
repr(
sql_util._repr_params(
params, batches=10, max_chars=80, ismulti=False
)
),
repr(params),
)
def test_repr_params_ismulti_named_dict(self):
        # given multi-params as a list of dicts; repr params with
        # truncation of the displayed parameter sets
param = {"key_%s" % i: i for i in range(10)}
eq_(
repr(
sql_util._repr_params(
[param for j in range(50)],
batches=5,
max_chars=80,
ismulti=True,
)
),
"[%(param)r, %(param)r, %(param)r ... "
"displaying 5 of 50 total bound parameter sets ... "
"%(param)r, %(param)r]" % {"param": param},
)
def test_repr_params_ismulti_list(self):
# given multi-params in a list. repr params with
# per-element truncation, mostly does the exact same thing
eq_(
repr(
sql_util._repr_params(
[
[[i for i in range(300)], 5],
[[i for i in range(300)], 5],
[[i for i in range(300)], 5],
],
batches=10,
max_chars=80,
ismulti=True,
)
),
"[[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 1 ... "
"(1310 characters truncated) ... 292, 293, 294, 295, 296, 297, "
"298, 299], 5], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 1 ... "
"(1310 characters truncated) ... 292, 293, 294, 295, 296, 297, "
"298, 299], 5], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 1 ... "
"(1310 characters truncated) ... 292, 293, 294, 295, 296, 297, "
"298, 299], 5]]",
)
def test_log_large_parameter_single(self):
import random
largeparam = "".join(chr(random.randint(52, 85)) for i in range(5000))
self.eng.execute("INSERT INTO foo (data) values (?)", (largeparam,))
eq_(
self.buf.buffer[1].message,
"('%s ... (4702 characters truncated) ... %s',)"
% (largeparam[0:149], largeparam[-149:]),
)
def test_log_large_multi_parameter(self):
import random
lp1 = "".join(chr(random.randint(52, 85)) for i in range(5))
lp2 = "".join(chr(random.randint(52, 85)) for i in range(8))
lp3 = "".join(chr(random.randint(52, 85)) for i in range(670))
self.eng.execute("SELECT ?, ?, ?", (lp1, lp2, lp3))
eq_(
self.buf.buffer[1].message,
"('%s', '%s', '%s ... (372 characters truncated) ... %s')"
% (lp1, lp2, lp3[0:149], lp3[-149:]),
)
def test_log_large_parameter_multiple(self):
import random
lp1 = "".join(chr(random.randint(52, 85)) for i in range(5000))
lp2 = "".join(chr(random.randint(52, 85)) for i in range(200))
lp3 = "".join(chr(random.randint(52, 85)) for i in range(670))
self.eng.execute(
"INSERT INTO foo (data) values (?)", [(lp1,), (lp2,), (lp3,)]
)
eq_(
self.buf.buffer[1].message,
"[('%s ... (4702 characters truncated) ... %s',), ('%s',), "
"('%s ... (372 characters truncated) ... %s',)]"
% (lp1[0:149], lp1[-149:], lp2, lp3[0:149], lp3[-149:]),
)
def test_exception_format_dict_param(self):
exception = tsa.exc.IntegrityError("foo", {"x": "y"}, None)
eq_regex(
str(exception),
r"\(.*.NoneType\) None\n\[SQL: foo\]\n\[parameters: {'x': 'y'}\]",
)
def test_exception_format_hide_parameters(self):
exception = tsa.exc.IntegrityError(
"foo", {"x": "y"}, None, hide_parameters=True
)
eq_regex(
str(exception),
r"\(.*.NoneType\) None\n\[SQL: foo\]\n"
r"\[SQL parameters hidden due to hide_parameters=True\]",
)
def test_exception_format_hide_parameters_dbapi_round_trip(self):
assert_raises_message(
tsa.exc.DBAPIError,
r".*INSERT INTO nonexistent \(data\) values \(:data\)\]\n"
r"\[SQL parameters hidden due to hide_parameters=True\]",
lambda: self.no_param_engine.execute(
"INSERT INTO nonexistent (data) values (:data)",
[{"data": str(i)} for i in range(10)],
),
)
def test_exception_format_hide_parameters_nondbapi_round_trip(self):
foo = Table("foo", MetaData(), Column("data", String))
with self.no_param_engine.connect() as conn:
assert_raises_message(
tsa.exc.StatementError,
r"\(sqlalchemy.exc.InvalidRequestError\) A value is required "
r"for bind parameter 'the_data_2'\n"
r"\[SQL: SELECT foo.data \nFROM foo \nWHERE "
r"foo.data = \? OR foo.data = \?\]\n"
r"\[SQL parameters hidden due to hide_parameters=True\]",
conn.execute,
select([foo]).where(
or_(
foo.c.data == bindparam("the_data_1"),
foo.c.data == bindparam("the_data_2"),
)
),
{"the_data_1": "some data"},
)
def test_exception_format_unexpected_parameter(self):
# test that if the parameters aren't any known type, we just
# run through repr()
exception = tsa.exc.IntegrityError("foo", "bar", "bat")
eq_regex(
str(exception),
r"\(.*.str\) bat\n\[SQL: foo\]\n\[parameters: 'bar'\]",
)
def test_exception_format_unexpected_member_parameter(self):
# test that if the parameters aren't any known type, we just
# run through repr()
exception = tsa.exc.IntegrityError("foo", ["bar", "bat"], "hoho")
eq_regex(
str(exception),
r"\(.*.str\) hoho\n\[SQL: foo\]\n\[parameters: \['bar', 'bat'\]\]",
)
def test_result_large_param(self):
import random
largeparam = "".join(chr(random.randint(52, 85)) for i in range(5000))
self.eng.echo = "debug"
result = self.eng.execute("SELECT ?", (largeparam,))
row = result.first()
eq_(
self.buf.buffer[1].message,
"('%s ... (4702 characters truncated) ... %s',)"
% (largeparam[0:149], largeparam[-149:]),
)
if util.py3k:
eq_(
self.buf.buffer[3].message,
"Row ('%s ... (4702 characters truncated) ... %s',)"
% (largeparam[0:149], largeparam[-149:]),
)
else:
eq_(
self.buf.buffer[3].message,
"Row (u'%s ... (4703 characters truncated) ... %s',)"
% (largeparam[0:148], largeparam[-149:]),
)
if util.py3k:
eq_(
repr(row),
"('%s ... (4702 characters truncated) ... %s',)"
% (largeparam[0:149], largeparam[-149:]),
)
else:
eq_(
repr(row),
"(u'%s ... (4703 characters truncated) ... %s',)"
% (largeparam[0:148], largeparam[-149:]),
)
def test_error_large_dict(self):
assert_raises_message(
tsa.exc.DBAPIError,
r".*INSERT INTO nonexistent \(data\) values \(:data\)\]\n"
r"\[parameters: "
r"\[{'data': '0'}, {'data': '1'}, {'data': '2'}, "
r"{'data': '3'}, {'data': '4'}, {'data': '5'}, "
r"{'data': '6'}, {'data': '7'} ... displaying 10 of "
r"100 total bound parameter sets ... {'data': '98'}, "
r"{'data': '99'}\]",
lambda: self.eng.execute(
"INSERT INTO nonexistent (data) values (:data)",
[{"data": str(i)} for i in range(100)],
),
)
def test_error_large_list(self):
assert_raises_message(
tsa.exc.DBAPIError,
r".*INSERT INTO nonexistent \(data\) values "
r"\(\?\)\]\n\[parameters: \[\('0',\), \('1',\), \('2',\), "
r"\('3',\), \('4',\), \('5',\), \('6',\), \('7',\) "
r"... displaying "
r"10 of 100 total bound parameter sets ... "
r"\('98',\), \('99',\)\]",
lambda: self.eng.execute(
"INSERT INTO nonexistent (data) values (?)",
[(str(i),) for i in range(100)],
),
)
class PoolLoggingTest(fixtures.TestBase):
def setup(self):
self.existing_level = logging.getLogger("sqlalchemy.pool").level
self.buf = logging.handlers.BufferingHandler(100)
for log in [logging.getLogger("sqlalchemy.pool")]:
log.addHandler(self.buf)
def teardown(self):
for log in [logging.getLogger("sqlalchemy.pool")]:
log.removeHandler(self.buf)
logging.getLogger("sqlalchemy.pool").setLevel(self.existing_level)
def _queuepool_echo_fixture(self):
return tsa.pool.QueuePool(creator=mock.Mock(), echo="debug")
def _queuepool_logging_fixture(self):
logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG)
return tsa.pool.QueuePool(creator=mock.Mock())
def _stpool_echo_fixture(self):
return tsa.pool.SingletonThreadPool(creator=mock.Mock(), echo="debug")
def _stpool_logging_fixture(self):
logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG)
return tsa.pool.SingletonThreadPool(creator=mock.Mock())
def _test_queuepool(self, q, dispose=True):
conn = q.connect()
conn.close()
conn = None
conn = q.connect()
conn.close()
conn = None
conn = q.connect()
conn = None
del conn
lazy_gc()
q.dispose()
eq_(
[buf.msg for buf in self.buf.buffer],
[
"Created new connection %r",
"Connection %r checked out from pool",
"Connection %r being returned to pool",
"Connection %s rollback-on-return%s",
"Connection %r checked out from pool",
"Connection %r being returned to pool",
"Connection %s rollback-on-return%s",
"Connection %r checked out from pool",
"Connection %r being returned to pool",
"Connection %s rollback-on-return%s",
"Closing connection %r",
]
+ (["Pool disposed. %s"] if dispose else []),
)
def test_stpool_echo(self):
q = self._stpool_echo_fixture()
self._test_queuepool(q, False)
def test_stpool_logging(self):
q = self._stpool_logging_fixture()
self._test_queuepool(q, False)
def test_queuepool_echo(self):
q = self._queuepool_echo_fixture()
self._test_queuepool(q)
def test_queuepool_logging(self):
q = self._queuepool_logging_fixture()
self._test_queuepool(q)
class LoggingNameTest(fixtures.TestBase):
__requires__ = ("ad_hoc_engines",)
def _assert_names_in_execute(self, eng, eng_name, pool_name):
eng.execute(select([1]))
assert self.buf.buffer
for name in [b.name for b in self.buf.buffer]:
assert name in (
"sqlalchemy.engine.base.Engine.%s" % eng_name,
"sqlalchemy.pool.impl.%s.%s"
% (eng.pool.__class__.__name__, pool_name),
)
def _assert_no_name_in_execute(self, eng):
eng.execute(select([1]))
assert self.buf.buffer
for name in [b.name for b in self.buf.buffer]:
assert name in (
"sqlalchemy.engine.base.Engine",
"sqlalchemy.pool.impl.%s" % eng.pool.__class__.__name__,
)
def _named_engine(self, **kw):
options = {
"logging_name": "myenginename",
"pool_logging_name": "mypoolname",
"echo": True,
}
options.update(kw)
return engines.testing_engine(options=options)
def _unnamed_engine(self, **kw):
kw.update({"echo": True})
return engines.testing_engine(options=kw)
def setup(self):
self.buf = logging.handlers.BufferingHandler(100)
for log in [
logging.getLogger("sqlalchemy.engine"),
logging.getLogger("sqlalchemy.pool"),
]:
log.addHandler(self.buf)
def teardown(self):
for log in [
logging.getLogger("sqlalchemy.engine"),
logging.getLogger("sqlalchemy.pool"),
]:
log.removeHandler(self.buf)
def test_named_logger_names(self):
eng = self._named_engine()
eq_(eng.logging_name, "myenginename")
eq_(eng.pool.logging_name, "mypoolname")
def test_named_logger_names_after_dispose(self):
eng = self._named_engine()
eng.execute(select([1]))
eng.dispose()
eq_(eng.logging_name, "myenginename")
eq_(eng.pool.logging_name, "mypoolname")
def test_unnamed_logger_names(self):
eng = self._unnamed_engine()
eq_(eng.logging_name, None)
eq_(eng.pool.logging_name, None)
def test_named_logger_execute(self):
eng = self._named_engine()
self._assert_names_in_execute(eng, "myenginename", "mypoolname")
def test_named_logger_echoflags_execute(self):
eng = self._named_engine(echo="debug", echo_pool="debug")
self._assert_names_in_execute(eng, "myenginename", "mypoolname")
def test_named_logger_execute_after_dispose(self):
eng = self._named_engine()
eng.execute(select([1]))
eng.dispose()
self._assert_names_in_execute(eng, "myenginename", "mypoolname")
def test_unnamed_logger_execute(self):
eng = self._unnamed_engine()
self._assert_no_name_in_execute(eng)
def test_unnamed_logger_echoflags_execute(self):
eng = self._unnamed_engine(echo="debug", echo_pool="debug")
self._assert_no_name_in_execute(eng)
class EchoTest(fixtures.TestBase):
__requires__ = ("ad_hoc_engines",)
def setup(self):
self.level = logging.getLogger("sqlalchemy.engine").level
logging.getLogger("sqlalchemy.engine").setLevel(logging.WARN)
self.buf = logging.handlers.BufferingHandler(100)
logging.getLogger("sqlalchemy.engine").addHandler(self.buf)
def teardown(self):
logging.getLogger("sqlalchemy.engine").removeHandler(self.buf)
logging.getLogger("sqlalchemy.engine").setLevel(self.level)
def _testing_engine(self):
e = engines.testing_engine()
# do an initial execute to clear out 'first connect'
# messages
e.execute(select([10])).close()
self.buf.flush()
return e
def test_levels(self):
e1 = engines.testing_engine()
eq_(e1._should_log_info(), False)
eq_(e1._should_log_debug(), False)
eq_(e1.logger.isEnabledFor(logging.INFO), False)
eq_(e1.logger.getEffectiveLevel(), logging.WARN)
e1.echo = True
eq_(e1._should_log_info(), True)
eq_(e1._should_log_debug(), False)
eq_(e1.logger.isEnabledFor(logging.INFO), True)
eq_(e1.logger.getEffectiveLevel(), logging.INFO)
e1.echo = "debug"
eq_(e1._should_log_info(), True)
eq_(e1._should_log_debug(), True)
eq_(e1.logger.isEnabledFor(logging.DEBUG), True)
eq_(e1.logger.getEffectiveLevel(), logging.DEBUG)
e1.echo = False
eq_(e1._should_log_info(), False)
eq_(e1._should_log_debug(), False)
eq_(e1.logger.isEnabledFor(logging.INFO), False)
eq_(e1.logger.getEffectiveLevel(), logging.WARN)
def test_echo_flag_independence(self):
"""test the echo flag's independence to a specific engine."""
e1 = self._testing_engine()
e2 = self._testing_engine()
e1.echo = True
e1.execute(select([1])).close()
e2.execute(select([2])).close()
e1.echo = False
e1.execute(select([3])).close()
e2.execute(select([4])).close()
e2.echo = True
e1.execute(select([5])).close()
e2.execute(select([6])).close()
assert self.buf.buffer[0].getMessage().startswith("SELECT 1")
assert self.buf.buffer[2].getMessage().startswith("SELECT 6")
assert len(self.buf.buffer) == 4
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
## @file ostap/math/reduce.py
# Module with some useful utilities for reducing some math objects
# =============================================================================
__version__ = "$Revision$"
__author__ = "Vanya BELYAEV [email protected]"
__date__ = "2011-12-01"
__all__ = ()
# =============================================================================
import ROOT
from array import array
from ostap.math.base import Ostap, doubles
from ostap.core.ostap_types import sequence_types
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger ( 'ostap.math.reduce' )
else : logger = getLogger ( __name__ )
# =============================================================================
# =============================================================================
## Simple (basic) polynomials
# =============================================================================
# =============================================================================
## factory for deserialization of simple polynomials
# @see Ostap.Math.Chebyshev
# @see Ostap.Math.ChebyshevU
# @see Ostap.Math.Hermite
# @see Ostap.Math.Legendre
# @see Ostap.Math.PLegendre
def pN_factory ( klass , *args ) :
"""Factory for deserialization of simple polynomians
- see Ostap.Math.Chebyshev
- see Ostap.Math.ChebyshevU
- see Ostap.Math.Hermite
- see Ostap.Math.Legendre
- see Ostap.Math.PLegendre
"""
return klass ( *args )
# =============================================================================
## reduce simple polynomials
# @see Ostap.Math.Chebyshev
# @see Ostap.Math.ChebyshevU
# @see Ostap.Math.Hermite
# @see Ostap.Math.Legendre
def pN_reduce ( p ) :
"""Reduce simple polynomials
- see Ostap.Math.Chebyshev
- see Ostap.Math.ChebyshevU
- see Ostap.Math.Hermite
- see Ostap.Math.Legendre
"""
return pN_factory , ( type ( p ) , p.degree() )
# =============================================================================
## reduce simple polynomials
# @see Ostap.Math.PLegendre
def pLM_reduce ( p ) :
"""Reduce simple polynomials
- see Ostap.Math.PLegendre
"""
return pN_factory , ( type ( p ) , p.L() , p.M() )
for t in ( Ostap.Math.Chebyshev ,
Ostap.Math.ChebyshevU ,
Ostap.Math.Hermite ,
Ostap.Math.Legendre ) :
t.__reduce__ = pN_reduce
Ostap.Math.PLegendre.__reduce__ = pLM_reduce
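# =============================================================================
## Illustrative (hedged) sketch of how pickle consumes the reduce/factory pair
#  defined above: pickle stores the tuple returned by pN_reduce and rebuilds
#  the object by calling pN_factory with those arguments, so only the type and
#  the degree survive the round trip (constructor signature assumed from
#  pN_reduce):
#
#      import pickle
#      p1 = Ostap.Math.Chebyshev ( 5 )               ## degree-5 polynomial
#      p2 = pickle.loads ( pickle.dumps ( p1 ) )
#      assert type ( p2 ) is type ( p1 ) and p2.degree() == p1.degree()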
# =============================================================================
## Regular polynomials
# =============================================================================
# =============================================================================
## factory for deserialization of polynomials with parameters
# @see Ostap::Math::Polynomial
# @see Ostap::Math::ChebyshevSum
# @see Ostap::Math::LegendreSum
# @see Ostap::Math::HermiteSum
# @see Ostap::Math::Bernstein
# @see Ostap::Math::BernsteinEven
def poly_factory ( klass , params , *args ) :
"""Factory for deserisalization of polynomials with parameters
- see Ostap.Math.Polynomial
- see Ostap.Math.ChebyshevSum
- see Ostap.Math.LegendreSum
- see Ostap.Math.HermiteSum
- see Ostap.Math.Bernstein
- see Ostap.Math.BernsteinEven
"""
return klass ( doubles ( params ) , *args )
# =============================================================================
## Reduce polynomials with parameters
# @see Ostap::Math::Polynomial
# @see Ostap::Math::ChebyshevSum
# @see Ostap::Math::LegendreSum
# @see Ostap::Math::HermiteSum
# @see Ostap::Math::Bernstein
# @see Ostap::Math::BernsteinEven
# @see Ostap::Math::Positive
def poly_reduce ( p ) :
"""Reduce polynomials with parameters
- see Ostap.Math.Polynomial
- see Ostap.Math.ChebyshevSum
- see Ostap.Math.LegendreSum
- see Ostap.Math.HermiteSum
- see Ostap.Math.Bernstein
- see Ostap.Math.BernsteinEven
- see Ostap.Math.Positive
- see Ostap.Math.PositiveEven
"""
return poly_factory , ( type ( p ) ,
array ( 'd' , p.pars() ) ,
p.xmin () ,
p.xmax () )
for t in ( Ostap.Math.Polynomial ,
Ostap.Math.ChebyshevSum ,
Ostap.Math.LegendreSum ,
Ostap.Math.HermiteSum ,
Ostap.Math.Bernstein ,
Ostap.Math.BernsteinEven ,
Ostap.Math.Positive ,
Ostap.Math.PositiveEven ) :
t.__reduce__ = poly_reduce
# =============================================================================
## Specific forms of Bernstein polynomials
# =============================================================================
# =============================================================================
## reduce monotonic polynomial
# @see Ostap::Math::Monotonic
def pm_reduce ( p ) :
"""reduce monotonic polynomial
- see Ostap.Math.Monotonic
"""
return poly_factory , ( type ( p ) ,
array ( 'd' , p.pars() ) ,
p.xmin () ,
p.xmax () ,
True if p.increasing() else False )
# =============================================================================
## reduce convex polynomial
# @see Ostap::Math::Convex
def pc_reduce ( p ) :
"""reduce convex polynomial
- see Ostap.Math.Convex
"""
return poly_factory , ( type ( p ) ,
array ( 'd' , p.pars() ) ,
p.xmin () ,
p.xmax () ,
True if p.increasing () else False ,
True if p.convex () else False )
# =============================================================================
## reduce convex-only polynomial
# @see Ostap::Math::ConvexOnly
def pco_reduce ( p ) :
"""reduce convex-only polynomial
- see Ostap.Math.ConvexOnly
"""
return poly_factory , ( type ( p ) ,
array ( 'd' , p.pars() ) ,
p.xmin () ,
p.xmax () ,
True if p.convex () else False )
Ostap.Math.Monotonic .__reduce__ = pm_reduce
Ostap.Math.Convex .__reduce__ = pc_reduce
Ostap.Math.ConvexOnly .__reduce__ = pco_reduce
# =============================================================================
## B-splines
# =============================================================================
# =============================================================================
## factory for deserialization of splines
# @see Ostap::Math::BSPline
# @see Ostap::Math::PositiveSpline
# @see Ostap::Math::MonotonicSpline
# @see Ostap::Math::ConvexSpline
# @see Ostap::Math::ConvexOnlySpline
def sp_factory ( klass , knots , pars , *args ) :
"""Factory for deserisalization of splines
- see Ostap.Math.BSPline
- see Ostap.Math.PositiveSpline
- see Ostap.Math.MonotonicSpline
- see Ostap.Math.ConvexSpline
- see Ostap.Math.ConvexOnlySpline
"""
return klass ( doubles ( knots) , doubles ( pars ) , *args )
# =============================================================================
## reduce splines
# @see Ostap::Math::BSPline
# @see Ostap::Math::PositiveSpline
def sp_reduce ( sp ) :
"""Factory for deserisalization of splines
- see Ostap.Math.BSPline
- see Ostap.Math.PositiveSpline
"""
return sp_factory , ( type ( sp ) ,
array ( 'd' , sp.knots() ) ,
array ( 'd' , sp.pars () ) )
Ostap.Math.BSpline . __reduce__ = sp_reduce
Ostap.Math.PositiveSpline . __reduce__ = sp_reduce
# =============================================================================
## reduce monotonic splines
# @see Ostap::Math::MonotonicSpline
def spm_reduce ( sp ) :
"""Factory for deserisalization of splines
- see Ostap.Math.MonotonicSpline
"""
return sp_factory , ( type ( sp ) ,
array ( 'd' , sp.knots() ) ,
array ( 'd' , sp.pars () ) ,
True if sp.increasing () else False )
# =============================================================================
## reduce convex splines
# @see Ostap::Math::ConvexSpline
def spc_reduce ( sp ) :
"""Factory for deserisalization of splines
- see Ostap.Math.ConvexSpline
"""
return sp_factory , ( type ( sp ) ,
array ( 'd' , sp.knots() ) ,
array ( 'd' , sp.pars () ) ,
True if sp.increasing () else False ,
True if sp.convex () else False )
# =============================================================================
## reduce convex-only splines
# @see Ostap::Math::ConvexOnlySpline
def spco_reduce ( sp ) :
"""Factory for deserisalization of splines
- see Ostap.Math.ConvexOnlySpline
"""
return sp_factory , ( type ( sp ) ,
array ( 'd' , sp.knots() ) ,
array ( 'd' , sp.pars () ) ,
                          True if sp.convex () else False )
Ostap.Math.MonotonicSpline . __reduce__ = spm_reduce
Ostap.Math.ConvexSpline . __reduce__ = spc_reduce
Ostap.Math.ConvexOnlySpline . __reduce__ = spco_reduce
# =============================================================================
## Interpolation stuff
# =============================================================================
# ============================================================================
## factory for deserialisation of interpolation abscissas
def abs_factory ( arg , *args ) :
"""Factory for deserialisation of interpolation abscissas
"""
if isinstance ( arg , sequence_types ) :
vals = doubles ( arg )
return Ostap.Math.Interpolation.Abscissas ( vals , *args )
return Ostap.Math.Interpolation.Abscissas ( arg , *args )
# =============================================================================
## Reduce interpolation abscissas
def abs_reduce ( a ) :
"""Reduce interpolation abscissas
"""
at = a.atype()
if at in ( Ostap.Math.Interpolation.Abscissas.Uniform ,
Ostap.Math.Interpolation.Abscissas.Chebyshev ,
Ostap.Math.Interpolation.Abscissas.Chebyshev2 ) :
return abs_factory, ( a.n () , a.xmin() , a.xmax () , int ( at ) )
return abs_factory, ( array ('d' , a.x() ) , )
# ============================================================================
## the factory for deserialisation of the interpolation table
def tab_factory ( abscissas , values ) :
"""The factory for serialisation of the interpolation table
"""
return Ostap.Math.Interpolation.Table ( abscissas , doubles ( values ) )
## ===========================================================================
## Reduce the interpolation table
def tab_reduce ( table ) :
"""Reduce the interpolation table"""
return tab_factory , ( table.abscissas () ,
array ( 'd' , table.values () ) )
Ostap.Math.Interpolation.Abscissas . __reduce__ = abs_reduce
Ostap.Math.Interpolation.Table . __reduce__ = tab_reduce
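# ============================================================================
## Illustrative (hedged) example of the two branches of abs_reduce above:
#  "special" abscissas (uniform/Chebyshev) are rebuilt from (n, xmin, xmax,
#  type-code), generic ones from the explicit array of x-values; the concrete
#  values below are arbitrary:
#
#      a1 = abs_factory ( 10 , 0.0 , 1.0 , 1 )           ## typed abscissas
#      a2 = abs_factory ( [ 0.0 , 0.25 , 0.5 , 1.0 ] )   ## from a sequence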
# ============================================================================
## the factory for deserialisation of the interpolation objects
def int_factory ( klass , abscissas , values , *args ) :
"""The factory for serialisation of the interpolation table
"""
the_table = Ostap.Math.Interpolation.Table ( abscissas , doubles ( values ) )
return klass ( the_table , *args )
## ===========================================================================
## Reduce the interpolation object
def int_reduce ( table ) :
"""Reduce the interpolation object"""
return int_factory , ( type ( table ) ,
table.abscissas () ,
array ( 'd' , table.values () ) )
## ===========================================================================
## Reduce the interpolation Floater-Hormann interpolant
def intfh_reduce ( table ) :
"""Reduce the Floater-Hormann interpolant"""
return int_factory , ( type ( table ) ,
table.abscissas () ,
array ( 'd' , table.values () ) ,
table.d () )
for t in ( Ostap.Math.Neville ,
Ostap.Math.Lagrange ,
Ostap.Math.Newton ,
Ostap.Math.Barycentric ,
Ostap.Math.Berrut1st ,
Ostap.Math.Berrut2nd ) :
t.__reduce__ = int_reduce
Ostap.Math.FloaterHormann. __reduce__ = intfh_reduce
# =============================================================================
## Dalitz' objects
# =============================================================================
# =============================================================================
## Factory for deserialization of Dalitz' objects
def dalitz_factory ( klass , *params ) :
"""Factory for deserialization of `Dalitz` objects
"""
return klass ( *params )
# ============================================================================
## Serialise class <code>Ostap::Kinematics::Dalitz0</code>
# @see Ostap::Kinematics::Dalitz0
def _dalitz0_reduce_ ( dalitz ) :
"""Serialise class `Ostap.Kinematics.Dalitz0`
- see Ostap.Kinematics.Dalitz0
"""
return dalitz_factory , ( type ( dalitz ) ,
dalitz.m1 () ,
dalitz.m2 () ,
dalitz.m3 () )
# ============================================================================
## Serialise class <code>Ostap::Kinematics::Dalitz</code>
# @see Ostap::Kinematics::Dalitz
def _dalitzm_reduce_ ( dalitz ) :
"""Serialise class `Ostap.Kinematics.Dalitz`
- see Ostap.Kinematics.Dalitz
"""
return dalitz_factory , ( type ( dalitz ) ,
dalitz.M () ,
dalitz.m1 () ,
dalitz.m2 () ,
dalitz.m3 () )
Ostap.Kinematics.Dalitz0. __reduce__ = _dalitz0_reduce_
Ostap.Kinematics.Dalitz . __reduce__ = _dalitzm_reduce_
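# ============================================================================
## Illustrative (hedged) round trip for the Dalitz objects: only the masses
#  are stored, exactly as the reduce functions above return them (the mass
#  values below are arbitrary numbers for the sketch):
#
#      import pickle
#      d  = Ostap.Kinematics.Dalitz ( 5.279 , 0.140 , 0.140 , 0.494 )
#      d2 = pickle.loads ( pickle.dumps ( d ) )
#      assert d2.M() == d.M() and d2.m1() == d.m1()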
# =============================================================================
## decorated classes
_decorated_classes_ = (
##
Ostap.Math.Chebyshev ,
Ostap.Math.ChebyshevU ,
Ostap.Math.Hermite ,
Ostap.Math.Legendre ,
Ostap.Math.PLegendre ,
##
Ostap.Math.Polynomial ,
Ostap.Math.ChebyshevSum ,
Ostap.Math.LegendreSum ,
Ostap.Math.HermiteSum ,
Ostap.Math.Bernstein ,
Ostap.Math.BernsteinEven ,
Ostap.Math.Positive ,
Ostap.Math.PositiveEven ,
##
Ostap.Math.Monotonic ,
Ostap.Math.Convex ,
Ostap.Math.ConvexOnly ,
##
Ostap.Math.BSpline ,
Ostap.Math.PositiveSpline ,
##
Ostap.Math.MonotonicSpline ,
Ostap.Math.ConvexSpline ,
Ostap.Math.ConvexOnlySpline ,
##
Ostap.Math.Interpolation.Abscissas ,
Ostap.Math.Interpolation.Table ,
##
Ostap.Math.Neville ,
Ostap.Math.Lagrange ,
Ostap.Math.Newton ,
Ostap.Math.Barycentric ,
Ostap.Math.Berrut1st ,
Ostap.Math.Berrut2nd ,
Ostap.Math.FloaterHormann ,
##
Ostap.Kinematics.Dalitz0 ,
Ostap.Kinematics.Dalitz ,
)
# =============================================================================
## new methods
_new_methods_ = (
##
Ostap.Math.Chebyshev . __reduce__ ,
Ostap.Math.ChebyshevU . __reduce__ ,
Ostap.Math.Hermite . __reduce__ ,
Ostap.Math.Legendre . __reduce__ ,
Ostap.Math.PLegendre . __reduce__ ,
##
Ostap.Math.Polynomial . __reduce__ ,
Ostap.Math.ChebyshevSum . __reduce__ ,
Ostap.Math.LegendreSum . __reduce__ ,
Ostap.Math.HermiteSum . __reduce__ ,
Ostap.Math.Bernstein . __reduce__ ,
Ostap.Math.BernsteinEven . __reduce__ ,
Ostap.Math.Positive . __reduce__ ,
Ostap.Math.PositiveEven . __reduce__ ,
##
Ostap.Math.Monotonic . __reduce__ ,
Ostap.Math.Convex . __reduce__ ,
Ostap.Math.ConvexOnly . __reduce__ ,
##
Ostap.Math.BSpline . __reduce__ ,
Ostap.Math.PositiveSpline . __reduce__ ,
##
Ostap.Math.MonotonicSpline . __reduce__ ,
Ostap.Math.ConvexSpline . __reduce__ ,
Ostap.Math.ConvexOnlySpline . __reduce__ ,
##
Ostap.Math.Interpolation.Abscissas . __reduce__ ,
Ostap.Math.Interpolation.Table . __reduce__ ,
##
Ostap.Math.Neville . __reduce__ ,
Ostap.Math.Lagrange . __reduce__ ,
Ostap.Math.Newton . __reduce__ ,
Ostap.Math.Barycentric . __reduce__ ,
Ostap.Math.Berrut1st . __reduce__ ,
Ostap.Math.Berrut2nd . __reduce__ ,
Ostap.Math.FloaterHormann . __reduce__ ,
##
Ostap.Kinematics.Dalitz0 . __reduce__ ,
Ostap.Kinematics.Dalitz . __reduce__ ,
)
# =============================================================================
if '__main__' == __name__ :
from ostap.utils.docme import docme
docme ( __name__ , logger = logger )
# =============================================================================
## The END
# =============================================================================
from django.db.models import signals
from datetime import datetime, timedelta, date
import reversion
from collections import namedtuple
from difflib import SequenceMatcher
from django.contrib.contenttypes.models import ContentType
# convenience methods for reversion-backed models
class VersionManagerHelperException(Exception):
pass
class VersionManagerAccessor(object):
''' accessor for history objects, inspired by
the Django Manager and (Generic)ForeignKey classes
returns a VersionManager for the object.
'''
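    # A hedged usage sketch (the model and field names are illustrative only):
    #
    #     class Article(models.Model):
    #         title = models.CharField(max_length=100)
    #         versions = VersionManagerAccessor()
    #
    #     article.versions.list()      # proxied historical instances
    #     article.versions.previous()  # the revision before this one
    #     article.versions.current()   # the live database row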
class VersionManager(object):
        def _translate_reversion_call(self, *args, **kwargs):
            # prepend the wrapped object to the positional arguments
            # (tuple concatenation; list.insert would return None)
            return (self.obj,) + args
def _proxify_object(self, version):
ret = self.cls()
ret.__dict__.update(version.field_dict)
ret._version = version
ret.ltools_versiondate = version.revision.date_created
if hasattr(self.cls._meta, 'versioned_attributes'):
recursing_attrs = [x[:-2] for x in self.cls._meta.versioned_attributes if x[-2:]==':r']
for aname in recursing_attrs:
                    def roflcopter(aname=aname):  # bind aname per iteration to avoid the late-binding closure bug
result = []
attr = getattr(ret, aname)
ctype = ContentType.objects.get_for_model(attr.model)
versions = ret._version.revision.version_set.filter(content_type=ctype)
for version in versions:
instance = version.object_version.object
result.append(instance)
return result
setattr(ret, aname+'_', roflcopter)
return ret
def _proxify_objects(self, retval):
ret = None
try:
for x in retval:
ret = ret or []
ret.append(self._proxify_object(x))
except TypeError, e:
ret = self._proxify_object(retval)
return ret
def _generate_accessor(self, name, methodname):
setattr(self, name,
lambda *args, **kwargs:
self._proxify_objects(getattr(reversion, methodname)(self.obj, *args, **kwargs))
)
def list(self):
if not hasattr(self, '_list'):
self._list = self._proxify_objects(reversion.get_for_object(self.obj).select_related('revision'))
return self._list
def __init__(self, obj, cls):
self.obj = obj
self.cls = cls
methods = {
'for_date': 'get_for_date',
}
for name, methodname in methods.iteritems():
self._generate_accessor(name, methodname)
def by_datetime(self):
vs = self.obj.versions.list()
return dict([(x._version.revision.date_created, x,) for x in vs])
def this_date(self):
return self.obj._version.revision.date_created
def previous(self):
current_date = self.obj._version.revision.date_created if hasattr(self.obj, '_version') else None
versions_by_datetime = self.by_datetime()
datetimes = versions_by_datetime.keys() if not current_date else [x for x in versions_by_datetime.keys()
if x < current_date]
datetimes.sort()
if not datetimes:
return None
return versions_by_datetime[datetimes[-1]]
def next(self):
current_date = self.obj._version.revision.date_created if hasattr(self.obj, '_version') else None
if not current_date:
return None
versions_by_datetime = self.by_datetime()
datetimes = [x for x in versions_by_datetime.keys() if x > current_date]
datetimes.sort()
if not datetimes:
return None
return versions_by_datetime[datetimes[0]]
def by_date(self):
dates = []
lastdate = None
vs = self.obj.versions.list()
vs.sort(key=lambda x: x.ltools_versiondate)
for versionstory in vs:
revision = versionstory._version.revision
if lastdate:
while lastdate < revision.date_created.date():
lastdate += timedelta(days=1)
lastdate = revision.date_created.date()
if not dates or dates[-1] != lastdate:
dates.append((lastdate, [], ))
dates[-1][1].append(versionstory)
return dict(dates)
def timeline(self):
x = self.by_datetime()
datetimes = sorted(x.keys())
dates = self.by_date()
beginning = x[datetimes[0]]._version.revision.date_created.date()
days = (x[datetimes[-1]]._version.revision.date_created.date() - beginning).days + 1
Day = namedtuple('Day', ('date', 'events',),)
result = []
for i in xrange(days):
current = beginning + timedelta(days=i)
if current in dates:
result.append(Day(current, dates[current]))
else:
result.append(Day(current, []))
return result
def is_current(self):
if not self.obj:
raise VersionManagerHelperException("sorry, this is only available for %s instances" % self.cls)
return not hasattr(self.obj, '_version') or self.for_date(datetime.now())._version == self.obj._version
def current(self):
if not self.obj:
raise VersionManagerHelperException("sorry, this is only available for %s instances" % self.cls)
if not hasattr(self.obj, '_version'):
return self.obj
if not hasattr(self.obj, '_current'):
self.obj._current = self.cls._default_manager.get(pk=self.obj.pk)
return self.obj._current
def activity(self):
diff_overrides = {'d': lambda x,y: SequenceMatcher(a=x,b=y).ratio(), '=': lambda x,y: 0}
by_date = self.by_date()
today = date.today()
activity = []
for i in xrange(0,-31,-1):
day = today + timedelta(days=i)
if day in by_date:
current_version = by_date[day][0]
previous_version = current_version.versions.previous()
amount = 0
if not previous_version: # first version, just add a blip.
activity.append(100)
activity.append(0)
activity.append(None)
break
diffs = current_version.diff_to_older(previous_version, override=diff_overrides)
tmp = 0
for key,value in diffs.iteritems():
length = 0
try:
length = len(getattr(current_version,key))
except ValueError, e:
pass # allow for nullable elements
except OSError, e:
pass # allow for image to not exist
tmp+=value*length
activity.append(tmp)
else:
activity.append(0)
return activity[::-1] # reverse
def __get__(self, instance, owner):
if not instance:
return self
else:
return self.VersionManager(instance, owner)
def _ignore_versioned_modifications(self, instance, sender, **kwargs):
if instance and hasattr(instance, '_version'):
raise VersionManagerHelperException(
"you're trying to overwrite a former version of this model. sorry, that will not work out")
def contribute_to_class(self, cls, name):
setattr(cls, name, self)
signals.pre_delete.connect(self._ignore_versioned_modifications, sender=cls)
signals.pre_save.connect(self._ignore_versioned_modifications, sender=cls)
"""Support for Almond."""
import asyncio
from datetime import timedelta
import logging
import time
from typing import Optional
from aiohttp import ClientError, ClientSession
import async_timeout
from pyalmond import AbstractAlmondWebAuth, AlmondLocalAuth, WebAlmondAPI
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components import conversation
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_HOST,
CONF_TYPE,
EVENT_HOMEASSISTANT_START,
)
from homeassistant.core import Context, CoreState, HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import (
aiohttp_client,
config_entry_oauth2_flow,
config_validation as cv,
event,
intent,
network,
storage,
)
from . import config_flow
from .const import DOMAIN, TYPE_LOCAL, TYPE_OAUTH2
STORAGE_VERSION = 1
STORAGE_KEY = DOMAIN
ALMOND_SETUP_DELAY = 30
DEFAULT_OAUTH2_HOST = "https://almond.stanford.edu"
DEFAULT_LOCAL_HOST = "http://localhost:3000"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Any(
vol.Schema(
{
vol.Required(CONF_TYPE): TYPE_OAUTH2,
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_OAUTH2_HOST): cv.url,
}
),
vol.Schema(
{vol.Required(CONF_TYPE): TYPE_LOCAL, vol.Required(CONF_HOST): cv.url}
),
)
},
extra=vol.ALLOW_EXTRA,
)
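# A hedged illustration of the configuration this schema accepts, written with
# the module's own constants so no literal values need to be assumed:
#
#     CONFIG_SCHEMA({DOMAIN: {CONF_TYPE: TYPE_LOCAL, CONF_HOST: DEFAULT_LOCAL_HOST}})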
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
"""Set up the Almond component."""
hass.data[DOMAIN] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
host = conf[CONF_HOST]
if conf[CONF_TYPE] == TYPE_OAUTH2:
config_flow.AlmondFlowHandler.async_register_implementation(
hass,
config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
conf[CONF_CLIENT_ID],
conf[CONF_CLIENT_SECRET],
f"{host}/me/api/oauth2/authorize",
f"{host}/me/api/oauth2/token",
),
)
return True
if not hass.config_entries.async_entries(DOMAIN):
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"type": TYPE_LOCAL, "host": conf[CONF_HOST]},
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: config_entries.ConfigEntry):
"""Set up Almond config entry."""
websession = aiohttp_client.async_get_clientsession(hass)
if entry.data["type"] == TYPE_LOCAL:
auth = AlmondLocalAuth(entry.data["host"], websession)
else:
# OAuth2
implementation = (
await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, entry
)
)
oauth_session = config_entry_oauth2_flow.OAuth2Session(
hass, entry, implementation
)
auth = AlmondOAuth(entry.data["host"], websession, oauth_session)
api = WebAlmondAPI(auth)
agent = AlmondAgent(hass, api, entry)
# Hass.io does its own configuration.
if not entry.data.get("is_hassio"):
# If we're not starting or local, set up Almond right away
if hass.state != CoreState.not_running or entry.data["type"] == TYPE_LOCAL:
await _configure_almond_for_ha(hass, entry, api)
else:
# OAuth2 implementations can potentially rely on the HA Cloud url.
            # This URL is not available until 30 seconds after boot.
async def configure_almond(_now):
try:
await _configure_almond_for_ha(hass, entry, api)
except ConfigEntryNotReady:
_LOGGER.warning(
"Unable to configure Almond to connect to Home Assistant"
)
async def almond_hass_start(_event):
event.async_call_later(hass, ALMOND_SETUP_DELAY, configure_almond)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, almond_hass_start)
conversation.async_set_agent(hass, agent)
return True
async def _configure_almond_for_ha(
hass: HomeAssistant, entry: config_entries.ConfigEntry, api: WebAlmondAPI
):
"""Configure Almond to connect to HA."""
try:
if entry.data["type"] == TYPE_OAUTH2:
# If we're connecting over OAuth2, we will only set up connection
# with Home Assistant if we're remotely accessible.
hass_url = network.get_url(hass, allow_internal=False, prefer_cloud=True)
else:
hass_url = network.get_url(hass)
except network.NoURLAvailableError:
# If no URL is available, we're not going to configure Almond to connect to HA.
return
_LOGGER.debug("Configuring Almond to connect to Home Assistant at %s", hass_url)
store = storage.Store(hass, STORAGE_VERSION, STORAGE_KEY)
data = await store.async_load()
if data is None:
data = {}
user = None
if "almond_user" in data:
user = await hass.auth.async_get_user(data["almond_user"])
if user is None:
user = await hass.auth.async_create_system_user("Almond", [GROUP_ID_ADMIN])
data["almond_user"] = user.id
await store.async_save(data)
refresh_token = await hass.auth.async_create_refresh_token(
user,
# Almond will be fine as long as we restart once every 5 years
access_token_expiration=timedelta(days=365 * 5),
)
# Create long lived access token
access_token = hass.auth.async_create_access_token(refresh_token)
# Store token in Almond
try:
with async_timeout.timeout(30):
await api.async_create_device(
{
"kind": "io.home-assistant",
"hassUrl": hass_url,
"accessToken": access_token,
"refreshToken": "",
# 5 years from now in ms.
"accessTokenExpires": (time.time() + 60 * 60 * 24 * 365 * 5) * 1000,
}
)
except (asyncio.TimeoutError, ClientError) as err:
if isinstance(err, asyncio.TimeoutError):
msg = "Request timeout"
else:
msg = err
_LOGGER.warning("Unable to configure Almond: %s", msg)
await hass.auth.async_remove_refresh_token(refresh_token)
raise ConfigEntryNotReady from err
# Clear all other refresh tokens
for token in list(user.refresh_tokens.values()):
if token.id != refresh_token.id:
await hass.auth.async_remove_refresh_token(token)
async def async_unload_entry(hass, entry):
"""Unload Almond."""
conversation.async_set_agent(hass, None)
return True
class AlmondOAuth(AbstractAlmondWebAuth):
"""Almond Authentication using OAuth2."""
def __init__(
self,
host: str,
websession: ClientSession,
oauth_session: config_entry_oauth2_flow.OAuth2Session,
):
"""Initialize Almond auth."""
super().__init__(host, websession)
self._oauth_session = oauth_session
async def async_get_access_token(self):
"""Return a valid access token."""
if not self._oauth_session.valid_token:
await self._oauth_session.async_ensure_token_valid()
return self._oauth_session.token["access_token"]
class AlmondAgent(conversation.AbstractConversationAgent):
"""Almond conversation agent."""
def __init__(
self, hass: HomeAssistant, api: WebAlmondAPI, entry: config_entries.ConfigEntry
):
"""Initialize the agent."""
self.hass = hass
self.api = api
self.entry = entry
@property
def attribution(self):
"""Return the attribution."""
return {"name": "Powered by Almond", "url": "https://almond.stanford.edu/"}
async def async_get_onboarding(self):
"""Get onboard url if not onboarded."""
if self.entry.data.get("onboarded"):
return None
host = self.entry.data["host"]
if self.entry.data.get("is_hassio"):
host = "/core_almond"
return {
"text": "Would you like to opt-in to share your anonymized commands with Stanford to improve Almond's responses?",
"url": f"{host}/conversation",
}
async def async_set_onboarding(self, shown):
"""Set onboarding status."""
self.hass.config_entries.async_update_entry(
self.entry, data={**self.entry.data, "onboarded": shown}
)
return True
async def async_process(
self, text: str, context: Context, conversation_id: Optional[str] = None
) -> intent.IntentResponse:
"""Process a sentence."""
response = await self.api.async_converse_text(text, conversation_id)
first_choice = True
buffer = ""
for message in response["messages"]:
if message["type"] == "text":
buffer += f"\n{message['text']}"
elif message["type"] == "picture":
buffer += f"\n Picture: {message['url']}"
elif message["type"] == "rdl":
buffer += (
f"\n Link: {message['rdl']['displayTitle']} "
f"{message['rdl']['webCallback']}"
)
elif message["type"] == "choice":
if first_choice:
first_choice = False
else:
buffer += ","
buffer += f" {message['title']}"
intent_result = intent.IntentResponse()
intent_result.async_set_speech(buffer.strip())
return intent_result
"""
Test the "snabb lwaftr config" subcommand. Does not need NIC names because
it uses the "bench" subcommand.
"""
import json
import os
from signal import SIGTERM
import socket
from subprocess import PIPE, Popen
import time
import unittest
import re
import random
import string
from test_env import BENCHDATA_DIR, DATA_DIR, ENC, SNABB_CMD, \
DAEMON_STARTUP_WAIT, BaseTestCase, nic_names
DAEMON_PROC_NAME = 'config-test-daemon'
DAEMON_ARGS = [
str(SNABB_CMD), 'lwaftr', 'bench',
'--bench-file', '/dev/null',
'--name', DAEMON_PROC_NAME,
str(DATA_DIR / 'icmp_on_fail.conf'),
str(BENCHDATA_DIR / 'ipv4-0550.pcap'),
str(BENCHDATA_DIR / 'ipv6-0550.pcap'),
]
LISTEN_SOCKET_PATH = '/tmp/snabb-lwaftr-listen-sock-%s' % DAEMON_PROC_NAME
MANAGER_SOCKET_PATH = '/var/run/snabb/by-name/%s/config-leader-socket' % DAEMON_PROC_NAME
def random_string(n=8):
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(n))
def set_random_name(test):
global DAEMON_PROC_NAME
global LISTEN_SOCKET_PATH
global MANAGER_SOCKET_PATH
# Create random name.
name = 'config-test-daemon-' + random_string()
DAEMON_PROC_NAME = name
LISTEN_SOCKET_PATH = '/tmp/snabb-lwaftr-listen-sock-%s' % DAEMON_PROC_NAME
MANAGER_SOCKET_PATH = '/var/run/snabb/by-name/%s/config-leader-socket' % DAEMON_PROC_NAME
# Update test arguments name.
test.daemon_args[6] = name
test.config_args = list(test.config_args)
test.config_args[4] = name
def wait_for_socket(socket_path, timeout=5, step=0.1):
for i in range(0, int(timeout/step)):
if os.access(socket_path, os.F_OK):
return True
time.sleep(step)
return False
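# A minimal usage sketch: poll for the daemon's management socket before
# issuing "snabb config" commands against it (the timeout value is
# illustrative):
#
#     if not wait_for_socket(MANAGER_SOCKET_PATH, timeout=10):
#         raise RuntimeError("lwaftr daemon did not create its config socket")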
def wait_for_listen_socket(**kwargs):
return wait_for_socket(LISTEN_SOCKET_PATH, **kwargs)
def wait_for_manager_socket(**kwargs):
return wait_for_socket(MANAGER_SOCKET_PATH, **kwargs)
class TestConfigGet(BaseTestCase):
"""
Test querying from a known config, testing basic "getting".
It performs numerous gets on different paths.
"""
daemon_args = DAEMON_ARGS
config_args = (str(SNABB_CMD), 'config', 'get', '--schema=snabb-softwire-v3', DAEMON_PROC_NAME)
@classmethod
def setUpClass(cls):
super().setUpClass()
if not wait_for_manager_socket():
cls.daemon.terminate()
cls.reportAndFail('Config manager socket not present', None)
def test_get_internal_iface(self):
cmd_args = list(self.config_args)
cmd_args.append('/softwire-config/instance[device=test]/queue[id=0]'
'/internal-interface/ip')
output = self.run_cmd(cmd_args)
self.assertEqual(
output.strip(), b'8:9:a:b:c:d:e:f',
'\n'.join(('OUTPUT', str(output, ENC))))
def test_get_external_iface(self):
cmd_args = list(self.config_args)
cmd_args.append('/softwire-config/instance[device=test]/queue[id=0]/'
'external-interface/ip')
output = self.run_cmd(cmd_args)
self.assertEqual(
output.strip(), b'10.10.10.10',
'\n'.join(('OUTPUT', str(output, ENC))))
def test_get_b4_ipv6(self):
cmd_args = list(self.config_args)
# Implicit string concatenation, do not add commas.
cmd_args.append(
'/softwire-config/binding-table/softwire'
'[ipv4=178.79.150.233][psid=7850]/b4-ipv6')
output = self.run_cmd(cmd_args)
self.assertEqual(
output.strip(), b'127:11:12:13:14:15:16:128',
'\n'.join(('OUTPUT', str(output, ENC))))
def test_get_ietf_path(self):
cmd_args = list(self.config_args)[:-1]
cmd_args[3] = '--schema=ietf-softwire-br'
cmd_args.extend((
DAEMON_PROC_NAME,
# Implicit string concatenation, do not add commas.
'/br-instances/binding/'
'bind-instance[name=config-test-daemon]/binding-table/binding-entry'
'[binding-ipv6info=127:22:33:44:55:66:77:128]/binding-ipv4-addr',
))
output = self.run_cmd(cmd_args)
self.assertEqual(
output.strip(), b'178.79.150.15',
'\n'.join(('OUTPUT', str(output, ENC))))
class TestConfigMultiproc(BaseTestCase):
"""
Test the ability to start, stop, get, etc. multiple processes.
"""
daemon = None
daemon_args = DAEMON_ARGS
ps_args = (str(SNABB_CMD), 'ps')
config_args = (str(SNABB_CMD), 'config', 'XXX', '--schema=snabb-softwire-v3', DAEMON_PROC_NAME)
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def start_daemon(self, config, additional=None):
""" Starts the daemon with a specific config """
if self.daemon is not None:
raise Exception("Daemon already started")
set_random_name(self)
daemon_args = list(self.daemon_args)
daemon_args[7] = config
for option in (additional or []):
daemon_args.insert(3, option)
# Start the daemon itself
self.daemon = Popen(daemon_args, stdout=PIPE, stderr=PIPE)
if not wait_for_manager_socket():
self.daemon.terminate()
stdout = self.daemon.stdout.read().decode(ENC)
stderr = self.daemon.stderr.read().decode(ENC)
self.fail("\n".join((
"Failed starting daemon",
"Command:", " ".join(daemon_args),
"STDOUT", stdout,
"STDOUT", stderr,
)))
return self.daemon.pid
@property
def instances(self):
""" Gets list of all the instance PIDs for lwaftr """
mypid = self.daemon.pid
output = self.run_cmd(self.ps_args).decode("utf-8")
my_lines = [inst for inst in output.split("\n") if str(mypid) in inst]
        # The output won't be clean and will have lots of text; extract the PIDs
instances = {}
for inst in my_lines:
# parts example: ['\\-', '20422', 'worker', 'for', '20420']
parts = inst.split()
if parts[0] == "\\-":
instances[int(parts[-1])].add(int(parts[1]))
else:
instances[int(parts[0])] = set()
return instances
def tearDown(self):
self.stop_daemon(self.daemon)
self.daemon = None
return super().tearDown()
def test_start_empty(self):
config = str(DATA_DIR / "empty.conf")
pid = self.start_daemon(config)
self.assertEqual(len(self.instances[pid]), 0)
def test_added_instances_startup(self):
config = str(DATA_DIR / "icmp_on_fail.conf")
pid = self.start_daemon(config)
initial_instance_amount = len(self.instances[pid])
# add an instance
device = """{
device addtest1;
queue {
id 0;
external-interface {
ip 72.72.72.72;
mac 14:14:14:14:14:14;
next-hop {
mac 15:15:15:15:15:15;
}
}
internal-interface {
ip 7:8:9:A:B:C:D:E;
mac 16:16:16:16:16:16;
next-hop {
mac 17:17:17:17:17:17;
}
}
}}"""
config_add_cmd = list(self.config_args)
config_add_cmd[2] = 'add'
config_add_cmd.extend((
'/softwire-config/instance',
device
))
# Add the instance
self.run_cmd(config_add_cmd)
# Wait around for it to start the instance
time.sleep(1)
# Verify we've got one more instance
self.assertEqual(
len(self.instances[pid]), (initial_instance_amount + 1)
)
def test_removed_instances_shutdown(self):
config = str(DATA_DIR / "icmp_on_fail.conf")
pid = self.start_daemon(config)
initial_instance_amount = len(self.instances[pid])
# There should be an instance called "test" in the initial
# config that's loaded. We'll try removing that.
config_remove_cmd = list(self.config_args)
config_remove_cmd[2] = 'remove'
config_remove_cmd.append('/softwire-config/instance[device=test]')
# Remove it
self.run_cmd(config_remove_cmd)
        # Wait for the instance to shut down
time.sleep(1)
# Verify we've got one less instance than when we started.
self.assertEqual(
len(self.instances[pid]), (initial_instance_amount - 1)
)
def test_snabb_get_state_summation(self):
config = str(DATA_DIR / "icmp_on_fail_multiproc.conf")
pid = self.start_daemon(config)
get_state_cmd = list(self.config_args)
get_state_cmd[2] = "get-state"
get_state_cmd.insert(4, "-f")
get_state_cmd.insert(5, "xpath")
get_state_cmd.append("/")
state = self.run_cmd(get_state_cmd).decode(ENC)
state = [line for line in state.split("\n") if line]
# Build two dictionaries, one of each instance counter (a total)
# and one of just the values in the global "softwire-state"
summed = {}
instance = {}
for line in state:
if "softwire-state" not in line:
continue
[cname, cvalue] = line.split(" ")
cname = os.path.basename(cname)
if cname == "discontinuity-time":
cvalue = str(cvalue)
continue
cvalue = int(cvalue)
if line.startswith("/softwire-config"):
instance[cname] = instance.get(cname, 0) + cvalue
elif line.startswith("/softwire-state"):
summed[cname] = cvalue
# Now assert they're the same :)
for name, value in summed.items():
self.assertEqual(value, instance[name])
def test_snabb_get_state_lists_instances(self):
config = str(DATA_DIR / "icmp_on_fail_multiproc.conf")
pid = self.start_daemon(config)
get_state_cmd = list(self.config_args)
get_state_cmd[2] = "get-state"
get_state_cmd.insert(4, "-f")
get_state_cmd.insert(5, "xpath")
get_state_cmd.append("/")
state = self.run_cmd(get_state_cmd).decode(ENC)
state = [line for line in state.split("\n") if line]
instances = set()
for line in state:
[key, value] = line.split(" ")
if key.startswith("/softwire-config") and "instance" not in key:
continue
m = re.search(r"\[device=(.*)\]", key)
if m:
device_name = m.group(1)
instances.add(device_name)
self.assertTrue(len(instances) == 2)
self.assertTrue("test" in instances)
self.assertTrue("test1" in instances)
class TestConfigListen(BaseTestCase):
"""
Test it can listen, send a command and get a response. Only test the
socket method of communicating with the listen command, due to the
difficulties of testing interactive scripts.
"""
daemon_args = DAEMON_ARGS
listen_args = (str(SNABB_CMD), 'config', 'listen',
'--socket', LISTEN_SOCKET_PATH, DAEMON_PROC_NAME)
@classmethod
def setUpClass(cls):
super().setUpClass()
if not wait_for_manager_socket():
cls.daemon.terminate()
cls.reportAndFail('Config manager socket not present', None)
def test_listen(self):
# Start the listen command with a socket.
listen_daemon = Popen(self.listen_args, stdout=PIPE, stderr=PIPE)
if not wait_for_listen_socket():
listen_daemon.terminate()
listen_daemon.wait()
stdout = listen_daemon.stdout.read().decode(ENC)
stderr = listen_daemon.stderr.read().decode(ENC)
self.fail("\n".join((
"Failed to run 'snabb listen'",
"Command:", " ".join(self.daemon_args),
"STDOUT", stdout,
"STDOUT", stderr,
)))
# Send command to and receive response from the listen command.
# (Implicit string concatenation, no summing needed.)
get_cmd = (b'{ "id": "0", "verb": "get",'
b' "path": "/routes/route[addr=1.2.3.4]/port" }\n')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
sock.connect(LISTEN_SOCKET_PATH)
sock.sendall(get_cmd)
resp = str(sock.recv(200), ENC)
finally:
sock.close()
status = json.loads(resp)['status']
self.assertEqual(status, 'ok')
# Terminate the listen command.
listen_daemon.terminate()
ret_code = listen_daemon.wait()
if ret_code not in (0, -SIGTERM):
print('Error terminating daemon:', listen_daemon.args)
print('Exit code:', ret_code)
print('STDOUT\n', str(listen_daemon.stdout.read(), ENC))
print('STDERR\n', str(listen_daemon.stderr.read(), ENC))
listen_daemon.stdout.close()
listen_daemon.stderr.close()
os.unlink(LISTEN_SOCKET_PATH)
class TestConfigMisc(BaseTestCase):
daemon_args = DAEMON_ARGS
@classmethod
def setUpClass(cls):
super().setUpClass()
if not wait_for_manager_socket():
cls.daemon.terminate()
cls.reportAndFail('Config manager socket not present', None)
def get_cmd_args(self, action):
cmd_args = list((str(SNABB_CMD), 'config', 'XXX', '--schema=snabb-softwire-v3', DAEMON_PROC_NAME))
cmd_args[2] = action
return cmd_args
def test_add(self):
"""
Add a softwire entry, then get its b4-ipv6 leaf back and check the value.
"""
# External IPv4.
add_args = self.get_cmd_args('add')
add_args.extend((
'/softwire-config/binding-table/softwire',
'{ ipv4 8.8.8.8; psid 7; b4-ipv6 ::2; br-address 2001:db8::;'
'port-set { psid-length 16; }}',
))
self.run_cmd(add_args)
get_args = self.get_cmd_args('get')
get_args.append(
'/softwire-config/binding-table/softwire[ipv4=8.8.8.8][psid=7]'
'/b4-ipv6')
output = self.run_cmd(get_args)
# run_cmd checks the exit code and fails the test if it is not zero.
self.assertEqual(
output.strip(), b'::2',
'\n'.join(('OUTPUT', str(output, ENC))))
def test_add_ietf(self):
add_args = self.get_cmd_args('add')[:-1]
add_args[3] = '--schema=ietf-softwire-br'
add_args.extend((
DAEMON_PROC_NAME,
# Implicit string concatenation, no summing needed.
'/br-instances/binding/bind-instance[name=config-test-daemon]/'
'binding-table/binding-entry',
'{ binding-ipv6info ::123; binding-ipv4-addr 8.8.8.8;'
'br-ipv6-addr 2001:db8::; port-set { psid 8; psid-len 15; }}',
))
self.run_cmd(add_args)
get_args = self.get_cmd_args('get')[:-1]
get_args[3] = '--schema=ietf-softwire-br'
get_args.extend((
DAEMON_PROC_NAME,
'/br-instances/binding/bind-instance[name=config-test-daemon]/'
'binding-table/binding-entry[binding-ipv6info=::123]/port-set/psid',
))
output = self.run_cmd(get_args)
# run_cmd checks the exit code and fails the test if it is not zero.
self.assertEqual(
output.strip(), b'8',
'\n'.join(('OUTPUT', str(output, ENC))))
def test_get_state(self):
get_state_args = self.get_cmd_args('get-state')
# Query a few counters which should have non-zero results.
for query in (
'/softwire-state/in-ipv4-bytes',
'/softwire-state/out-ipv4-bytes',
):
cmd_args = list(get_state_args)
cmd_args.append(query)
output = self.run_cmd(cmd_args)
self.assertNotEqual(
output.strip(), b'0',
'\n'.join(('OUTPUT', str(output, ENC))))
get_state_args.append('/')
self.run_cmd(get_state_args)
# run_cmd checks the exit code and fails the test if it is not zero.
def test_get_state_ietf(self):
get_args = self.get_cmd_args('get-state')[:-1]
get_args[3] = '--schema=ietf-softwire-br'
get_args.extend((
DAEMON_PROC_NAME,
'/br-instances/binding/bind-instance[name=config-test-daemon]/'
'traffic-stat/rcvd-ipv4-bytes',
))
output = self.run_cmd(get_args)
# run_cmd checks the exit code and fails the test if it is not zero.
self.assertNotEqual(
output.strip(), b'0',
'\n'.join(('OUTPUT', str(output, ENC))))
def test_remove(self):
# Verify that the thing we want to remove actually exists.
get_args = self.get_cmd_args('get')
get_args.append(
# Implicit string concatenation, no summing needed.
'/softwire-config/binding-table/softwire'
'[ipv4=178.79.150.2][psid=7850]/'
)
self.run_cmd(get_args)
# run_cmd checks the exit code and fails the test if it is not zero.
# Remove it.
remove_args = list(get_args)
remove_args[2] = 'remove'
self.run_cmd(remove_args)
# run_cmd checks the exit code and fails the test if it is not zero.
# Verify we cannot find it anymore.
self.run_cmd(get_args, 1)
# run_cmd checks the exit code and fails the test if it is not 1.
def test_remove_ietf(self):
# Verify that the thing we want to remove actually exists.
get_args = self.get_cmd_args('get')[:-1]
get_args[3] = '--schema=ietf-softwire-br'
get_args.extend((
DAEMON_PROC_NAME,
'/br-instances/binding/bind-instance[name=config-test-daemon]/'
'binding-table/binding-entry[binding-ipv6info=::123]',
))
self.run_cmd(get_args)
# run_cmd checks the exit code and fails the test if it is not zero.
# Remove it.
remove_args = list(get_args)
remove_args[2] = 'remove'
self.run_cmd(remove_args)
# run_cmd checks the exit code and fails the test if it is not zero.
# Verify we cannot find it anymore.
self.run_cmd(get_args, 1)
# run_cmd checks the exit code and fails the test if it is not 1.
def test_set(self):
"""
Test setting values, then perform a get to verify the value.
"""
# External IPv4.
test_ipv4 = '208.118.235.148'
set_args = self.get_cmd_args('set')
set_args.extend((
"/softwire-config/instance[device=test]/queue[id=0]/"
"external-interface/ip", test_ipv4
))
self.run_cmd(set_args)
get_args = list(set_args)[:-1]
get_args[2] = 'get'
output = self.run_cmd(get_args)
self.assertEqual(
output.strip(), bytes(test_ipv4, ENC),
'\n'.join(('OUTPUT', str(output, ENC))))
# Binding table.
test_ipv4, test_ipv6, test_psid = '178.79.150.15', '::1', '0'
set_args = self.get_cmd_args('set')
# Implicit string concatenation, no summing needed.
set_args.extend((
'/softwire-config/binding-table/softwire[ipv4=%s][psid=%s]/b4-ipv6'
% (test_ipv4, test_psid),
test_ipv6,
))
self.run_cmd(set_args)
get_args = list(set_args)[:-1]
get_args[2] = 'get'
output = self.run_cmd(get_args)
self.assertEqual(
output.strip(), bytes(test_ipv6, ENC),
'\n'.join(('OUTPUT', str(output, ENC))))
# Check that the value we just set is the same in the IETF schema.
# We actually need to look this up backwards, let's just check the
# same IPv4 address as was used to set it above.
get_args = self.get_cmd_args('get')[:-1]
get_args[3] = '--schema=ietf-softwire-br'
get_args.extend((
DAEMON_PROC_NAME,
# Implicit string concatenation, no summing needed.
'/br-instances/binding/'
'bind-instance[name=config-test-daemon]/binding-table/binding-entry'
'[binding-ipv6info=::1]/binding-ipv4-addr',
))
output = self.run_cmd(get_args)
self.assertEqual(
output.strip(), bytes(test_ipv4, ENC),
'\n'.join(('OUTPUT', str(output, ENC))))
# Check the portset: the IPv4 address alone is not unique.
get_args = self.get_cmd_args('get')[:-1]
get_args[3] = '--schema=ietf-softwire-br'
get_args.extend((
DAEMON_PROC_NAME,
# Implicit string concatenation, no summing needed.
'/br-instances/binding/bind-instance[name=config-test-daemon]/'
'binding-table/binding-entry[binding-ipv6info=::1]/port-set/psid',
))
output = self.run_cmd(get_args)
self.assertEqual(output.strip(), bytes(test_psid, ENC),
'\n'.join(('OUTPUT', str(output, ENC))))
def test_set_ietf(self):
"""
Set binding table, update an entry, check for validity via get.
"""
set_args = self.get_cmd_args('set')[:-1]
set_args[3] = '--schema=ietf-softwire-br'
set_args.extend((
DAEMON_PROC_NAME,
# Implicit string concatenation, no summing needed.
'/br-instances/binding/bind-instance[name=config-test-daemon]/'
'binding-table/binding-entry',
'{ binding-ipv6info ::124; binding-ipv4-addr 8.8.8.8;'
'br-ipv6-addr 2001:db8::; port-set { psid 8; psid-len 15; }}',
))
self.run_cmd(set_args)
set_args = self.get_cmd_args('set')[:-1]
set_args[3] = '--schema=ietf-softwire-br'
set_args.extend((
DAEMON_PROC_NAME,
# Implicit string concatenation, no summing needed.
'/br-instances/binding/bind-instance[name=config-test-daemon]/'
'binding-table/binding-entry[binding-ipv6info=::124]/'
'binding-ipv4-addr',
'9.9.9.9',
))
self.run_cmd(set_args)
get_args = self.get_cmd_args('get')
get_args.append(
'/softwire-config/binding-table/softwire[ipv4=9.9.9.9][psid=8]'
'/b4-ipv6')
output = self.run_cmd(get_args)
# run_cmd checks the exit code and fails the test if it is not zero.
self.assertEqual(
output.strip(), b'::124',
'\n'.join(('OUTPUT', str(output, ENC))))
def test_set_ietf_special(self):
"""
Test handling of special br attributes.
"""
set_args = self.get_cmd_args('set')[:-1]
set_args[3] = '--schema=ietf-softwire-br'
set_args.extend((
DAEMON_PROC_NAME,
# Implicit string concatenation, no summing needed.
'/br-instances/binding/bind-instance[name=config-test-daemon]/'
'softwire-path-mru',
'542',
))
self.run_cmd(set_args)
get_args = self.get_cmd_args('get')[:-1]
get_args[3] = '--schema=ietf-softwire-br'
get_args.extend((
DAEMON_PROC_NAME,
'/br-instances/binding/bind-instance[name=config-test-daemon]/'
'softwire-path-mru',
))
output = self.run_cmd(get_args)
# run_cmd checks the exit code and fails the test if it is not zero.
self.assertEqual(
output.strip(), b'542',
'\n'.join(('OUTPUT', str(output, ENC))))
#####
set_args = self.get_cmd_args('set')[:-1]
set_args[3] = '--schema=ietf-softwire-br'
set_args.extend((
DAEMON_PROC_NAME,
# Implicit string concatenation, no summing needed.
'/br-instances/binding/bind-instance[name=config-test-daemon]/'
'icmp-policy/icmpv6-errors/generate-icmpv6-errors',
'false',
))
self.run_cmd(set_args)
get_args = self.get_cmd_args('get')[:-1]
get_args[3] = '--schema=ietf-softwire-br'
get_args.extend((
DAEMON_PROC_NAME,
'/br-instances/binding/bind-instance[name=config-test-daemon]/'
'icmp-policy/icmpv6-errors/generate-icmpv6-errors',
))
output = self.run_cmd(get_args)
# run_cmd checks the exit code and fails the test if it is not zero.
self.assertEqual(
output.strip(), b'false',
'\n'.join(('OUTPUT', str(output, ENC))))
#####
set_args = self.get_cmd_args('set')[:-1]
set_args[3] = '--schema=ietf-softwire-br'
set_args.extend((
DAEMON_PROC_NAME,
# Implicit string concatenation, no summing needed.
'/br-instances/binding/bind-instance[name=config-test-daemon]/'
'icmp-policy/icmpv4-errors/icmpv4-rate',
'1001',
))
self.run_cmd(set_args)
get_args = self.get_cmd_args('get')[:-1]
get_args[3] = '--schema=ietf-softwire-br'
get_args.extend((
DAEMON_PROC_NAME,
'/br-instances/binding/bind-instance[name=config-test-daemon]/'
'icmp-policy/icmpv4-errors/icmpv4-rate',
))
output = self.run_cmd(get_args)
# run_cmd checks the exit code and fails the test if it is not zero.
self.assertEqual(
output.strip(), b'1001',
'\n'.join(('OUTPUT', str(output, ENC))))
def test_wrong_instance_ietf(self):
# Check for failure when querying wrong instance
remove_args = self.get_cmd_args('remove')[:-1]
remove_args[3] = '--schema=ietf-softwire-br'
remove_args.extend((
DAEMON_PROC_NAME,
# Implicit string concatenation, no summing needed.
'/br-instances/binding/bind-instance[name=nosuchinstance]'
'binding-table/binding-entry[binding-ipv6info=::123]',
))
output = str(self.run_cmd(remove_args, 1), ENC)
self.assertRegex(output, 'name does not match',
'\n'.join(('OUTPUT', output)))
####
set_args = self.get_cmd_args('set')[:-1]
set_args[3] = '--schema=ietf-softwire-br'
set_args.extend((
DAEMON_PROC_NAME,
# Implicit string concatenation, no summing needed.
'/br-instances/binding/bind-instance[name=nosuchinstance]/'
'binding-table/binding-entry',
'{ binding-ipv6info ::124; binding-ipv4-addr 8.8.8.8;'
'br-ipv6-addr 2001:db8::; port-set { psid 8; psid-len 15; }}',
))
output = str(self.run_cmd(set_args, 1), ENC)
self.assertRegex(output, 'name does not match',
'\n'.join(('OUTPUT', output)))
####
set_args = self.get_cmd_args('set')[:-1]
set_args[3] = '--schema=ietf-softwire-br'
set_args.extend((
DAEMON_PROC_NAME,
# Implicit string concatenation, no summing needed.
'/br-instances/binding/bind-instance[name=nosuchinstance]/'
'binding-table/binding-entry[binding-ipv6info=::124]/'
'binding-ipv4-addr',
'9.9.9.9',
))
output = str(self.run_cmd(set_args, 1), ENC)
self.assertRegex(output, 'name does not match',
'\n'.join(('OUTPUT', output)))
####
set_args = self.get_cmd_args('set')[:-1]
set_args[3] = '--schema=ietf-softwire-br'
set_args.extend((
DAEMON_PROC_NAME,
# Implicit string concatenation, no summing needed.
'/br-instances/binding/bind-instance[name=nosuchinstance]/'
'icmp-policy/icmpv4-errors/icmpv4-rate',
'1001',
))
output = str(self.run_cmd(set_args, 1), ENC)
self.assertRegex(output, 'name does not match',
'\n'.join(('OUTPUT', output)))
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import fnmatch
import logging
import os
from pylib import android_commands
from pylib import cmd_helper
from pylib import constants
from pylib import ports
from pylib.base import shard
from pylib.utils import emulator
from pylib.utils import report_results
from pylib.utils import xvfb
import gtest_config
import test_runner
def _FullyQualifiedTestSuites(exe, option_test_suite, build_type):
"""Get a list of absolute paths to test suite targets.
Args:
exe: if True, use the executable-based test runner.
option_test_suite: the test_suite specified as an option.
build_type: 'Release' or 'Debug'.
Returns:
A list of tuples containing the suite and absolute path.
Ex. ('content_unittests',
'/tmp/chrome/src/out/Debug/content_unittests_apk/'
'content_unittests-debug.apk')
"""
def GetQualifiedSuite(suite):
if suite.is_suite_exe:
relpath = suite.name
else:
# out/(Debug|Release)/$SUITE_apk/$SUITE-debug.apk
relpath = os.path.join(suite.name + '_apk', suite.name + '-debug.apk')
return suite.name, os.path.join(test_suite_dir, relpath)
test_suite_dir = os.path.join(cmd_helper.OutDirectory.get(), build_type)
if option_test_suite:
all_test_suites = [gtest_config.Suite(exe, option_test_suite)]
else:
all_test_suites = gtest_config.STABLE_TEST_SUITES
# List of tuples (suite_name, suite_path)
qualified_test_suites = map(GetQualifiedSuite, all_test_suites)
for t, q in qualified_test_suites:
if not os.path.exists(q):
raise Exception('Test suite %s not found in %s.\n'
'Supported test suites:\n %s\n'
'Ensure it has been built.\n' %
(t, q, gtest_config.STABLE_TEST_SUITES))
return qualified_test_suites
def GetTestsFromDevice(runner):
"""Get a list of tests from a device, excluding disabled tests.
Args:
runner: a TestRunner.
"""
# The executable/apk needs to be copied before we can call GetAllTests.
runner.test_package.StripAndCopyExecutable()
all_tests = runner.test_package.GetAllTests()
# Only include tests that do not match any pattern in the disabled list.
disabled_list = runner.GetDisabledTests()
return filter(lambda t: not any([fnmatch.fnmatch(t, disabled_pattern)
for disabled_pattern in disabled_list]),
all_tests)
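# Illustrative sketch (hypothetical test names, not part of the harness): the
# fnmatch-based filtering above keeps only tests that match none of the
# disabled patterns reported by the device.
def _ExampleDisabledFiltering():
  all_tests = ['FooTest.Bar', 'FooTest.Baz', 'FLAKY_FooTest.Qux']
  disabled_list = ['FLAKY_*', 'FooTest.Baz']
  return [t for t in all_tests
          if not any(fnmatch.fnmatch(t, pattern)
                     for pattern in disabled_list)]  # -> ['FooTest.Bar']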
def GetAllEnabledTests(runner_factory, devices):
"""Get all enabled tests.
Obtains a list of enabled tests from the test package on the device,
then filters it again using the disabled list on the host.
Args:
runner_factory: callable that takes a device and returns a TestRunner.
devices: list of devices.
Returns:
List of all enabled tests.
Raises Exception if all devices failed.
"""
for device in devices:
try:
logging.info('Obtaining tests from %s', device)
runner = runner_factory(device, 0)
return GetTestsFromDevice(runner)
except Exception as e:
logging.warning('Failed obtaining tests from %s with exception: %s',
device, e)
raise Exception('No device available to get the list of tests.')
def _RunATestSuite(options, suite_name):
"""Run a single test suite.
Helper for Dispatch() to allow stop/restart of the emulator across
test bundles. If using the emulator, we start it on entry and stop
it on exit.
Args:
options: options for running the tests.
suite_name: name of the test suite being run.
Returns:
0 if successful, number of failing tests otherwise.
"""
step_name = os.path.basename(options.test_suite).replace('-debug.apk', '')
attached_devices = []
buildbot_emulators = []
if options.use_emulator:
buildbot_emulators = emulator.LaunchEmulators(options.emulator_count,
options.abi,
wait_for_boot=True)
attached_devices = [e.device for e in buildbot_emulators]
elif options.test_device:
attached_devices = [options.test_device]
else:
attached_devices = android_commands.GetAttachedDevices()
if not attached_devices:
raise Exception('A device must be attached and online.')
# Reset the test port allocation. It's important to do it before starting
# to dispatch any tests.
if not ports.ResetTestServerPortAllocation():
raise Exception('Failed to reset test server port.')
# Constructs a new TestRunner with the current options.
def RunnerFactory(device, shard_index):
return test_runner.TestRunner(
device,
options.test_suite,
options.test_arguments,
options.timeout,
options.cleanup_test_files,
options.tool,
options.build_type,
options.webkit,
options.push_deps,
constants.GTEST_TEST_PACKAGE_NAME,
constants.GTEST_TEST_ACTIVITY_NAME,
constants.GTEST_COMMAND_LINE_FILE)
# Get tests and split them up based on the number of devices.
if options.gtest_filter:
all_tests = [t for t in options.gtest_filter.split(':') if t]
else:
all_tests = GetAllEnabledTests(RunnerFactory, attached_devices)
num_devices = len(attached_devices)
tests = [':'.join(all_tests[i::num_devices]) for i in xrange(num_devices)]
tests = [t for t in tests if t]
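# Worked illustration (hypothetical): with five tests t0..t4 and two devices,
# the stride slicing above yields ['t0:t2:t4', 't1:t3'], i.e. a round-robin
# split with one colon-joined gtest_filter string per device.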
# Run tests.
test_results = shard.ShardAndRunTests(RunnerFactory, attached_devices, tests,
options.build_type, test_timeout=None,
num_retries=options.num_retries)
report_results.LogFull(
results=test_results,
test_type='Unit test',
test_package=suite_name,
build_type=options.build_type,
flakiness_server=options.flakiness_dashboard_server)
report_results.PrintAnnotation(test_results)
for buildbot_emulator in buildbot_emulators:
buildbot_emulator.Shutdown()
return len(test_results.GetNotPass())
def _ListTestSuites():
"""Display a list of available test suites."""
print 'Available test suites are:'
for test_suite in gtest_config.STABLE_TEST_SUITES:
print test_suite
def Dispatch(options):
"""Dispatches the tests, sharding if possible.
If options.use_emulator is True, all tests will be run in a new emulator
instance.
Args:
options: options for running the tests.
Returns:
0 if successful, number of failing tests otherwise.
"""
if options.test_suite == 'help':
_ListTestSuites()
return 0
if options.use_xvfb:
framebuffer = xvfb.Xvfb()
framebuffer.Start()
all_test_suites = _FullyQualifiedTestSuites(options.exe, options.test_suite,
options.build_type)
failures = 0
for suite_name, suite_path in all_test_suites:
# Give each test suite its own copy of options.
test_options = copy.deepcopy(options)
test_options.test_suite = suite_path
failures += _RunATestSuite(test_options, suite_name)
if options.use_xvfb:
framebuffer.Stop()
return failures
|
|
import numpy as np
from PySide import QtGui, QtCore
import sharppy.sharptab as tab
from sharppy.sharptab.profile import Profile, create_profile
from sharppy.sharptab.constants import *
from PySide.QtGui import *
from PySide.QtCore import *
__all__ = ['backgroundHodo', 'plotHodo']
class backgroundHodo(QtGui.QFrame):
'''
Handles the plotting of the background frame onto
a QPixmap. Inherits from the QtGui.QFrame object.
Unlike most plotting classes in SHARPPy, this class
will not call the function to draw the background.
This is so that the background can be redrawn when
the hodograph gets centered on a vector.
'''
def __init__(self):
super(backgroundHodo, self).__init__()
self.first = True
self.initUI()
def initUI(self):
'''
Initialize the User Interface
'''
## set the interface variables for width, height, padding, etc.
self.lpad = 0; self.rpad = 0
self.tpad = 0; self.bpad = 0
self.wid = self.size().width()
self.hgt = self.size().height()
self.tlx = self.rpad; self.tly = self.tpad
self.brx = self.wid; self.bry = self.hgt
## set default center to the origin
self.point = (0,0)
self.hodomag = 160.
self.centerx = self.wid / 2; self.centery = self.hgt / 2
self.scale = (self.brx - self.tlx) / self.hodomag
## ring increment
self.ring_increment = 10
self.rings = xrange(self.ring_increment, 100+self.ring_increment,
self.ring_increment)
if self.physicalDpiX() > 75:
fsize = 7
else:
fsize = 9
self.label_font = QtGui.QFont('Helvetica', fsize)
self.critical_font = QtGui.QFont('Helvetica', fsize + 2)
self.label_metrics = QtGui.QFontMetrics( self.label_font )
self.critical_metrics = QtGui.QFontMetrics( self.critical_font )
self.label_height = self.label_metrics.xHeight() + 5
self.critical_height = self.critical_metrics.xHeight() + 5
self.plotBitMap = QtGui.QPixmap(self.width(), self.height())
self.saveBitMap = None
self.plotBitMap.fill(QtCore.Qt.black)
self.plotBackground()
self.backgroundBitMap = self.plotBitMap.copy()
def center_hodo(self, point):
'''
Center the hodograph in the window. It will center the plot about
the origin, the mean wind vector, or the storm motion vector.
Parameters
----------
point: A (u,v) vector that the hodograph is to be centered on.
'''
## modify the center based on an offset from the origin
centerx = self.wid / 2; centery = self.hgt / 2
point = self.uv_to_pix(point[0], point[1])
## if the function was called but the center hasn't changed in pixel space,
## just leave the center as is
if self.point == point:
self.centerx = self.centerx
self.centery = self.centery
## otherwise, offset the hodograph center
else:
self.point = point
diffx = centerx - point[0]; diffy = centery - point[1]
self.centerx += diffx; self.centery += diffy
self.plotBitMap.fill(QtCore.Qt.black)
self.plotBackground()
self.backgroundBitMap = self.plotBitMap.copy()
def wheelEvent(self, e):
'''
Handles the zooming of the hodograph window.
Parameters
----------
e: an Event object
'''
## get the new scaling magnitude
new_mag = self.hodomag + e.delta() / 5
## make sure the user doesn't zoom out of
## bounds to prevent drawing issues
if new_mag >= 40. and new_mag <= 200.:
self.hodomag = new_mag
## if it is out of bounds, do nothing
else:
self.hodomag = self.hodomag
## get the maximum speed value in the frame for the ring increment.
## this is to help reduce drawing resources
max_uv = int(self.pix_to_uv(self.brx, 0)[0])
self.rings = xrange(self.ring_increment, max_uv+self.ring_increment,
self.ring_increment)
## reassign the new scale
self.scale = (self.brx - self.tlx) / self.hodomag
self.plotBitMap.fill(QtCore.Qt.black)
self.plotBackground()
self.backgroundBitMap = self.plotBitMap.copy()
self.plotData()
## update
self.update()
def resizeEvent(self, e):
'''
Resize the plot based on adjusting the main window.
Parameters
----------
e: an Event object
'''
self.initUI()
def plotBackground(self):
'''
Handles painting the frame background onto the
QPixmap.
'''
## initialize a QPainter object.
qp = QtGui.QPainter()
qp.begin(self.plotBitMap)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.TextAntialiasing)
## draw the wind speed rings
for spd in self.rings: self.draw_ring(spd, qp)
## draw the frame axes
self.draw_axes(qp)
self.draw_frame(qp)
qp.end()
def draw_frame(self, qp):
'''
Draw frame around object.
Parameters
----------
qp: QtGui.QPainter object
'''
## initialize a white pen to draw the frame
pen = QtGui.QPen(QtGui.QColor(WHITE), 2)
pen.setStyle(QtCore.Qt.SolidLine)
qp.setPen(pen)
## draw the frame borders
qp.drawLine(self.tlx, self.tly, self.brx, self.tly)
qp.drawLine(self.brx, self.tly, self.brx, self.bry)
qp.drawLine(self.brx, self.bry, self.tlx, self.bry)
qp.drawLine(self.tlx, self.bry, self.tlx, self.tly)
def draw_axes(self, qp):
'''
Draw the X, Y Axes.
Parameters
----------
qp: QtGui.QPainter object
'''
## initialize a white pen to draw the frame axes
pen = QtGui.QPen(QtGui.QColor(WHITE), 2)
pen.setStyle(QtCore.Qt.SolidLine)
qp.setPen(pen)
## draw the frame axes
qp.drawLine(self.centerx, self.tly, self.centerx, self.bry)
qp.drawLine(self.tlx, self.centery, self.brx, self.centery)
def draw_ring(self, spd, qp):
'''
Draw a range ring.
Parameters
----------
spd: wind speed
qp: QtGui.QPainter object
'''
## set the ring color and get the u and v components of a
## 0 direction vector with speed spd.
color = "#555555"
uu, vv = tab.utils.vec2comp(0, spd)
vv *= self.scale
## create a center point
center = QtCore.QPointF(self.centerx, self.centery)
## initialize a pen to draw with
pen = QtGui.QPen(QtGui.QColor(color), 1)
pen.setStyle(QtCore.Qt.DashLine)
qp.setPen(pen)
## draw the range ring
qp.drawEllipse(center, vv, vv)
qp.setFont(self.label_font)
## reset the pen to draw with. Color is set to black and width zero
## because we actually don't want to draw any lines yet.
pen = QtGui.QPen(QtGui.QColor('#000000'), 0, QtCore.Qt.SolidLine)
qp.setPen(pen)
offset = 5; width = 15; hght = 15;
## create some rectangles
top_rect = QtCore.QRectF(self.centerx+offset,
self.centery+vv-offset, width, hght)
bottom_rect = QtCore.QRectF(self.centerx+offset,
self.centery-vv-offset, width, hght)
right_rect = QtCore.QRectF(self.centerx+vv-offset,
self.centery+offset, width, hght)
left_rect = QtCore.QRectF(self.centerx-vv-offset,
self.centery+offset, width, hght)
## draw some invisible rectangles
qp.drawRect(top_rect); qp.drawRect(right_rect)
qp.drawRect(bottom_rect); qp.drawRect(left_rect)
## now make the pen white and draw text using
## the invisible rectangles
pen = QtGui.QPen(QtGui.QColor("#FFFFFF"))
qp.setPen(pen)
qp.setFont(self.label_font)
qp.drawText(top_rect, QtCore.Qt.AlignCenter, tab.utils.INT2STR(spd))
qp.drawText(right_rect, QtCore.Qt.AlignCenter, tab.utils.INT2STR(spd))
qp.drawText(bottom_rect, QtCore.Qt.AlignCenter, tab.utils.INT2STR(spd))
qp.drawText(left_rect, QtCore.Qt.AlignCenter, tab.utils.INT2STR(spd))
def hodo_to_pix(self, ang, spd):
'''
Function to convert a (direction, speed) to (x, y) coordinates.
Parameters
----------
ang: wind direction
spd: wind speed
'''
uu, vv = tab.utils.vec2comp(ang, spd)
xx = self.centerx + (uu * self.scale)
yy = self.centery + (vv * self.scale)
return xx, yy
def uv_to_pix(self, u, v):
'''
Function to convert (u, v) to (x, y) coordinates.
Parameters
----------
u: the u wind component
v: the v wind component
'''
xx = self.centerx + (u * self.scale)
yy = self.centery - (v * self.scale)
return xx, yy
def pix_to_uv(self, xx, yy):
'''
Function to convert (x,y) to (u,v) coordinates.
Parameters
----------
xx: the x pixel value
yy: the y pixel value
'''
u = (xx - self.centerx) / self.scale
v = (self.centery - yy) / self.scale
return u, v
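## Worked example (illustrative only, independent of any Qt widget state): the
## uv <-> pixel mapping above is a linear transform about the hodograph center,
## with pixel y increasing downward. Assuming a 400 px wide frame and
## hodomag = 160, scale = 400 / 160 = 2.5 px per unit of wind speed:
def _example_uv_pix_roundtrip(u=20., v=-10., centerx=200., centery=200., scale=2.5):
    xx = centerx + u * scale                                # -> 250.0
    yy = centery - v * scale                                # -> 225.0 (v negated)
    return (xx - centerx) / scale, (centery - yy) / scale   # -> (20.0, -10.0)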
class plotHodo(backgroundHodo):
'''
Plots the data on the hodograph. Inherits from the backgroundHodo
class that plots the background frame onto a QPixmap.
'''
modified = Signal(int, dict)
reset = Signal(list)
def __init__(self, **kwargs):
'''
Initialize the data used in the class.
'''
super(plotHodo, self).__init__()
self.prof = None
self.pc_idx = 0
self.prof_collections = []
self.all_observed = False
self.colors = [
QtGui.QColor("#FF0000"),
QtGui.QColor("#00FF00"),
QtGui.QColor("#FFFF00"),
QtGui.QColor("#00FFFF")
]
self.ens_colors = [
QtGui.QColor("#880000"),
QtGui.QColor("#008800"),
QtGui.QColor("#888800"),
QtGui.QColor("#008888")
]
## if you want the storm motion vector, you need to
## provide the profile.
self.cursor_type = kwargs.get('cursor', 'none')
self.bndy_spd = kwargs.get('bndy_spd', 0)
self.bndy_dir = kwargs.get('bndy_dir', 0)
self.bndy_u, self.bndy_v = tab.utils.vec2comp(self.bndy_dir, self.bndy_spd)
self.track_cursor = False
self.was_right_click = False
self.initdrag = False
self.dragging = False
self.drag_idx = None
self.drag_buffer = 5
self.clickradius = 6
self.centered = kwargs.get('centered', (0,0))
self.center_loc = 'centered'
## the following is used for the dynamic readout
self.setMouseTracking(True)
self.wndReadout = QLabel(parent=self)
self.srh1kmReadout = QLabel(parent=self)
self.srh3kmReadout = QLabel(parent=self)
self.esrhReadout = QLabel(parent=self)
self.wndReadout.setFixedWidth(0)
self.srh1kmReadout.setFixedWidth(0)
self.srh3kmReadout.setFixedWidth(0)
self.esrhReadout.setFixedWidth(0)
## these stylesheets have to be set for
## each readout
self.wndReadout.setStyleSheet("QLabel {"
" background-color: rgb(0, 0, 0);"
" border-width: 0px;"
" font-size: 11px;"
" color: #FFFFFF;}")
self.srh1kmReadout.setStyleSheet("QLabel {"
" background-color: rgb(0, 0, 0);"
" border-width: 0px;"
" font-size: 11px;"
" color: #FF0000;}")
self.srh3kmReadout.setStyleSheet("QLabel {"
" background-color: rgb(0, 0, 0);"
" border-width: 0px;"
" font-size: 11px;"
" color: #00FF00;}")
self.esrhReadout.setStyleSheet("QLabel {"
" background-color: rgb(0, 0, 0);"
" border-width: 0px;"
" font-size: 11px;"
" color: #00FFFF;}")
self.hband = QRubberBand(QRubberBand.Line, self)
self.vband = QRubberBand(QRubberBand.Line, self)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.showCursorMenu)
self.popupmenu=QMenu("Cursor Type:")
ag = QtGui.QActionGroup(self, exclusive=True)
nocurs = QAction(self)
nocurs.setText("No Cursor")
nocurs.setCheckable(True)
nocurs.setChecked(True)
nocurs.triggered.connect(self.setNoCursor)
a = ag.addAction(nocurs)
self.popupmenu.addAction(a)
storm_motion = QAction(self)
storm_motion.setText("Strm Motion Cursor")
storm_motion.setCheckable(True)
storm_motion.triggered.connect(self.setStormMotionCursor)
a = ag.addAction(storm_motion)
self.popupmenu.addAction(a)
bnd = QAction(self)
bnd.setText("Bndy Cursor")
bnd.setCheckable(True)
bnd.triggered.connect(self.setBndyCursor)
a = ag.addAction(bnd)
self.popupmenu.addAction(a)
self.popupmenu.addSeparator()
ag2 = QtGui.QActionGroup(self, exclusive=True)
norm = QAction(self)
norm.setText("Normal")
norm.setCheckable(True)
norm.setChecked(True)
norm.triggered.connect(self.setNormalCenter)
a = ag2.addAction(norm)
self.popupmenu.addAction(a)
sr = QAction(self)
sr.setText("Storm Relative")
sr.setCheckable(True)
sr.triggered.connect(self.setSRCenter)
a = ag2.addAction(sr)
self.popupmenu.addAction(a)
mw = QAction(self)
mw.setText("Mean Wind")
mw.setCheckable(True)
mw.triggered.connect(self.setMWCenter)
a = ag2.addAction(mw)
self.popupmenu.addAction(a)
self.popupmenu.addSeparator()
reset = QAction(self)
reset.setText("Reset Hodograph")
reset.triggered.connect(lambda: self.reset.emit(['u', 'v']))
self.popupmenu.addAction(reset)
def addProfileCollection(self, prof_coll):
self.prof_collections.append(prof_coll)
def rmProfileCollection(self, prof_coll):
self.prof_collections.remove(prof_coll)
def setActiveCollection(self, pc_idx, **kwargs):
self.pc_idx = pc_idx
prof = self.prof_collections[pc_idx].getHighlightedProf()
self.prof = prof
self.hght = prof.hght
self.u = prof.u; self.v = prof.v
## if you want the storm motion vector, you need to
## provide the profile.
self.srwind = self.prof.srwind
self.ptop = self.prof.etop
self.pbottom = self.prof.ebottom
mean_lcl_el = self.prof.mean_lcl_el
if tab.utils.QC(mean_lcl_el[0]):
self.mean_lcl_el = tab.utils.vec2comp(*self.prof.mean_lcl_el)
else:
self.mean_lcl_el = (np.ma.masked, np.ma.masked)
self.corfidi_up_u = self.prof.upshear_downshear[0]
self.corfidi_up_v = self.prof.upshear_downshear[1]
self.corfidi_dn_u = self.prof.upshear_downshear[2]
self.corfidi_dn_v = self.prof.upshear_downshear[3]
self.bunkers_right_vec = tab.utils.comp2vec(self.prof.srwind[0], self.prof.srwind[1])
self.bunkers_left_vec = tab.utils.comp2vec(self.prof.srwind[2], self.prof.srwind[3])
self.upshear = tab.utils.comp2vec(self.prof.upshear_downshear[0],self.prof.upshear_downshear[1])
self.downshear = tab.utils.comp2vec(self.prof.upshear_downshear[2],self.prof.upshear_downshear[3])
self.mean_lcl_el_vec = self.prof.mean_lcl_el #tab.utils.comp2vec(self.prof.mean_lcl_el[0], self.prof.mean_lcl_el[1])
self.clearData()
self.plotData()
self.update()
def setBndyCursor(self):
self.track_cursor = True
self.cursor_type = 'boundary'
self.plotBndy(self.bndy_dir)
self.wndReadout.hide()
self.srh1kmReadout.hide()
self.srh3kmReadout.show()
self.esrhReadout.hide()
self.clearData()
self.plotData()
self.update()
self.parentWidget().setFocus()
def setNoCursor(self):
self.track_cursor = False
self.cursor_type = 'none'
self.unsetCursor()
self.hband.hide()
self.vband.hide()
self.clearData()
self.plotData()
self.update()
self.wndReadout.hide()
self.srh1kmReadout.hide()
self.srh3kmReadout.hide()
self.esrhReadout.hide()
self.parentWidget().setFocus()
def setStormMotionCursor(self):
self.unsetCursor()
self.track_cursor = True
self.cursor_type = 'stormmotion'
self.wndReadout.show()
self.srh1kmReadout.show()
self.srh3kmReadout.show()
self.esrhReadout.show()
self.clearData()
self.plotData()
self.update()
self.parentWidget().setFocus()
def showCursorMenu(self, pos):
self.popupmenu.popup(self.mapToGlobal(pos))
def setNormalCenter(self):
self.centered = (0, 0)
self.center_loc = 'centered'
self.clearData()
self.center_hodo(self.centered)
self.plotData()
self.update()
self.parentWidget().setFocus()
def setMWCenter(self):
if not tab.utils.QC(self.mean_lcl_el[0]):
return
self.centered = (self.mean_lcl_el[0],self.mean_lcl_el[1])
self.center_loc = 'meanwind'
self.clearData()
self.center_hodo(self.centered)
self.plotData()
self.update()
self.parentWidget().setFocus()
def setSRCenter(self):
rstu,rstv,lstu,lstv = self.srwind
self.centered = (rstu, rstv)
self.center_loc = 'stormrelative'
self.clearData()
self.center_hodo(self.centered)
self.plotData()
self.update()
self.parentWidget().setFocus()
def setAllObserved(self, all_observed, update_gui=True):
self.all_observed = all_observed
if update_gui:
self.clearData()
self.plotData()
self.update()
self.parentWidget().setFocus()
def wheelEvent(self, e):
'''
Handles the zooming of the hodograph.
Parameters
----------
e: an Event object
'''
super(plotHodo, self).wheelEvent(e)
# self.clearData()
# self.plotData()
def mousePressEvent(self, e):
'''
Handles when the mouse is pressed.
Used to set the storm motion vector.
Parameters
----------
e: an Event object
'''
if self.prof is None:
return
self.was_right_click = e.button() & QtCore.Qt.RightButton
if self.cursor_type == 'none' and not self.was_right_click:
visible = np.where(self.hght <= 12000)
xs, ys = self.uv_to_pix(self.u[visible], self.v[visible])
dists = np.hypot(xs - e.x(), ys - e.y())
if dists.min() < self.clickradius:
self.initdrag = True
self.drag_idx = np.argmin(dists)
def mouseReleaseEvent(self, e):
if self.cursor_type == 'stormmotion' and not self.was_right_click:
self.track_cursor = not self.track_cursor
elif self.cursor_type == 'boundary' and not self.was_right_click:
if self.track_cursor:
qp = QtGui.QPainter()
self.bndy_u, self.bndy_v = self.pix_to_uv(e.x(), e.y())
self.bndy_dir, self.bndy_spd = tab.utils.comp2vec(self.bndy_u, self.bndy_v)
y1 = 400*np.sin(np.radians(self.bndy_dir)) + e.y()
x1 = 400*np.cos(np.radians(self.bndy_dir)) + e.x()
y2 = e.y() - 400*np.sin(np.radians(self.bndy_dir))
x2 = e.x() - 400*np.cos(np.radians(self.bndy_dir))
penwidth = 2
width = 300
hght = 14
# Plot the actual boundary
boundary_color = QtGui.QColor("#CC9900")
pen = QtGui.QPen(boundary_color, penwidth)
qp.begin(self.plotBitMap)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.TextAntialiasing)
qp.setPen(pen)
qp.drawLine(x1, y1, x2, y2)
center_rm = QtCore.QPointF(e.x(),e.y())
qp.setPen(pen)
pen = QtGui.QPen(boundary_color, 50)
pen.setStyle(QtCore.Qt.SolidLine)
qp.drawEllipse(center_rm, 3, 3)
# Plot the shear vector
width = 150
qp = self.setBlackPen(qp)
rect = QtCore.QRectF(3, self.bry-35, width, hght)
qp.drawRect(rect)
shear_color = QtGui.QColor("#0099CC")
pen = QtGui.QPen(shear_color, penwidth)
qp.setFont(self.critical_font)
qp.setPen(pen)
to_add = self.pix_to_uv(e.x(), e.y())
x2, y2 = self.uv_to_pix(self.prof.sfc_6km_shear[0] + to_add[0], self.prof.sfc_6km_shear[1]+ to_add[1])
qp.drawLine(e.x(), e.y(), x2, y2)
dir, spd = tab.utils.comp2vec(self.prof.sfc_6km_shear[0], self.prof.sfc_6km_shear[1])
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, "0 - 6 km Shear: " + tab.utils.INT2STR(dir) + '/' + tab.utils.INT2STR(spd) + ' kts')
# Plot the 9-11 km Storm Relative Winds
width = 200
qp = self.setBlackPen(qp)
rect = QtCore.QRectF(3, self.bry-20, width, hght)
qp.drawRect(rect)
srw_color = QtGui.QColor("#FF00FF")
pen = QtGui.QPen(srw_color, penwidth)
qp.setPen(pen)
x2, y2 = self.uv_to_pix(self.prof.srw_9_11km[0] + to_add[0], self.prof.srw_9_11km[1] + to_add[1])
qp.drawLine(e.x(), e.y(), x2, y2)
dir, spd = tab.utils.comp2vec(self.prof.srw_9_11km[0], self.prof.srw_9_11km[1])
if spd >= 70:
supercell_type = "LP"
elif spd < 70 and spd > 40:
supercell_type = "Classic"
else:
supercell_type = "HP"
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, "9 - 11 km SR-Wind: " + tab.utils.INT2STR(dir) + '/' + tab.utils.INT2STR(spd) + ' kts - (' + supercell_type + ')')
# Removing this code until @wblumberg can finish fixing it.
"""
# Draw the discrete vs mixed/linear mode output only if there is an LCL-EL layer.
norm_Shear, mode_Shear, norm_Wind, norm_Mode = self.calculateStormMode()
if tab.utils.QC(norm_Wind) and self.prof.mupcl.bplus != 0:
width = 80
qp = self.setBlackPen(qp)
rect = QtCore.QRectF(3, self.bry-80, width, hght)
qp.drawRect(rect)
color = QtGui.QColor(YELLOW)
pen = QtGui.QPen(color, penwidth)
qp.setPen(pen)
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, "...Storm Mode...")
width = 270
qp = self.setBlackPen(qp)
rect = QtCore.QRectF(3, self.bry-50, width, hght)
qp.drawRect(rect)
if norm_Wind < 6:
color = QtGui.QColor(RED)
else:
color = QtGui.QColor(MAGENTA)
pen = QtGui.QPen(color, penwidth)
qp.setPen(pen)
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, "From Cloud Layer Wind - Bndy Diff (" + tab.utils.INT2STR(norm_Wind) + " m/s): " + norm_Mode)
width = 200
qp = self.setBlackPen(qp)
rect = QtCore.QRectF(3, self.bry-65, width, hght)
qp.drawRect(rect)
if norm_Shear < 15:
color = QtGui.QColor(RED)
else:
color = QtGui.QColor(MAGENTA)
pen = QtGui.QPen(color, penwidth)
qp.setPen(pen)
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, "From Bndy 0-6 km Shr Diff (" + tab.utils.INT2STR(norm_Shear) + " m/s): " + mode_Shear)
"""
qp.end()
self.update()
self.track_cursor = False
else:
self.plotBndy(self.bndy_dir)
self.clearData()
self.plotData()
self.update()
self.track_cursor = True
elif self.cursor_type == 'none' and (self.dragging or self.initdrag):
u, v = self.pix_to_uv(e.x(), e.y())
# new_u = self.u.copy()
# new_v = self.v.copy()
# new_u[self.drag_idx] = u
# new_v[self.drag_idx] = v
# new_prof = type(self.prof).copy(self.prof, u=new_u, v=new_v)
self.modified.emit(self.drag_idx, {'u':u, 'v':v})
self.drag_idx = None
self.dragging = False
self.saveBitMap = None
self.initdrag = False
def setBlackPen(self, qp):
color = QtGui.QColor('#000000')
color.setAlphaF(.5)
pen = QtGui.QPen(color, 0, QtCore.Qt.SolidLine)
brush = QtGui.QBrush(QtCore.Qt.SolidPattern)
qp.setPen(pen)
qp.setBrush(brush)
return qp
def calculateStormMode(self):
"""
Logic based on some of the key findings in Dial et al. (2010)
"""
dir_06shear, mag_06shear = tab.utils.comp2vec(self.prof.sfc_6km_shear[0], self.prof.sfc_6km_shear[1])
norm_shear = mag_06shear * np.sin( np.radians( dir_06shear - (self.bndy_dir + 90)) )
norm_shear = np.abs(tab.utils.KTS2MS(norm_shear))
if norm_shear < 15: # M/S
shear_mode = "Linear/Mixed"
else:
shear_mode = "Discrete"
if not tab.utils.QC(self.mean_lcl_el[0]) or (self.mean_lcl_el[0] == 0 and self.mean_lcl_el[1] == 0):
wind_mode = np.ma.masked
wind_diff = np.ma.masked
else:
dir_cloud, mag_cloud = tab.utils.comp2vec(self.prof.mean_lcl_el[0], self.prof.mean_lcl_el[1])
norm_cloudmotion = mag_cloud * np.sin( np.radians( dir_cloud - (self.bndy_dir + 90) ) )
wind_diff = tab.utils.KTS2MS(np.abs(norm_cloudmotion) - self.bndy_spd)
if wind_diff > 6: # M/S
wind_mode = "Discrete"
else:
wind_mode = "Linear/Mixed"
return norm_shear, shear_mode, wind_diff, wind_mode
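## Worked illustration of the projection above (hypothetical numbers): with
## bndy_dir = 0 the boundary is treated as oriented along bndy_dir + 90 = 90
## degrees, so a 30 kt 0-6 km shear vector at 180 degrees is entirely
## boundary-normal (30 * |sin(180 - 90)| = 30 kt, about 15.4 m/s, "Discrete"),
## while the same shear at 90 degrees is boundary-parallel
## (30 * |sin(90 - 90)| = 0, "Linear/Mixed").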
def plotBndy(self, direction):
length = 40
y1 = length*np.sin(np.radians(direction))
x1 = length*np.cos(np.radians(direction))
penwidth = 2
top_x_pix = x1 + length/2
top_y_pix = y1 + length/2
bot_x_pix = length/2 - x1
bot_y_pix = length/2 - y1
pixmap = QPixmap(length,length)
pixmap.fill(Qt.transparent)
painter = QPainter(pixmap)
boundary_color = QtGui.QColor("#CC9900")
pen = QtGui.QPen(boundary_color, penwidth)
painter.setPen(pen)
painter.drawLine(top_x_pix, top_y_pix, bot_x_pix, bot_y_pix)
center_rm = QtCore.QPointF(length/2, length/2)
pen = QtGui.QPen(boundary_color, 2)
painter.setPen(pen)
painter.drawEllipse(center_rm, 3, 3)
painter.end()
self.setCursor(pixmap)
def mouseMoveEvent(self, e):
'''
Handles the tracking of the mouse to
provide the dynamic readouts.
Parameters
----------
e: an Event object
'''
if self.cursor_type == 'stormmotion' and self.track_cursor:
## convert the location of the mouse to u,v space
u, v = self.pix_to_uv(e.x(), e.y())
## get the direction and speed from u,v
dir, spd = tab.utils.comp2vec(u,v)
## calculate the storm relative helicity for a storm motion
## vector with a u,v at the mouse pointer
srh1km = tab.winds.helicity(self.prof, 0, 1000., stu=u, stv=v)[0]
srh3km = tab.winds.helicity(self.prof, 0, 3000., stu=u, stv=v)[0]
## do some sanity checks to prevent crashing if there is no
## effective inflow layer
etop, ebot = self.prof.etopm, self.prof.ebotm
if tab.utils.QC(etop) and tab.utils.QC(ebot):
esrh = tab.winds.helicity(self.prof, ebot, etop, stu=u, stv=v)[0]
self.esrhReadout.setText('effective: ' + tab.utils.INT2STR(esrh) + ' m2/s2')
else:
esrh = np.ma.masked
self.esrhReadout.setText('effective: ' + str(esrh) + ' m2/s2')
## set the crosshair in the window
self.hband.setGeometry(QRect(QPoint(self.lpad,e.y()), QPoint(self.brx,e.y())).normalized())
self.vband.setGeometry(QRect(QPoint(e.x(), self.tpad), QPoint(e.x(),self.bry)).normalized())
## set the readout texts
self.wndReadout.setText(tab.utils.INT2STR(dir) + '/' + tab.utils.FLOAT2STR(spd, 1))
self.srh1kmReadout.setText('sfc-1km: ' + tab.utils.INT2STR(srh1km) + ' m2/s2')
self.srh3kmReadout.setText('sfc-3km: ' + tab.utils.INT2STR(srh3km) + ' m2/s2')
## set the readout width
self.wndReadout.setFixedWidth(50)
self.srh1kmReadout.setFixedWidth(120)
self.srh3kmReadout.setFixedWidth(120)
self.esrhReadout.setFixedWidth(120)
## place the readout
self.wndReadout.move(1, self.bry-15)
self.srh1kmReadout.move(self.brx-130, self.bry-45)
self.srh3kmReadout.move(self.brx-130, self.bry-30)
self.esrhReadout.move(self.brx-130, self.bry-15)
## show the crosshair
self.hband.show()
self.vband.show()
elif self.cursor_type == 'boundary':
self.hband.hide()
self.vband.hide()
u, v = self.pix_to_uv(e.x(), e.y())
## get the direction and speed from u,v
dir, spd = tab.utils.comp2vec(u,v)
self.plotBndy(dir)
self.srh3kmReadout.setText('Bndy Motion: ' + tab.utils.INT2STR(dir) + '/' + tab.utils.INT2STR(spd))
self.srh3kmReadout.setFixedWidth(120)
self.srh3kmReadout.move(self.brx-130, self.bry-30)
elif self.cursor_type == 'none' and (self.initdrag or self.dragging):
self.initdrag = False
self.dragging = True
self.dragHodo(e)
def dragHodo(self, e):
idx = self.drag_idx
u, v = self.pix_to_uv(e.x(), e.y())
u_pts = [ u ]
v_pts = [ v ]
lb_idx, ub_idx = max(idx - 1, 0), min(idx + 1, self.u.shape[0] - 1)
while lb_idx >= 0 and (self.u.mask[lb_idx] or self.v.mask[lb_idx]):
lb_idx -= 1
while ub_idx < self.u.shape[0] and (self.u.mask[ub_idx] or self.v.mask[ub_idx]):
ub_idx += 1
if lb_idx != -1:
u_pts.append(self.u[lb_idx])
v_pts.append(self.v[lb_idx])
if ub_idx != self.u.shape[0]:
u_pts.append(self.u[ub_idx])
v_pts.append(self.v[ub_idx])
lb_u, ub_u = min(u_pts), max(u_pts)
lb_v, ub_v = min(v_pts), max(v_pts)
lb_x, lb_y = self.uv_to_pix(lb_u, ub_v)
ub_x, ub_y = self.uv_to_pix(ub_u, lb_v)
qp = QtGui.QPainter()
qp.begin(self.plotBitMap)
if self.saveBitMap is not None:
(origin, size, bmap) = self.saveBitMap
qp.drawPixmap(origin, bmap, QRect(QPoint(0, 0), size))
# Capture the new portion of the image to save
origin = QPoint(max(lb_x - self.drag_buffer, 0), max(lb_y - self.drag_buffer, 0))
size = QSize(ub_x - lb_x + 2 * self.drag_buffer, ub_y - lb_y + 2 * self.drag_buffer)
bmap = self.plotBitMap.copy(QRect(origin, size))
self.saveBitMap = (origin, size, bmap)
pen = QtGui.QPen(QtGui.QColor('#FFFFFF'), 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
if lb_idx != -1:
prof_x, prof_y = self.uv_to_pix(self.u[lb_idx], self.v[lb_idx])
qp.drawLine(prof_x, prof_y, e.x(), e.y())
if ub_idx != self.u.shape[0]:
prof_x, prof_y = self.uv_to_pix(self.u[ub_idx], self.v[ub_idx])
qp.drawLine(e.x(), e.y(), prof_x, prof_y)
qp.end()
self.update()
def resizeEvent(self, e):
'''
Resize the plot based on adjusting the main window.
Parameters
----------
e: an Event object
'''
super(plotHodo, self).resizeEvent(e)
self.plotData()
def paintEvent(self, e):
'''
Handles painting the QPixmap onto the QWidget frame.
Parameters
----------
e: an Event object
'''
super(plotHodo, self).paintEvent(e)
qp = QtGui.QPainter()
qp.begin(self)
qp.drawPixmap(0, 0, self.plotBitMap)
qp.end()
def clearData(self):
'''
Clears/resets the base QPixmap.
'''
self.plotBitMap = self.backgroundBitMap.copy()
def plotData(self):
'''
Handles the plotting of the data in the QPixmap.
'''
## initialize a QPainter object
if self.prof is None:
return
qp = QtGui.QPainter()
qp.begin(self.plotBitMap)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.TextAntialiasing)
cur_dt = self.prof_collections[self.pc_idx].getCurrentDate()
for idx, prof_coll in enumerate(self.prof_collections):
# Draw all unhighlighted members
if prof_coll.getCurrentDate() == cur_dt:
proflist = prof_coll.getCurrentProfs().values()
if idx == self.pc_idx:
for prof in proflist:
self.draw_hodo(qp, prof, self.ens_colors, width=1)
else:
for prof in proflist:
self.draw_profile(qp, prof, width=1)
for idx, prof_coll in enumerate(self.prof_collections):
# Draw all highlighted members that aren't the active one.
if idx != self.pc_idx and (prof_coll.getCurrentDate() == cur_dt or self.all_observed):
prof = prof_coll.getHighlightedProf()
self.draw_profile(qp, prof)
## draw the hodograph
self.draw_hodo(qp, self.prof, self.colors)
## draw the storm motion vector
self.drawSMV(qp)
self.drawCorfidi(qp)
self.drawLCLtoEL_MW(qp)
if self.cursor_type in [ 'none', 'stormmotion' ]:
self.drawCriticalAngle(qp)
qp.end()
def drawLCLtoEL_MW(self, qp):
'''
Draws the LCL to EL mean wind onto the hodo.
Parameters
----------
qp: a QPainter object
'''
penwidth = 2
pen = QtGui.QPen(QtGui.QColor("#B8860B"), penwidth)
pen.setStyle(QtCore.Qt.SolidLine)
qp.setPen(pen)
try:
mean_u, mean_v = self.uv_to_pix(self.mean_lcl_el[0],self.mean_lcl_el[1])
half_length = (8./2.)
qp.drawRect(mean_u - half_length, mean_v + half_length, 8, 8)
except:
return
# This probably needs to be checked.
color = QtGui.QColor('#000000')
color.setAlpha(0)
pen = QtGui.QPen(color, 0, QtCore.Qt.SolidLine)
qp.setPen(pen)
v_offset=5; h_offset = 1; width = 40; hght = 12;
mw_rect = QtCore.QRectF(mean_u+h_offset, mean_v+v_offset, width, hght)
qp.drawRect(mw_rect)
pen = QtGui.QPen(QtGui.QColor("#B8860B"))
qp.setPen(pen)
qp.setFont(self.label_font)
mw_str = tab.utils.INT2STR(self.mean_lcl_el_vec[0]) + '/' + tab.utils.INT2STR(self.mean_lcl_el_vec[1])
qp.drawText(mw_rect, QtCore.Qt.AlignCenter, mw_str)
def drawCorfidi(self, qp):
'''
Draw the Corfidi upshear/downshear vectors
Parameters
----------
qp: a QPainter object
'''
penwidth = 1
pen = QtGui.QPen(QtGui.QColor("#00BFFF"), penwidth)
pen.setStyle(QtCore.Qt.SolidLine)
qp.setPen(pen)
if not np.isfinite(self.corfidi_up_u) or not np.isfinite(self.corfidi_up_v) or \
not np.isfinite(self.corfidi_dn_u) or not np.isfinite(self.corfidi_dn_v):
return
try:
up_u, up_v = self.uv_to_pix(self.corfidi_up_u, self.corfidi_up_v)
dn_u, dn_v = self.uv_to_pix(self.corfidi_dn_u, self.corfidi_dn_v)
center_up = QtCore.QPointF(up_u, up_v)
center_dn = QtCore.QPointF(dn_u, dn_v)
## draw circles around the center point of the Corfidi vectors
qp.drawEllipse(center_up, 3, 3)
qp.drawEllipse(center_dn, 3, 3)
except:
return
up_u, up_v = self.uv_to_pix(self.corfidi_up_u, self.corfidi_up_v)
dn_u, dn_v = self.uv_to_pix(self.corfidi_dn_u, self.corfidi_dn_v)
center_up = QtCore.QPointF(up_u, up_v)
center_dn = QtCore.QPointF(dn_u, dn_v)
## draw circles around the center point of the Corfidi vectors
qp.drawEllipse(center_up, 3, 3)
qp.drawEllipse(center_dn, 3, 3)
color = QtGui.QColor('#000000')
color.setAlpha(0)
pen = QtGui.QPen(color, 0, QtCore.Qt.SolidLine)
qp.setPen(pen)
v_offset=3; h_offset = 1; width = 60; hght = 10;
up_rect = QtCore.QRectF(up_u+h_offset, up_v+v_offset, width, hght)
dn_rect = QtCore.QRectF(dn_u+h_offset, dn_v+v_offset, width, hght)
qp.drawRect(up_rect)
qp.drawRect(dn_rect)
## now make the pen white and draw text using
## the invisible rectangles
pen = QtGui.QPen(QtGui.QColor("#00BFFF"))
qp.setPen(pen)
qp.setFont(self.label_font)
up_stuff = tab.utils.INT2STR(self.upshear[0]) + '/' + tab.utils.INT2STR(self.upshear[1])
dn_stuff = tab.utils.INT2STR(self.downshear[0]) + '/' + tab.utils.INT2STR(self.downshear[1])
qp.drawText(up_rect, QtCore.Qt.AlignCenter, "UP=" + up_stuff)
qp.drawText(dn_rect, QtCore.Qt.AlignCenter, "DN=" + dn_stuff)
def drawSMV(self, qp):
'''
Draws the storm motion vector.
Parameters
----------
qp: QtGui.QPainter object
'''
## set a pen with white color, width 1, solid line.
penwidth = 1
pen = QtGui.QPen(QtGui.QColor(WHITE), penwidth)
pen.setStyle(QtCore.Qt.SolidLine)
qp.setPen(pen)
## check and make sure there is no missing data
try:
mask = np.maximum( self.u, self.v )
hght = self.hght[~mask]
u = self.u[~mask]; v = self.v[~mask]
## calculate the left and right storm motion vectors
rstu,rstv,lstu,lstv = self.srwind
rstu = rstu[~mask]; rstv = rstv[~mask]
lstu = lstu[~mask]; lstv = lstv[~mask]
## otherwise the data is fine
except:
hght = self.hght
u = self.u; v = self.v
rstu,rstv,lstu,lstv = self.srwind
# make sure the storm motion exists
if not tab.utils.QC(rstu) or not tab.utils.QC(lstu):
return
## convert the left and right mover vector components to pixel values
ruu, rvv = self.uv_to_pix(rstu,rstv)
luu, lvv = self.uv_to_pix(lstu, lstv)
## calculate the center points of the storm motion vectors
center_rm = QtCore.QPointF(ruu,rvv)
center_lm = QtCore.QPointF(luu,lvv)
## draw circles around the storm motion vectors
qp.drawEllipse(center_rm, 5, 5)
qp.drawEllipse(center_lm, 5, 5)
## get the effective inflow layer
ptop, pbottom = self.ptop, self.pbottom
## make sure the effective inflow layer and storm motion vectors exist
if tab.utils.QC(ptop) and tab.utils.QC(pbottom):
## get the interpolated wind at the bottom and top
## of the effective inflow layer
utop,vtop = tab.interp.components(self.prof, ptop)
ubot,vbot = tab.interp.components(self.prof, pbottom)
## convert these values to pixels
uutop, vvtop = self.uv_to_pix(utop, vtop)
uubot, vvbot = self.uv_to_pix(ubot, vbot)
## set a pen
pen = QtGui.QPen(QtGui.QColor("#00FFFF"), penwidth)
pen.setStyle(QtCore.Qt.SolidLine)
qp.setPen(pen)
## draw lines showing the effective inflow layer
qp.drawLine(center_rm.x(), center_rm.y(), uubot, vvbot)
qp.drawLine(center_rm.x(), center_rm.y(), uutop, vvtop)
color = QtGui.QColor('#000000')
color.setAlpha(0)
pen = QtGui.QPen(color, 0, QtCore.Qt.SolidLine)
qp.setPen(pen)
h_offset = 2; v_offset=5; width = 55; hght = 12;
rm_rect = QtCore.QRectF(ruu+h_offset, rvv+v_offset, width, hght)
lm_rect = QtCore.QRectF(luu+h_offset, lvv+v_offset, width, hght)
qp.drawRect(rm_rect)
qp.drawRect(lm_rect)
## now make the pen white and draw text using
## the invisible rectangles
pen = QtGui.QPen(QtGui.QColor("#FFFFFF"))
qp.setPen(pen)
qp.setFont(self.label_font)
rm_stuff = tab.utils.INT2STR(self.bunkers_right_vec[0]) + '/' + tab.utils.INT2STR(self.bunkers_right_vec[1])
lm_stuff = tab.utils.INT2STR(self.bunkers_left_vec[0]) + '/' + tab.utils.INT2STR(self.bunkers_left_vec[1])
qp.drawText(rm_rect, QtCore.Qt.AlignCenter, rm_stuff + " RM")
qp.drawText(lm_rect, QtCore.Qt.AlignCenter, lm_stuff + " LM")
def drawCriticalAngle(self, qp):
'''
Plot the critical angle on the hodograph and display its value.
Parameters
----------
qp : QtGui.QPainter object
'''
if tab.utils.QC(self.ptop) and tab.utils.QC(self.pbottom):
# There is an effective inflow layer at the surface so draw the critical angle line
ca_color = QtGui.QColor("#FF00FF")
pres_500m = tab.interp.pres(self.prof, tab.interp.to_msl(self.prof, 500))
u500, v500 = tab.interp.components(self.prof, pres_500m)
sfc_u, sfc_v = tab.interp.components(self.prof, self.prof.pres[self.prof.get_sfc()])
sfc_u_pix, sfc_v_pix = self.uv_to_pix(sfc_u,sfc_v)
u500_pix, v500_pix = self.uv_to_pix(u500, v500)
pen = QtGui.QPen(ca_color, 1.0, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.drawLine(sfc_u_pix, sfc_v_pix, u500_pix, v500_pix)
vec1_u, vec1_v = u500 - sfc_u, v500 - sfc_v
try:
mask = np.maximum( self.u, self.v )
rstu,rstv,lstu,lstv = self.srwind
rstu = rstu[~mask]; rstv = rstv[~mask]
except:
rstu,rstv,lstu,lstv = self.srwind
if tab.utils.QC(rstu) and tab.utils.QC(lstu):
qp = self.setBlackPen(qp)
rect = QtCore.QRectF(15, self.bry-36, 140, self.critical_height + 5)
qp.drawRect(rect)
ca_text_color = QtGui.QColor("#00FFFF")
pen = QtGui.QPen(ca_text_color, 1.0, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.setFont(self.critical_font)
offset = 10
qp.drawText(rect, QtCore.Qt.AlignLeft, 'Critical Angle = ' + tab.utils.INT2STR(self.prof.critical_angle))
def draw_hodo(self, qp, prof, colors, width=2):
'''
Plot the Hodograph.
Parameters
----------
qp: QtGui.QPainter object
'''
## check for masked data
try:
mask = np.maximum(prof.u.mask, prof.v.mask)
z = tab.interp.to_agl(prof, prof.hght[~mask])
u = prof.u[~mask]
v = prof.v[~mask]
## otherwise the data is fine
except:
z = tab.interp.to_agl(prof, prof.hght )
u = prof.u
v = prof.v
## convert the u and v values to x and y pixels
xx, yy = self.uv_to_pix(u, v)
## define the colors for the different hodograph heights
penwidth = width
seg_bnds = [0., 3000., 6000., 9000., 12000.]
seg_x = [ tab.interp.generic_interp_hght(bnd, z, xx) for bnd in seg_bnds ]
seg_y = [ tab.interp.generic_interp_hght(bnd, z, yy) for bnd in seg_bnds ]
seg_idxs = np.searchsorted(z, seg_bnds)
for idx in xrange(len(seg_bnds) - 1):
## define a pen to draw with
pen = QtGui.QPen(colors[idx], penwidth)
pen.setStyle(QtCore.Qt.SolidLine)
qp.setPen(pen)
path = QPainterPath()
path.moveTo(seg_x[idx], seg_y[idx])
for z_idx in xrange(seg_idxs[idx] + 1, seg_idxs[idx + 1]):
path.lineTo(xx[z_idx], yy[z_idx])
path.lineTo(seg_x[idx + 1], seg_y[idx + 1])
qp.drawPath(path)
def draw_profile(self, qp, prof, color="#6666CC", width=2):
'''
Plot the Hodograph.
Parameters
----------
qp: QtGui.QPainter object
'''
## check for masked data
try:
mask = np.maximum(prof.u.mask, prof.v.mask)
z = tab.interp.to_agl(prof, prof.hght[~mask])
u = prof.u[~mask]
v = prof.v[~mask]
## otherwise the data is fine
except:
z = tab.interp.to_agl(prof, prof.hght )
u = prof.u
v = prof.v
## convert the u and v values to x and y pixels
xx, yy = self.uv_to_pix(u, v)
penwidth = width
pen = QtGui.QPen(QtGui.QColor(color), penwidth)
pen.setStyle(QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.setBrush(Qt.NoBrush)
seg_bnds = [0., 3000., 6000., 9000., 12000.]
seg_x = [ tab.interp.generic_interp_hght(bnd, z, xx) for bnd in seg_bnds ]
seg_y = [ tab.interp.generic_interp_hght(bnd, z, yy) for bnd in seg_bnds ]
seg_idxs = np.searchsorted(z, seg_bnds)
for idx in xrange(len(seg_bnds) - 1):
## define a pen to draw with
pen = QtGui.QPen(QtGui.QColor(color), penwidth)
pen.setStyle(QtCore.Qt.SolidLine)
qp.setPen(pen)
path = QPainterPath()
path.moveTo(seg_x[idx], seg_y[idx])
for z_idx in xrange(seg_idxs[idx] + 1, seg_idxs[idx + 1]):
path.lineTo(xx[z_idx], yy[z_idx])
path.lineTo(seg_x[idx + 1], seg_y[idx + 1])
qp.drawPath(path)
|
|
"""
Common statistics library
"""
# python libs
from math import ceil
from math import exp
from math import floor
from math import log
from math import pi
from math import sqrt
from itertools import izip
import cmath
import random
# rasmus libs
from rasmus import util
from rasmus import tablelib
def logprod(lst):
"""Computes the product of a list of numbers"""
return sum(log(i) for i in lst)
def prod(lst):
"""Computes the product of a list of numbers"""
p = 1.0
for i in lst:
p *= i
return p
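# Illustrative check (arbitrary values): summing logs is the numerically safer
# equivalent of multiplying directly, so exp(logprod(lst)) should match
# prod(lst) for modest inputs.
def _example_logprod():
    lst = [2.0, 3.0, 4.0]
    return exp(logprod(lst)), prod(lst)   # (~24.0, 24.0)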
def zscores(vals):
"""Computes the zscores for a list of numbers"""
mu = mean(vals)
sd = sdev(vals)
return [(float(i)-mu)/sd for i in vals]
def mean(vals):
"""Computes the mean of a list of numbers"""
n = 0
s = 0.0
for i in vals:
s += i
n += 1
return s / float(n)
def median(vals):
"""Computes the median of a list of numbers"""
lenvals = len(vals)
sortvals = sorted(vals)
if lenvals % 2 == 0:
return (sortvals[lenvals / 2] + sortvals[lenvals / 2 - 1]) / 2.0
else:
return sortvals[lenvals / 2]
def mode(vals):
"""Computes the mode of a list of numbers"""
top = 0
topkey = None
for key, val in util.hist_dict(vals).iteritems():
if val > top:
top = val
topkey = key
return topkey
def msqerr(vals1, vals2):
"""Mean squared error"""
assert len(vals1) == len(vals2), "lists are not the same length"
return mean([(vals1[i] - vals2[i]) ** 2
for i in xrange(len(vals1))])
def variance(vals):
"""Variance"""
u = mean(vals)
return sum((x - u)**2 for x in vals) / float(len(vals)-1)
def sdev(vals):
"""Standard deviation"""
return sqrt(variance(vals))
def serror(vals):
"""Stanadrd error"""
return sdev(vals) / sqrt(len(vals))
def covariance(lst1, lst2):
"""Covariance"""
m1 = mean(lst1)
m2 = mean(lst2)
tot = 0.0
for i in xrange(len(lst1)):
tot += (lst1[i] - m1) * (lst2[i] - m2)
return tot / (len(lst1)-1)
def covmatrix(mat):
"""Covariance Matrix"""
size = len(mat)
return [[covariance(mat[i], mat[j]) for j in range(size)]
for i in range(size)]
def corrmatrix(mat):
"""Correlation Matrix"""
size = len(mat)
return [[corr(mat[i], mat[j]) for j in range(size)]
for i in range(size)]
def corr(lst1, lst2):
"""Pearson's Correlation Coefficient"""
num = covariance(lst1, lst2)
denom = float(sdev(lst1) * sdev(lst2))
if denom != 0:
return num / denom
else:
return util.INF
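# Illustrative check (made-up data): an exact linear relationship gives a
# Pearson correlation of 1.0, and negating one list flips the sign to -1.0.
def _example_corr():
    xs = [1.0, 2.0, 3.0, 4.0]
    ys = [2.0 * x + 1.0 for x in xs]
    return corr(xs, ys), corr(xs, [-y for y in ys])   # (1.0, -1.0)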
def corr_spearman(lst1, lst2):
"""
Spearman's Rank Correlation Coefficient
i.e. Pearson's Correlation Coefficient between ranked variables (in ascending order)
"""
rank1 = util.sortranks(lst1, tied=True)
rank2 = util.sortranks(lst2, tied=True)
return corr(rank1, rank2)
def corr_pvalue(r, n):
    """Returns the significance of correlation > r with n samples"""
import rpy.r
t = r / sqrt((1 - r*r) / float(n - 2))
return rpy.r.pt(-t, n-2)
def qqnorm(data, plot=None):
"""Quantile-quantile plot"""
from rasmus import gnuplot
data2 = sorted(data)
norm = [random.normalvariate(0, 1) for x in range(len(data2))]
norm.sort()
if plot is None:
return gnuplot.plot(data2, norm)
else:
plot.plot(data2, norm)
return plot
def entropy(probs, base=2):
"""Shannon's entropy"""
return - sum(p * log(p, base) for p in probs if p > 0.0)
def cross_entropy(p, q, base=2):
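    """Cross entropy between discrete distributions p and q (bits by default)"""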
try:
return - sum(i * log(j, base) for i, j in izip(p, q) if i > 0.0)
except OverflowError:
return util.INF
def kl_div(p, q):
"""Compute the KL divergence for two discrete distributions"""
return cross_entropy(p, q) - entropy(p)
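# Illustrative example (not part of the original library): entropy, cross
# entropy, and KL divergence for two small discrete distributions, in bits
# (base 2 is the default above).  The helper name below is ours.
def _example_kl_div():
    p = [0.5, 0.5]
    q = [0.25, 0.75]
    # entropy(p) == 1.0 bit; kl_div(p, q) is roughly 0.2075 bits
    return entropy(p), kl_div(p, q)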
def akaike_ic(lnl, k):
"""Akaike information criterion"""
return 2 * k - 2 * lnl
def akaike_icc(lnl, n, k):
"""Akaike information criterion with second order correction
Good for small sample sizes
"""
return akaike_ic(lnl, k) + 2*k*(k+1) / (n - k - 1)
def bayesian_ic(lnl, n, k):
"""Bayesian information criterion
lnl -- ln(L)
n -- number of data points
k -- number of parameters
"""
return -2 * lnl + k * log(n)
def fitLine(xlist, ylist):
"""2D regression"""
xysum = 0
xxsum = 0
n = len(xlist)
for i in range(n):
xysum += xlist[i] * ylist[i]
xxsum += xlist[i] * xlist[i]
avgx = mean(xlist)
avgy = mean(ylist)
if (xxsum - n*avgx*avgx) == 0:
slope = 1e10
else:
slope = (xysum - n*avgx*avgy) / float(xxsum - n*avgx*avgx)
inter = (avgy*xxsum - avgx*xysum) / float(xxsum - n*avgx*avgx)
return (slope, inter)
def fitLineError(xlist, ylist, slope, inter):
"""Returns the Mean Square Error of the data fit"""
error = 0
n = len(xlist)
for i in range(n):
error += ((xlist[i]*slope + inter) - ylist[i]) ** 2
return error / n
def pearsonsRegression(observed, expected):
"""
Pearson's coefficient of regression
e.g. r^2 of least squares linear regression
"""
# error sum of squares
ess = sum((a - b)**2 for a, b in izip(observed, expected))
# total sum of squares
u = mean(observed)
tss = sum((a - u)**2 for a in observed)
r2 = 1 - ess / tss
return r2
def pearsonsRegressionLine(x, y, m, b):
observed = y
expected = [m*i + b for i in x]
return pearsonsRegression(observed, expected)
def rank(vals, x, norm=False, sort=True):
"""
Returns the rank of x in list vals
rank(x) = i if vals[i-1] <= x < vals[i]
x -- value to rank within values
vals -- list of values to compute the rank of
sort -- if True, vals will be sorted first
norm -- if True, return normalized ranks (i.e. percentiles)
"""
if sort:
vals = sorted(vals)
n = len(vals)
for r, v in enumerate(vals):
if v > x:
break
else:
r = n
if norm:
r /= float(n + 1)
return r
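# Illustrative example (not part of the original library): rank() counts how
# many sorted values are <= x; with norm=True it returns a percentile-like
# value in (0, 1).  The helper name below is ours.
def _example_rank():
    vals = [10, 20, 30, 40]
    # two values (10 and 20) are <= 25, so the rank is 2
    r = rank(vals, 25)             # == 2
    p = rank(vals, 25, norm=True)  # == 2 / 5.0 == 0.4
    return r, p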
def percentile(vals, perc, rounding=-1, sort=True):
"""Give the value at a percentile 'perc'
vals -- list of values
    perc -- percentile (a fraction in [0, 1])
rounding -- round down if -1 or round up for 1
sort -- if True, sort vals first
"""
if sort:
vals2 = sorted(vals)
else:
vals2 = vals
n = len(vals2)
if rounding == -1:
return vals2[util.clamp(int(perc * n), 0, n-1)]
elif rounding == 1:
return vals2[util.clamp(int(ceil(perc * n)), 0, n-1)]
else:
raise Exception("rounding must be 1 or -1")
def dither(vals, radius):
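    """Returns vals with uniform noise in [-radius, radius] added to each value"""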
return [x + random.uniform(-radius, radius) for x in vals]
def logadd(lna, lnb):
"""Adding numbers in log-space"""
diff = lna - lnb
if diff < 500:
return log(exp(diff) + 1.0) + lnb
else:
return lna
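# Illustrative example (not part of the original library): adding two
# probabilities stored in log-space without leaving log-space.
def _example_logadd():
    lna = log(0.5)
    lnb = log(0.25)
    # log(0.5 + 0.25) == log(0.75) ~= -0.2877
    return logadd(lna, lnb)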
def logsum(vals):
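    """Computes log(sum(exp(v) for v in vals)) without underflow"""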
SUM_LOG_THRESHOLD = -15
maxval = vals[0]
maxi = 0
# find maxval
for i in range(1, len(vals)):
if vals[i] > maxval:
maxval = vals[i]
maxi = i
expsum = 1.0
for i in xrange(len(vals)):
if i != maxi and vals[i] - maxval > SUM_LOG_THRESHOLD:
expsum += exp(vals[i] - maxval)
return maxval + log(expsum)
def logsub(lna, lnb):
"""
subtracting numbers in log-space
must have lna > lnb
"""
diff = lna - lnb
if diff < 500:
diff2 = exp(diff) - 1.0
if diff2 == 0.0:
return -util.INF
else:
return log(diff2) + lnb
else:
return lna
def logadd_sign(sa, lna, sb, lnb):
"""Adding numbers in log-space"""
if sa > 0 and sb > 0:
return 1, logadd(lna, lnb)
elif sa == 0:
return sb, lnb
elif sb == 0:
return sa, lna
elif sa < 0 and sb < 0:
return -1, logadd(lna, lnb)
elif sa > 0 and sb < 0:
if lna > lnb:
return 1, logsub(lna, lnb)
elif lna == lnb:
return 0, -util.INF
else:
return -1, logsub(lnb, lna)
elif sa < 0 and sb > 0:
if lna > lnb:
return -1, logsub(lna, lnb)
elif lna == lnb:
return 0, -util.INF
else:
return 1, logsub(lnb, lna)
else:
raise Exception("unhandled case")
def smooth(vals, radius):
"""
return an averaging of vals using a radius
Note: not implemented as fast as possible
runtime: O(len(vals) * radius)
"""
vals2 = []
vlen = len(vals)
for i in xrange(vlen):
radius2 = min(i, vlen - i - 1, radius)
vals2.append(mean(vals[i-radius2:i+radius2+1]))
return vals2
def iter_window_index(x, xdist, esp=None):
"""
iterates a sliding window over x with width 'xdist'
returns an iterator over list of indices in x that represent windows
x must be sorted least to greatest
"""
vlen = len(x)
#if esp is None:
# esp = min(x[i+1] - x[i] for i in range(vlen-1)
# if x[i+1] - x[i] > 0) / 2.0
# simple case
if vlen == 0:
return
start = x[0]
low = start
high = start + xdist
lowi = 0 # inclusive
highi = 0 # inclusive
# move up high boundary
while highi+1 < vlen and x[highi+1] < high:
highi += 1
yield (lowi, highi, low, high)
while highi+1 < vlen:
        low_step = x[lowi] - low   # dist until expel
high_step = x[highi+1] - high # dist until include
# advance though duplicates
if low_step == 0:
lowi += 1
continue
if high_step == 0:
highi += 1
continue
# determine new low high boundary
if low_step <= high_step:
low = x[lowi] # + min(esp, (high_step - low_step) / 2.0)
high = low + xdist
lowi += 1
if high_step <= low_step:
highi += 1
if highi >= vlen:
break
high = x[highi] # + min(esp, (low_step - high_step) / 2.0)
low = high - xdist
assert abs((high - low) - xdist) < .001, (low, high)
yield (lowi, highi, low, high)
def iter_window_index_step(x, size, step, minsize=0):
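    """
    Iterates a window of width 'size' over sorted x, advancing by 'step'
    yields (lowi, highi, low, high) tuples
    """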
vlen = len(x)
start = x[0]
end = x[-1]
low = start
high = start + size
i = 1
lowi = 0
highi = 0
# move up high boundary
while highi+1 < vlen and x[highi+1] < high:
highi += 1
while highi < vlen and high < end:
if highi - lowi >= minsize:
yield lowi, highi, low, high
low = start + i * step
high = low + size
i += 1
# move up low boundary
while lowi < vlen and x[lowi] < low:
lowi += 1
# move up high boundary
while highi+1 < vlen and x[highi+1] < high:
highi += 1
def iter_window(x, xdist, func=lambda win: win, minsize=0, key=lambda x: x):
"""
iterates a sliding window over x with radius xradius
x must be sorted least to greatest
"""
for lowi, highi, low, high in iter_window_index(map(key, x), xdist):
if highi - lowi >= minsize:
yield (high + low)/2.0, func(x[lowi:highi])
def iter_window_step(x, width, step, func=lambda win: win, minsize=0):
"""
iterates a sliding window over x with width 'width'
x must be sorted least to greatest
return an iterator with (midx, func(x[lowi:highi]))
"""
for lowi, highi, low, high in iter_window_index_step(
x, width, step, minsize):
yield (high + low) / 2.0, func(x[lowi:highi])
def smooth2(x, y, xradius, minsize=0, sort=False):
"""
return an averaging of x and y using xradius
x must be sorted least to greatest
"""
vlen = len(x)
assert vlen == len(y)
# simple case
if vlen == 0:
return [], []
if sort:
x, y = util.sort_many(x, y)
x2 = []
y2 = []
start = min(x)
end = max(x)
xtot = x[0]
ytot = y[0]
low = 0
high = 0
for i in xrange(vlen):
xi = x[i]
xradius2 = min(xi - start, end - xi, xradius)
# move window
while x[low] < xi - xradius2:
xtot -= x[low]
ytot -= y[low]
low += 1
while x[high] < xi + xradius2:
high += 1
xtot += x[high]
ytot += y[high]
denom = float(high - low + 1)
if denom >= minsize:
x2.append(xtot / denom)
y2.append(ytot / denom)
return x2, y2
def factorial(x, k=1):
    """Computes factorial(x) / factorial(k) (plain x! when k is 1)"""
n = 1
for i in xrange(int(k)+1, int(x)+1):
n *= i
return n
def logfactorial(x, k=1):
    """Returns log(factorial(x) / factorial(k))"""
n = 0
for i in xrange(int(k)+1, int(x)+1):
n += log(i)
return n
def choose(n, k):
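    """Binomial coefficient: n choose k, rounded to an int"""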
if n == 0 and k == 0:
return 1
if n < 0 or k < 0 or k > n:
return 0
# optimization for speed
if k > n/2:
k = n - k
t = 1.0
n2 = n + 1.0
for i in xrange(1, k+1):
t *= (n2 - i) / i
return int(t + 0.5)
#return factorial(n, n - k) / factorial(k)
def fchoose(n, k):
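    """Binomial coefficient: n choose k, computed as a float"""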
if n == 0 and k == 0:
return 1
if n < 0 or k < 0 or k > n:
return 0
# optimization for speed
if k > n/2:
k = n - k
t = 1.0
n2 = n + 1.0
for i in xrange(1, k+1):
t *= (n2 - i) / i
return t
def logchoose(n, k):
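    """Log of the binomial coefficient: log(n choose k)"""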
if n == 0 and k == 0:
return 0.0
if n < 0 or k < 0 or k > n:
return -util.INF
# optimization for speed
if k > n/2:
k = n - k
t = 0.0
n2 = n + 1.0
for i in xrange(1, k+1):
t += log((n2 - i) / i)
return t
def multinomial(vals):
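    """Multinomial coefficient n! / (v1! * v2! * ...) for vals = [v1, v2, ...]"""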
n = sum(vals)
res = logfactorial(n)
for v in vals:
res -= logfactorial(v)
    return int(exp(res) + 0.5)
def logmultinomial(vals):
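    """Log of the multinomial coefficient for vals"""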
n = sum(vals)
res = logfactorial(n)
for v in vals:
res -= logfactorial(v)
return res
def sample(weights):
"""
Randomly choose an int between 0 and len(probs)-1 using
the weights stored in list probs.
item i will be chosen with probability weights[i]/sum(weights)
"""
total = sum(weights)
pick = random.random() * total
x = 0
for i in xrange(len(weights)):
x += weights[i]
if x >= pick:
return i
return len(weights) - 1
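# Illustrative example (not part of the original library): sample() picks
# index 0, 1, or 2 with probability proportional to the weights
# (here 0.1, 0.3, and 0.6 respectively).  The helper name below is ours.
def _example_sample():
    weights = [1, 3, 6]
    counts = [0, 0, 0]
    for _ in xrange(1000):
        counts[sample(weights)] += 1
    # counts should be roughly [100, 300, 600]
    return counts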
def rhyper(m, n, M, N, report=0):
'''
calculates cumulative probability based on
hypergeometric distribution
over/under/both (report = 0/1/2)
(uses R through RPy)
N = total balls in urn
M = total white balls in urn
n = drawn balls from urn
m = drawn white balls from urn
'''
from rpy import r
assert ((type(m) == type(n) == type(M) == type(N) == int)
and m <= n and m <= M and n <= N)
if report == 0:
#p-val for over-repr.
return r.phyper(m-1, M, N-M, n, lower_tail=False)
elif report == 1:
#p-val for under-repr.
return r.phyper(m, M, N-M, n)
elif report == 2:
#tuple (over, under)
return (r.phyper(m-1, M, N-M, n, lower_tail=False),
r.phyper(m, M, N-M, n))
else:
        raise Exception("unknown option")
def cdf(vals, reverse=False):
"""Computes the CDF of a list of values"""
vals = sorted(vals, reverse=reverse)
tot = float(len(vals))
x = []
y = []
for i, x2 in enumerate(vals):
x.append(x2)
y.append((i+1) / tot)
return x, y
def enrichItems(in_items, out_items, M=None, N=None, useq=True, extra=False):
    """Calculates enrichment for items within an in-set vs. an out-set.
Returns a sorted table.
"""
# DEPRECATED
# TODO: remove this function
# count items
counts = util.Dict(default=[0, 0])
for item in in_items:
counts[item][0] += 1
for item in out_items:
counts[item][1] += 1
if N is None:
N = len(in_items) + len(out_items)
if M is None:
M = len(in_items)
tab = tablelib.Table(headers=["item", "in_count", "out_count",
"pval", "pval_under"])
# do hypergeometric
for item, (a, b) in counts.iteritems():
tab.add(item=item,
in_count=a,
out_count=b,
pval=rhyper(a, a+b, M, N),
pval_under=rhyper(a, a+b, M, N, 1))
# add qvalues
if useq:
qval = qvalues(tab.cget("pval"))
qval_under = qvalues(tab.cget("pval_under"))
tab.add_col("qval", data=qval)
tab.add_col("qval_under", data=qval_under)
if extra:
tab.add_col("in_size", data=[M]*len(tab))
tab.add_col("out_size", data=[N-M]*len(tab))
tab.add_col("item_ratio", data=[
row["in_count"] / float(row["in_count"] + row["out_count"])
for row in tab])
tab.add_col("size_ratio", data=[
M / float(N) for row in tab])
tab.add_col("fold", data=[row["item_ratio"] / row["size_ratio"]
for row in tab])
tab.sort(col='pval')
return tab
def qvalues(pvals):
import rpy
ret = rpy.r.p_adjust(pvals, "fdr")
return ret
#=============================================================================
# Distributions
#
def uniformPdf(x, params):
a, b = params
if x < a or x > b:
return 0.0
else:
return 1.0 / (b - a)
def binomialPdf(k, params):
p, n = params
return choose(n, k) * (p ** k) * ((1.0-p) ** (n - k))
def gaussianPdf(x, params):
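    """Standard normal PDF; 'params' is unused"""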
return 1/sqrt(2*pi) * exp(- x**2 / 2.0)
def normalPdf(x, params):
mu, sigma = params
# sqrt(2*pi) = 2.5066282746310002
return exp(- (x - mu)**2 / (2.0 * sigma**2)) / (sigma * 2.5066282746310002)
def normalCdf(x, params):
mu, sigma = params
return (1 + erf((x - mu)/(sigma * sqrt(2)))) / 2.0
def logNormalPdf(x, params):
"""mu and sigma are the mean and standard deviation of the
variable's logarithm"""
mu, sigma = params
return (1/(x * sigma * sqrt(2*pi)) *
exp(- (log(x) - mu)**2 / (2.0 * sigma**2)))
def logNormalCdf(x, params):
"""mu and sigma are the mean and standard deviation of the
variable's logarithm"""
mu, sigma = params
return (1 + erf((log(x) - mu)/(sigma * sqrt(2)))) / 2.0
def poissonPdf(x, params):
lambd = params[0]
if x < 0 or lambd <= 0:
return 0.0
a = 0
for i in xrange(1, int(x)+1):
a += log(lambd / float(i))
return exp(-lambd + a)
def poissonCdf(x, params):
"""Cumulative distribution function of the Poisson distribution"""
# NOTE: not implemented accurately for large x or lambd
lambd = params[0]
if x < 0:
return 0
else:
return ((gamma(floor(x+1)) - gammainc(floor(x + 1), lambd)) /
factorial(floor(x)))
def poissonvariate(lambd):
"""Sample from a Poisson distribution"""
l = -lambd
k = 0
p = 0.0
while 1:
k += 1
p += log(random.random())
if p < l:
return k - 1
def exponentialPdf(x, params):
lambd = params[0]
if x < 0 or lambd < 0:
return 0.0
else:
return lambd * exp(-lambd * x)
def exponentialCdf(x, params):
lambd = params[0]
if x < 0 or lambd < 0:
return 0.0
else:
return 1.0 - exp(-lambd * x)
def exponentialvariate(lambd):
return -log(random.random()) / lambd
def gammaPdf(x, params):
alpha, beta = params
if x <= 0 or alpha <= 0 or beta <= 0:
return 0.0
else:
return ((exp(-x * beta) * (x ** (alpha - 1)) * (beta ** alpha)) /
gamma(alpha))
def loggammaPdf(x, params):
alpha, beta = params
if x <= 0.0 or alpha <= 0.0 or beta <= 0.0:
return -util.INF
else:
return -x*beta + (alpha - 1)*log(x) + alpha*log(beta) - gammaln(alpha)
def gammaPdf2(x, params):
alpha, beta = params
if x <= 0 or alpha <= 0 or beta <= 0:
return 0.0
else:
return exp(loggammaPdf(x, params))
def gammaCdf(x, params):
alpha, beta = params
if x <= 0:
return 0
else:
return gammainc(alpha, x * beta) / gamma(alpha)
def invgammaPdf(x, params):
a, b = params
if x <= 0 or a <= 0 or b <= 0:
return 0.0
else:
return (b**a) / gamma(a) * (1.0/x)**(a + 1) * exp(-b/x)
def loginvgammaPdf(x, params):
a, b = params
if x < 0 or a < 0 or b < 0:
return -util.INF
else:
return a*log(b) - gammaln(a) + (a+1)*log(1.0/x) - b/x
def betaPdf2(x, params):
"""A simpler implementation of beta distribution but will overflow
for values of alpha and beta near 100
"""
alpha, beta = params
if 0 < x < 1 and alpha > 0 and beta > 0:
return (gamma(alpha + beta) / (gamma(alpha)*gamma(beta)) *
x ** (alpha-1) * (1-x)**(beta-1))
else:
return 0.0
def betaPdf(x, params):
alpha, beta = params
if 0 < x < 1 and alpha > 0 and beta > 0:
return (exp(gammaln(alpha + beta) -
(gammaln(alpha) + gammaln(beta)) +
(alpha-1) * log(x) + (beta-1) * log(1-x)))
else:
return 0.0
def betaPdf3(x, params):
alpha, beta = map(int, params)
if 0 < x < 1 and alpha > 0 and beta > 0:
n = min(alpha-1, beta-1)
m = max(alpha-1, beta-1)
prod1 = 1
for i in range(1, n+1):
prod1 *= ((n+i)*x*(1-x))/i
prod2 = 1
if alpha > beta:
for i in range(n+1, m+1):
prod2 *= ((n+i)*x)/i
else:
for i in range(n+1, m+1):
prod2 *= ((n+i)*(1-x))/i
return prod1 * prod2 * (alpha + beta - 1)
else:
return 0.0
def negbinomPdf(k, r, p):
return exp(gammaln(r+k) - gammaln(k+1) - gammaln(r) +
r*log(p) + k * log(1-p))
def gamma(x):
"""
Lanczos approximation to the gamma function.
found on http://www.rskey.org/gamma.htm
"""
ret = (1.000000000190015 +
76.18009172947146 / (x + 1) +
-86.50532032941677 / (x + 2) +
24.01409824083091 / (x + 3) +
-1.231739572450155 / (x + 4) +
1.208650973866179e-3 / (x + 5) +
-5.395239384953e-6 / (x + 6))
return ret * sqrt(2*pi)/x * (x + 5.5)**(x+.5) * exp(-x-5.5)
def gammaln(xx):
"""
    From numerical algorithms in C
    float gammln(float xx)
    Returns the value ln[gamma(xx)] for xx > 0.
{
Internal arithmetic will be done in double precision, a nicety
that you can omit if five-figure accuracy is good enough.
double x,y,tmp,ser;
static double cof[6]={76.18009172947146,-86.50532032941677,
24.01409824083091,-1.231739572450155,
0.1208650973866179e-2,-0.5395239384953e-5};
int j;
y=x=xx;
tmp=x+5.5;
tmp -= (x+0.5)*log(tmp);
ser=1.000000000190015;
for (j=0;j<=5;j++) ser += cof[j]/++y;
return -tmp+log(2.5066282746310005*ser/x);
}
"""
cof = [76.18009172947146, -86.50532032941677,
24.01409824083091, -1.231739572450155,
0.1208650973866179e-2, -0.5395239384953e-5]
y = x = xx
tmp = x + 5.5
tmp -= (x + 0.5) * log(tmp)
ser = 1.000000000190015
for j in range(6):
y += 1
ser += cof[j] / y
return - tmp + log(2.5066282746310005 * ser / x)
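# Illustrative check (not part of the original library): for positive integers
# gamma(n) == (n-1)!, so gamma(6) should be close to 120 and gammaln(6) close
# to log(120) ~= 4.7875.  The helper name below is ours.
def _example_gammaln():
    return gamma(6), gammaln(6)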
def gammainc(a, x):
"""Lower incomplete gamma function"""
# found on http://www.rskey.org/gamma.htm
GAMMA_INCOMP_ACCURACY = 1000
ret = 0
term = 1.0/x
for n in xrange(GAMMA_INCOMP_ACCURACY):
term *= x/(a+n)
ret += term
if term < .0001:
break
return x**a * exp(-x) * ret
def erf(x):
# http://www.theorie.physik.uni-muenchen.de/~serge/erf-approx.pdf
a = 8/(3*pi) * (pi - 3)/(4 - pi)
axx = a * x * x
if x >= 0:
return sqrt(1 - exp(-x*x * (4.0/pi + axx)/(1 + axx)))
else:
return - sqrt(1 - exp(-x*x * (4.0/pi + axx)/(1 + axx)))
def chiSquare(rows, expected=None, nparams=0):
# ex: rows = [[1,2,3],[1,4,5]]
assert util.equal(map(len, rows))
if 0 in map(sum, rows):
return 0, 1.0
cols = zip(* rows)
if 0 in map(sum, cols):
return 0, 1.0
if not expected:
expected = make_expected(rows)
chisq = 0
for obss, exps in zip(rows, expected):
for obs, exp in zip(obss, exps):
chisq += ((obs-exp)**2)/exp
df = max(len(rows)-1, 1)*max(len(rows[0])-1, 1) - nparams
p = chi_square_lookup(chisq, df)
return chisq, p
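# Illustrative example (not part of the original library): a 2x2 contingency
# table with equal margins; the expected counts are all 15, giving
# chisq == 100/15 ~= 6.67 and a table-lookup p-value of about 0.01 at df=1.
def _example_chi_square():
    rows = [[10, 20],
            [20, 10]]
    return chiSquare(rows)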
def make_expected(rows):
rowtotals = map(sum, rows)
coltotals = map(sum, zip(* rows))
grandtotal = float(sum(rowtotals))
expected = []
for row, rowtotal in zip(rows, rowtotals):
expected_row = []
for obs, coltotal in zip(row, coltotals):
exp = rowtotal * coltotal / grandtotal
expected_row.append(exp)
expected.append(expected_row)
return expected
def chiSquareFit(xbins, ybins, func, nsamples, nparams, minsamples=5):
sizes = [xbins[i+1] - xbins[i] for i in xrange(len(xbins)-1)]
sizes.append(sizes[-1]) # NOTE: assumes bins are of equal size
# only focus on bins that are large enough
counts = [ybins[i] * sizes[i] * nsamples for i in xrange(len(xbins)-1)]
expected = []
for i in xrange(len(xbins)-1):
expected.append((func(xbins[i]) + func(xbins[i+1]))/2.0 *
sizes[i] * nsamples)
# ensure we have enough expected samples in each bin
ind = util.find(util.gefunc(minsamples), expected)
counts = util.mget(counts, ind)
expected = util.mget(expected, ind)
if len(counts) == 0:
return [0, 1], counts, expected
else:
return chiSquare([counts], [expected], nparams), counts, expected
chi_square_table = {
1: [1.64, 2.71, 3.84, 5.02, 6.64, 10.83],
2: [3.22, 4.61, 5.99, 7.38, 9.21, 13.82],
3: [4.64, 6.25, 7.82, 9.35, 11.34, 16.27],
4: [5.99, 7.78, 9.49, 11.14, 13.28, 18.47],
5: [7.29, 9.24, 11.07, 12.83, 15.09, 20.52],
6: [8.56, 10.64, 12.59, 14.45, 16.81, 22.46],
7: [9.80, 12.02, 14.07, 16.01, 18.48, 24.32],
8: [11.03, 13.36, 15.51, 17.53, 20.09, 26.12],
9: [12.24, 14.68, 16.92, 19.02, 21.67, 27.88],
10: [13.44, 15.99, 18.31, 20.48, 23.21, 29.59],
11: [14.63, 17.28, 19.68, 21.92, 24.72, 31.26],
12: [15.81, 18.55, 21.03, 23.34, 26.22, 32.91],
13: [16.98, 19.81, 22.36, 24.74, 27.69, 34.53],
14: [18.15, 21.06, 23.68, 26.12, 29.14, 36.12],
15: [19.31, 22.31, 25.00, 27.49, 30.58, 37.70],
16: [20.47, 23.54, 26.30, 28.85, 32.00, 39.25],
17: [21.61, 24.77, 27.59, 30.19, 33.41, 40.79],
18: [22.76, 25.99, 28.87, 31.53, 34.81, 42.31],
19: [23.90, 27.20, 30.14, 32.85, 36.19, 43.82],
20: [25.04, 28.41, 31.41, 34.17, 37.57, 45.31],
21: [26.17, 29.62, 32.67, 35.48, 38.93, 46.80],
22: [27.30, 30.81, 33.92, 36.78, 40.29, 48.27],
23: [28.43, 32.01, 35.17, 38.08, 41.64, 49.73],
24: [29.55, 33.20, 36.42, 39.36, 42.98, 51.18],
25: [30.68, 34.38, 37.65, 40.65, 44.31, 52.62],
26: [31.79, 35.56, 38.89, 41.92, 45.64, 54.05],
27: [32.91, 36.74, 40.11, 43.19, 46.96, 55.48],
28: [34.03, 37.92, 41.34, 44.46, 48.28, 56.89],
29: [35.14, 39.09, 42.56, 45.72, 49.59, 58.30],
30: [36.25, 40.26, 43.77, 46.98, 50.89, 59.70]
}
def chi_square_lookup(value, df):
ps = [0.20, 0.10, 0.05, 0.025, 0.01, 0.001]
if df <= 0:
return 1.0
row = chi_square_table[min(df, 30)]
for i in range(0, len(row)):
if row[i] >= value:
i = i-1
break
if i == -1:
return 1
else:
return ps[i]
def spearman(vec1, vec2):
"""Spearman's rank test"""
assert len(vec1) == len(vec2), "vec1 and vec2 are not the same length"
n = len(vec1)
rank1 = util.sortranks(vec1)
rank2 = util.sortranks(vec2)
R = sum((rank1[i] - rank2[i])**2 for i in xrange(n))
Z = (6*R - n*(n*n - 1)) / (n*(n + 1) * sqrt(n - 1))
return Z
def fitCurve(xdata, ydata, func, paramsInit):
"""
Fit a function to data points.
Args:
xdata, ydata - data to fit
func - a function of the form f(x, params)
"""
import scipy
import scipy.optimize
y = scipy.array(ydata)
p0 = scipy.array(paramsInit)
def error(params):
y2 = scipy.array(map(lambda x: func(x, params), xdata))
return y - y2
params, msg = scipy.optimize.leastsq(error, p0)
resid = error(params)
return list(params), sum(resid*resid)
def fitDistrib(func, paramsInit, data, start, end, step, perc=1.0):
xdata, ydata = util.distrib(data, low=start, width=step)
ydata = [i / perc for i in ydata]
xdata = util.histbins(xdata)
params, resid = fitCurve(xdata, ydata, func, paramsInit)
return params, resid
def plotfuncFit(func, paramsInit, xdata, ydata, start, end, step, plot=None,
**options):
from rasmus import gnuplot
if not plot:
plot = gnuplot.Gnuplot()
options.setdefault('style', 'boxes')
params, resid = fitCurve(xdata, ydata, func, paramsInit)
plot.plot(util.histbins(xdata), ydata, **options)
plot.plotfunc(lambda x: func(x, params), start, end, step)
return plot, params, resid
def plotdistribFit(func, paramsInit, data, start, end, step, plot=None,
**options):
xdata, ydata = util.distrib(data, low=start, width=step)
return plotfuncFit(
func, paramsInit, xdata, ydata, start, end, step/10, plot, **options)
def chi_square_fit(cdf, params, data, ndivs=20, minsamples=5, plot=False,
start=-util.INF, end=util.INF):
from rasmus import gnuplot
import scipy
import scipy.stats
# determine ndiv and binsize
binsize = len(data) / ndivs
if binsize < minsamples:
ndivs = len(data) / minsamples
binsize = len(data) / ndivs
data = sorted(data)
bins = [data[i:i+binsize] for i in xrange(0, len(data), binsize)]
obs = scipy.array(map(len, bins))
ind = util.find(lambda x: x[-1] >= start and x[0] <= end, bins)
obs = util.mget(obs, ind)
x = [bin[0] for bin in bins]
expected = [len(data) * cdf(x[1], params)]
expected.extend([len(data) *
(cdf(x[i+1], params) - cdf(x[i], params))
for i in range(1, len(x)-1)])
expected.append(len(data) * (1.0 - cdf(x[-1], params)))
expected = scipy.array(util.mget(expected, ind))
chi2, pval = scipy.stats.chisquare(obs, expected)
if plot:
p = gnuplot.plot(util.mget(x, ind), obs)
p.plot(util.mget(x, ind), expected)
return chi2, pval
def fit_distrib(cdf, params_init, data, ndivs=20, minsamples=5,
start=-util.INF, end=util.INF):
import scipy
import scipy.optimize
import scipy.stats
# determine ndiv and binsize
binsize = len(data) / ndivs
if binsize < minsamples:
ndivs = len(data) / minsamples
binsize = len(data) / ndivs
data = sorted(data)
bins = [data[i:i+binsize] for i in xrange(0, len(data), binsize)]
obs = scipy.array(map(len, bins))
ind = util.find(lambda x: x[-1] >= start and x[0] <= end, bins)
obs = util.mget(obs, ind)
def optfunc(params):
x = [bin[0] for bin in bins]
expected = [len(data) * cdf(x[1], params)]
expected.extend([len(data) *
(cdf(x[i+1], params) - cdf(x[i], params))
for i in range(1, len(x)-1)])
expected.append(len(data) * (1.0 - cdf(x[-1], params)))
expected = scipy.array(util.mget(expected, ind))
chi2, pval = scipy.stats.chisquare(obs, expected)
return chi2
params = scipy.optimize.fmin(optfunc, params_init, disp=False)
chi2, pval = chi_square_fit(cdf, params, data, ndivs, minsamples)
return list(params), pval
def solveCubic(a, b, c, real=True):
"""solves x^3 + ax^2 + bx + c = 0 for x"""
p = b - a*a / 3.0
q = c + (2*a*a*a - 9*a*b) / 27.0
# special case: avoids division by zero later on
if p == q == 0:
return [- a / 3.0]
#
# u = (q/2 +- sqrt(q^2/4 + p^3/27))^(1/3)
#
# complex math is used to find complex roots
sqrteqn = cmath.sqrt(q*q/4.0 + p*p*p/27.0)
    # find first cube root
u1 = (q/2.0 + sqrteqn)**(1/3.0)
# special case: avoids division by zero later on
if u1 == 0:
u1 = (q/2.0 - sqrteqn)**(1/3.0)
# find other two cube roots
u2 = u1 * complex(-.5, -sqrt(3)/2)
u3 = u1 * complex(-.5, sqrt(3)/2)
# finds roots of cubic polynomial
root1 = p / (3*u1) - u1 - a / 3.0
root2 = p / (3*u2) - u2 - a / 3.0
root3 = p / (3*u3) - u3 - a / 3.0
if real:
return [x.real
for x in [root1, root2, root3]
if abs(x.imag) < 1e-10]
else:
return [root1, root2, root3]
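# Illustrative example (not part of the original library): the cubic
# (x - 1)(x - 2)(x - 3) = x^3 - 6x^2 + 11x - 6 has roots 1, 2 and 3.
def _example_solve_cubic():
    # returns approximately [1.0, 2.0, 3.0] (order may differ)
    return solveCubic(-6.0, 11.0, -6.0)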
def bisect_root(f, x0, x1, err=1e-7):
"""Find a root of a function func(x) using the bisection method"""
f0 = f(x0)
#f1 = f(x1)
while (x1 - x0) / 2.0 > err:
x2 = (x0 + x1) / 2.0
f2 = f(x2)
if f0 * f2 > 0:
x0 = x2
f0 = f2
else:
x1 = x2
#f1 = f2
return (x0 + x1) / 2.0
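# Illustrative example (not part of the original library): bisection needs a
# bracket [x0, x1] over which f changes sign; here it converges to sqrt(2).
def _example_bisect_root():
    f = lambda x: x * x - 2.0
    # ~= 1.4142135 (within the default tolerance of 1e-7)
    return bisect_root(f, 0.0, 2.0)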
|
|
import os
import yaml
import json
import re
import requests
import logging
import socket
from datetime import datetime
import teuthology
from .config import config
from .job_status import get_status, set_status
report_exceptions = (requests.exceptions.RequestException, socket.error)
def init_logging():
"""
Set up logging for the module
:returns: a logger
"""
# Don't need to see connection pool INFO messages
logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
logging.WARNING)
log = logging.getLogger(__name__)
return log
def main(args):
run = args['--run']
job = args['--job']
dead = args['--dead']
refresh = dead or args['--refresh']
server = args['--server']
if server:
config.results_server = server
if args['--verbose']:
teuthology.log.setLevel(logging.DEBUG)
archive_base = os.path.abspath(os.path.expanduser(args['--archive'])) or \
config.archive_base
save = not args['--no-save']
log = init_logging()
reporter = ResultsReporter(archive_base, save=save, refresh=refresh,
log=log)
if dead and not job:
for run_name in run:
            try_mark_run_dead(run_name)
elif dead and len(run) == 1 and job:
reporter.report_jobs(run[0], job, dead=True)
elif len(run) == 1 and job:
reporter.report_jobs(run[0], job)
elif run and len(run) > 1:
reporter.report_runs(run)
elif run:
reporter.report_run(run[0])
elif args['--all-runs']:
reporter.report_all_runs()
class ResultsSerializer(object):
"""
This class exists to poke around in the archive directory doing things like
assembling lists of test runs, lists of their jobs, and merging sets of job
YAML files together to form JSON objects.
"""
yamls = ('orig.config.yaml', 'config.yaml', 'info.yaml', 'summary.yaml')
def __init__(self, archive_base, log=None):
self.archive_base = archive_base or config.archive_base
self.log = log or init_logging()
def job_info(self, run_name, job_id, pretty=False, simple=False):
"""
Given a run name and job id, merge the job's YAML files together.
:param run_name: The name of the run.
:param job_id: The job's id.
:param simple(bool): Read less data for speed (only orig.config.yaml/info.yaml)
:returns: A dict.
"""
job_archive_dir = os.path.join(self.archive_base,
run_name,
job_id)
job_info = {}
if simple:
self.yamls = ('orig.config.yaml', 'info.yaml')
for yaml_name in self.yamls:
yaml_path = os.path.join(job_archive_dir, yaml_name)
if not os.path.exists(yaml_path):
continue
with file(yaml_path) as yaml_file:
partial_info = yaml.safe_load(yaml_file)
if partial_info is not None:
job_info.update(partial_info)
if 'job_id' not in job_info:
job_info['job_id'] = job_id
if simple:
return job_info
log_path = os.path.join(job_archive_dir, 'teuthology.log')
if os.path.exists(log_path):
mtime = int(os.path.getmtime(log_path))
mtime_dt = datetime.fromtimestamp(mtime)
job_info['updated'] = str(mtime_dt)
return job_info
def json_for_job(self, run_name, job_id, pretty=False):
"""
Given a run name and job id, merge the job's YAML files together to
create a JSON object.
:param run_name: The name of the run.
:param job_id: The job's id.
:returns: A JSON object.
"""
job_info = self.job_info(run_name, job_id, pretty)
if pretty:
job_json = json.dumps(job_info, sort_keys=True, indent=4)
else:
job_json = json.dumps(job_info)
return job_json
def jobs_for_run(self, run_name):
"""
Given a run name, look on the filesystem for directories containing job
information, and return a dict mapping job IDs to job directories.
:param run_name: The name of the run.
:returns: A dict like: {'1': '/path/to/1', '2': 'path/to/2'}
"""
archive_dir = os.path.join(self.archive_base, run_name)
if not os.path.isdir(archive_dir):
return {}
jobs = {}
for item in os.listdir(archive_dir):
if not re.match('\d+$', item):
continue
job_id = item
job_dir = os.path.join(archive_dir, job_id)
if os.path.isdir(job_dir):
jobs[job_id] = job_dir
return jobs
def running_jobs_for_run(self, run_name):
"""
Like jobs_for_run(), but only returns jobs with no summary.yaml
:param run_name: The name of the run.
:returns: A dict like: {'1': '/path/to/1', '2': 'path/to/2'}
"""
jobs = self.jobs_for_run(run_name)
for job_id in jobs.keys():
if os.path.exists(os.path.join(jobs[job_id], 'summary.yaml')):
jobs.pop(job_id)
return jobs
@property
def all_runs(self):
"""
Look in the base archive directory for all test runs. Return a list of
their names.
"""
archive_base = self.archive_base
if not os.path.isdir(archive_base):
return []
runs = []
for run_name in os.listdir(archive_base):
if not os.path.isdir(os.path.join(archive_base, run_name)):
continue
runs.append(run_name)
return runs
class ResultsReporter(object):
last_run_file = 'last_successful_run'
def __init__(self, archive_base=None, base_uri=None, save=False,
refresh=False, log=None):
self.log = log or init_logging()
self.archive_base = archive_base or config.archive_base
self.base_uri = base_uri or config.results_server
if self.base_uri:
self.base_uri = self.base_uri.rstrip('/')
self.serializer = ResultsSerializer(archive_base, log=self.log)
self.save_last_run = save
self.refresh = refresh
self.session = self._make_session()
if not self.base_uri:
msg = "No results_server set in {yaml}; cannot report results"
self.log.warn(msg.format(yaml=config.yaml_path))
def _make_session(self, max_retries=10):
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=max_retries)
session.mount('http://', adapter)
return session
def report_all_runs(self):
"""
Report *all* runs in self.archive_dir to the results server.
"""
all_runs = self.serializer.all_runs
last_run = self.last_run
if self.save_last_run and last_run and last_run in all_runs:
next_index = all_runs.index(last_run) + 1
runs = all_runs[next_index:]
else:
runs = all_runs
return self.report_runs(runs)
def report_runs(self, run_names):
"""
Report several runs to the results server.
:param run_names: The names of the runs.
"""
num_runs = len(run_names)
num_jobs = 0
self.log.info("Posting %s runs", num_runs)
for run in run_names:
job_count = self.report_run(run)
num_jobs += job_count
if self.save_last_run:
self.last_run = run
del self.last_run
self.log.info("Total: %s jobs in %s runs", num_jobs, len(run_names))
def report_run(self, run_name, dead=False):
"""
Report a single run to the results server.
:param run_name: The name of the run.
:returns: The number of jobs reported.
"""
jobs = self.serializer.jobs_for_run(run_name)
self.log.info("{name} {jobs} jobs dead={dead}".format(
name=run_name,
jobs=len(jobs),
dead=str(dead),
))
if jobs:
if not self.refresh:
response = self.session.head("{base}/runs/{name}/".format(
base=self.base_uri, name=run_name))
if response.status_code == 200:
self.log.info(" already present; skipped")
return 0
self.report_jobs(run_name, jobs.keys(), dead=dead)
elif not jobs:
self.log.debug(" no jobs; skipped")
return len(jobs)
def report_jobs(self, run_name, job_ids, dead=False):
"""
Report several jobs to the results server.
:param run_name: The name of the run.
:param job_ids: The jobs' ids
"""
for job_id in job_ids:
self.report_job(run_name, job_id, dead=dead)
def report_job(self, run_name, job_id, job_info=None, dead=False):
"""
Report a single job to the results server.
:param run_name: The name of the run. The run must already exist.
:param job_id: The job's id
:param job_info: The job's info dict. Optional - if not present, we
look at the archive.
"""
if job_info is not None and not isinstance(job_info, dict):
raise TypeError("job_info must be a dict")
run_uri = "{base}/runs/{name}/jobs/".format(
base=self.base_uri, name=run_name,)
if job_info is None:
job_info = self.serializer.job_info(run_name, job_id)
if dead and get_status(job_info) is None:
set_status(job_info, 'dead')
job_json = json.dumps(job_info)
headers = {'content-type': 'application/json'}
response = self.session.post(run_uri, data=job_json, headers=headers)
if response.status_code == 200:
return job_id
# This call is wrapped in a try/except because of:
# http://tracker.ceph.com/issues/8166
try:
resp_json = response.json()
except ValueError:
resp_json = dict()
if resp_json:
msg = resp_json.get('message', '')
else:
msg = response.text
if msg and msg.endswith('already exists'):
job_uri = os.path.join(run_uri, job_id, '')
response = self.session.put(job_uri, data=job_json,
headers=headers)
elif msg:
self.log.error(
"POST to {uri} failed with status {status}: {msg}".format(
uri=run_uri,
status=response.status_code,
msg=msg,
))
response.raise_for_status()
return job_id
@property
def last_run(self):
"""
The last run to be successfully reported.
"""
if hasattr(self, '__last_run'):
return self.__last_run
elif os.path.exists(self.last_run_file):
with file(self.last_run_file) as f:
self.__last_run = f.read().strip()
return self.__last_run
@last_run.setter
def last_run(self, run_name):
self.__last_run = run_name
with file(self.last_run_file, 'w') as f:
f.write(run_name)
@last_run.deleter
def last_run(self):
self.__last_run = None
if os.path.exists(self.last_run_file):
os.remove(self.last_run_file)
def get_jobs(self, run_name, fields=None):
"""
Query the results server for jobs in a run
:param run_name: The name of the run
:param fields: Optional. A list of fields to include in the result.
Defaults to returning all fields.
"""
uri = "{base}/runs/{name}/jobs/".format(base=self.base_uri,
name=run_name)
if fields:
if not 'job_id' in fields:
fields.append('job_id')
uri += "?fields=" + ','.join(fields)
response = self.session.get(uri)
response.raise_for_status()
return response.json()
def delete_job(self, run_name, job_id):
"""
Delete a job from the results server.
:param run_name: The name of the run
:param job_id: The job's id
"""
uri = "{base}/runs/{name}/jobs/{job_id}/".format(
base=self.base_uri, name=run_name, job_id=job_id)
response = self.session.delete(uri)
response.raise_for_status()
def delete_jobs(self, run_name, job_ids):
"""
Delete multiple jobs from the results server.
:param run_name: The name of the run
:param job_ids: A list of job ids
"""
for job_id in job_ids:
            self.delete_job(run_name, job_id)
def delete_run(self, run_name):
"""
Delete a run from the results server.
:param run_name: The name of the run
"""
uri = "{base}/runs/{name}/".format(
base=self.base_uri, name=run_name)
response = self.session.delete(uri)
response.raise_for_status()
def push_job_info(run_name, job_id, job_info, base_uri=None):
"""
Push a job's info (example: ctx.config) to the results server.
:param run_name: The name of the run.
:param job_id: The job's id
:param job_info: A dict containing the job's information.
:param base_uri: The endpoint of the results server. If you leave it out
ResultsReporter will ask teuthology.config.
"""
reporter = ResultsReporter()
if not reporter.base_uri:
return
reporter.report_job(run_name, job_id, job_info)
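# Illustrative sketch (not part of the original module): reporting a job's
# info dict for a hypothetical run and job id.  Assumes config.results_server
# is set; when it is not, push_job_info() returns without doing anything.
def _example_push_job_info():
    job_info = {'name': 'example-run', 'job_id': '1', 'status': 'pass'}
    push_job_info('example-run', '1', job_info)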
def try_push_job_info(job_config, extra_info=None):
"""
Wrap push_job_info, gracefully doing nothing if:
Anything inheriting from requests.exceptions.RequestException is raised
A socket.error is raised
config.results_server is not set
config['job_id'] is not present or is None
:param job_config: The ctx.config object to push
:param extra_info: Optional second dict to push
"""
log = init_logging()
if not config.results_server:
        log.warning('No results_server in config; not reporting results')
return
if job_config.get('job_id') is None:
log.warning('No job_id found; not reporting results')
return
run_name = job_config['name']
job_id = job_config['job_id']
if extra_info is not None:
job_info = extra_info.copy()
job_info.update(job_config)
else:
job_info = job_config
try:
log.debug("Pushing job info to %s", config.results_server)
push_job_info(run_name, job_id, job_info)
return
except report_exceptions:
log.exception("Could not report results to %s",
config.results_server)
def try_delete_jobs(run_name, job_ids, delete_empty_run=True):
"""
Using the same error checking and retry mechanism as try_push_job_info(),
delete one or more jobs
:param run_name: The name of the run.
:param job_ids: Either a single job_id, or a list of job_ids
:param delete_empty_run: If this would empty the run, delete it.
"""
log = init_logging()
if isinstance(job_ids, int):
job_ids = [str(job_ids)]
elif isinstance(job_ids, basestring):
job_ids = [job_ids]
reporter = ResultsReporter()
if not reporter.base_uri:
return
log.debug("Deleting jobs from {server}: {jobs}".format(
server=config.results_server, jobs=str(job_ids)))
if delete_empty_run:
got_jobs = reporter.get_jobs(run_name, fields=['job_id'])
got_job_ids = [j['job_id'] for j in got_jobs]
if sorted(got_job_ids) == sorted(job_ids):
try:
reporter.delete_run(run_name)
return
except report_exceptions:
log.exception("Run deletion failed")
def try_delete_job(job_id):
try:
reporter.delete_job(run_name, job_id)
return
except report_exceptions:
log.exception("Job deletion failed")
for job_id in job_ids:
try_delete_job(job_id)
def try_mark_run_dead(run_name):
"""
Using the same error checking and retry mechanism as try_push_job_info(),
mark any unfinished runs as dead.
:param run_name: The name of the run.
"""
log = init_logging()
reporter = ResultsReporter()
if not reporter.base_uri:
return
log.debug("Marking run as dead: {name}".format(name=run_name))
jobs = reporter.get_jobs(run_name, fields=['status'])
for job in jobs:
if job['status'] not in ['pass', 'fail', 'dead']:
job_id = job['job_id']
try:
log.info("Marking job {job_id} as dead".format(job_id=job_id))
reporter.report_job(run_name, job['job_id'], dead=True)
except report_exceptions:
log.exception("Could not mark job as dead: {job_id}".format(
job_id=job_id))
|
|
from __future__ import unicode_literals
import warnings
from mopidy.core import PlaybackState
from mopidy.mpd import exceptions, protocol
@protocol.commands.add('consume', state=protocol.BOOL)
def consume(context, state):
"""
*musicpd.org, playback section:*
``consume {STATE}``
Sets consume state to ``STATE``, ``STATE`` should be 0 or
1. When consume is activated, each song played is removed from
playlist.
"""
context.core.tracklist.consume = state
@protocol.commands.add('crossfade', seconds=protocol.UINT)
def crossfade(context, seconds):
"""
*musicpd.org, playback section:*
``crossfade {SECONDS}``
Sets crossfading between songs.
"""
raise exceptions.MpdNotImplemented # TODO
# TODO: add at least reflection tests before adding NotImplemented version
# @protocol.commands.add('mixrampdb')
def mixrampdb(context, decibels):
"""
*musicpd.org, playback section:*
``mixrampdb {deciBels}``
Sets the threshold at which songs will be overlapped. Like crossfading but
doesn't fade the track volume, just overlaps. The songs need to have
MixRamp tags added by an external tool. 0dB is the normalized maximum
volume so use negative values, I prefer -17dB. In the absence of mixramp
tags crossfading will be used. See http://sourceforge.net/projects/mixramp
"""
pass
# TODO: add at least reflection tests before adding NotImplemented version
# @protocol.commands.add('mixrampdelay', seconds=protocol.UINT)
def mixrampdelay(context, seconds):
"""
*musicpd.org, playback section:*
``mixrampdelay {SECONDS}``
Additional time subtracted from the overlap calculated by mixrampdb. A
value of "nan" disables MixRamp overlapping and falls back to
crossfading.
"""
pass
@protocol.commands.add('next')
def next_(context):
"""
*musicpd.org, playback section:*
``next``
Plays next song in the playlist.
*MPD's behaviour when affected by repeat/random/single/consume:*
Given a playlist of three tracks numbered 1, 2, 3, and a currently
        playing track ``c``. ``next_track`` is defined as the track that
will be played upon calls to ``next``.
Tests performed on MPD 0.15.4-1ubuntu3.
        ======  ======  ======  =======  =====  =====  =====  =====
                    Inputs                   next_track
        -------------------------------  -------------------  -----
        repeat  random  single  consume  c = 1  c = 2  c = 3  Notes
        ======  ======  ======  =======  =====  =====  =====  =====
        T       T       T       T        2      3      EOPL
        T       T       T       .        Rand   Rand   Rand   [1]
        T       T       .       T        Rand   Rand   Rand   [4]
        T       T       .       .        Rand   Rand   Rand   [4]
        T       .       T       T        2      3      EOPL
        T       .       T       .        2      3      1
        T       .       .       T        3      3      EOPL
        T       .       .       .        2      3      1
        .       T       T       T        Rand   Rand   Rand   [3]
        .       T       T       .        Rand   Rand   Rand   [3]
        .       T       .       T        Rand   Rand   Rand   [2]
        .       T       .       .        Rand   Rand   Rand   [2]
        .       .       T       T        2      3      EOPL
        .       .       T       .        2      3      EOPL
        .       .       .       T        2      3      EOPL
        .       .       .       .        2      3      EOPL
        ======  ======  ======  =======  =====  =====  =====  =====
- When end of playlist (EOPL) is reached, the current track is
unset.
- [1] When *random* and *single* is combined, ``next`` selects
a track randomly at each invocation, and not just the next track
in an internal prerandomized playlist.
- [2] When *random* is active, ``next`` will skip through
all tracks in the playlist in random order, and finally EOPL is
reached.
- [3] *single* has no effect in combination with *random*
alone, or *random* and *consume*.
- [4] When *random* and *repeat* is active, EOPL is never
reached, but the playlist is played again, in the same random
order as the first time.
"""
return context.core.playback.next().get()
@protocol.commands.add('pause', state=protocol.BOOL)
def pause(context, state=None):
"""
*musicpd.org, playback section:*
``pause {PAUSE}``
Toggles pause/resumes playing, ``PAUSE`` is 0 or 1.
*MPDroid:*
    - Calls ``pause`` without any arguments to toggle pause.
"""
if state is None:
warnings.warn(
'The use of pause command w/o the PAUSE argument is deprecated.',
DeprecationWarning)
if (context.core.playback.state.get() == PlaybackState.PLAYING):
context.core.playback.pause()
elif (context.core.playback.state.get() == PlaybackState.PAUSED):
context.core.playback.resume()
elif state:
context.core.playback.pause()
else:
context.core.playback.resume()
@protocol.commands.add('play', tlid=protocol.INT)
def play(context, tlid=None):
"""
*musicpd.org, playback section:*
``play [SONGPOS]``
Begins playing the playlist at song number ``SONGPOS``.
The original MPD server resumes from the paused state on ``play``
without arguments.
*Clarifications:*
- ``play "-1"`` when playing is ignored.
- ``play "-1"`` when paused resumes playback.
- ``play "-1"`` when stopped with a current track starts playback at the
current track.
- ``play "-1"`` when stopped without a current track, e.g. after playlist
replacement, starts playback at the first track.
*BitMPC:*
- issues ``play 6`` without quotes around the argument.
"""
if tlid is None:
return context.core.playback.play().get()
elif tlid == -1:
return _play_minus_one(context)
try:
tl_track = context.core.tracklist.slice(tlid, tlid + 1).get()[0]
return context.core.playback.play(tl_track).get()
except IndexError:
raise exceptions.MpdArgError('Bad song index')
def _play_minus_one(context):
if (context.core.playback.state.get() == PlaybackState.PLAYING):
return # Nothing to do
elif (context.core.playback.state.get() == PlaybackState.PAUSED):
return context.core.playback.resume().get()
elif context.core.playback.current_tl_track.get() is not None:
tl_track = context.core.playback.current_tl_track.get()
return context.core.playback.play(tl_track).get()
elif context.core.tracklist.slice(0, 1).get():
tl_track = context.core.tracklist.slice(0, 1).get()[0]
return context.core.playback.play(tl_track).get()
else:
return # Fail silently
@protocol.commands.add('playid', tlid=protocol.INT)
def playid(context, tlid):
"""
*musicpd.org, playback section:*
``playid [SONGID]``
Begins playing the playlist at song ``SONGID``.
*Clarifications:*
- ``playid "-1"`` when playing is ignored.
- ``playid "-1"`` when paused resumes playback.
- ``playid "-1"`` when stopped with a current track starts playback at the
current track.
- ``playid "-1"`` when stopped without a current track, e.g. after playlist
replacement, starts playback at the first track.
"""
if tlid == -1:
return _play_minus_one(context)
tl_tracks = context.core.tracklist.filter(tlid=[tlid]).get()
if not tl_tracks:
raise exceptions.MpdNoExistError('No such song')
return context.core.playback.play(tl_tracks[0]).get()
@protocol.commands.add('previous')
def previous(context):
"""
*musicpd.org, playback section:*
``previous``
Plays previous song in the playlist.
*MPD's behaviour when affected by repeat/random/single/consume:*
Given a playlist of three tracks numbered 1, 2, 3, and a currently
        playing track ``c``. ``previous_track`` is defined as the track
that will be played upon ``previous`` calls.
Tests performed on MPD 0.15.4-1ubuntu3.
        ======  ======  ======  =======  =====  =====  =====
                    Inputs                 previous_track
        -------------------------------  -------------------
        repeat  random  single  consume  c = 1  c = 2  c = 3
        ======  ======  ======  =======  =====  =====  =====
        T       T       T       T        Rand?  Rand?  Rand?
        T       T       T       .        3      1      2
        T       T       .       T        Rand?  Rand?  Rand?
        T       T       .       .        3      1      2
        T       .       T       T        3      1      2
        T       .       T       .        3      1      2
        T       .       .       T        3      1      2
        T       .       .       .        3      1      2
        .       T       T       T        c      c      c
        .       T       T       .        c      c      c
        .       T       .       T        c      c      c
        .       T       .       .        c      c      c
        .       .       T       T        1      1      2
        .       .       T       .        1      1      2
        .       .       .       T        1      1      2
        .       .       .       .        1      1      2
        ======  ======  ======  =======  =====  =====  =====
- If :attr:`time_position` of the current track is 15s or more,
``previous`` should do a seek to time position 0.
"""
return context.core.playback.previous().get()
@protocol.commands.add('random', state=protocol.BOOL)
def random(context, state):
"""
*musicpd.org, playback section:*
``random {STATE}``
Sets random state to ``STATE``, ``STATE`` should be 0 or 1.
"""
context.core.tracklist.random = state
@protocol.commands.add('repeat', state=protocol.BOOL)
def repeat(context, state):
"""
*musicpd.org, playback section:*
``repeat {STATE}``
Sets repeat state to ``STATE``, ``STATE`` should be 0 or 1.
"""
context.core.tracklist.repeat = state
@protocol.commands.add('replay_gain_mode')
def replay_gain_mode(context, mode):
"""
*musicpd.org, playback section:*
``replay_gain_mode {MODE}``
Sets the replay gain mode. One of ``off``, ``track``, ``album``.
Changing the mode during playback may take several seconds, because
        the new settings do not affect the buffered data.
This command triggers the options idle event.
"""
raise exceptions.MpdNotImplemented # TODO
@protocol.commands.add('replay_gain_status')
def replay_gain_status(context):
"""
*musicpd.org, playback section:*
``replay_gain_status``
Prints replay gain options. Currently, only the variable
``replay_gain_mode`` is returned.
"""
return 'off' # TODO
@protocol.commands.add('seek', tlid=protocol.UINT, seconds=protocol.UINT)
def seek(context, tlid, seconds):
"""
*musicpd.org, playback section:*
``seek {SONGPOS} {TIME}``
Seeks to the position ``TIME`` (in seconds) of entry ``SONGPOS`` in
the playlist.
*Droid MPD:*
- issues ``seek 1 120`` without quotes around the arguments.
"""
tl_track = context.core.playback.current_tl_track.get()
if context.core.tracklist.index(tl_track).get() != tlid:
play(context, tlid)
context.core.playback.seek(seconds * 1000).get()
@protocol.commands.add('seekid', tlid=protocol.UINT, seconds=protocol.UINT)
def seekid(context, tlid, seconds):
"""
*musicpd.org, playback section:*
``seekid {SONGID} {TIME}``
Seeks to the position ``TIME`` (in seconds) of song ``SONGID``.
"""
tl_track = context.core.playback.current_tl_track.get()
if not tl_track or tl_track.tlid != tlid:
playid(context, tlid)
context.core.playback.seek(seconds * 1000).get()
@protocol.commands.add('seekcur')
def seekcur(context, time):
"""
*musicpd.org, playback section:*
``seekcur {TIME}``
Seeks to the position ``TIME`` within the current song. If prefixed by
'+' or '-', then the time is relative to the current playing position.
"""
if time.startswith(('+', '-')):
position = context.core.playback.time_position.get()
position += protocol.INT(time) * 1000
context.core.playback.seek(position).get()
else:
position = protocol.UINT(time) * 1000
context.core.playback.seek(position).get()
@protocol.commands.add('setvol', volume=protocol.INT)
def setvol(context, volume):
"""
*musicpd.org, playback section:*
``setvol {VOL}``
Sets volume to ``VOL``, the range of volume is 0-100.
*Droid MPD:*
- issues ``setvol 50`` without quotes around the argument.
"""
# NOTE: we use INT as clients can pass in +N etc.
context.core.playback.volume = min(max(0, volume), 100)
@protocol.commands.add('single', state=protocol.BOOL)
def single(context, state):
"""
*musicpd.org, playback section:*
``single {STATE}``
Sets single state to ``STATE``, ``STATE`` should be 0 or 1. When
single is activated, playback is stopped after current song, or
song is repeated if the ``repeat`` mode is enabled.
"""
context.core.tracklist.single = state
@protocol.commands.add('stop')
def stop(context):
"""
*musicpd.org, playback section:*
``stop``
Stops playing.
"""
context.core.playback.stop()
|
|
#===============================================================================
# The OTBot: The Old Trailers Bot
#-------------------------------------------------------------------------------
# Version: 0.1.0
# Updated: 14-05-2014
# Author: Alex Crawford
# License: MIT
#===============================================================================
"""
A fairly simple Reddit bot, written specifically for /r/oldtrailers, that
updates & indexes the titles of the movies/trailers posted there.
"""
#===============================================================================
# Imports
#===============================================================================
from redbot import *
#===============================================================================
# The OTBot
#===============================================================================
class OTBot(RedBot):
"""The OTBot class, containing the attributes/methods for the bot."""
AGENT_NAME = "Old Trailers Bot v0.1.0 by /u/Trebek604"
VERSION = "0.1.0"
USERNAME = ""
PASSWORD = ""
SUBREDDIT = "oldtrailers"
SUB_URL = "http://www.reddit.com/r/oldtrailers/"
FLAIR = ["Trailer", "Teaser", "TV Spot", "VHS Trailer"]
ARTICLE_EXCEPTS = []
MESSAGES = {
"adding": "New titles found. Adding titles to local data...\n",
"buildpage_alpha": "Building alphabetical movies wiki page...\n",
"buildpage_year": "Building decade separated movies wiki pages...\n",
"edit_wiki": "Editing/updating wiki page: %s...\n",
"login": "Logging in to reddit...\n",
"getnew": "Getting %s new submissions from: /r/%s...\n",
"getsub": "Getting subreddit: /r/%s...\n",
"noadding": "No new titles to add.\n",
"opendata": "Loading local movie data...\n",
"opengz": "Opening gzipped JSON file: '%s'\n",
"openjson": "Opening JSON file: '%s'\n",
"savedata": "Saving local movie data...\n",
"savegz": "Saving gzipped JSON file: '%s'\n",
"savejson": "Saving JSON file: '%s'\n",
}
def __init__(self):
"""The init method for the OTBot."""
super(OTBot, self).__init__()
self.PATHS["movies"] = "data\\movies.gz"
self.DATA["movies"] = None
self.new_titles = False
def article_fix(self, title):
"""
Checks for an article before a given title, and if found, moves it to
the end of the title, and returns the modified title.
"""
form_str = "%s, %s %s"
if title not in self.ARTICLE_EXCEPTS:
if title[:2] == "A ":
return form_str % (title[2:][:-7], title[:1], title[2:][-6:])
elif title[:3] == "An ":
return form_str % (title[3:][:-7], title[:2], title[3:][-6:])
elif title[:4] == "The ":
return form_str % (title[4:][:-7], title[:3], title[4:][-6:])
return title
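    # Illustrative examples (not part of the original bot): article_fix()
    # assumes titles ending in a four-digit year in parentheses, e.g.
    #   "The Thing (1982)"    -> "Thing, The (1982)"
    #   "A Bug's Life (1998)" -> "Bug's Life, A (1998)"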
def build_page_alpha(self):
"""
Builds the movies wiki page, containing an alphabetical listing
of titles posted to the subreddit.
"""
self.print_and_log(self.MESSAGES["buildpage_alpha"])
hr = "\n--------\n"
date = self.get_date()
alpha_titles = self.sort_by_alpha(self.DATA["movies"])
header = "# Movies"
footer = (
"--------\n\n"
"*This page was generated automatically by "
"the OTBot (%s).*\n" % (date)
)
page = [header, hr, "\n"]
for letter in sorted(alpha_titles):
page.append("## %s%s" % (letter, hr))
for title in alpha_titles[letter]:
md_link = "[%s](%s) \n" % (
self.article_fix(title),
self.DATA["movies"][title]["search"]
)
page.append(md_link)
page.append("\n")
page.append(footer)
return page
def build_page_decade(self):
"""
        Builds the decade-separated movies wiki pages, listing the titles
        posted to the subreddit by decade and year.
"""
self.print_and_log(self.MESSAGES["buildpage_year"])
hr = "\n--------\n"
date = self.get_date()
titles_decade = self.sort_by_decade(self.DATA["movies"])
pages = {
"1930s": [], "1940s": [], "1950s": [], "1960s": [], "1970s": [],
"1980s": [], "1990s": []
}
footer = (
"--------\n\n"
"*This page was generated automatically by "
"the OTBot (%s).*\n" % (date)
)
sorted_decades = sorted(titles_decade)
for decade in sorted_decades:
sorted_years = sorted(titles_decade[decade])
header = "# Movies (%s)" % (decade)
pages[decade].extend([header, hr, "\n"])
for year in sorted_years:
subheader = "## %s" % (year)
pages[decade].extend([subheader, hr])
for title in titles_decade[decade][year]:
md_link = "[%s](%s) \n" % (
self.article_fix(title),
self.DATA["movies"][title]["search"]
)
pages[decade].append(md_link)
pages[decade].append("\n")
pages[decade].append(footer)
return pages
def extract_genres(self, title):
"""Extract a movie's genres from a given submission title."""
title_list = list(title)
index = title_list.index("[")
genres = title[index+1:-1].split(" | ")
return genres
    def extract_title_year(self, title):
        """Extract a movie's title and year from a given submission title."""
title_list = list(title)
index = title_list.index("[")
title = title[:index-1]
return title
    def extract_title(self, title):
        """Extract a movie's title from a given submission title."""
title_list = list(title)
index = title_list.index("[")
title = title[:index][:-8]
return title
def extract_year(self, title):
"""Extract a movie's release year from a given submission title."""
title_list = list(title)
index = title_list.index("[")
year = title[:index][-6:-2]
return year
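    # Illustrative examples (not part of the original bot): the extract_*
    # helpers assume submission titles of the form "Title (YYYY) [Genre | Genre]".
    # For "The Thing (1982) [Horror | Sci-Fi]":
    #   extract_title_year() -> "The Thing (1982)"
    #   extract_title()      -> "The Thing"
    #   extract_year()       -> "1982"
    #   extract_genres()     -> ["Horror", "Sci-Fi"]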
def open_data(self, zipped=True):
"""Opens the local data."""
self.print_and_log(self.MESSAGES["opendata"])
try:
if not zipped:
self.DATA["movies"] = self.open_json(self.PATHS["movies"])
else:
self.DATA["movies"] = self.open_zip(self.PATHS["movies"])
        except Exception as error:
            print str(error)
            self.print_and_log("Could not load data.\n")
            # fall back to an empty movie database
            self.DATA["movies"] = {}
def save_data(self, zipped=True):
"""Saves the local data."""
self.print_and_log(self.MESSAGES["savedata"])
try:
if not zipped:
self.save_json(self.PATHS["movies"], self.DATA["movies"])
else:
self.save_zip(self.PATHS["movies"], self.DATA["movies"])
except:
self.print_and_log("Could not save data.\n")
def sort_by_alpha(self, titles):
"""Sorts the given titles alphabetically."""
self.log.append("Sorting titles alphabetically...\n")
alphabet = {
"A": [], "B": [], "C": [], "D": [], "E": [], "F": [], "G": [],
"H": [], "I": [], "J": [], "K": [], "L": [], "M": [], "N": [],
"O": [], "P": [], "Q": [], "R": [], "S": [], "T": [], "U": [],
"V": [], "W": [], "X": [], "Y": [], "Z": []
}
for title in titles:
letter = self.article_fix(title)[0].upper()
alphabet[letter].append(title)
for letter in alphabet:
alphabet[letter] = sorted(
alphabet[letter],
key=self.article_fix
)
return alphabet
def sort_by_decade(self, titles):
"""Sorts the given titles by decade, and then by year."""
self.log.append("Sorting titles by decade...\n")
decades = {
"1930s": {
"1930": [], "1931": [], "1932": [], "1933": [], "1934": [],
"1935": [], "1936": [], "1937": [], "1938": [], "1939": [],
},
"1940s": {
"1940": [], "1941": [], "1942": [], "1943": [], "1944": [],
"1945": [], "1946": [], "1947": [], "1948": [], "1949": [],
},
"1950s": {
"1950": [], "1951": [], "1952": [], "1953": [], "1954": [],
"1955": [], "1956": [], "1957": [], "1958": [], "1959": [],
},
"1960s": {
"1960": [], "1961": [], "1962": [], "1963": [], "1964": [],
"1965": [], "1966": [], "1967": [], "1968": [], "1969": [],
},
"1970s": {
"1970": [], "1971": [], "1972": [], "1973": [], "1974": [],
"1975": [], "1976": [], "1977": [], "1978": [], "1979": [],
},
"1980s": {
"1980": [], "1981": [], "1982": [], "1983": [], "1984": [],
"1985": [], "1986": [], "1987": [], "1988": [], "1989": [],
},
"1990s": {
"1990": [], "1991": [], "1992": [], "1993": [], "1994": [],
"1995": [], "1996": [], "1997": [], "1998": [], "1999": [],
}
}
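        # Titles are expected to end with "(YYYY)"; slice out the year and map
        # it to its decade bucket via its third digit (e.g. "1967" -> "1960s").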
for title in titles:
year = title[-5:-1]
decade = "19%s0s" % (year[2])
decades[decade][year].append(title)
for decade in decades:
for year in decades[decade]:
decades[decade][year] = sorted(
decades[decade][year],
key=self.article_fix
)
return decades
def update_titles(self):
"""Adds any new data (movies/titles) to the local database."""
for item in self.submissions:
if self.submis_flair(item) in self.FLAIR:
created = self.submis_created(item)[0]
title = self.submis_title(item)
key = self.extract_title_year(title)
year = self.extract_year(title)
genres = self.extract_genres(title)
url = self.build_search_url(key)
if key not in self.DATA["movies"]:
self.DATA["movies"][key] = {}
self.DATA["movies"][key]["added"] = created
self.DATA["movies"][key]["genres"] = genres
self.DATA["movies"][key]["search"] = url
if not self.new_titles:
self.print_and_log(self.MESSAGES["adding"])
self.new_titles = True
print " Added '%s'" % (key)
self.log.append(" Added '%s'\n" % (key))
if not self.new_titles:
self.print_and_log(self.MESSAGES["noadding"])
else:
print
def update_wiki(self):
"""Updates the wiki pages."""
movies_alpha = "".join(self.build_page_alpha())
self.edit_wiki("movies", movies_alpha, "New titles added.")
        # TODO: Let the bot do its thing, and hope it works as it should.
# movies_decade = self.build_page_decade()
# for decade in movies_decade:
# page = "".join(movies_decade[decade])
# self.edit_wiki(
# "movies_%s" % (decade),
# page,
# "New titles added."
# )
#===============================================================================
# If Main
#===============================================================================
if __name__ == '__main__':
print "You're doing it wrong."
|
|
"""The Shelly integration."""
import asyncio
from datetime import timedelta
import logging
import aioshelly
import async_timeout
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_DEVICE_ID,
CONF_HOST,
CONF_PASSWORD,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, device_registry, update_coordinator
from .const import (
AIOSHELLY_DEVICE_TIMEOUT_SEC,
ATTR_CHANNEL,
ATTR_CLICK_TYPE,
ATTR_DEVICE,
BATTERY_DEVICES_WITH_PERMANENT_CONNECTION,
COAP,
DATA_CONFIG_ENTRY,
DEVICE,
DOMAIN,
EVENT_SHELLY_CLICK,
INPUTS_EVENTS_DICT,
POLLING_TIMEOUT_SEC,
REST,
REST_SENSORS_UPDATE_INTERVAL,
SLEEP_PERIOD_MULTIPLIER,
UPDATE_PERIOD_MULTIPLIER,
)
from .utils import get_coap_context, get_device_name, get_device_sleep_period
PLATFORMS = ["binary_sensor", "cover", "light", "sensor", "switch"]
SLEEPING_PLATFORMS = ["binary_sensor", "sensor"]
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Shelly component."""
hass.data[DOMAIN] = {DATA_CONFIG_ENTRY: {}}
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Shelly from a config entry."""
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id] = {}
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][DEVICE] = None
temperature_unit = "C" if hass.config.units.is_metric else "F"
options = aioshelly.ConnectionOptions(
entry.data[CONF_HOST],
entry.data.get(CONF_USERNAME),
entry.data.get(CONF_PASSWORD),
temperature_unit,
)
coap_context = await get_coap_context(hass)
device = await aioshelly.Device.create(
aiohttp_client.async_get_clientsession(hass),
coap_context,
options,
False,
)
dev_reg = await device_registry.async_get_registry(hass)
identifier = (DOMAIN, entry.unique_id)
device_entry = dev_reg.async_get_device(identifiers={identifier}, connections=set())
sleep_period = entry.data.get("sleep_period")
@callback
def _async_device_online(_):
_LOGGER.debug("Device %s is online, resuming setup", entry.title)
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][DEVICE] = None
if sleep_period is None:
data = {**entry.data}
data["sleep_period"] = get_device_sleep_period(device.settings)
data["model"] = device.settings["device"]["type"]
hass.config_entries.async_update_entry(entry, data=data)
hass.async_create_task(async_device_setup(hass, entry, device))
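    # sleep_period semantics (inferred from the branches below): None means the
    # value is not known yet, 0 means a non-sleeping device, and a positive
    # value means a sleeping device whose sensors are restored while offline.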
if sleep_period == 0:
# Not a sleeping device, finish setup
_LOGGER.debug("Setting up online device %s", entry.title)
try:
async with async_timeout.timeout(AIOSHELLY_DEVICE_TIMEOUT_SEC):
await device.initialize(True)
except (asyncio.TimeoutError, OSError) as err:
raise ConfigEntryNotReady from err
await async_device_setup(hass, entry, device)
elif sleep_period is None or device_entry is None:
# Need to get sleep info or first time sleeping device setup, wait for device
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][DEVICE] = device
_LOGGER.debug(
"Setup for device %s will resume when device is online", entry.title
)
device.subscribe_updates(_async_device_online)
await device.coap_request("s")
else:
# Restore sensors for sleeping device
_LOGGER.debug("Setting up offline device %s", entry.title)
await async_device_setup(hass, entry, device)
return True
async def async_device_setup(
hass: HomeAssistant, entry: ConfigEntry, device: aioshelly.Device
):
"""Set up a device that is online."""
device_wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][
COAP
] = ShellyDeviceWrapper(hass, entry, device)
await device_wrapper.async_setup()
platforms = SLEEPING_PLATFORMS
if not entry.data.get("sleep_period"):
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][
REST
] = ShellyDeviceRestWrapper(hass, device)
platforms = PLATFORMS
for platform in platforms:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
class ShellyDeviceWrapper(update_coordinator.DataUpdateCoordinator):
"""Wrapper for a Shelly device with Home Assistant specific functions."""
def __init__(self, hass, entry, device: aioshelly.Device):
"""Initialize the Shelly device wrapper."""
self.device_id = None
sleep_period = entry.data["sleep_period"]
if sleep_period:
update_interval = SLEEP_PERIOD_MULTIPLIER * sleep_period
else:
update_interval = (
UPDATE_PERIOD_MULTIPLIER * device.settings["coiot"]["update_period"]
)
device_name = get_device_name(device) if device.initialized else entry.title
super().__init__(
hass,
_LOGGER,
name=device_name,
update_interval=timedelta(seconds=update_interval),
)
self.hass = hass
self.entry = entry
self.device = device
self._async_remove_device_updates_handler = self.async_add_listener(
self._async_device_updates_handler
)
self._last_input_events_count = {}
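        # Tracks the last seen inputEventCnt per channel so repeated updates
        # carrying the same counter are not re-fired as click events.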
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self._handle_ha_stop)
@callback
def _async_device_updates_handler(self):
"""Handle device updates."""
if not self.device.initialized:
return
# Check for input events
for block in self.device.blocks:
if (
"inputEvent" not in block.sensor_ids
or "inputEventCnt" not in block.sensor_ids
):
continue
channel = int(block.channel or 0) + 1
event_type = block.inputEvent
last_event_count = self._last_input_events_count.get(channel)
self._last_input_events_count[channel] = block.inputEventCnt
if (
last_event_count is None
or last_event_count == block.inputEventCnt
or event_type == ""
):
continue
if event_type in INPUTS_EVENTS_DICT:
self.hass.bus.async_fire(
EVENT_SHELLY_CLICK,
{
ATTR_DEVICE_ID: self.device_id,
ATTR_DEVICE: self.device.settings["device"]["hostname"],
ATTR_CHANNEL: channel,
ATTR_CLICK_TYPE: INPUTS_EVENTS_DICT[event_type],
},
)
else:
_LOGGER.warning(
"Shelly input event %s for device %s is not supported, please open issue",
event_type,
self.name,
)
async def _async_update_data(self):
"""Fetch data."""
if self.entry.data.get("sleep_period"):
# Sleeping device, no point polling it, just mark it unavailable
raise update_coordinator.UpdateFailed("Sleeping device did not update")
_LOGGER.debug("Polling Shelly Device - %s", self.name)
try:
async with async_timeout.timeout(POLLING_TIMEOUT_SEC):
return await self.device.update()
except OSError as err:
raise update_coordinator.UpdateFailed("Error fetching data") from err
@property
def model(self):
"""Model of the device."""
return self.entry.data["model"]
@property
def mac(self):
"""Mac address of the device."""
return self.entry.unique_id
async def async_setup(self):
"""Set up the wrapper."""
dev_reg = await device_registry.async_get_registry(self.hass)
sw_version = self.device.settings["fw"] if self.device.initialized else ""
entry = dev_reg.async_get_or_create(
config_entry_id=self.entry.entry_id,
name=self.name,
connections={(device_registry.CONNECTION_NETWORK_MAC, self.mac)},
            # This is a duplicate, but otherwise via_device can't work
identifiers={(DOMAIN, self.mac)},
manufacturer="Shelly",
model=aioshelly.MODEL_NAMES.get(self.model, self.model),
sw_version=sw_version,
)
self.device_id = entry.id
self.device.subscribe_updates(self.async_set_updated_data)
def shutdown(self):
"""Shutdown the wrapper."""
self.device.shutdown()
self._async_remove_device_updates_handler()
@callback
def _handle_ha_stop(self, _):
"""Handle Home Assistant stopping."""
_LOGGER.debug("Stopping ShellyDeviceWrapper for %s", self.name)
self.shutdown()
class ShellyDeviceRestWrapper(update_coordinator.DataUpdateCoordinator):
"""Rest Wrapper for a Shelly device with Home Assistant specific functions."""
def __init__(self, hass, device: aioshelly.Device):
"""Initialize the Shelly device wrapper."""
if (
device.settings["device"]["type"]
in BATTERY_DEVICES_WITH_PERMANENT_CONNECTION
):
update_interval = (
SLEEP_PERIOD_MULTIPLIER * device.settings["coiot"]["update_period"]
)
else:
update_interval = REST_SENSORS_UPDATE_INTERVAL
super().__init__(
hass,
_LOGGER,
name=get_device_name(device),
update_interval=timedelta(seconds=update_interval),
)
self.device = device
async def _async_update_data(self):
"""Fetch data."""
try:
async with async_timeout.timeout(AIOSHELLY_DEVICE_TIMEOUT_SEC):
_LOGGER.debug("REST update for %s", self.name)
return await self.device.update_status()
except OSError as err:
raise update_coordinator.UpdateFailed("Error fetching data") from err
@property
def mac(self):
"""Mac address of the device."""
return self.device.settings["device"]["mac"]
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
device = hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id].get(DEVICE)
if device is not None:
# If device is present, device wrapper is not setup yet
device.shutdown()
return True
platforms = SLEEPING_PLATFORMS
if not entry.data.get("sleep_period"):
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][REST] = None
platforms = PLATFORMS
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in platforms
]
)
)
if unload_ok:
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][COAP].shutdown()
hass.data[DOMAIN][DATA_CONFIG_ENTRY].pop(entry.entry_id)
return unload_ok
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from six.moves import urllib
import pandas as pd
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("model_dir", "", "Base directory for output models.")
flags.DEFINE_string("model_type", "wide_n_deep",
"Valid model types: {'wide', 'deep', 'wide_n_deep'}.")
flags.DEFINE_integer("train_steps", 200, "Number of training steps.")
flags.DEFINE_string(
"train_data",
"",
"Path to the training data.")
flags.DEFINE_string(
"test_data",
"",
"Path to the test data.")
COLUMNS = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"]
LABEL_COLUMN = "label"
CATEGORICAL_COLUMNS = ["workclass", "education", "marital_status", "occupation",
"relationship", "race", "gender", "native_country"]
CONTINUOUS_COLUMNS = ["age", "education_num", "capital_gain", "capital_loss",
"hours_per_week"]
def maybe_download():
"""Maybe downloads training data and returns train and test file names."""
if FLAGS.train_data:
train_file_name = FLAGS.train_data
else:
train_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data", train_file.name) # pylint: disable=line-too-long
train_file_name = train_file.name
train_file.close()
print("Training data is downloaded to %s" % train_file_name)
if FLAGS.test_data:
test_file_name = FLAGS.test_data
else:
test_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test", test_file.name) # pylint: disable=line-too-long
test_file_name = test_file.name
test_file.close()
print("Test data is downloaded to %s" % test_file_name)
return train_file_name, test_file_name
def build_estimator(model_dir):
"""Build an estimator."""
# Sparse base columns.
gender = tf.contrib.layers.sparse_column_with_keys(column_name="gender",
keys=["female", "male"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
# Transformations.
age_buckets = tf.contrib.layers.bucketized_column(age,
boundaries=[
18, 25, 30, 35, 40, 45,
50, 55, 60, 65
])
# Wide columns and deep columns.
wide_columns = [gender, native_country, education, occupation, workclass,
relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column(
[age_buckets, education, occupation],
hash_bucket_size=int(1e6)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(native_country,
dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
]
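  # The wide (linear) part memorizes crosses of the sparse columns, while the
  # deep part learns dense embeddings for them; which parts are used depends
  # on FLAGS.model_type below.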
if FLAGS.model_type == "wide":
m = tf.contrib.learn.LinearClassifier(model_dir=model_dir,
feature_columns=wide_columns)
elif FLAGS.model_type == "deep":
m = tf.contrib.learn.DNNClassifier(model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=[100, 50])
else:
m = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
return m
def input_fn(df):
"""Input builder function."""
# Creates a dictionary mapping from each continuous feature column name (k) to
# the values of that column stored in a constant Tensor.
continuous_cols = {k: tf.constant(df[k].values) for k in CONTINUOUS_COLUMNS}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in CATEGORICAL_COLUMNS}
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols)
feature_cols.update(categorical_cols)
# Converts the label column into a constant Tensor.
label = tf.constant(df[LABEL_COLUMN].values)
# Returns the feature columns and the label.
return feature_cols, label
def train_and_eval():
"""Train and evaluate the model."""
train_file_name, test_file_name = maybe_download()
df_train = pd.read_csv(
tf.gfile.Open(train_file_name),
names=COLUMNS,
skipinitialspace=True,
engine="python")
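  # skiprows=1 below skips the non-data first line of the UCI adult.test file.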
df_test = pd.read_csv(
tf.gfile.Open(test_file_name),
names=COLUMNS,
skipinitialspace=True,
skiprows=1,
engine="python")
# remove NaN elements
df_train = df_train.dropna(how='any', axis=0)
df_test = df_test.dropna(how='any', axis=0)
df_train[LABEL_COLUMN] = (
df_train["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
df_test[LABEL_COLUMN] = (
df_test["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
model_dir = tempfile.mkdtemp() if not FLAGS.model_dir else FLAGS.model_dir
print("model directory = %s" % model_dir)
m = build_estimator(model_dir)
m.fit(input_fn=lambda: input_fn(df_train), steps=FLAGS.train_steps)
results = m.evaluate(input_fn=lambda: input_fn(df_test), steps=1)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
def main(_):
train_and_eval()
if __name__ == "__main__":
tf.app.run()
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import json
import uuid
import mock
import requests
import six
import yaml
from mistralclient.api.base import APIException
from mistralclient.api.v2 import workbooks
from mistralclient.api.v2 import workflows
from mistralclient.api.v2 import executions
# XXX: actionsensor import depends on config being setup.
import st2tests.config as tests_config
tests_config.parse_args()
import st2actions.bootstrap.runnersregistrar as runners_registrar
from st2actions.handlers.mistral import MistralCallbackHandler
from st2actions.runners.localrunner import LocalShellRunner
from st2actions.runners.mistral.v2 import MistralRunner
from st2common.constants import action as action_constants
from st2common.models.api.auth import TokenAPI
from st2common.models.api.action import ActionAPI
from st2common.models.db.liveaction import LiveActionDB
from st2common.persistence.action import Action
from st2common.persistence.liveaction import LiveAction
from st2common.services import access as access_service
from st2common.services import action as action_service
from st2common.transport.liveaction import LiveActionPublisher
from st2common.transport.publishers import CUDPublisher
from st2common.util import isotime
from st2tests import DbTestCase
from st2tests import http
from st2tests.fixturesloader import FixturesLoader
from tests.unit.base import MockLiveActionPublisher
TEST_FIXTURES = {
'workflows': [
'workbook_v2.yaml',
'workbook_v2_many_workflows.yaml',
'workbook_v2_many_workflows_no_default.yaml',
'workflow_v2.yaml',
'workflow_v2_many_workflows.yaml'
],
'actions': [
'workbook_v2.yaml',
'workbook_v2_many_workflows.yaml',
'workbook_v2_many_workflows_no_default.yaml',
'workflow_v2.yaml',
'workflow_v2_many_workflows.yaml',
'workbook_v2_name_mismatch.yaml',
'workflow_v2_name_mismatch.yaml',
'local.yaml'
]
}
PACK = 'generic'
LOADER = FixturesLoader()
FIXTURES = LOADER.load_fixtures(fixtures_pack=PACK, fixtures_dict=TEST_FIXTURES)
MISTRAL_EXECUTION = {'id': str(uuid.uuid4()), 'state': 'RUNNING', 'workflow_name': None}
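# Template execution record; each WB*/WF*_EXEC below is a deep copy of this
# with its own workflow_name filled in.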
# Workbook with a single workflow
WB1_YAML_FILE_NAME = TEST_FIXTURES['workflows'][0]
WB1_YAML_FILE_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WB1_YAML_FILE_NAME)
WB1_SPEC = FIXTURES['workflows'][WB1_YAML_FILE_NAME]
WB1_YAML = yaml.safe_dump(WB1_SPEC, default_flow_style=False)
WB1_NAME = '%s.%s' % (PACK, WB1_YAML_FILE_NAME.replace('.yaml', ''))
WB1 = workbooks.Workbook(None, {'name': WB1_NAME, 'definition': WB1_YAML})
WB1_OLD = workbooks.Workbook(None, {'name': WB1_NAME, 'definition': ''})
WB1_EXEC = copy.deepcopy(MISTRAL_EXECUTION)
WB1_EXEC['workflow_name'] = WB1_NAME
# Workbook with many workflows
WB2_YAML_FILE_NAME = TEST_FIXTURES['workflows'][1]
WB2_YAML_FILE_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WB2_YAML_FILE_NAME)
WB2_SPEC = FIXTURES['workflows'][WB2_YAML_FILE_NAME]
WB2_YAML = yaml.safe_dump(WB2_SPEC, default_flow_style=False)
WB2_NAME = '%s.%s' % (PACK, WB2_YAML_FILE_NAME.replace('.yaml', ''))
WB2 = workbooks.Workbook(None, {'name': WB2_NAME, 'definition': WB2_YAML})
WB2_EXEC = copy.deepcopy(MISTRAL_EXECUTION)
WB2_EXEC['workflow_name'] = WB2_NAME
# Workbook with many workflows but no default workflow is defined
WB3_YAML_FILE_NAME = TEST_FIXTURES['workflows'][2]
WB3_YAML_FILE_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WB3_YAML_FILE_NAME)
WB3_SPEC = FIXTURES['workflows'][WB3_YAML_FILE_NAME]
WB3_YAML = yaml.safe_dump(WB3_SPEC, default_flow_style=False)
WB3_NAME = '%s.%s' % (PACK, WB3_YAML_FILE_NAME.replace('.yaml', ''))
WB3 = workbooks.Workbook(None, {'name': WB3_NAME, 'definition': WB3_YAML})
WB3_EXEC = copy.deepcopy(MISTRAL_EXECUTION)
WB3_EXEC['workflow_name'] = WB3_NAME
# Non-workbook with a single workflow
WF1_YAML_FILE_NAME = TEST_FIXTURES['workflows'][3]
WF1_YAML_FILE_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WF1_YAML_FILE_NAME)
WF1_SPEC = FIXTURES['workflows'][WF1_YAML_FILE_NAME]
WF1_YAML = yaml.safe_dump(WF1_SPEC, default_flow_style=False)
WF1_NAME = '%s.%s' % (PACK, WF1_YAML_FILE_NAME.replace('.yaml', ''))
WF1 = workflows.Workflow(None, {'name': WF1_NAME, 'definition': WF1_YAML})
WF1_OLD = workflows.Workflow(None, {'name': WF1_NAME, 'definition': ''})
WF1_EXEC = copy.deepcopy(MISTRAL_EXECUTION)
WF1_EXEC['workflow_name'] = WF1_NAME
# Non-workbook with many workflows
WF2_YAML_FILE_NAME = TEST_FIXTURES['workflows'][4]
WF2_YAML_FILE_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WF2_YAML_FILE_NAME)
WF2_SPEC = FIXTURES['workflows'][WF2_YAML_FILE_NAME]
WF2_YAML = yaml.safe_dump(WF2_SPEC, default_flow_style=False)
WF2_NAME = '%s.%s' % (PACK, WF2_YAML_FILE_NAME.replace('.yaml', ''))
WF2 = workflows.Workflow(None, {'name': WF2_NAME, 'definition': WF2_YAML})
WF2_EXEC = copy.deepcopy(MISTRAL_EXECUTION)
WF2_EXEC['workflow_name'] = WF2_NAME
# Action executions requirements
ACTION_CONTEXT = {'user': 'stanley'}
ACTION_PARAMS = {'friend': 'Rocky'}
# Token for auth test cases
TOKEN_API = TokenAPI(
user=ACTION_CONTEXT['user'], token=uuid.uuid4().hex,
expiry=isotime.format(isotime.add_utc_tz(datetime.datetime.utcnow()), offset=False))
TOKEN_DB = TokenAPI.to_model(TOKEN_API)
NON_EMPTY_RESULT = 'non-empty'
@mock.patch.object(
    LocalShellRunner, 'run',
    mock.MagicMock(return_value=(action_constants.LIVEACTION_STATUS_SUCCEEDED,
                                 NON_EMPTY_RESULT, None)))
@mock.patch.object(CUDPublisher, 'publish_update', mock.MagicMock(return_value=None))
@mock.patch.object(CUDPublisher, 'publish_create',
mock.MagicMock(side_effect=MockLiveActionPublisher.publish_create))
@mock.patch.object(LiveActionPublisher, 'publish_state',
mock.MagicMock(side_effect=MockLiveActionPublisher.publish_state))
class TestMistralRunner(DbTestCase):
@classmethod
def setUpClass(cls):
super(TestMistralRunner, cls).setUpClass()
runners_registrar.register_runner_types()
for _, fixture in six.iteritems(FIXTURES['actions']):
instance = ActionAPI(**fixture)
Action.add_or_update(ActionAPI.to_model(instance))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
def test_launch_workflow(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
execution = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
workflow_input = copy.deepcopy(ACTION_PARAMS)
workflow_input.update({'count': '3'})
env = {
'__actions': {
'st2.action': {
'st2_context': {
'endpoint': 'http://0.0.0.0:9101/v1/actionexecutions',
'parent': str(liveaction.id),
'notify': {},
'skip_notify_tasks': []
}
}
}
}
executions.ExecutionManager.create.assert_called_with(
WF1_NAME, workflow_input=workflow_input, env=env)
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
@mock.patch.object(
access_service, 'create_token',
mock.MagicMock(return_value=TOKEN_DB))
def test_launch_workflow_with_auth(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
execution = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS, context=ACTION_CONTEXT)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
workflow_input = copy.deepcopy(ACTION_PARAMS)
workflow_input.update({'count': '3'})
env = {
'__actions': {
'st2.action': {
'st2_context': {
'auth_token': TOKEN_DB.token,
'endpoint': 'http://0.0.0.0:9101/v1/actionexecutions',
'parent': str(liveaction.id),
'notify': {},
'skip_notify_tasks': []
}
}
}
}
executions.ExecutionManager.create.assert_called_with(
WF1_NAME, workflow_input=workflow_input, env=env)
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
def test_launch_workflow_with_notifications(self):
notify_data = {'on_complete': {'channels': ['slack'],
'message': '"@channel: Action succeeded."', 'data': {}}}
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
execution = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS, notify=notify_data)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
workflow_input = copy.deepcopy(ACTION_PARAMS)
workflow_input.update({'count': '3'})
env = {
'__actions': {
'st2.action': {
'st2_context': {
'endpoint': 'http://0.0.0.0:9101/v1/actionexecutions',
'parent': str(liveaction.id),
'notify': notify_data,
'skip_notify_tasks': []
}
}
}
}
executions.ExecutionManager.create.assert_called_with(
WF1_NAME, workflow_input=workflow_input, env=env)
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(side_effect=requests.exceptions.ConnectionError()))
def test_launch_workflow_mistral_offline(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
execution = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_FAILED)
self.assertIn('Failed to connect to mistral', liveaction.result['message'])
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(side_effect=[requests.exceptions.ConnectionError(), []]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
def test_launch_workflow_mistral_retry(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
execution = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(side_effect=[APIException(error_message='Duplicate entry.'), WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
def test_launch_workflow_duplicate_error(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
execution = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF1_OLD))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
workflows.WorkflowManager, 'update',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
def test_launch_when_workflow_definition_changed(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
execution = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(side_effect=Exception()))
@mock.patch.object(
workbooks.WorkbookManager, 'delete',
mock.MagicMock(side_effect=Exception()))
@mock.patch.object(
workflows.WorkflowManager, 'create',
mock.MagicMock(return_value=[WF1]))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WF1_EXEC)))
def test_launch_when_workflow_not_exists(self):
execution = LiveActionDB(action=WF1_NAME, parameters=ACTION_PARAMS)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WF1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WF1_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(return_value=WF2))
def test_launch_workflow_with_many_workflows(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WF2_YAML_FILE_PATH)
execution = LiveActionDB(action=WF2_NAME, parameters=ACTION_PARAMS)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_FAILED)
self.assertIn('Multiple workflows is not supported.', liveaction.result['message'])
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workflows.WorkflowManager, 'get',
mock.MagicMock(side_effect=Exception()))
    def test_launch_workflow_name_mismatch(self):
action_ref = 'generic.workflow_v2_name_mismatch'
MistralRunner.entry_point = mock.PropertyMock(return_value=WF1_YAML_FILE_PATH)
execution = LiveActionDB(action=action_ref, parameters=ACTION_PARAMS)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_FAILED)
self.assertIn('Name of the workflow must be the same', liveaction.result['message'])
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workbooks.WorkbookManager, 'get',
mock.MagicMock(return_value=WB1))
@mock.patch.object(
workbooks.WorkbookManager, 'create',
mock.MagicMock(return_value=WB1))
@mock.patch.object(
workbooks.WorkbookManager, 'update',
mock.MagicMock(return_value=WB1))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WB1_EXEC)))
def test_launch_workbook(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WB1_YAML_FILE_PATH)
execution = LiveActionDB(action=WB1_NAME, parameters=ACTION_PARAMS)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WB1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WB1_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workbooks.WorkbookManager, 'get',
mock.MagicMock(return_value=WB2))
@mock.patch.object(
workbooks.WorkbookManager, 'create',
mock.MagicMock(return_value=WB2))
@mock.patch.object(
workbooks.WorkbookManager, 'update',
mock.MagicMock(return_value=WB2))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WB2_EXEC)))
def test_launch_workbook_with_many_workflows(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WB2_YAML_FILE_PATH)
execution = LiveActionDB(action=WB2_NAME, parameters=ACTION_PARAMS)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WB2_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WB2_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workbooks.WorkbookManager, 'get',
mock.MagicMock(return_value=WB3))
@mock.patch.object(
workbooks.WorkbookManager, 'create',
mock.MagicMock(return_value=WB3))
@mock.patch.object(
workbooks.WorkbookManager, 'update',
mock.MagicMock(return_value=WB3))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WB3_EXEC)))
def test_launch_workbook_with_many_workflows_no_default(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WB3_YAML_FILE_PATH)
execution = LiveActionDB(action=WB3_NAME, parameters=ACTION_PARAMS)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_FAILED)
self.assertIn('Default workflow cannot be determined.', liveaction.result['message'])
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workbooks.WorkbookManager, 'get',
mock.MagicMock(return_value=WB1_OLD))
@mock.patch.object(
workbooks.WorkbookManager, 'create',
mock.MagicMock(return_value=WB1))
@mock.patch.object(
workbooks.WorkbookManager, 'update',
mock.MagicMock(return_value=WB1))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WB1_EXEC)))
def test_launch_when_workbook_definition_changed(self):
MistralRunner.entry_point = mock.PropertyMock(return_value=WB1_YAML_FILE_PATH)
execution = LiveActionDB(action=WB1_NAME, parameters=ACTION_PARAMS)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WB1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WB1_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workbooks.WorkbookManager, 'get',
mock.MagicMock(side_effect=Exception()))
@mock.patch.object(
workflows.WorkflowManager, 'delete',
mock.MagicMock(side_effect=Exception()))
@mock.patch.object(
workbooks.WorkbookManager, 'create',
mock.MagicMock(return_value=WB1))
@mock.patch.object(
executions.ExecutionManager, 'create',
mock.MagicMock(return_value=executions.Execution(None, WB1_EXEC)))
def test_launch_when_workbook_not_exists(self):
execution = LiveActionDB(action=WB1_NAME, parameters=ACTION_PARAMS)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_RUNNING)
mistral_context = liveaction.context.get('mistral', None)
self.assertIsNotNone(mistral_context)
self.assertEqual(mistral_context['execution_id'], WB1_EXEC.get('id'))
self.assertEqual(mistral_context['workflow_name'], WB1_EXEC.get('workflow_name'))
@mock.patch.object(
workflows.WorkflowManager, 'list',
mock.MagicMock(return_value=[]))
@mock.patch.object(
workbooks.WorkbookManager, 'get',
mock.MagicMock(side_effect=Exception()))
def test_launch_workbook_name_mismatch(self):
action_ref = 'generic.workbook_v2_name_mismatch'
MistralRunner.entry_point = mock.PropertyMock(return_value=WB1_YAML_FILE_PATH)
execution = LiveActionDB(action=action_ref, parameters=ACTION_PARAMS)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_FAILED)
self.assertIn('Name of the workbook must be the same', liveaction.result['message'])
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=http.FakeResponse({}, 200, 'OK')))
def test_callback_handler_with_result_as_text(self):
MistralCallbackHandler.callback('http://localhost:8989/v2/action_executions/12345', {},
action_constants.LIVEACTION_STATUS_SUCCEEDED,
'<html></html>')
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=http.FakeResponse({}, 200, 'OK')))
def test_callback_handler_with_result_as_dict(self):
MistralCallbackHandler.callback('http://localhost:8989/v2/action_executions/12345', {},
action_constants.LIVEACTION_STATUS_SUCCEEDED, {'a': 1})
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=http.FakeResponse({}, 200, 'OK')))
def test_callback_handler_with_result_as_json_str(self):
MistralCallbackHandler.callback('http://localhost:8989/v2/action_executions/12345', {},
action_constants.LIVEACTION_STATUS_SUCCEEDED, '{"a": 1}')
MistralCallbackHandler.callback('http://localhost:8989/v2/action_executions/12345', {},
action_constants.LIVEACTION_STATUS_SUCCEEDED, "{'a': 1}")
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=http.FakeResponse({}, 200, 'OK')))
def test_callback_handler_with_result_as_list(self):
MistralCallbackHandler.callback('http://localhost:8989/v2/action_executions/12345', {},
action_constants.LIVEACTION_STATUS_SUCCEEDED,
["a", "b", "c"])
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=http.FakeResponse({}, 200, 'OK')))
def test_callback_handler_with_result_as_list_str(self):
MistralCallbackHandler.callback('http://localhost:8989/v2/action_executions/12345', {},
action_constants.LIVEACTION_STATUS_SUCCEEDED,
'["a", "b", "c"]')
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=http.FakeResponse({}, 200, 'OK')))
def test_callback(self):
execution = LiveActionDB(
action='core.local', parameters={'cmd': 'uname -a'},
callback={
'source': 'mistral',
'url': 'http://localhost:8989/v2/action_executions/12345'
}
)
liveaction, _ = action_service.request(execution)
liveaction = LiveAction.get_by_id(str(liveaction.id))
self.assertEqual(liveaction.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
requests.request.assert_called_with('PUT', liveaction.callback['url'],
data=json.dumps({'state': 'SUCCESS',
'output': NON_EMPTY_RESULT}),
headers={'content-type': 'application/json'})
def test_build_context(self):
parent = {
'mistral': {
'workflow_name': 'foo',
'workflow_execution_id': 'b222b934-7473-4cd4-a2ec-e204a8c93848',
'task_tags': None,
'task_name': 'some_fancy_wf_task',
'task_id': '6c7d4334-3e7d-49c6-918d-698e846affaf',
'action_execution_id': '24da5c88-834c-4a65-8b56-4ddbd654eb68'
}
}
current = {
'workflow_name': 'foo.subwf',
'workflow_execution_id': '135e3446-4c89-4afe-821f-6ec6a0849b27'
}
context = MistralRunner._build_mistral_context(parent, current)
self.assertTrue(context is not None)
self.assertTrue('parent' in context['mistral'].keys())
parent_dict = {
'workflow_name': parent['mistral']['workflow_name'],
'workflow_execution_id': parent['mistral']['workflow_execution_id']
}
self.assertDictEqual(context['mistral']['parent'], parent_dict)
self.assertEqual(context['mistral']['workflow_execution_id'],
current['workflow_execution_id'])
parent = None
context = MistralRunner._build_mistral_context(parent, current)
self.assertDictEqual(context['mistral'], current)
|
|
"""
server side redis listener.
- logging
- exception handling
"""
import os
import sys
import json
import toml
import msgpack
import requests
import redis
import threading
import time
import etcd
import gevent
from gevent.pool import Pool
from gevent import monkey
monkey.patch_all()
conf_fn = os.sep.join(
[os.path.split(os.path.realpath(__file__))[0], "listener.toml"])
# print conf_fn
with open(conf_fn) as conf_fh:
cfg = toml.loads(conf_fh.read())
conf = cfg["redis"]
etcd_cfg = cfg["etcd"]
db = redis.client.StrictRedis(
host=conf["HOST"],
port=conf["PORT"],
db=conf["DB"])
etc = etcd.Client(host=etcd_cfg["HOST"], port=etcd_cfg["PORT"])
lua_file = conf["HEARTBEAT_LUA"]
print lua_file
with open(lua_file, "r") as fileh:
lua_script = fileh.read()
sha = db.script_load(lua_script)
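# script_load registers the heartbeat Lua script once; evalsha later runs it
# by this SHA-1 digest without resending the script body.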
# print sha
def strict_time():
if sys.platform == "win32":
return time.clock()
else:
return time.time()
def post(data):
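    """Forward one record to the configured JSON-RPC endpoint."""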
print data
URL = conf["JSONRPC"]
payload = {
"jsonrpc": "2.0",
"id": "r2",
"method": "call",
"params": {
"method": conf["METHOD"],
"table": conf["TABLE"],
"pkey": conf["PKEY"],
"columns": data,
"context": {
"user": "mt",
"languageid": "1033",
"sessionid": "123"}}}
HEADERS = {
'content-type': 'application/json',
'accept': 'json',
'User-Agent': 'mabo'}
payload = json.dumps(payload)
resp = requests.post(URL, data=payload, headers=HEADERS)
s = resp.text # .encode("utf8")
v = json.loads(s)
if "error" in v:
print s.encode("utf8")
def callback():
""" run in thread """
sub = db.pubsub()
channels = ['new_data']
for channel in channels:
sub.subscribe(channel)
while True:
for msg in sub.listen():
if msg["type"] == 'message':
queue_len = db.llen("data_queue")
for i in xrange(0, queue_len):
v = db.lpop("data_queue")
data = msgpack.unpackb(v)
# print queue_len, data
try:
print data
#del data["heartbeat"]
#del data["time_precision"]
data = {"id":data["id"], "ch_ori_eqpt":data["ch_ori_eqpt"]}
post(data)
except Exception as ex:
print "post data exception:", ex
else:
# print "channel: %s" %(m["channel"])
pass
def new_thread():
""" new thread """
t = threading.Thread(target=callback)
t.setDaemon(True)
t.start()
def etcd_write(key):
etc.write("/heartbeat/%s" % (key), 1, ttl=conf["SLEEP"])
pass
def check_heartbeat():
"""
check heartbeat by lua in redis
and update etcd
if no etcd?
"""
name = conf["NAME"]
etcd_write(name)
collectors = conf["COLLECTORS"]
now = 1000 * time.time()
for key in collectors:
print now
print 1000 * conf["SLEEP"]
#
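        # evalsha(sha, numkeys=1, key, now_ms, timeout_ms): the Lua script is
        # expected to report "On" while the collector's last heartbeat is recent.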
status = db.evalsha(sha, 1, key, now, 1000 * conf["SLEEP"])
print "%s heartbeat: %s" % (key, status)
if status == "On":
etcd_write(key)
#etc.write("/heartbeat/%s" % (key), 1, ttl=conf["SLEEP"])
def main():
""" main """
new_thread()
pool = Pool(conf["POOL_SIZE"])
while True:
pool.spawn(check_heartbeat)
#print "1"
gevent.sleep(conf["SLEEP"])
if __name__ == '__main__':
main()
|
|
import json
import uuid
from unittest import TestCase
from plugIt.bridge.bridge import Bridge
class TestPlugIt(TestCase):
def setUp(self):
self.plugIt = Bridge('http://0.0.0.0/')
_self = self
def _do_query(url, method='GET', query_string=None, body=None, files=None, additional_headers=None,
session=None):
_self.last_do_query_call = {'url': url, 'method': method, 'query_string': query_string,
'body': body, 'files': files, 'additional_headers': additional_headers,
'session': session}
class DummyResponse:
def json(self):
return _self.plugIt.toReplyJson()
@property
def status_code(self):
return _self.plugIt.toReplyStatusCode()
@property
def headers(self):
return _self.plugIt.toReplyHeaders()
@property
def content(self):
return json.dumps(self.json())
return DummyResponse()
self.plugIt.do_query = _do_query
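        # The tests drive this stub by assigning toReplyStatusCode /
        # toReplyJson / toReplyHeaders lambdas and then inspecting
        # self.last_do_query_call to see what the bridge requested.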
def test_ping(self):
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {'data': self.last_do_query_call['url'].split('data=', 1)[1]}
assert (self.plugIt.ping())
self.plugIt.toReplyStatusCode = lambda: 404
assert (not self.plugIt.ping())
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {'data': self.last_do_query_call['url'].split('data=', 1)[1] * 2}
assert (not self.plugIt.ping())
assert (self.last_do_query_call['url'].startswith('ping'))
def test_check_version(self):
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {'result': 'Ok', 'version': self.plugIt.PI_API_VERSION,
'protocol': self.plugIt.PI_API_NAME}
assert (self.plugIt.check_version())
assert (self.last_do_query_call['url'] == 'version')
self.plugIt.toReplyJson = lambda: {'result': 'poney', 'version': self.plugIt.PI_API_VERSION,
'protocol': self.plugIt.PI_API_NAME}
assert (not self.plugIt.check_version())
self.plugIt.toReplyJson = lambda: {'result': 'Ok', 'version': self.plugIt.PI_API_VERSION * 2,
'protocol': self.plugIt.PI_API_NAME}
assert (not self.plugIt.check_version())
self.plugIt.toReplyJson = lambda: {'result': 'Ok', 'version': self.plugIt.PI_API_VERSION,
'protocol': self.plugIt.PI_API_NAME * 2}
assert (not self.plugIt.check_version())
self.plugIt.toReplyStatusCode = lambda: 201
self.plugIt.toReplyJson = lambda: {'result': 'Ok', 'version': self.plugIt.PI_API_VERSION,
'protocol': self.plugIt.PI_API_NAME}
assert (not self.plugIt.check_version())
def test_new_mail(self):
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {'result': 'Ok'}
message_id = str(uuid.uuid4())
message = str(uuid.uuid4())
assert (self.plugIt.new_mail(message_id, message))
assert (self.last_do_query_call['url'] == 'mail')
assert (self.last_do_query_call['body'].get('response_id') == message_id)
assert (self.last_do_query_call['body'].get('message') == message)
self.plugIt.toReplyStatusCode = lambda: 201
assert (not self.plugIt.new_mail(message_id, message))
def test_media(self):
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {}
self.plugIt.toReplyHeaders = lambda: {}
media = str(uuid.uuid4())
data, content_type, cache_control = self.plugIt.get_media(media)
assert (data == '{}')
assert (content_type == 'application/octet-stream')
assert (self.last_do_query_call['url'] == 'media/{}'.format(media))
assert (not cache_control)
self.plugIt.toReplyHeaders = lambda: {'content-type': 'test', 'cache-control': 'public, max-age=31536000'}
data, content_type, cache_control = self.plugIt.get_media(media)
assert (data == '{}')
assert (content_type == 'test')
assert (cache_control == 'public, max-age=31536000')
self.plugIt.toReplyStatusCode = lambda: 201
data, content_type, cache_control = self.plugIt.get_media(media)
assert (not data)
assert (not content_type)
def test_meta(self):
k = str(uuid.uuid4())
path = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {'k': k}
self.plugIt.toReplyHeaders = lambda: {'expire': 'Wed, 21 Oct 2015 07:28:00 GMT'}
data = self.plugIt.get_meta(path)
assert (self.last_do_query_call['url'] == 'meta/{}'.format(path))
assert (data['k'] == k)
# Data should not be cached
self.plugIt.toReplyJson = lambda: {'k2': k}
data = self.plugIt.get_meta(path)
assert (data['k2'] == k)
def test_meta_fail(self):
path = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 201
self.plugIt.toReplyHeaders = lambda: {}
assert (not self.plugIt.get_meta(path))
def test_meta_cache(self):
k = str(uuid.uuid4())
path = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {'k': k}
self.plugIt.toReplyHeaders = lambda: {}
# Data should be cached
data = self.plugIt.get_meta(path)
self.plugIt.toReplyJson = lambda: {'k2': k}
data = self.plugIt.get_meta(path)
assert (data['k'] == k)
def test_template(self):
k = str(uuid.uuid4())
path = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {'k': k, 'template_tag': '-'}
self.plugIt.toReplyHeaders = lambda: {}
data = json.loads(self.plugIt.get_template(path))
assert (self.last_do_query_call['url'] == 'template/{}'.format(path))
assert (data['k'] == k)
# Data should be cached
self.plugIt.toReplyJson = lambda: {'k2': k, 'template_tag': '-'}
data = json.loads(self.plugIt.get_template(path))
assert (data['k'] == k)
def test_template_fail(self):
path = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 201
self.plugIt.toReplyHeaders = lambda: {}
assert (not self.plugIt.get_template(path))
def test_template_no_meta_no_template(self):
path = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {}
self.plugIt.toReplyHeaders = lambda: {}
assert (not self.plugIt.get_template(path))
def test_do_action_normal_mode(self):
path = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {}
self.plugIt.toReplyHeaders = lambda: {}
assert (self.plugIt.do_action(path) == ({}, {}, {}))
assert (self.last_do_query_call['url'] == 'action/{}'.format(path))
def test_do_action_proxy_mode(self):
path = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {}
self.plugIt.toReplyHeaders = lambda: {}
assert self.plugIt.do_action(path) == ({}, {}, {})
assert self.last_do_query_call['url'] == "action/" + path
    def test_do_action_proxy_mode_no_template(self):
k = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {'k': k}
self.plugIt.toReplyHeaders = lambda: {'ebuio-plugit-notemplate': True}
r, __, __ = self.plugIt.do_action('')
assert (r.__class__.__name__ == 'PlugItNoTemplate')
assert (json.loads(r.content)['k'] == k)
def test_do_action_data(self):
path = str(uuid.uuid4())
k = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {'k': k}
self.plugIt.toReplyHeaders = lambda: {}
assert (self.plugIt.do_action(path) == ({'k': k}, {}, {}))
def test_do_action_500(self):
self.plugIt.toReplyStatusCode = lambda: 500
assert (self.plugIt.do_action('')[0].__class__.__name__ == 'PlugIt500')
def test_do_action_fail(self):
self.plugIt.toReplyStatusCode = lambda: 501
assert (self.plugIt.do_action('') == (None, {}, {}))
def test_do_action_special_codes(self):
special_codes = [429, 404, 403, 401, 304]
for x in range(200, 500):
self.plugIt.toReplyStatusCode = lambda: x
self.plugIt.toReplyHeaders = lambda: {}
self.plugIt.toReplyJson = lambda: {}
r, __, __ = self.plugIt.do_action('')
if x in special_codes:
assert (r.__class__.__name__ == 'PlugItSpecialCode')
assert (r.code == x)
else:
assert (r.__class__.__name__ != 'PlugItSpecialCode')
def test_do_action_session(self):
k = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {}
self.plugIt.toReplyHeaders = lambda: {'Ebuio-PlugIt-SetSession-k': k}
assert (self.plugIt.do_action('') == ({}, {'k': k}, {}))
def test_do_action_redirect(self):
k = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {}
self.plugIt.toReplyHeaders = lambda: {'ebuio-plugit-redirect': k}
r, session, headers = self.plugIt.do_action('')
assert (r.__class__.__name__ == 'PlugItRedirect')
assert (r.url == k)
assert (not r.no_prefix)
assert (session == {})
assert (headers == {})
def test_do_action_redirect_noprefix(self):
k = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {}
self.plugIt.toReplyHeaders = lambda: {'ebuio-plugit-redirect': k, 'ebuio-plugit-redirect-noprefix': "True"}
r, session, headers = self.plugIt.do_action('')
assert (r.__class__.__name__ == 'PlugItRedirect')
assert (r.url == k)
assert (r.no_prefix)
assert (session == {})
assert (headers == {})
def test_do_action_file(self):
k = str(uuid.uuid4())
content_type = str(uuid.uuid4())
content_disposition = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {'k': k}
self.plugIt.toReplyHeaders = lambda: {'ebuio-plugit-itafile': k, 'Content-Type': content_type}
r, session, headers = self.plugIt.do_action('')
assert (r.__class__.__name__ == 'PlugItFile')
assert (json.loads(r.content)['k'] == k)
assert (r.content_type == content_type)
assert (r.content_disposition == '')
assert (session == {})
assert (headers == {})
self.plugIt.toReplyHeaders = lambda: {'ebuio-plugit-itafile': k, 'Content-Type': content_type,
'content-disposition': content_disposition}
r, __, __ = self.plugIt.do_action('')
assert (r.content_disposition == content_disposition)
def test_do_action_etag(self):
k = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {}
self.plugIt.toReplyHeaders = lambda: {'ETag': k}
r, session, headers = self.plugIt.do_action('')
assert (headers == {'ETag': k})
def test_do_action_crossdomain(self):
k = str(uuid.uuid4())
self.plugIt.toReplyStatusCode = lambda: 200
self.plugIt.toReplyJson = lambda: {}
for header in ['Access-Control-Allow-Origin', 'Access-Control-Allow-Credentials',
'Access-Control-Expose-Headers', 'Access-Control-Max-Age', 'Access-Control-Allow-Methods',
'Access-Control-Allow-Headers']:
self.plugIt.toReplyHeaders = lambda: {header: k}
r, session, headers = self.plugIt.do_action('')
assert (headers == {header: k})
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routes all the requests to the task manager.
"""
from oslo import messaging
from trove.common import cfg
from trove.common import exception
from trove.common.strategies.cluster import strategy
import trove.common.rpc.version as rpc_version
from trove.guestagent import models as agent_models
from trove import rpc
from trove.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class API(object):
"""API for interacting with the task manager."""
def __init__(self, context):
self.context = context
super(API, self).__init__()
target = messaging.Target(topic=CONF.taskmanager_queue,
version=rpc_version.RPC_API_VERSION)
self.version_cap = rpc_version.VERSION_ALIASES.get(
CONF.upgrade_levels.taskmanager)
self.client = self.get_client(target, self.version_cap)
def get_client(self, target, version_cap, serializer=None):
return rpc.get_client(target,
version_cap=version_cap,
serializer=serializer)
def _transform_obj(self, obj_ref):
# Turn the object into a dictionary and remove the mgr
if "__dict__" in dir(obj_ref):
obj_dict = obj_ref.__dict__
        # We assume 'manager' contains an object because of the *clients
if obj_dict.get('manager'):
del obj_dict['manager']
return obj_dict
raise ValueError("Could not transform %s" % obj_ref)
def _delete_heartbeat(self, instance_id):
agent_heart_beat = agent_models.AgentHeartBeat()
try:
heartbeat = agent_heart_beat.find_by_instance_id(instance_id)
heartbeat.delete()
except exception.ModelNotFoundError as e:
LOG.error(e.message)
def resize_volume(self, new_size, instance_id):
LOG.debug("Making async call to resize volume for instance: %s"
% instance_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "resize_volume",
new_size=new_size,
instance_id=instance_id)
def resize_flavor(self, instance_id, old_flavor, new_flavor):
LOG.debug("Making async call to resize flavor for instance: %s" %
instance_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "resize_flavor",
instance_id=instance_id,
old_flavor=self._transform_obj(old_flavor),
new_flavor=self._transform_obj(new_flavor))
def reboot(self, instance_id):
LOG.debug("Making async call to reboot instance: %s" % instance_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "reboot", instance_id=instance_id)
#rds-start
def restore_instance(self, packages, flavor, datastore_manager, instance_id, image_id, backup_id):
LOG.debug("Making async call to restore instance: %s" % instance_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "restore_instance", packages=packages,
flavor=self._transform_obj(flavor),
datastore_manager=datastore_manager,
instance_id=instance_id,
image_id=image_id,
backup_id=backup_id)
#rds-end
def restart(self, instance_id):
LOG.debug("Making async call to restart instance: %s" % instance_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "restart", instance_id=instance_id)
def detach_replica(self, instance_id):
LOG.debug("Making async call to detach replica: %s" % instance_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "detach_replica", instance_id=instance_id)
def promote_to_replica_source(self, instance_id):
LOG.debug("Making async call to promote replica to source: %s" %
instance_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "promote_to_replica_source",
instance_id=instance_id)
def eject_replica_source(self, instance_id):
LOG.debug("Making async call to eject replica source: %s" %
instance_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "eject_replica_source",
instance_id=instance_id)
def migrate(self, instance_id, host):
LOG.debug("Making async call to migrate instance: %s" % instance_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "migrate", instance_id=instance_id, host=host)
def delete_instance(self, instance_id):
LOG.debug("Making async call to delete instance: %s" % instance_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "delete_instance", instance_id=instance_id)
def create_backup(self, backup_info, instance_id):
LOG.debug("Making async call to create a backup for instance: %s" %
instance_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "create_backup",
backup_info=backup_info,
instance_id=instance_id)
def delete_backup(self, backup_id):
LOG.debug("Making async call to delete backup: %s" % backup_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "delete_backup", backup_id=backup_id)
def create_instance(self, instance_id, name, flavor,
image_id, databases, users, datastore_manager,
packages, volume_size, backup_id=None,
availability_zone=None, root_password=None,
nics=None, overrides=None, slave_of_id=None,
cluster_config=None):
LOG.debug("Making async call to create instance %s " % instance_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "create_instance",
instance_id=instance_id, name=name,
flavor=self._transform_obj(flavor),
image_id=image_id,
databases=databases,
users=users,
datastore_manager=datastore_manager,
packages=packages,
volume_size=volume_size,
backup_id=backup_id,
availability_zone=availability_zone,
root_password=root_password,
nics=nics,
overrides=overrides,
slave_of_id=slave_of_id,
cluster_config=cluster_config)
def update_overrides(self, instance_id, overrides=None):
LOG.debug("Making async call to update datastore configurations for "
"instance %s" % instance_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "update_overrides",
instance_id=instance_id,
overrides=overrides)
def unassign_configuration(self, instance_id, flavor, configuration_id):
LOG.debug("Making async call to remove datastore configurations for "
"instance %s" % instance_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "unassign_configuration",
instance_id=instance_id,
flavor=self._transform_obj(flavor),
configuration_id=configuration_id)
def create_cluster(self, cluster_id):
LOG.debug("Making async call to create cluster %s " % cluster_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "create_cluster",
cluster_id=cluster_id)
def delete_cluster(self, cluster_id):
LOG.debug("Making async call to delete cluster %s " % cluster_id)
cctxt = self.client.prepare(version=self.version_cap)
cctxt.cast(self.context, "delete_cluster",
cluster_id=cluster_id)
def load(context, manager=None):
if manager:
task_manager_api_class = (strategy.load_taskmanager_strategy(manager)
.task_manager_api_class)
else:
task_manager_api_class = API
return task_manager_api_class(context)
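# Illustrative usage sketch (not part of the original module): load() picks
# the cluster strategy's task_manager_api_class when a datastore manager is
# given, otherwise the default API above, and every method on the returned
# object simply casts an asynchronous message onto the task manager queue.
# Assuming `ctx` is a request context, a caller might look roughly like:
#
#     task_api = load(ctx)
#     task_api.resize_volume(new_size=10, instance_id='some-instance-uuid')
#     task_api.create_backup(backup_info={'id': 'some-backup-uuid'},
#                            instance_id='some-instance-uuid')
#
# Because these are casts, the calls return immediately; results surface
# later through instance and backup state changes, not through return values.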
|
|
"""Tractor Purge - avoid running out of diskspace!
More info: https://github.com/fredrikaverpil/tractor-purge
"""
import sys
import os
import subprocess
import re
import shutil
import logging
from optparse import OptionParser
import time
import glob
####################################
# Option parser and constants
TRACTOR_PURGE_VERSION = 'v2.1.0'
DEFAULT_DAYS = '30'
parser = OptionParser(version='%prog ' + TRACTOR_PURGE_VERSION)
parser.add_option('-t', '--tq', dest='tq',
default='/opt/pixar/Tractor-2.2/bin/tq',
help='Absolute path to tq [default: %default]')
parser.add_option('-c', '--cmd-log-sdir', dest='cmdlogsdir',
default='/var/spool/tractor/cmd-logs',
help='Absolute path to cmd-logs dir [default: %default]')
parser.add_option('-l', '--log', dest='logfile',
default='/var/tmp/tractor-purge.log',
help='Absolute path to tractor-purge log file '
'[default: %default]')
parser.add_option('-d', '--days', dest='days', default=DEFAULT_DAYS,
help='Number of days worth of jobs/logs to keep '
'[default: %default]')
parser.add_option('--delete-cmd-logs', action='store_true',
dest='deletecmdlogs',
default=False, help='Delete cmd logs [default: %default]')
parser.add_option('--delete-jobs', action='store_true', dest='deletejobs',
default=False,
help='Delete jobs from psql database after log deletion. '
'If DBArchiving is True in Tractor config, archive '
'jobs instead. [default: %default]')
parser.add_option('--dry-run', action='store_true', dest='dryrun',
default=False,
                  help='Do not perform actual deletion, instead just preview '
                       'deletions [default: %default]')
(options, args) = parser.parse_args()
TQ = options.tq
CMD_LOGS_DIR = options.cmdlogsdir
PURGE_LOG = options.logfile
DAYS = options.days
DELETE_CMD_LOGS = options.deletecmdlogs
DELETE_JOBS = options.deletejobs
DRY_RUN = options.dryrun
if not os.path.exists(TQ):
    parser.error('tq not found on path ' + TQ)
if DELETE_CMD_LOGS and not os.path.exists(CMD_LOGS_DIR):
parser.error('cmd-logs dir not found on path ' + CMD_LOGS_DIR)
if DELETE_CMD_LOGS is False and DELETE_JOBS is False:
    parser.error('Neither --delete-cmd-logs nor --delete-jobs was specified.')
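# Example invocations (illustrative only; the script file name and paths are
# assumptions, adjust them to your installation):
#
#     python tractor-purge.py --delete-cmd-logs --days 45 --dry-run
#     python tractor-purge.py --delete-jobs --days 30 \
#         --tq /opt/pixar/Tractor-2.2/bin/tq --log /var/tmp/tractor-purge.log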
####################################
# General setup
# Logging
logger = logging.getLogger('Tractor 2.2 purger')
hdlr = logging.FileHandler(PURGE_LOG)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
# Logging to stdout
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)
####################################
# Functions
def jids_to_delete(days):
"""Create list of all job ids matching query."""
jids = []
command = [TQ, 'jobs',
'not active and not ready and spooltime < -' + days + 'd',
'--noheader',
'--cols', 'jid',
'--sortby', 'jid',
'--limit', '0']
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
try:
for line in iter(p.stdout.readline, b''):
sys.stdout.flush()
jid = line.rstrip()
jids.append(int(jid))
logger.info('Found job: ' + jid)
    except Exception:
logger.warning('Failed to read stdout.')
return jids
def jids_to_keep(days):
"""Create list of all job ids matching query.
NOTE: this query returns all jids within the time span in order to
NOT delete them.
"""
jids = []
command = [TQ, 'jobs',
'spooltime > -' + days + 'd or active or ready or blocked',
'--noheader',
'--archives',
'--cols', 'jid',
'--sortby', 'jid',
'--limit', '0']
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
try:
for line in iter(p.stdout.readline, b''):
sys.stdout.flush()
jid = line.rstrip()
jids.append(int(jid))
logger.info('Keep logs for job: ' + jid)
    except Exception:
logger.warning('Failed to read stdout.')
return jids
def get_job_folders_for_deletion(job_folders, keep_jids):
"""Return list of job folders to NOT keep."""
folders_to_delete = []
for job_folder in job_folders:
jid = int(os.path.basename(job_folder).replace("J", ""))
if jid not in keep_jids:
folders_to_delete.append(job_folder)
return folders_to_delete
def delete_logs(delete_list):
"""Delete the actual log folders
"""
for job_folder in delete_list:
if not DRY_RUN:
logger.info('Deleting %s' % job_folder)
shutil.rmtree(job_folder)
else:
logger.info('Dry run: (not) deleting %s' % job_folder)
def delete_tractor_jobs(days):
"""Delete jobs from Tractor. You can also delete jobs manually using:
tractor-dbctl --purge-archive-to-year-month YY-MM
"""
if not DRY_RUN:
logger.info('Executing tq command to delete jobs...')
command = [TQ, '--force', '--yes', 'delete',
'not active and not ready and spooltime < -' + days + 'd',
'--cols', 'jid',
'--limit', '0']
else:
logger.info('Executing tq command to (not) delete jobs...')
command = [TQ, 'jobs', '--archives',
'not active and not ready and spooltime < -' + days + 'd',
'--cols', 'jid',
'--limit', '0']
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
try:
for line in iter(p.stdout.readline, b''):
sys.stdout.flush()
logger.info(line.rstrip())
    except Exception:
logger.warning('Failed reading stdout.')
####################################
# Main
def main():
"""Main program."""
# Show warning
seconds = 10
warning_message = ('Welcome to tractor-purge.\n\n' +
                       'This script will now execute the following actions')
if DRY_RUN:
warning_message += ' in "dry run" mode:\n'
else:
warning_message += ':\n'
if DELETE_CMD_LOGS:
warning_message += ('- Delete cmd-logs older than ' +
str(DAYS) + ' days.\n')
if DELETE_JOBS:
warning_message += ('- Delete/archive jobs older than ' +
str(DAYS) + ' days.\n')
    warning_message += ('\nAbort now (ctrl+c) if this does not look ' +
'right to you. You have ' + str(seconds) + ' ' +
'seconds and counting...')
logger.warning(warning_message)
time.sleep(seconds)
logger.info('Tractor purge initiated.')
# Queries
if DELETE_CMD_LOGS:
jids = jids_to_keep(days=DAYS)
all_job_folders = glob.glob("%s/*/J*" % (CMD_LOGS_DIR))
paths_to_delete = get_job_folders_for_deletion(
job_folders=all_job_folders, keep_jids=jids)
logger.info('Job log folders found: %s' % len(all_job_folders))
logger.info('Job log folders to be emptied: %s' % len(paths_to_delete))
if len(jids) > 0:
delete_logs(delete_list=paths_to_delete)
else:
logger.info('No logs to delete.')
# Delete jobs
elif DELETE_JOBS:
jids = jids_to_delete(days=DAYS)
logger.info('Jobs to be deleted: %s' % len(jids))
if len(jids) > 0:
delete_tractor_jobs(days=DAYS)
else:
logger.info('No jobs to delete.')
logger.info('Tractor purge done.\n')
if __name__ == '__main__':
main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class ResponseBase(Model):
"""ResponseBase.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Identifiable
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
"""
_validation = {
'_type': {'required': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
}
_subtype_map = {
'_type': {'Identifiable': 'Identifiable'}
}
def __init__(self, **kwargs):
super(ResponseBase, self).__init__(**kwargs)
self._type = None
class Identifiable(ResponseBase):
"""Defines the identity of a resource.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Response
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
}
_subtype_map = {
'_type': {'Response': 'Response'}
}
def __init__(self, **kwargs):
super(Identifiable, self).__init__(**kwargs)
self.id = None
self._type = 'Identifiable'
class Response(Identifiable):
"""Defines a response. All schemas that could be returned at the root of a
response should inherit from this.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Answer, ErrorResponse
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
}
_subtype_map = {
'_type': {'Answer': 'Answer', 'ErrorResponse': 'ErrorResponse'}
}
def __init__(self, **kwargs):
super(Response, self).__init__(**kwargs)
self._type = 'Response'
class Answer(Response):
"""Answer.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SpellCheck
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
}
_subtype_map = {
'_type': {'SpellCheck': 'SpellCheck'}
}
def __init__(self, **kwargs):
super(Answer, self).__init__(**kwargs)
self._type = 'Answer'
class Error(Model):
"""Defines the error that occurred.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param code: Required. The error code that identifies the category of
error. Possible values include: 'None', 'ServerError', 'InvalidRequest',
'RateLimitExceeded', 'InvalidAuthorization', 'InsufficientAuthorization'.
Default value: "None" .
:type code: str or
~azure.cognitiveservices.language.spellcheck.models.ErrorCode
:ivar sub_code: The error code that further helps to identify the error.
Possible values include: 'UnexpectedError', 'ResourceError',
'NotImplemented', 'ParameterMissing', 'ParameterInvalidValue',
'HttpNotAllowed', 'Blocked', 'AuthorizationMissing',
'AuthorizationRedundancy', 'AuthorizationDisabled', 'AuthorizationExpired'
:vartype sub_code: str or
~azure.cognitiveservices.language.spellcheck.models.ErrorSubCode
:param message: Required. A description of the error.
:type message: str
:ivar more_details: A description that provides additional information
about the error.
:vartype more_details: str
:ivar parameter: The parameter in the request that caused the error.
:vartype parameter: str
:ivar value: The parameter's value in the request that was not valid.
:vartype value: str
"""
_validation = {
'code': {'required': True},
'sub_code': {'readonly': True},
'message': {'required': True},
'more_details': {'readonly': True},
'parameter': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'sub_code': {'key': 'subCode', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'more_details': {'key': 'moreDetails', 'type': 'str'},
'parameter': {'key': 'parameter', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Error, self).__init__(**kwargs)
self.code = kwargs.get('code', "None")
self.sub_code = None
self.message = kwargs.get('message', None)
self.more_details = None
self.parameter = None
self.value = None
class ErrorResponse(Response):
"""The top-level response that represents a failed request.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
:param errors: Required. A list of errors that describe the reasons why
the request failed.
:type errors:
list[~azure.cognitiveservices.language.spellcheck.models.Error]
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'errors': {'required': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'errors': {'key': 'errors', 'type': '[Error]'},
}
def __init__(self, **kwargs):
super(ErrorResponse, self).__init__(**kwargs)
self.errors = kwargs.get('errors', None)
self._type = 'ErrorResponse'
class ErrorResponseException(HttpOperationError):
"""Server responded with exception of type: 'ErrorResponse'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
class SpellCheck(Answer):
"""SpellCheck.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
:param flagged_tokens: Required.
:type flagged_tokens:
list[~azure.cognitiveservices.language.spellcheck.models.SpellingFlaggedToken]
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'flagged_tokens': {'required': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'flagged_tokens': {'key': 'flaggedTokens', 'type': '[SpellingFlaggedToken]'},
}
def __init__(self, **kwargs):
super(SpellCheck, self).__init__(**kwargs)
self.flagged_tokens = kwargs.get('flagged_tokens', None)
self._type = 'SpellCheck'
class SpellingFlaggedToken(Model):
"""SpellingFlaggedToken.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param offset: Required.
:type offset: int
:param token: Required.
:type token: str
:param type: Required. Possible values include: 'UnknownToken',
'RepeatedToken'. Default value: "UnknownToken" .
:type type: str or
~azure.cognitiveservices.language.spellcheck.models.ErrorType
:ivar suggestions:
:vartype suggestions:
list[~azure.cognitiveservices.language.spellcheck.models.SpellingTokenSuggestion]
:ivar ping_url_suffix:
:vartype ping_url_suffix: str
"""
_validation = {
'offset': {'required': True},
'token': {'required': True},
'type': {'required': True},
'suggestions': {'readonly': True},
'ping_url_suffix': {'readonly': True},
}
_attribute_map = {
'offset': {'key': 'offset', 'type': 'int'},
'token': {'key': 'token', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'suggestions': {'key': 'suggestions', 'type': '[SpellingTokenSuggestion]'},
'ping_url_suffix': {'key': 'pingUrlSuffix', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SpellingFlaggedToken, self).__init__(**kwargs)
self.offset = kwargs.get('offset', None)
self.token = kwargs.get('token', None)
self.type = kwargs.get('type', "UnknownToken")
self.suggestions = None
self.ping_url_suffix = None
class SpellingTokenSuggestion(Model):
"""SpellingTokenSuggestion.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param suggestion: Required.
:type suggestion: str
:ivar score:
:vartype score: float
:ivar ping_url_suffix:
:vartype ping_url_suffix: str
"""
_validation = {
'suggestion': {'required': True},
'score': {'readonly': True},
'ping_url_suffix': {'readonly': True},
}
_attribute_map = {
'suggestion': {'key': 'suggestion', 'type': 'str'},
'score': {'key': 'score', 'type': 'float'},
'ping_url_suffix': {'key': 'pingUrlSuffix', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SpellingTokenSuggestion, self).__init__(**kwargs)
self.suggestion = kwargs.get('suggestion', None)
self.score = None
self.ping_url_suffix = None
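# Illustrative sketch (not part of the generated code): these msrest models
# are keyword-constructed. Required fields are passed as kwargs, read-only
# (':ivar') fields stay None until a service response is deserialized, and
# the '_type' discriminator together with each class's _subtype_map lets the
# deserializer pick the right subclass. A hypothetical construction:
#
#     token = SpellingFlaggedToken(offset=0, token='recieve',
#                                  type='UnknownToken')
#     answer = SpellCheck(flagged_tokens=[token])
#     failure = ErrorResponse(errors=[Error(code='InvalidRequest',
#                                           message='Parameter missing')])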
|
|
from test.support import verbose, run_unittest
import re
from re import Scanner
import sys, traceback
from weakref import proxy
# Misc tests from Tim Peters' re.doc
# WARNING: Don't change details in these tests if you don't know
# what you're doing. Some of these tests were carefully modeled to
# cover most of the code.
import unittest
class ReTests(unittest.TestCase):
def test_weakref(self):
s = 'QabbbcR'
x = re.compile('ab+c')
y = proxy(x)
self.assertEqual(x.findall('QabbbcR'), y.findall('QabbbcR'))
def test_search_star_plus(self):
self.assertEqual(re.search('x*', 'axx').span(0), (0, 0))
self.assertEqual(re.search('x*', 'axx').span(), (0, 0))
self.assertEqual(re.search('x+', 'axx').span(0), (1, 3))
self.assertEqual(re.search('x+', 'axx').span(), (1, 3))
self.assertEqual(re.search('x', 'aaa'), None)
self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0))
self.assertEqual(re.match('a*', 'xxx').span(), (0, 0))
self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3))
self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3))
self.assertEqual(re.match('a+', 'xxx'), None)
def bump_num(self, matchobj):
int_value = int(matchobj.group(0))
return str(int_value + 1)
def test_basic_re_sub(self):
self.assertEqual(re.sub("(?i)b+", "x", "bbbb BBBB"), 'x x')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'),
'9.3 -3 24x100y')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3),
'9.3 -3 23x99y')
self.assertEqual(re.sub('.', lambda m: r"\n", 'x'), '\\n')
self.assertEqual(re.sub('.', r"\n", 'x'), '\n')
s = r"\1\1"
self.assertEqual(re.sub('(.)', s, 'x'), 'xx')
self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s)
self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s)
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'),
'\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'),
(chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest')
def test_bug_449964(self):
# fails for group followed by other escape
self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'),
'xx\bxx\b')
def test_bug_449000(self):
# Test for sub() on escaped characters
self.assertEqual(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
def test_bug_1661(self):
# Verify that flags do not get silently ignored with compiled patterns
pattern = re.compile('.')
self.assertRaises(ValueError, re.match, pattern, 'A', re.I)
self.assertRaises(ValueError, re.search, pattern, 'A', re.I)
self.assertRaises(ValueError, re.findall, pattern, 'A', re.I)
self.assertRaises(ValueError, re.compile, pattern, re.I)
def test_bug_3629(self):
# A regex that triggered a bug in the sre-code validator
re.compile("(?P<quote>)(?(quote))")
def test_sub_template_numeric_escape(self):
# bug 776311 and friends
self.assertEqual(re.sub('x', r'\0', 'x'), '\0')
self.assertEqual(re.sub('x', r'\000', 'x'), '\000')
self.assertEqual(re.sub('x', r'\001', 'x'), '\001')
self.assertEqual(re.sub('x', r'\008', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\009', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\111', 'x'), '\111')
self.assertEqual(re.sub('x', r'\117', 'x'), '\117')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\1111')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\111' + '1')
self.assertEqual(re.sub('x', r'\00', 'x'), '\x00')
self.assertEqual(re.sub('x', r'\07', 'x'), '\x07')
self.assertEqual(re.sub('x', r'\08', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a')
self.assertEqual(re.sub('x', r'\400', 'x'), '\0')
self.assertEqual(re.sub('x', r'\777', 'x'), '\377')
self.assertRaises(re.error, re.sub, 'x', r'\1', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\8', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\9', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\11', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\18', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\1a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\90', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\99', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\118', 'x') # r'\11' + '8'
self.assertRaises(re.error, re.sub, 'x', r'\11a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\181', 'x') # r'\18' + '1'
self.assertRaises(re.error, re.sub, 'x', r'\800', 'x') # r'\80' + '0'
# in python2.3 (etc), these loop endlessly in sre_parser.py
self.assertEqual(re.sub('(((((((((((x)))))))))))', r'\11', 'x'), 'x')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'),
'xz8')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'),
'xza')
def test_qualified_re_sub(self):
self.assertEqual(re.sub('a', 'b', 'aaaaa'), 'bbbbb')
self.assertEqual(re.sub('a', 'b', 'aaaaa', 1), 'baaaa')
def test_bug_114660(self):
self.assertEqual(re.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'),
'hello there')
def test_bug_462270(self):
# Test for empty sub() behaviour, see SF bug #462270
self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-')
self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d')
def test_symbolic_refs(self):
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a a>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<1a1>', 'xx')
self.assertRaises(IndexError, re.sub, '(?P<a>x)', '\g<ab>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\\2', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<-1>', 'xx')
def test_re_subn(self):
self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
self.assertEqual(re.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1))
self.assertEqual(re.subn("b+", "x", "xyz"), ('xyz', 0))
self.assertEqual(re.subn("b*", "x", "xyz"), ('xxxyxzx', 4))
self.assertEqual(re.subn("b*", "x", "xyz", 2), ('xxxyz', 2))
def test_re_split(self):
self.assertEqual(re.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c'])
self.assertEqual(re.split(":*", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:*)", ":a:b::c"),
['', ':', 'a', ':', 'b', '::', 'c'])
self.assertEqual(re.split("(?::*)", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:)*", ":a:b::c"),
['', ':', 'a', ':', 'b', ':', 'c'])
self.assertEqual(re.split("([b:]+)", ":a:b::c"),
['', ':', 'a', ':b::', 'c'])
self.assertEqual(re.split("(b)|(:+)", ":a:b::c"),
['', None, ':', 'a', None, ':', '', 'b', None, '',
None, '::', 'c'])
self.assertEqual(re.split("(?:b)|(?::+)", ":a:b::c"),
['', 'a', '', '', 'c'])
def test_qualified_re_split(self):
self.assertEqual(re.split(":", ":a:b::c", 2), ['', 'a', 'b::c'])
self.assertEqual(re.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d'])
self.assertEqual(re.split("(:)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
self.assertEqual(re.split("(:*)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
def test_re_findall(self):
self.assertEqual(re.findall(":+", "abc"), [])
self.assertEqual(re.findall(":+", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:+)", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:)(:*)", "a:b::c:::d"), [(":", ""),
(":", ":"),
(":", "::")])
def test_bug_117612(self):
self.assertEqual(re.findall(r"(a|(b))", "aba"),
[("a", ""),("b", "b"),("a", "")])
def test_re_match(self):
self.assertEqual(re.match('a', 'a').groups(), ())
self.assertEqual(re.match('(a)', 'a').groups(), ('a',))
self.assertEqual(re.match(r'(a)', 'a').group(0), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1, 1), ('a', 'a'))
pat = re.compile('((a)|(b))(c)?')
self.assertEqual(pat.match('a').groups(), ('a', 'a', None, None))
self.assertEqual(pat.match('b').groups(), ('b', None, 'b', None))
self.assertEqual(pat.match('ac').groups(), ('a', 'a', None, 'c'))
self.assertEqual(pat.match('bc').groups(), ('b', None, 'b', 'c'))
self.assertEqual(pat.match('bc').groups(""), ('b', "", 'b', 'c'))
# A single group
m = re.match('(a)', 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(1), 'a')
self.assertEqual(m.group(1, 1), ('a', 'a'))
pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None))
self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'),
(None, 'b', None))
self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c'))
def test_re_groupref_exists(self):
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(),
('(', 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None)
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None)
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(),
('a', 'b'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(),
('a', ''))
# Tests for bug #1177831: exercise groups other than the first group
p = re.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))')
self.assertEqual(p.match('abc').groups(),
('a', 'b', 'c'))
self.assertEqual(p.match('ad').groups(),
('a', None, 'd'))
self.assertEqual(p.match('abd'), None)
self.assertEqual(p.match('ac'), None)
def test_re_groupref(self):
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a|').groups(),
('|', 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None)
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None)
self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(),
('a', 'a'))
self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(),
(None, None))
def test_groupdict(self):
self.assertEqual(re.match('(?P<first>first) (?P<second>second)',
'first second').groupdict(),
{'first':'first', 'second':'second'})
def test_expand(self):
self.assertEqual(re.match("(?P<first>first) (?P<second>second)",
"first second")
.expand(r"\2 \1 \g<second> \g<first>"),
"second first second first")
def test_repeat_minmax(self):
self.assertEqual(re.match("^(\w){1}$", "abc"), None)
self.assertEqual(re.match("^(\w){1}?$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None)
self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^x{1}$", "xxx"), None)
self.assertEqual(re.match("^x{1}?$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertEqual(re.match("^x{}$", "xxx"), None)
self.assertNotEqual(re.match("^x{}$", "x{}"), None)
def test_getattr(self):
self.assertEqual(re.compile("(?i)(a)(b)").pattern, "(?i)(a)(b)")
self.assertEqual(re.compile("(?i)(a)(b)").flags, re.I | re.U)
self.assertEqual(re.compile("(?i)(a)(b)").groups, 2)
self.assertEqual(re.compile("(?i)(a)(b)").groupindex, {})
self.assertEqual(re.compile("(?i)(?P<first>a)(?P<other>b)").groupindex,
{'first': 1, 'other': 2})
self.assertEqual(re.match("(a)", "a").pos, 0)
self.assertEqual(re.match("(a)", "a").endpos, 1)
self.assertEqual(re.match("(a)", "a").string, "a")
self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1)))
self.assertNotEqual(re.match("(a)", "a").re, None)
def test_special_escapes(self):
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a").group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.LOCALE).group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.UNICODE).group(0), "1aa! a")
def test_bigcharset(self):
self.assertEqual(re.match("([\u2222\u2223])",
"\u2222").group(1), "\u2222")
self.assertEqual(re.match("([\u2222\u2223])",
"\u2222", re.UNICODE).group(1), "\u2222")
def test_anyall(self):
self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0),
"a\nb")
self.assertEqual(re.match("a.*b", "a\n\nb", re.DOTALL).group(0),
"a\n\nb")
def test_non_consuming(self):
self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[^a]))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[abc]))", "a d").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s\1)", "a b").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s(abc|a))", "a b").group(1), "a")
def test_ignore_case(self):
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[abc]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"((a)\s\2)", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s\2*)", "a aa", re.I).group(1), "a aa")
self.assertEqual(re.match(r"((a)\s(abc|a))", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s(abc|a)*)", "a aa", re.I).group(1), "a aa")
def test_category(self):
self.assertEqual(re.match(r"(\s)", " ").group(1), " ")
def test_getlower(self):
import _sre
self.assertEqual(_sre.getlower(ord('A'), 0), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.LOCALE), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a'))
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
def test_not_literal(self):
self.assertEqual(re.search("\s([^a])", " b").group(1), "b")
self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb")
def test_search_coverage(self):
self.assertEqual(re.search("\s(b)", " b").group(1), "b")
self.assertEqual(re.search("a\s", "a ").group(0), "a ")
def test_re_escape(self):
p=""
self.assertEqual(re.escape(p), p)
for i in range(0, 256):
p = p + chr(i)
self.assertEqual(re.match(re.escape(chr(i)), chr(i)) is not None,
True)
self.assertEqual(re.match(re.escape(chr(i)), chr(i)).span(), (0,1))
pat=re.compile(re.escape(p))
self.assertEqual(pat.match(p) is not None, True)
self.assertEqual(pat.match(p).span(), (0,256))
def test_re_escape_byte(self):
p=b""
self.assertEqual(re.escape(p), p)
for i in range(0, 256):
b = bytes([i])
p += b
self.assertEqual(re.match(re.escape(b), b) is not None, True)
self.assertEqual(re.match(re.escape(b), b).span(), (0,1))
pat=re.compile(re.escape(p))
self.assertEqual(pat.match(p) is not None, True)
self.assertEqual(pat.match(p).span(), (0,256))
def pickle_test(self, pickle):
oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)')
s = pickle.dumps(oldpat)
newpat = pickle.loads(s)
self.assertEqual(oldpat, newpat)
def test_constants(self):
self.assertEqual(re.I, re.IGNORECASE)
self.assertEqual(re.L, re.LOCALE)
self.assertEqual(re.M, re.MULTILINE)
self.assertEqual(re.S, re.DOTALL)
self.assertEqual(re.X, re.VERBOSE)
def test_flags(self):
for flag in [re.I, re.M, re.X, re.S, re.L]:
self.assertNotEqual(re.compile('^pattern$', flag), None)
def test_sre_character_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertNotEqual(re.match(r"\%03o" % i, chr(i)), None)
self.assertNotEqual(re.match(r"\%03o0" % i, chr(i)+"0"), None)
self.assertNotEqual(re.match(r"\%03o8" % i, chr(i)+"8"), None)
self.assertNotEqual(re.match(r"\x%02x" % i, chr(i)), None)
self.assertNotEqual(re.match(r"\x%02x0" % i, chr(i)+"0"), None)
self.assertNotEqual(re.match(r"\x%02xz" % i, chr(i)+"z"), None)
self.assertRaises(re.error, re.match, "\911", "")
def test_sre_character_class_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertNotEqual(re.match(r"[\%03o]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\%03o0]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\%03o8]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02x]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02x0]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02xz]" % i, chr(i)), None)
self.assertRaises(re.error, re.match, "[\911]", "")
def test_bug_113254(self):
self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').end(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').span(1), (-1, -1))
def test_bug_527371(self):
# bug described in patches 527371/672491
self.assertEqual(re.match(r'(a)?a','a').lastindex, None)
self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1)
self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a')
self.assertEqual(re.match("(?P<a>a(b))", "ab").lastgroup, 'a')
self.assertEqual(re.match("((a))", "a").lastindex, 1)
def test_bug_545855(self):
# bug 545855 -- This pattern failed to cause a compile error as it
# should, instead provoking a TypeError.
self.assertRaises(re.error, re.compile, 'foo[a-')
def test_bug_418626(self):
        # bugs 418626 et al. -- Testing Greg Chapman's addition of op code
# SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of
# pattern '*?' on a long string.
self.assertEqual(re.match('.*?c', 10000*'ab'+'cd').end(0), 20001)
self.assertEqual(re.match('.*?cd', 5000*'ab'+'c'+5000*'ab'+'cde').end(0),
20003)
self.assertEqual(re.match('.*?cd', 20000*'abc'+'de').end(0), 60001)
# non-simple '*?' still used to hit the recursion limit, before the
# non-recursive scheme was implemented.
self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001)
def test_bug_612074(self):
pat="["+re.escape("\u2039")+"]"
self.assertEqual(re.compile(pat) and 1, 1)
def test_stack_overflow(self):
# nasty cases that used to overflow the straightforward recursive
# implementation of repeated groups.
self.assertEqual(re.match('(x)*', 50000*'x').group(1), 'x')
self.assertEqual(re.match('(x)*y', 50000*'x'+'y').group(1), 'x')
self.assertEqual(re.match('(x)*?y', 50000*'x'+'y').group(1), 'x')
def test_scanner(self):
def s_ident(scanner, token): return token
def s_operator(scanner, token): return "op%s" % token
def s_float(scanner, token): return float(token)
def s_int(scanner, token): return int(token)
scanner = Scanner([
(r"[a-zA-Z_]\w*", s_ident),
(r"\d+\.\d*", s_float),
(r"\d+", s_int),
(r"=|\+|-|\*|/", s_operator),
(r"\s+", None),
])
self.assertNotEqual(scanner.scanner.scanner("").pattern, None)
self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"),
(['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5,
'op+', 'bar'], ''))
def test_bug_448951(self):
# bug 448951 (similar to 429357, but with single char match)
# (Also test greedy matches.)
for op in '','?','*':
self.assertEqual(re.match(r'((.%s):)?z'%op, 'z').groups(),
(None, None))
self.assertEqual(re.match(r'((.%s):)?z'%op, 'a:z').groups(),
('a:', 'a'))
def test_bug_725106(self):
# capturing groups in alternatives in repeats
self.assertEqual(re.match('^((a)|b)*', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*', 'abc').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)|b)*?c', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*?d', 'abcd').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*?c', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*?c', 'abc').groups(),
('b', None))
def test_bug_725149(self):
# mark_stack_base restoring before restoring marks
self.assertEqual(re.match('(a)(?:(?=(b)*)c)*', 'abb').groups(),
('a', None))
self.assertEqual(re.match('(a)((?!(b)*))*', 'abb').groups(),
('a', None, None))
def test_bug_764548(self):
# bug 764548, re.compile() barfs on str/unicode subclasses
class my_unicode(str): pass
pat = re.compile(my_unicode("abc"))
self.assertEqual(pat.match("xyz"), None)
def test_finditer(self):
iter = re.finditer(r":+", "a:b::c:::d")
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
def test_bug_926075(self):
self.assertTrue(re.compile('bug_926075') is not
re.compile(b'bug_926075'))
def test_bug_931848(self):
pattern = eval('"[\u002E\u3002\uFF0E\uFF61]"')
self.assertEqual(re.compile(pattern).split("a.b.c"),
['a','b','c'])
def test_bug_581080(self):
iter = re.finditer(r"\s", "a b")
self.assertEqual(next(iter).span(), (1,2))
self.assertRaises(StopIteration, next, iter)
scanner = re.compile(r"\s").scanner("a b")
self.assertEqual(scanner.search().span(), (1, 2))
self.assertEqual(scanner.search(), None)
def test_bug_817234(self):
iter = re.finditer(r".*", "asdf")
self.assertEqual(next(iter).span(), (0, 4))
self.assertEqual(next(iter).span(), (4, 4))
self.assertRaises(StopIteration, next, iter)
def test_bug_6561(self):
# '\d' should match characters in Unicode category 'Nd'
# (Number, Decimal Digit), but not those in 'Nl' (Number,
# Letter) or 'No' (Number, Other).
decimal_digits = [
'\u0037', # '\N{DIGIT SEVEN}', category 'Nd'
'\u0e58', # '\N{THAI DIGIT SIX}', category 'Nd'
'\uff10', # '\N{FULLWIDTH DIGIT ZERO}', category 'Nd'
]
for x in decimal_digits:
self.assertEqual(re.match('^\d$', x).group(0), x)
not_decimal_digits = [
'\u2165', # '\N{ROMAN NUMERAL SIX}', category 'Nl'
'\u3039', # '\N{HANGZHOU NUMERAL TWENTY}', category 'Nl'
'\u2082', # '\N{SUBSCRIPT TWO}', category 'No'
'\u32b4', # '\N{CIRCLED NUMBER THIRTY NINE}', category 'No'
]
for x in not_decimal_digits:
self.assertIsNone(re.match('^\d$', x))
def test_empty_array(self):
        # SF bug 1647541
import array
for typecode in 'bBuhHiIlLfd':
a = array.array(typecode)
self.assertEqual(re.compile(b"bla").match(a), None)
self.assertEqual(re.compile(b"").match(a).groups(), ())
def test_inline_flags(self):
# Bug #1700
        upper_char = chr(0x1ea0) # Latin Capital Letter A with Dot Below
        lower_char = chr(0x1ea1) # Latin Small Letter A with Dot Below
p = re.compile(upper_char, re.I | re.U)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile(lower_char, re.I | re.U)
q = p.match(upper_char)
self.assertNotEqual(q, None)
p = re.compile('(?i)' + upper_char, re.U)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile('(?i)' + lower_char, re.U)
q = p.match(upper_char)
self.assertNotEqual(q, None)
p = re.compile('(?iu)' + upper_char)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile('(?iu)' + lower_char)
q = p.match(upper_char)
self.assertNotEqual(q, None)
def test_dollar_matches_twice(self):
"$ matches the end of string, and just before the terminating \n"
pattern = re.compile('$')
self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#')
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
pattern = re.compile('$', re.MULTILINE)
self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#' )
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
def test_bytes_str_mixing(self):
# Mixing str and bytes is disallowed
pat = re.compile('.')
bpat = re.compile(b'.')
self.assertRaises(TypeError, pat.match, b'b')
self.assertRaises(TypeError, bpat.match, 'b')
self.assertRaises(TypeError, pat.sub, b'b', 'c')
self.assertRaises(TypeError, pat.sub, 'b', b'c')
self.assertRaises(TypeError, pat.sub, b'b', b'c')
self.assertRaises(TypeError, bpat.sub, b'b', 'c')
self.assertRaises(TypeError, bpat.sub, 'b', b'c')
self.assertRaises(TypeError, bpat.sub, 'b', 'c')
def test_ascii_and_unicode_flag(self):
# String patterns
for flags in (0, re.UNICODE):
pat = re.compile('\xc0', flags | re.IGNORECASE)
self.assertNotEqual(pat.match('\xe0'), None)
pat = re.compile('\w', flags)
self.assertNotEqual(pat.match('\xe0'), None)
pat = re.compile('\xc0', re.ASCII | re.IGNORECASE)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('(?a)\xc0', re.IGNORECASE)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('\w', re.ASCII)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('(?a)\w')
self.assertEqual(pat.match('\xe0'), None)
# Bytes patterns
for flags in (0, re.ASCII):
pat = re.compile(b'\xc0', re.IGNORECASE)
self.assertEqual(pat.match(b'\xe0'), None)
pat = re.compile(b'\w')
self.assertEqual(pat.match(b'\xe0'), None)
# Incompatibilities
self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE)
self.assertRaises(ValueError, re.compile, b'(?u)\w')
self.assertRaises(ValueError, re.compile, '\w', re.UNICODE | re.ASCII)
self.assertRaises(ValueError, re.compile, '(?u)\w', re.ASCII)
self.assertRaises(ValueError, re.compile, '(?a)\w', re.UNICODE)
self.assertRaises(ValueError, re.compile, '(?au)\w')
def test_bug_6509(self):
# Replacement strings of both types must parse properly.
# all strings
pat = re.compile('a(\w)')
self.assertEqual(pat.sub('b\\1', 'ac'), 'bc')
pat = re.compile('a(.)')
self.assertEqual(pat.sub('b\\1', 'a\u1234'), 'b\u1234')
pat = re.compile('..')
self.assertEqual(pat.sub(lambda m: 'str', 'a5'), 'str')
# all bytes
pat = re.compile(b'a(\w)')
self.assertEqual(pat.sub(b'b\\1', b'ac'), b'bc')
pat = re.compile(b'a(.)')
self.assertEqual(pat.sub(b'b\\1', b'a\xCD'), b'b\xCD')
pat = re.compile(b'..')
self.assertEqual(pat.sub(lambda m: b'bytes', b'a5'), b'bytes')
def test_dealloc(self):
# issue 3299: check for segfault in debug build
import _sre
# the overflow limit is different on wide and narrow builds and it
# depends on the definition of SRE_CODE (see sre.h).
# 2**128 should be big enough to overflow on both. For smaller values
# a RuntimeError is raised instead of OverflowError.
long_overflow = 2**128
self.assertRaises(TypeError, re.finditer, "a", {})
self.assertRaises(OverflowError, _sre.compile, "abc", 0, [long_overflow])
self.assertRaises(TypeError, _sre.compile, {}, 0, [])
def run_re_tests():
from test.re_tests import tests, SUCCEED, FAIL, SYNTAX_ERROR
if verbose:
print('Running re_tests test suite')
else:
# To save time, only run the first and last 10 tests
#tests = tests[:10] + tests[-10:]
pass
for t in tests:
sys.stdout.flush()
pattern = s = outcome = repl = expected = None
if len(t) == 5:
pattern, s, outcome, repl, expected = t
elif len(t) == 3:
pattern, s, outcome = t
else:
raise ValueError('Test tuples should have 3 or 5 fields', t)
try:
obj = re.compile(pattern)
except re.error:
if outcome == SYNTAX_ERROR: pass # Expected a syntax error
else:
print('=== Syntax error:', t)
except KeyboardInterrupt: raise KeyboardInterrupt
except:
print('*** Unexpected error ***', t)
if verbose:
traceback.print_exc(file=sys.stdout)
else:
try:
result = obj.search(s)
except re.error as msg:
print('=== Unexpected exception', t, repr(msg))
if outcome == SYNTAX_ERROR:
# This should have been a syntax error; forget it.
pass
elif outcome == FAIL:
if result is None: pass # No match, as expected
else: print('=== Succeeded incorrectly', t)
elif outcome == SUCCEED:
if result is not None:
# Matched, as expected, so now we compute the
# result string and compare it to our expected result.
start, end = result.span(0)
vardict={'found': result.group(0),
'groups': result.group(),
'flags': result.re.flags}
for i in range(1, 100):
try:
gi = result.group(i)
# Special hack because else the string concat fails:
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict['g%d' % i] = gi
for i in result.re.groupindex.keys():
try:
gi = result.group(i)
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict[i] = gi
repl = eval(repl, vardict)
if repl != expected:
print('=== grouping error', t, end=' ')
print(repr(repl) + ' should be ' + repr(expected))
else:
print('=== Failed incorrectly', t)
# Try the match with both pattern and string converted to
# bytes, and check that it still succeeds.
try:
bpat = bytes(pattern, "ascii")
bs = bytes(s, "ascii")
except UnicodeEncodeError:
# skip non-ascii tests
pass
else:
try:
bpat = re.compile(bpat)
except Exception:
print('=== Fails on bytes pattern compile', t)
if verbose:
traceback.print_exc(file=sys.stdout)
else:
bytes_result = bpat.search(bs)
if bytes_result is None:
print('=== Fails on bytes pattern match', t)
# Try the match with the search area limited to the extent
# of the match and see if it still succeeds. \B will
# break (because it won't match at the end or start of a
# string), so we'll ignore patterns that feature it.
if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
and result is not None:
obj = re.compile(pattern)
result = obj.search(s, result.start(0), result.end(0) + 1)
if result is None:
print('=== Failed on range-limited match', t)
# Try the match with IGNORECASE enabled, and check that it
# still succeeds.
obj = re.compile(pattern, re.IGNORECASE)
result = obj.search(s)
if result is None:
print('=== Fails on case-insensitive match', t)
# Try the match with LOCALE enabled, and check that it
# still succeeds.
if '(?u)' not in pattern:
obj = re.compile(pattern, re.LOCALE)
result = obj.search(s)
if result is None:
print('=== Fails on locale-sensitive match', t)
# Try the match with UNICODE locale enabled, and check
# that it still succeeds.
obj = re.compile(pattern, re.UNICODE)
result = obj.search(s)
if result is None:
print('=== Fails on unicode-sensitive match', t)
def test_main():
run_unittest(ReTests)
run_re_tests()
if __name__ == "__main__":
test_main()
|
|
import time
import warnings
import numpy as np
import tensorflow as tf
from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter
from stable_baselines.common.vec_env import VecEnv
from stable_baselines.common.math_util import safe_mean, unscale_action, scale_action
from stable_baselines.common.schedules import get_schedule_fn
from stable_baselines.common.buffers import ReplayBuffer
from stable_baselines.sac.policies import SACPolicy
from stable_baselines import logger
class SAC(OffPolicyRLModel):
"""
Soft Actor-Critic (SAC)
Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor,
This implementation borrows code from the original implementation (https://github.com/haarnoja/sac),
from OpenAI Spinning Up (https://github.com/openai/spinningup) and from the Softlearning repo
(https://github.com/rail-berkeley/softlearning/).
Paper: https://arxiv.org/abs/1801.01290
Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html
:param policy: (SACPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, LnMlpPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) the discount factor
:param learning_rate: (float or callable) learning rate for adam optimizer,
the same learning rate will be used for all networks (Q-Values, Actor and Value function);
it can be a function of the current progress (from 1 to 0)
:param buffer_size: (int) size of the replay buffer
:param batch_size: (int) Minibatch size for each gradient update
:param tau: (float) the soft update coefficient ("polyak update", between 0 and 1)
:param ent_coef: (str or float) Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.) It controls the exploration/exploitation trade-off.
Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)
:param train_freq: (int) Update the model every `train_freq` steps.
:param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
:param target_update_interval: (int) update the target network every `target_update_interval` steps.
:param gradient_steps: (int) How many gradient updates to perform after each step
:param target_entropy: (str or float) target entropy when learning ent_coef (ent_coef = 'auto')
:param action_noise: (ActionNoise) the action noise type (None by default), this can help
for hard exploration problems. Cf. DDPG for the different action noise types.
:param random_exploration: (float) Probability of taking a random action (as in an epsilon-greedy strategy).
This is not needed for SAC normally but can help exploration when using HER + SAC.
This hack was present in the original OpenAI Baselines repo (DDPG + HER)
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
Note: this has no effect on SAC logging for now
:param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
If None (default), use random seed. Note that if you want completely deterministic
results, you must set `n_cpu_tf_sess` to 1.
:param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
If None, the number of CPUs of the current machine will be used.
"""
def __init__(self, policy, env, gamma=0.99, learning_rate=3e-4, buffer_size=50000,
learning_starts=100, train_freq=1, batch_size=64,
tau=0.005, ent_coef='auto', target_update_interval=1,
gradient_steps=1, target_entropy='auto', action_noise=None,
random_exploration=0.0, verbose=0, tensorboard_log=None,
_init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False,
seed=None, n_cpu_tf_sess=None):
super(SAC, self).__init__(policy=policy, env=env, replay_buffer=None, verbose=verbose,
policy_base=SACPolicy, requires_vec_env=False, policy_kwargs=policy_kwargs,
seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
self.buffer_size = buffer_size
self.learning_rate = learning_rate
self.learning_starts = learning_starts
self.train_freq = train_freq
self.batch_size = batch_size
self.tau = tau
# In the original paper, same learning rate is used for all networks
# self.policy_lr = learning_rate
# self.qf_lr = learning_rate
# self.vf_lr = learning_rate
# Entropy coefficient / Entropy temperature
# Inverse of the reward scale
self.ent_coef = ent_coef
self.target_update_interval = target_update_interval
self.gradient_steps = gradient_steps
self.gamma = gamma
self.action_noise = action_noise
self.random_exploration = random_exploration
self.value_fn = None
self.graph = None
self.replay_buffer = None
self.sess = None
self.tensorboard_log = tensorboard_log
self.verbose = verbose
self.params = None
self.summary = None
self.policy_tf = None
self.target_entropy = target_entropy
self.full_tensorboard_log = full_tensorboard_log
self.obs_target = None
self.target_policy = None
self.actions_ph = None
self.rewards_ph = None
self.terminals_ph = None
self.observations_ph = None
self.action_target = None
self.next_observations_ph = None
self.value_target = None
self.step_ops = None
self.target_update_op = None
self.infos_names = None
self.entropy = None
self.target_params = None
self.learning_rate_ph = None
self.processed_obs_ph = None
self.processed_next_obs_ph = None
self.log_ent_coef = None
if _init_setup_model:
self.setup_model()
def _get_pretrain_placeholders(self):
policy = self.policy_tf
# Rescale
deterministic_action = unscale_action(self.action_space, self.deterministic_action)
return policy.obs_ph, self.actions_ph, deterministic_action
def setup_model(self):
with SetVerbosity(self.verbose):
self.graph = tf.Graph()
with self.graph.as_default():
self.set_random_seed(self.seed)
self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
self.replay_buffer = ReplayBuffer(self.buffer_size)
with tf.variable_scope("input", reuse=False):
# Create policy and target TF objects
self.policy_tf = self.policy(self.sess, self.observation_space, self.action_space,
**self.policy_kwargs)
self.target_policy = self.policy(self.sess, self.observation_space, self.action_space,
**self.policy_kwargs)
# Initialize Placeholders
self.observations_ph = self.policy_tf.obs_ph
# Normalized observation for pixels
self.processed_obs_ph = self.policy_tf.processed_obs
self.next_observations_ph = self.target_policy.obs_ph
self.processed_next_obs_ph = self.target_policy.processed_obs
self.action_target = self.target_policy.action_ph
self.terminals_ph = tf.placeholder(tf.float32, shape=(None, 1), name='terminals')
self.rewards_ph = tf.placeholder(tf.float32, shape=(None, 1), name='rewards')
self.actions_ph = tf.placeholder(tf.float32, shape=(None,) + self.action_space.shape,
name='actions')
self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")
with tf.variable_scope("model", reuse=False):
# Create the policy
# first return value corresponds to deterministic actions
# policy_out corresponds to stochastic actions, used for training
# logp_pi is the log probability of actions taken by the policy
self.deterministic_action, policy_out, logp_pi = self.policy_tf.make_actor(self.processed_obs_ph)
# Monitor the entropy of the policy,
# this is not used for training
self.entropy = tf.reduce_mean(self.policy_tf.entropy)
# Use two Q-functions to improve performance by reducing overestimation bias.
qf1, qf2, value_fn = self.policy_tf.make_critics(self.processed_obs_ph, self.actions_ph,
create_qf=True, create_vf=True)
qf1_pi, qf2_pi, _ = self.policy_tf.make_critics(self.processed_obs_ph,
policy_out, create_qf=True, create_vf=False,
reuse=True)
# Target entropy is used when learning the entropy coefficient
if self.target_entropy == 'auto':
# automatically set target entropy if needed
self.target_entropy = -np.prod(self.action_space.shape).astype(np.float32)
else:
# Force conversion
# this will also throw an error for unexpected string
self.target_entropy = float(self.target_entropy)
# The entropy coefficient or entropy can be learned automatically
# see Automating Entropy Adjustment for Maximum Entropy RL section
# of https://arxiv.org/abs/1812.05905
if isinstance(self.ent_coef, str) and self.ent_coef.startswith('auto'):
# Default initial value of ent_coef when learned
init_value = 1.0
if '_' in self.ent_coef:
init_value = float(self.ent_coef.split('_')[1])
assert init_value > 0., "The initial value of ent_coef must be greater than 0"
self.log_ent_coef = tf.get_variable('log_ent_coef', dtype=tf.float32,
initializer=np.log(init_value).astype(np.float32))
self.ent_coef = tf.exp(self.log_ent_coef)
else:
# Force conversion to float
# this will throw an error if a malformed string (different from 'auto')
# is passed
self.ent_coef = float(self.ent_coef)
with tf.variable_scope("target", reuse=False):
# Create the value network
_, _, value_target = self.target_policy.make_critics(self.processed_next_obs_ph,
create_qf=False, create_vf=True)
self.value_target = value_target
with tf.variable_scope("loss", reuse=False):
# Take the min of the two Q-Values (Double-Q Learning)
min_qf_pi = tf.minimum(qf1_pi, qf2_pi)
# Target for Q value regression
q_backup = tf.stop_gradient(
self.rewards_ph +
(1 - self.terminals_ph) * self.gamma * self.value_target
)
# Compute Q-Function loss
# TODO: test with huber loss (it would avoid too high values)
qf1_loss = 0.5 * tf.reduce_mean((q_backup - qf1) ** 2)
qf2_loss = 0.5 * tf.reduce_mean((q_backup - qf2) ** 2)
# Compute the entropy temperature loss
# it is used when the entropy coefficient is learned
ent_coef_loss, entropy_optimizer = None, None
if not isinstance(self.ent_coef, float):
ent_coef_loss = -tf.reduce_mean(
self.log_ent_coef * tf.stop_gradient(logp_pi + self.target_entropy))
entropy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
# Compute the policy loss
# Alternative: policy_kl_loss = tf.reduce_mean(logp_pi - min_qf_pi)
policy_kl_loss = tf.reduce_mean(self.ent_coef * logp_pi - qf1_pi)
# NOTE: in the original implementation, they have an additional
# regularization loss for the Gaussian parameters
# this is not used for now
# policy_loss = (policy_kl_loss + policy_regularization_loss)
policy_loss = policy_kl_loss
# Target for value fn regression
# We update the vf towards the min of two Q-functions in order to
# reduce overestimation bias from function approximation error.
v_backup = tf.stop_gradient(min_qf_pi - self.ent_coef * logp_pi)
value_loss = 0.5 * tf.reduce_mean((value_fn - v_backup) ** 2)
values_losses = qf1_loss + qf2_loss + value_loss
# Policy train op
# (has to be separate from value train op, because min_qf_pi appears in policy_loss)
policy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
policy_train_op = policy_optimizer.minimize(policy_loss, var_list=tf_util.get_trainable_vars('model/pi'))
# Value train op
value_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph)
values_params = tf_util.get_trainable_vars('model/values_fn')
source_params = tf_util.get_trainable_vars("model/values_fn")
target_params = tf_util.get_trainable_vars("target/values_fn")
# Polyak averaging for target variables
self.target_update_op = [
tf.assign(target, (1 - self.tau) * target + self.tau * source)
for target, source in zip(target_params, source_params)
]
# Initializing target to match source variables
target_init_op = [
tf.assign(target, source)
for target, source in zip(target_params, source_params)
]
# Control flow is used because sess.run otherwise evaluates in nondeterministic order
# and we first need to compute the policy action before computing q values losses
with tf.control_dependencies([policy_train_op]):
train_values_op = value_optimizer.minimize(values_losses, var_list=values_params)
self.infos_names = ['policy_loss', 'qf1_loss', 'qf2_loss', 'value_loss', 'entropy']
# All ops to call during one training step
self.step_ops = [policy_loss, qf1_loss, qf2_loss,
value_loss, qf1, qf2, value_fn, logp_pi,
self.entropy, policy_train_op, train_values_op]
# Add entropy coefficient optimization operation if needed
if ent_coef_loss is not None:
with tf.control_dependencies([train_values_op]):
ent_coef_op = entropy_optimizer.minimize(ent_coef_loss, var_list=self.log_ent_coef)
self.infos_names += ['ent_coef_loss', 'ent_coef']
self.step_ops += [ent_coef_op, ent_coef_loss, self.ent_coef]
# Monitor losses and entropy in tensorboard
tf.summary.scalar('policy_loss', policy_loss)
tf.summary.scalar('qf1_loss', qf1_loss)
tf.summary.scalar('qf2_loss', qf2_loss)
tf.summary.scalar('value_loss', value_loss)
tf.summary.scalar('entropy', self.entropy)
if ent_coef_loss is not None:
tf.summary.scalar('ent_coef_loss', ent_coef_loss)
tf.summary.scalar('ent_coef', self.ent_coef)
tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph))
# Retrieve parameters that must be saved
self.params = tf_util.get_trainable_vars("model")
self.target_params = tf_util.get_trainable_vars("target/values_fn")
# Initialize Variables and target network
with self.sess.as_default():
self.sess.run(tf.global_variables_initializer())
self.sess.run(target_init_op)
self.summary = tf.summary.merge_all()
def _train_step(self, step, writer, learning_rate):
# Sample a batch from the replay buffer
batch = self.replay_buffer.sample(self.batch_size, env=self._vec_normalize_env)
batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones = batch
feed_dict = {
self.observations_ph: batch_obs,
self.actions_ph: batch_actions,
self.next_observations_ph: batch_next_obs,
self.rewards_ph: batch_rewards.reshape(self.batch_size, -1),
self.terminals_ph: batch_dones.reshape(self.batch_size, -1),
self.learning_rate_ph: learning_rate
}
# out = [policy_loss, qf1_loss, qf2_loss,
# value_loss, qf1, qf2, value_fn, logp_pi,
# self.entropy, policy_train_op, train_values_op]
# Do one gradient step
# and optionally compute log for tensorboard
if writer is not None:
out = self.sess.run([self.summary] + self.step_ops, feed_dict)
summary = out.pop(0)
writer.add_summary(summary, step)
else:
out = self.sess.run(self.step_ops, feed_dict)
# Unpack to monitor losses and entropy
policy_loss, qf1_loss, qf2_loss, value_loss, *values = out
# qf1, qf2, value_fn, logp_pi, entropy, *_ = values
entropy = values[4]
if self.log_ent_coef is not None:
ent_coef_loss, ent_coef = values[-2:]
return policy_loss, qf1_loss, qf2_loss, value_loss, entropy, ent_coef_loss, ent_coef
return policy_loss, qf1_loss, qf2_loss, value_loss, entropy
def learn(self, total_timesteps, callback=None,
log_interval=4, tb_log_name="SAC", reset_num_timesteps=True, replay_wrapper=None):
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
callback = self._init_callback(callback)
if replay_wrapper is not None:
self.replay_buffer = replay_wrapper(self.replay_buffer)
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
as writer:
self._setup_learn()
# Transform to callable if needed
self.learning_rate = get_schedule_fn(self.learning_rate)
# Initial learning rate
current_lr = self.learning_rate(1)
start_time = time.time()
episode_rewards = [0.0]
episode_successes = []
if self.action_noise is not None:
self.action_noise.reset()
obs = self.env.reset()
# Retrieve unnormalized observation for saving into the buffer
if self._vec_normalize_env is not None:
obs_ = self._vec_normalize_env.get_original_obs().squeeze()
n_updates = 0
infos_values = []
callback.on_training_start(locals(), globals())
callback.on_rollout_start()
for step in range(total_timesteps):
# Before training starts, randomly sample actions
# from a uniform distribution for better exploration.
# Afterwards, use the learned policy
# if random_exploration is set to 0 (normal setting)
if self.num_timesteps < self.learning_starts or np.random.rand() < self.random_exploration:
# actions sampled from the action space are in the range specific to the environment,
# but the algorithm operates on tanh-squashed actions, so simple scaling is used
unscaled_action = self.env.action_space.sample()
action = scale_action(self.action_space, unscaled_action)
else:
action = self.policy_tf.step(obs[None], deterministic=False).flatten()
# Add noise to the action (improve exploration,
# not needed in general)
if self.action_noise is not None:
action = np.clip(action + self.action_noise(), -1, 1)
# inferred actions need to be transformed to environment action_space before stepping
unscaled_action = unscale_action(self.action_space, action)
assert action.shape == self.env.action_space.shape
new_obs, reward, done, info = self.env.step(unscaled_action)
self.num_timesteps += 1
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
callback.update_locals(locals())
if callback.on_step() is False:
break
# Store only the unnormalized version
if self._vec_normalize_env is not None:
new_obs_ = self._vec_normalize_env.get_original_obs().squeeze()
reward_ = self._vec_normalize_env.get_original_reward().squeeze()
else:
# Avoid changing the original ones
obs_, new_obs_, reward_ = obs, new_obs, reward
# Store transition in the replay buffer.
self.replay_buffer_add(obs_, action, reward_, new_obs_, done, info)
obs = new_obs
# Save the unnormalized observation
if self._vec_normalize_env is not None:
obs_ = new_obs_
# Retrieve reward and episode length if using Monitor wrapper
maybe_ep_info = info.get('episode')
if maybe_ep_info is not None:
self.ep_info_buf.extend([maybe_ep_info])
if writer is not None:
# Write reward per episode to tensorboard
ep_reward = np.array([reward_]).reshape((1, -1))
ep_done = np.array([done]).reshape((1, -1))
tf_util.total_episode_reward_logger(self.episode_reward, ep_reward,
ep_done, writer, self.num_timesteps)
if self.num_timesteps % self.train_freq == 0:
callback.on_rollout_end()
mb_infos_vals = []
# Update policy, critics and target networks
for grad_step in range(self.gradient_steps):
# Break if the warmup phase is not over
# or if there are not enough samples in the replay buffer
if not self.replay_buffer.can_sample(self.batch_size) \
or self.num_timesteps < self.learning_starts:
break
n_updates += 1
# Compute current learning_rate
frac = 1.0 - step / total_timesteps
current_lr = self.learning_rate(frac)
# Update policy and critics (q functions)
mb_infos_vals.append(self._train_step(step, writer, current_lr))
# Update target network
if (step + grad_step) % self.target_update_interval == 0:
# Update target network
self.sess.run(self.target_update_op)
# Log losses and entropy, useful for monitoring training
if len(mb_infos_vals) > 0:
infos_values = np.mean(mb_infos_vals, axis=0)
callback.on_rollout_start()
episode_rewards[-1] += reward_
if done:
if self.action_noise is not None:
self.action_noise.reset()
if not isinstance(self.env, VecEnv):
obs = self.env.reset()
episode_rewards.append(0.0)
maybe_is_success = info.get('is_success')
if maybe_is_success is not None:
episode_successes.append(float(maybe_is_success))
if len(episode_rewards[-101:-1]) == 0:
mean_reward = -np.inf
else:
mean_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)
# subtract 1 as we appended a new term just now
num_episodes = len(episode_rewards) - 1
# Display training infos
if self.verbose >= 1 and done and log_interval is not None and num_episodes % log_interval == 0:
fps = int(step / (time.time() - start_time))
logger.logkv("episodes", num_episodes)
logger.logkv("mean 100 episode reward", mean_reward)
if len(self.ep_info_buf) > 0 and len(self.ep_info_buf[0]) > 0:
logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buf]))
logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buf]))
logger.logkv("n_updates", n_updates)
logger.logkv("current_lr", current_lr)
logger.logkv("fps", fps)
logger.logkv('time_elapsed', int(time.time() - start_time))
if len(episode_successes) > 0:
logger.logkv("success rate", np.mean(episode_successes[-100:]))
if len(infos_values) > 0:
for (name, val) in zip(self.infos_names, infos_values):
logger.logkv(name, val)
logger.logkv("total timesteps", self.num_timesteps)
logger.dumpkvs()
# Reset infos:
infos_values = []
callback.on_training_end()
return self
def action_probability(self, observation, state=None, mask=None, actions=None, logp=False):
if actions is not None:
raise ValueError("Error: SAC does not have action probabilities.")
warnings.warn("Even though SAC has a Gaussian policy, it cannot return a distribution as it "
"is squashed by a tanh before being scaled and outputed.")
return None
def predict(self, observation, state=None, mask=None, deterministic=True):
observation = np.array(observation)
vectorized_env = self._is_vectorized_observation(observation, self.observation_space)
observation = observation.reshape((-1,) + self.observation_space.shape)
actions = self.policy_tf.step(observation, deterministic=deterministic)
actions = actions.reshape((-1,) + self.action_space.shape) # reshape to the correct action shape
actions = unscale_action(self.action_space, actions) # scale the output for the prediction
if not vectorized_env:
actions = actions[0]
return actions, None
def get_parameter_list(self):
return (self.params +
self.target_params)
def save(self, save_path, cloudpickle=False):
data = {
"learning_rate": self.learning_rate,
"buffer_size": self.buffer_size,
"learning_starts": self.learning_starts,
"train_freq": self.train_freq,
"batch_size": self.batch_size,
"tau": self.tau,
"ent_coef": self.ent_coef if isinstance(self.ent_coef, float) else 'auto',
"target_entropy": self.target_entropy,
# Should we also store the replay buffer?
# this may lead to high memory usage
# with all transition inside
# "replay_buffer": self.replay_buffer
"gamma": self.gamma,
"verbose": self.verbose,
"observation_space": self.observation_space,
"action_space": self.action_space,
"policy": self.policy,
"n_envs": self.n_envs,
"n_cpu_tf_sess": self.n_cpu_tf_sess,
"seed": self.seed,
"action_noise": self.action_noise,
"random_exploration": self.random_exploration,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params_to_save = self.get_parameters()
self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
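# --- Minimal usage sketch (not part of the original module) ---
# A hedged example of driving the SAC class above; it assumes the
# stable-baselines MlpPolicy and a Gym-registered environment such as
# "Pendulum-v0" are installed and available.
if __name__ == "__main__":
    from stable_baselines.sac.policies import MlpPolicy

    # The env argument may be a Gym id string (see the class docstring).
    model = SAC(MlpPolicy, "Pendulum-v0", learning_rate=3e-4, verbose=1)
    # Collect transitions and train for a small number of timesteps.
    model.learn(total_timesteps=1000, log_interval=10)
    # Persist hyperparameters and trained weights to disk.
    model.save("sac_pendulum_demo")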
|
|
"""
Low-level helpers for the SecureTransport bindings.
These are Python functions that are not directly related to the high-level APIs
but are necessary to get them to work. They include a whole bunch of low-level
CoreFoundation messing about and memory management. The concerns in this module
are almost entirely about trying to avoid memory leaks and providing
appropriate and useful assistance to the higher-level code.
"""
import base64
import ctypes
import itertools
import os
import re
import ssl
import struct
import tempfile
from typing import Any, List, Optional, Tuple, Type
from .bindings import ( # type: ignore[attr-defined]
CFArray,
CFConst,
CFData,
CFDictionary,
CFMutableArray,
CFString,
CFTypeRef,
CoreFoundation,
SecKeychainRef,
Security,
)
# This regular expression is used to grab PEM data out of a PEM bundle.
_PEM_CERTS_RE = re.compile(
b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
)
def _cf_data_from_bytes(bytestring: bytes) -> CFData:
"""
Given a bytestring, create a CFData object from it. This CFData object must
be CFReleased by the caller.
"""
return CoreFoundation.CFDataCreate(
CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
)
def _cf_dictionary_from_tuples(tuples: List[Tuple[Any, Any]]) -> CFDictionary:
"""
Given a list of Python tuples, create an associated CFDictionary.
"""
dictionary_size = len(tuples)
# We need to get the dictionary keys and values out in the same order.
keys = (t[0] for t in tuples)
values = (t[1] for t in tuples)
cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
return CoreFoundation.CFDictionaryCreate(
CoreFoundation.kCFAllocatorDefault,
cf_keys,
cf_values,
dictionary_size,
CoreFoundation.kCFTypeDictionaryKeyCallBacks,
CoreFoundation.kCFTypeDictionaryValueCallBacks,
)
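# Hedged note (not part of the original module): the keys and values passed
# to _cf_dictionary_from_tuples must already be CoreFoundation objects (for
# example CFString or CFData refs); the helper only marshals them into
# parallel CFTypeRef arrays and does not convert Python values itself.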
def _cfstr(py_bstr: bytes) -> CFString:
"""
Given Python binary data, create a CFString.
The string must be CFReleased by the caller.
"""
c_str = ctypes.c_char_p(py_bstr)
cf_str = CoreFoundation.CFStringCreateWithCString(
CoreFoundation.kCFAllocatorDefault,
c_str,
CFConst.kCFStringEncodingUTF8,
)
return cf_str
def _create_cfstring_array(lst: List[bytes]) -> CFMutableArray:
"""
Given a list of Python binary data, create an associated CFMutableArray.
The array must be CFReleased by the caller.
Raises an ssl.SSLError on failure.
"""
cf_arr = None
try:
cf_arr = CoreFoundation.CFArrayCreateMutable(
CoreFoundation.kCFAllocatorDefault,
0,
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
)
if not cf_arr:
raise MemoryError("Unable to allocate memory!")
for item in lst:
cf_str = _cfstr(item)
if not cf_str:
raise MemoryError("Unable to allocate memory!")
try:
CoreFoundation.CFArrayAppendValue(cf_arr, cf_str)
finally:
CoreFoundation.CFRelease(cf_str)
except BaseException as e:
if cf_arr:
CoreFoundation.CFRelease(cf_arr)
raise ssl.SSLError(f"Unable to allocate array: {e}") from None
return cf_arr
def _cf_string_to_unicode(value: CFString) -> Optional[str]:
"""
Creates a Unicode string from a CFString object. Used entirely for error
reporting.
Yes, it annoys me quite a lot that this function is this complex.
"""
value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
string = CoreFoundation.CFStringGetCStringPtr(
value_as_void_p, CFConst.kCFStringEncodingUTF8
)
if string is None:
buffer = ctypes.create_string_buffer(1024)
result = CoreFoundation.CFStringGetCString(
value_as_void_p, buffer, 1024, CFConst.kCFStringEncodingUTF8
)
if not result:
raise OSError("Error copying C string from CFStringRef")
string = buffer.value
if string is not None:
string = string.decode("utf-8")
return string # type: ignore[no-any-return]
def _assert_no_error(
error: int, exception_class: Optional[Type[BaseException]] = None
) -> None:
"""
Checks the return code and throws an exception if there is an error to
report.
"""
if error == 0:
return
cf_error_string = Security.SecCopyErrorMessageString(error, None)
output = _cf_string_to_unicode(cf_error_string)
CoreFoundation.CFRelease(cf_error_string)
if output is None or output == "":
output = f"OSStatus {error}"
if exception_class is None:
exception_class = ssl.SSLError
raise exception_class(output)
def _cert_array_from_pem(pem_bundle: bytes) -> CFArray:
"""
Given a bundle of certs in PEM format, turns them into a CFArray of certs
that can be used to validate a cert chain.
"""
# Normalize the PEM bundle's line endings.
pem_bundle = pem_bundle.replace(b"\r\n", b"\n")
der_certs = [
base64.b64decode(match.group(1)) for match in _PEM_CERTS_RE.finditer(pem_bundle)
]
if not der_certs:
raise ssl.SSLError("No root certificates specified")
cert_array = CoreFoundation.CFArrayCreateMutable(
CoreFoundation.kCFAllocatorDefault,
0,
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
)
if not cert_array:
raise ssl.SSLError("Unable to allocate memory!")
try:
for der_bytes in der_certs:
certdata = _cf_data_from_bytes(der_bytes)
if not certdata:
raise ssl.SSLError("Unable to allocate memory!")
cert = Security.SecCertificateCreateWithData(
CoreFoundation.kCFAllocatorDefault, certdata
)
CoreFoundation.CFRelease(certdata)
if not cert:
raise ssl.SSLError("Unable to build cert object!")
CoreFoundation.CFArrayAppendValue(cert_array, cert)
CoreFoundation.CFRelease(cert)
except Exception:
# We need to free the array before the exception bubbles further.
# We only want to do that if an error occurs: otherwise, the caller
# should free.
CoreFoundation.CFRelease(cert_array)
raise
return cert_array
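# Hedged usage sketch (not part of the original module): a caller typically
# reads a PEM bundle from disk and passes the raw bytes in, releasing the
# returned array once it is no longer needed. "ca-bundle.pem" below is a
# hypothetical path used only for illustration.
#
#     with open("ca-bundle.pem", "rb") as f:
#         cert_array = _cert_array_from_pem(f.read())
#     try:
#         ...  # hand cert_array to SecureTransport for validation
#     finally:
#         CoreFoundation.CFRelease(cert_array)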
def _is_cert(item: CFTypeRef) -> bool:
"""
Returns True if a given CFTypeRef is a certificate.
"""
expected = Security.SecCertificateGetTypeID()
return CoreFoundation.CFGetTypeID(item) == expected # type: ignore[no-any-return]
def _is_identity(item: CFTypeRef) -> bool:
"""
Returns True if a given CFTypeRef is an identity.
"""
expected = Security.SecIdentityGetTypeID()
return CoreFoundation.CFGetTypeID(item) == expected # type: ignore[no-any-return]
def _temporary_keychain() -> Tuple[SecKeychainRef, str]:
"""
This function creates a temporary Mac keychain that we can use to work with
credentials. This keychain uses a one-time password and a temporary file to
store the data. We expect to have one keychain per socket. The returned
SecKeychainRef must be freed by the caller, including calling
SecKeychainDelete.
Returns a tuple of the SecKeychainRef and the path to the temporary
directory that contains it.
"""
# Unfortunately, SecKeychainCreate requires a path to a keychain. This
# means we cannot use mkstemp to use a generic temporary file. Instead,
# we're going to create a temporary directory and a filename to use there.
# This filename will be 8 random bytes expanded into hexadecimal. We also need
# some random bytes to password-protect the keychain we're creating, so we
# ask for 40 random bytes.
random_bytes = os.urandom(40)
filename = base64.b16encode(random_bytes[:8]).decode("utf-8")
password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8
tempdirectory = tempfile.mkdtemp()
keychain_path = os.path.join(tempdirectory, filename).encode("utf-8")
# We now want to create the keychain itself.
keychain = Security.SecKeychainRef()
status = Security.SecKeychainCreate(
keychain_path, len(password), password, False, None, ctypes.byref(keychain)
)
_assert_no_error(status)
# Having created the keychain, we want to pass it off to the caller.
return keychain, tempdirectory
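# Hedged usage sketch (not part of the original module): per the docstring
# above, callers are expected to clean up both the keychain and its backing
# directory when the connection is done, along these lines (shutil and the
# cert/key paths are hypothetical illustration only):
#
#     keychain, tempdir = _temporary_keychain()
#     try:
#         trust_chain = _load_client_cert_chain(keychain, cert_file, key_file)
#         ...
#     finally:
#         Security.SecKeychainDelete(keychain)
#         CoreFoundation.CFRelease(keychain)
#         shutil.rmtree(tempdir, ignore_errors=True)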
def _load_items_from_file(
keychain: SecKeychainRef, path: str
) -> Tuple[List[CFTypeRef], List[CFTypeRef]]:
"""
Given a single file, loads all the trust objects from it into arrays and
the keychain.
Returns a tuple of lists: the first list is a list of identities, the
second a list of certs.
"""
certificates = []
identities = []
result_array = None
with open(path, "rb") as f:
raw_filedata = f.read()
try:
filedata = CoreFoundation.CFDataCreate(
CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata)
)
result_array = CoreFoundation.CFArrayRef()
result = Security.SecItemImport(
filedata, # cert data
None, # Filename, leaving it out for now
None, # What the type of the file is, we don't care
None, # what's in the file, we don't care
0, # import flags
None, # key params, can include passphrase in the future
keychain, # The keychain to insert into
ctypes.byref(result_array), # Results
)
_assert_no_error(result)
# A CFArray is not very useful to us as an intermediary
# representation, so we are going to extract the objects we want
# and then free the array. We don't need to keep hold of keys: the
# keychain already has them!
result_count = CoreFoundation.CFArrayGetCount(result_array)
for index in range(result_count):
item = CoreFoundation.CFArrayGetValueAtIndex(result_array, index)
item = ctypes.cast(item, CoreFoundation.CFTypeRef)
if _is_cert(item):
CoreFoundation.CFRetain(item)
certificates.append(item)
elif _is_identity(item):
CoreFoundation.CFRetain(item)
identities.append(item)
finally:
if result_array:
CoreFoundation.CFRelease(result_array)
CoreFoundation.CFRelease(filedata)
return (identities, certificates)
def _load_client_cert_chain(keychain: SecKeychainRef, *paths: Optional[str]) -> CFArray:
"""
Load certificates and maybe keys from a number of files. Has the end goal
of returning a CFArray containing one SecIdentityRef, and then zero or more
SecCertificateRef objects, suitable for use as a client certificate trust
chain.
"""
# Ok, the strategy.
#
# This relies on knowing that macOS will not give you a SecIdentityRef
# unless you have imported a key into a keychain. This is a somewhat
# artificial limitation of macOS (for example, it doesn't necessarily
# affect iOS), but there is nothing inside Security.framework that lets you
# get a SecIdentityRef without having a key in a keychain.
#
# So the policy here is we take all the files and iterate them in order.
# Each one will use SecItemImport to have one or more objects loaded from
# it. We will also point at a keychain that macOS can use to work with the
# private key.
#
# Once we have all the objects, we'll check what we actually have. If we
# already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
# we'll take the first certificate (which we assume to be our leaf) and
# ask the keychain to give us a SecIdentityRef with that cert's associated
# key.
#
# We'll then return a CFArray containing the trust chain: one
# SecIdentityRef and then zero-or-more SecCertificateRef objects. The
# responsibility for freeing this CFArray will be with the caller. This
# CFArray must remain alive for the entire connection, so in practice it
# will be stored with a single SSLSocket, along with the reference to the
# keychain.
certificates = []
identities = []
# Filter out bad paths.
filtered_paths = (path for path in paths if path)
try:
for file_path in filtered_paths:
new_identities, new_certs = _load_items_from_file(keychain, file_path)
identities.extend(new_identities)
certificates.extend(new_certs)
# Ok, we have everything. The question is: do we have an identity? If
# not, we want to grab one from the first cert we have.
if not identities:
new_identity = Security.SecIdentityRef()
status = Security.SecIdentityCreateWithCertificate(
keychain, certificates[0], ctypes.byref(new_identity)
)
_assert_no_error(status)
identities.append(new_identity)
# We now want to release the original certificate, as we no longer
# need it.
CoreFoundation.CFRelease(certificates.pop(0))
# We now need to build a new CFArray that holds the trust chain.
trust_chain = CoreFoundation.CFArrayCreateMutable(
CoreFoundation.kCFAllocatorDefault,
0,
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
)
for item in itertools.chain(identities, certificates):
# ArrayAppendValue does a CFRetain on the item. That's fine,
# because the finally block will release our other refs to them.
CoreFoundation.CFArrayAppendValue(trust_chain, item)
return trust_chain
finally:
for obj in itertools.chain(identities, certificates):
CoreFoundation.CFRelease(obj)
TLS_PROTOCOL_VERSIONS = {
"SSLv2": (0, 2),
"SSLv3": (3, 0),
"TLSv1": (3, 1),
"TLSv1.1": (3, 2),
"TLSv1.2": (3, 3),
}
def _build_tls_unknown_ca_alert(version: str) -> bytes:
"""
Builds a TLS alert record for an unknown CA.
"""
ver_maj, ver_min = TLS_PROTOCOL_VERSIONS[version]
severity_fatal = 0x02
description_unknown_ca = 0x30
msg = struct.pack(">BB", severity_fatal, description_unknown_ca)
msg_len = len(msg)
record_type_alert = 0x15
record = struct.pack(">BBBH", record_type_alert, ver_maj, ver_min, msg_len) + msg
return record
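# Hedged illustration (not part of the original module): for "TLSv1.2" the
# helper above returns the seven bytes
#     0x15 0x03 0x03 0x00 0x02 0x02 0x30
# i.e. the alert record type, protocol version 3.3, a big-endian payload
# length of 2, then severity 0x02 (fatal) and description 0x30 (unknown CA).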
class SecurityConst:
"""
A class that acts essentially as a namespace for Security constants.
"""
kSSLSessionOptionBreakOnServerAuth = 0
kSSLProtocol2 = 1
kSSLProtocol3 = 2
kTLSProtocol1 = 4
kTLSProtocol11 = 7
kTLSProtocol12 = 8
# SecureTransport does not support TLS 1.3 even if there's a constant for it
kTLSProtocol13 = 10
kTLSProtocolMaxSupported = 999
kSSLClientSide = 1
kSSLStreamType = 0
kSecFormatPEMSequence = 10
kSecTrustResultInvalid = 0
kSecTrustResultProceed = 1
# This gap is present on purpose: this was kSecTrustResultConfirm, which
# is deprecated.
kSecTrustResultDeny = 3
kSecTrustResultUnspecified = 4
kSecTrustResultRecoverableTrustFailure = 5
kSecTrustResultFatalTrustFailure = 6
kSecTrustResultOtherError = 7
errSSLProtocol = -9800
errSSLWouldBlock = -9803
errSSLClosedGraceful = -9805
errSSLClosedNoNotify = -9816
errSSLClosedAbort = -9806
errSSLXCertChainInvalid = -9807
errSSLCrypto = -9809
errSSLInternal = -9810
errSSLCertExpired = -9814
errSSLCertNotYetValid = -9815
errSSLUnknownRootCert = -9812
errSSLNoRootCert = -9813
errSSLHostNameMismatch = -9843
errSSLPeerHandshakeFail = -9824
errSSLPeerUserCancelled = -9839
errSSLWeakPeerEphemeralDHKey = -9850
errSSLServerAuthCompleted = -9841
errSSLRecordOverflow = -9847
errSecVerifyFailed = -67808
errSecNoTrustSettings = -25263
errSecItemNotFound = -25300
errSecInvalidTrustSettings = -25262
|
|
import ptypes
from ptypes import *
from . import umtypes, mmtypes
from .datatypes import *
class SIZE_T64(ULONGLONG): pass
class RTL_CRITICAL_SECTION(pstruct.type, versioned):
_fields_ = [
(PVOID, 'DebugInfo'),
(LONG, 'LockCount'),
(LONG, 'RecursionCount'),
(PVOID, 'OwningThread'),
(PVOID, 'LockSemaphore'),
(lambda self: ULONGLONG if getattr(self, 'WIN64', False) else ULONG, 'SpinCount'),
]
class RTL_BITMAP(pstruct.type):
class _Buffer(BitmapBitsArray):
_object_ = ULONG
def __Buffer(self):
res = self['SizeOfBitMap'].l
fractionQ = 1 if res.int() % 32 else 0
target = dyn.clone(RTL_BITMAP._Buffer, length=fractionQ + res.int() // 32)
return P(target)
_fields_ = [
(ULONG, 'SizeOfBitMap'),
(__Buffer, 'Buffer'),
]
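# Hedged note (not part of the original definitions): __Buffer above rounds
# the bit count up to whole 32-bit words, so e.g. SizeOfBitMap == 33 yields
# a pointer to two ULONG elements (33 // 32 == 1 plus one for the remainder).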
class RTL_BITMAP_EX(pstruct.type):
class _Buffer(BitmapBitsArray):
_object_ = ULONGLONG
def __Buffer(self):
res = self['SizeOfBitMap'].l
fractionQ = 1 if res.int() % 64 else 0
target = dyn.clone(RTL_BITMAP_EX._Buffer, length=fractionQ + res.int() // 64)
return P(target)
_fields_ = [
(ULONGLONG, 'SizeOfBitMap'),
(__Buffer, 'Buffer'),
]
class RTL_DRIVE_LETTER_CURDIR(pstruct.type):
_fields_ = [
(WORD, 'Flags'),
(WORD, 'Length'),
(ULONG, 'TimeStamp'),
(umtypes.STRING, 'DosPath'),
]
class CURDIR(pstruct.type):
_fields_ = [
(umtypes.UNICODE_STRING, 'DosPath'),
(HANDLE, 'Handle'),
]
def summary(self):
return 'Handle={:x} DosPath={!r}'.format(self['Handle'].int(), self['DosPath'].str())
class RTL_USER_PROCESS_INFORMATION(pstruct.type):
_fields_ = [
(ULONG, 'Size'),
(HANDLE, 'Process'),
(HANDLE, 'Thread'),
(umtypes.CLIENT_ID, 'ClientId'),
(mmtypes.SECTION_IMAGE_INFORMATION, 'ImageInformation'),
]
class RTL_USER_PROCESS_PARAMETERS(pstruct.type):
_fields_ = [
(ULONG, 'MaximumLength'),
(ULONG, 'Length'),
(ULONG, 'Flags'),
(ULONG, 'DebugFlags'),
(PVOID, 'ConsoleHandle'),
(ULONG, 'ConsoleFlags'),
(PVOID, 'StandardInput'),
(PVOID, 'StandardOutput'),
(PVOID, 'StandardError'),
(CURDIR, 'CurrentDirectory'),
(umtypes.UNICODE_STRING, 'DllPath'),
(umtypes.UNICODE_STRING, 'ImagePathName'),
(umtypes.UNICODE_STRING, 'CommandLine'),
# (P(lambda s: dyn.block(s.getparent(RTL_USER_PROCESS_PARAMETERS)['EnvironmentSize'].int())), 'Environment'),
# (P(lambda s: dyn.lazyblockarray(pstr.szwstring, s.getparent()['EnvironmentSize'].li.int())), 'Environment'),
(P(lambda s: dyn.blockarray(pstr.szwstring, s.getparent()['EnvironmentSize'].li.int())), 'Environment'),
(ULONG, 'StartingX'),
(ULONG, 'StartingY'),
(ULONG, 'CountX'),
(ULONG, 'CountY'),
(ULONG, 'CountCharsX'),
(ULONG, 'CountCharsY'),
(ULONG, 'FillAttribute'),
(ULONG, 'WindowFlags'),
(ULONG, 'ShowWindowFlags'),
(umtypes.UNICODE_STRING, 'WindowTitle'),
(umtypes.UNICODE_STRING, 'DesktopInfo'),
(umtypes.UNICODE_STRING, 'ShellInfo'),
(umtypes.UNICODE_STRING, 'RuntimeData'),
(dyn.array(RTL_DRIVE_LETTER_CURDIR, 32), 'CurrentDirectories'),
(ULONG, 'EnvironmentSize'),
]
class RTL_PATH_TYPE(pint.enum):
_values_ = [
('RtlPathTypeUnknown', 0),
('RtlPathTypeUncAbsolute', 1),
('RtlPathTypeDriveAbsolute', 2),
('RtlPathTypeDriveRelative', 3),
('RtlPathTypeRooted', 4),
('RtlPathTypeRelative', 5),
('RtlPathTypeLocalDevice', 6),
('RtlPathTypeRootLocalDevice', 7),
]
class RTL_RELATIVE_NAME(pstruct.type):
_fields_ = [
(umtypes.UNICODE_STRING, 'RelativeName'),
(HANDLE, 'ContainingDirectory'),
(PVOID, 'CurDirRef'),
]
class RTL_PROCESS_MODULE_INFORMATION(pstruct.type):
_fields_ = [
(PVOID, 'MappedBase'),
(PVOID, 'ImageBase'),
(ULONG, 'ImageSize'),
(ULONG, 'Flags'),
(USHORT, 'LoadOrderIndex'),
(USHORT, 'InitOrderIndex'),
(USHORT, 'LoadCount'),
(USHORT, 'OffsetToFileName'),
(dyn.clone(pstr.string, length=256), 'FullPathName'),
]
class RTL_BALANCED_LINKS(pstruct.type):
def __init__(self, **attrs):
super(RTL_BALANCED_LINKS, self).__init__(**attrs)
f = self._fields_ = []
f.extend([
(P(RTL_BALANCED_LINKS), 'Parent'),
(P(RTL_BALANCED_LINKS), 'LeftChild'),
(P(RTL_BALANCED_LINKS), 'RightChild'),
(CHAR, 'Balance'),
(dyn.array(UCHAR, 3), 'Reserved'),
])
class RTL_AVL_COMPARE_ROUTINE(void): pass
class RTL_AVL_ALLOCATE_ROUTINE(void): pass
class RTL_AVL_FREE_ROUTINE(void): pass
class RTL_AVL_TABLE(pstruct.type, versioned):
_fields_ = [
(RTL_BALANCED_LINKS, 'BalancedRoot'),
(PVOID, 'OrderedPointer'),
(ULONG, 'WhichOrderedElement'),
(ULONG, 'NumberGenericTableElements'),
(ULONG, 'DepthOfTree'),
(P(RTL_BALANCED_LINKS), 'RestartKey'),
(ULONG, 'DeleteCount'),
(P(RTL_AVL_COMPARE_ROUTINE), 'CompareRoutine'),
(P(RTL_AVL_ALLOCATE_ROUTINE), 'AllocateRoutine'),
(P(RTL_AVL_FREE_ROUTINE), 'FreeRoutine'),
(PVOID, 'TableContext'),
]
class RTL_BALANCED_NODE(pstruct.type, versioned):
def __init__(self, **attrs):
super(RTL_BALANCED_NODE, self).__init__(**attrs)
f = self._fields_ = []
f.extend([
(P(RTL_BALANCED_NODE), 'Left'),
(P(RTL_BALANCED_NODE), 'Right'),
(ULONG, 'ParentValue'),
(dyn.block(4 if getattr(self, 'WIN64', False) else 0), 'padding(ParentValue)'),
])
class RTL_RB_TREE(pstruct.type):
_fields_ = [
(P(RTL_BALANCED_NODE), 'Root'),
(P(RTL_BALANCED_NODE), 'Min'),
]
class RTL_STACK_TRACE_ENTRY(pstruct.type, versioned):
def __init__(self, **attrs):
super(RTL_STACK_TRACE_ENTRY, self).__init__(**attrs)
f = self._fields_ = []
f.extend([
(P(RTL_STACK_TRACE_ENTRY), 'HashChain'),
(ULONG, 'TraceCount'),
(USHORT, 'Index'),
(USHORT, 'Depth'),
(dyn.array(PVOID, 32), 'BackTrace'),
])
class STACK_TRACE_DATABASE(pstruct.type, versioned):
def __init__(self, **attrs):
super(STACK_TRACE_DATABASE, self).__init__(**attrs)
from . import extypes
f = self._fields_ = []
f.extend([
(extypes.ERESOURCE, 'Lock'),
(BOOLEAN, 'DumpInProgress'),
(dyn.align(8 if getattr(self, 'WIN64', False) else 4), 'align(CommitBase)'),
(PVOID, 'CommitBase'),
(PVOID, 'CurrentLowerCommitLimit'),
(PVOID, 'CurrentUpperCommitLimit'),
(P(UCHAR), 'NextFreeLowerMemory'),
(P(UCHAR), 'NextFreeUpperMemory'),
(ULONG, 'NumberOfEntriesAdded'),
(ULONG, 'NumberOfAllocationFailures'),
(P(RTL_STACK_TRACE_ENTRY), 'EntryIndexArray'),
(ULONG, 'NumberOfBuckets'),
(lambda self: dyn.array(P(RTL_STACK_TRACE_ENTRY), self['NumberOfBuckets'].li.int()), 'Buckets'),
])
class RTL_TRACE_BLOCK(pstruct.type, versioned):
def __init__(self, **attrs):
super(RTL_TRACE_BLOCK, self).__init__(**attrs)
f = self._fields_ = []
f.extend([
(ULONG, 'Magic'),
(ULONG, 'Count'),
(ULONG, 'Size'),
(ULONG, 'UserCount'),
(ULONG, 'UserSize'),
(PVOID, 'UserContext'),
(P(RTL_TRACE_BLOCK), 'Next'),
(PVOID, 'Trace'),
])
class RTL_TRACE_DATABASE(pstruct.type, versioned):
def __init__(self, **attrs):
super(RTL_TRACE_DATABASE, self).__init__(**attrs)
f = self._fields_ = []
f.extend([
(ULONG, 'Magic'),
(ULONG, 'Flags'),
(ULONG, 'Tag'),
(P(RTL_TRACE_SEGMENT), 'SegmentList'),
(SIZE_T64 if getattr(self, 'WIN64', False) else SIZE_T, 'MaximumSize'),
(SIZE_T64 if getattr(self, 'WIN64', False) else SIZE_T, 'CurrentSize'),
(PVOID, 'Owner'),
(RTL_CRITICAL_SECTION, 'Lock'),
(ULONG, 'NoOfBuckets'),
(lambda self: P(dyn.array(RTL_TRACE_BLOCK, self['NoOfBuckets'].li.int())), 'Buckets'),
(RTL_TRACE_HASH_FUNCTION, 'HashFunction'),
(SIZE_T64 if getattr(self, 'WIN64', False) else SIZE_T, 'NoOfTraces'),
(SIZE_T64 if getattr(self, 'WIN64', False) else SIZE_T, 'NoOfHits'),
(dyn.array(ULONG, 16), 'HashCount'),
])
class RTL_TRACE_SEGMENT(pstruct.type, versioned):
def __init__(self, **attrs):
super(RTL_TRACE_SEGMENT, self).__init__(**attrs)
f = self._fields_ = []
f.extend([
(ULONG, 'Magic'),
(P(RTL_TRACE_DATABASE), 'Database'),
(P(RTL_TRACE_SEGMENT), 'NextSegment'),
(SIZE_T64 if getattr(self, 'WIN64', False) else SIZE_T, 'TotalSize'),
(P(CHAR), 'SegmentStart'),
(P(CHAR), 'SegmentEnd'),
(P(CHAR), 'SegmentFree'),
])
class RTL_TRACE_ENUMERATE(pstruct.type, versioned):
def __init__(self, **attrs):
super(RTL_TRACE_ENUMERATE, self).__init__(**attrs)
f = self._fields_ = []
f.extend([
(P(RTL_TRACE_DATABASE), 'Database'),
(ULONG, 'Index'),
(P(RTL_TRACE_BLOCK), 'Block'),
])
class RTL_RUN_ONCE(dynamic.union, versioned):
def __ULONG3264(self):
return ULONGLONG if getattr(self, 'WIN64', False) else ULONG
_fields_ = [
(PVOID, 'Ptr'),
(__ULONG3264, 'Value'),
(__ULONG3264, 'State'), # ULONGLONG State:2
]
class RTL_SRWLOCK(dynamic.union, versioned):
class _Shared(pbinary.flags):
_fields_ = [
(lambda self: 60 if getattr(self, 'WIN64', False) else 28, 'Shared'),
(1, 'MultipleShared'),
(1, 'Waking'),
(1, 'Waiting'),
(1, 'Locked'),
]
_fields_ = [
(_Shared, 'Shared'),
(ULONG, 'Value'),
(PVOID, 'Ptr'),
]
@pbinary.littleendian
class HEAP_(pbinary.flags):
'''ULONG'''
_fields_ = [
(1, 'LOCK_USER_ALLOCATED'),
(1, 'VALIDATE_PARAMETERS_ENABLED'),
(1, 'VALIDATE_ALL_ENABLED'),
(1, 'SKIP_VALIDATION_CHECKS'),
(1, 'CAPTURE_STACK_BACKTRACES'),
(1, 'BREAK_WHEN_OUT_OF_VM'),
(1, 'PROTECTION_ENABLED'),
(1, 'FLAG_PAGE_ALLOCS'),
(5, 'RESERVED'),
(1, 'CREATE_ENABLE_EXECUTE'),
(1, 'CREATE_ENABLE_TRACING'),
(1, 'CREATE_ALIGN_16'),
(4, 'CLASS'),
(1, 'SETTABLE_USER_FLAG3'),
(1, 'SETTABLE_USER_FLAG2'),
(1, 'SETTABLE_USER_FLAG1'),
(1, 'SETTABLE_USER_VALUE'),
(1, 'DISABLE_COALESCE_ON_FREE'),
(1, 'FREE_CHECKING_ENABLED'),
(1, 'TAIL_CHECKING_ENABLED'),
(1, 'REALLOC_IN_PLACE_ONLY'),
(1, 'ZERO_MEMORY'),
(1, 'GENERATE_EXCEPTIONS'),
(1, 'GROWABLE'),
(1, 'NO_SERIALIZE'),
]
class RTL_HP_SEG_ALLOC_POLICY(pstruct.type, versioned):
def __ULONG3264(self):
return ULONGLONG if getattr(self, 'WIN64', False) else ULONG
_fields_ = [
(__ULONG3264, 'MinLargePages'),
(__ULONG3264, 'MaxLargePages'),
(UCHAR, 'MinUtilization'),
(lambda self: dyn.block(7 if getattr(self, 'WIN64', False) else 3), 'padding(MinUtilization)'),
]
class RTL_HP_ENV_HANDLE(pstruct.type):
_fields_ = [
(dyn.array(PVOID, 2), 'h'),
]
class RTL_HEAP_MEMORY_LIMIT_DATA(pstruct.type, versioned):
def __ULONG3264(self):
return ULONGLONG if getattr(self, 'WIN64', False) else ULONG
_fields_ = [
(__ULONG3264, 'CommitLimitBytes'),
(__ULONG3264, 'CommitLimitFailureCode'),
(__ULONG3264, 'MaxAllocationSizeBytes'),
(__ULONG3264, 'AllocationLimitFailureCode'),
]
class RTLP_HP_LOCK_TYPE(pint.enum):
_values_ = [
('HeapLockPaged', 0),
('HeapLockNonPaged', 1),
('HeapLockTypeMax', 2),
]
class RTL_HP_VS_CONFIG(pstruct.type):
@pbinary.littleendian
class _Flags(pbinary.flags):
_fields_ = [
(29, 'Reserved'),
(1, 'EnableDelayFree'),
(1, 'FullDecommit'),
(1, 'PageAlignLargeAllocs'),
]
_fields_ = [
(_Flags, 'Flags'),
]
class RTL_HP_LFH_CONFIG(pstruct.type):
@pbinary.littleendian
class _Options(pbinary.flags):
_fields_ = [
(14, 'Reserved'),
(1, 'DisableRandomization'),
(1, 'WitholdPageCrossingBlocks'),
]
_fields_ = [
(USHORT, 'MaxBlockSize'),
(_Options, 'Options'),
]
|
|
from flask import Flask
from flask import g
from flask import jsonify
from flask import redirect
from flask import render_template
from flask import render_template_string
from flask import request
from flask import send_from_directory
from flask import url_for
from flask.ext.babel import Babel
from flask.ext.mail import Mail
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.user import current_user
from flask.ext.user import login_required
from flask.ext.user import SQLAlchemyAdapter
from flask.ext.user import UserManager
from flask.ext.user import UserMixin
from docker import client
import json
import os
import re
import redis
import sys
import time
import uuid
# set defaults
IMAGE_NAME1 = "lab41/dendrite"
IMAGE_NAME2 = "lab41/redwood"
IMAGE_NAME3 = "lab41/hemlock"
DOCKER_HOST = "172.17.42.1"
DOMAIN = "127.0.0.1"
REDIS_HOST = "localhost"
RSYSLOG_HOST = "rsyslog"
PARENT_HOST = "parent"
COOKIE="try41-uid"
REDIS_PORT=6379
DOCKER_PORT=2375
# use user accounts
USERS=False
# use ssl
SSL=False
# dendrite
EXPOSED_PORT1=8000
EXPOSED_PORT2=8448
# redwood
EXPOSED_PORT3=8000
# hemlock
EXPOSED_PORT4=8000
EXPOSED_PORT5=9200
r = redis.StrictRedis(host=REDIS_HOST, port=int(REDIS_PORT))
c = client.Client(version="1.6", base_url='http://%s:%s' % (DOCKER_HOST, DOCKER_PORT))
BAD = False
UUID4 = re.compile('[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
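# Hedged note (not part of the original application): this pattern matches
# the lowercase, hyphenated form produced by str(uuid.uuid4()). Because it is
# used with re.match(), it only anchors at the start of the string and does
# not check the UUID version bits, so it is a sanity check rather than strict
# validation.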
# Use a Class-based config to avoid needing a 2nd file
class ConfigClass(object):
# Configure Flask
SECRET_KEY = 'secret' # change for production
CSRF_ENABLED = True
if USERS:
SQLALCHEMY_DATABASE_URI = 'postgresql' # change for production
# Configure session cookie
if not USERS:
SESSION_COOKIE_SECURE = True
SESSION_REFRESH_EACH_REQUEST = False
SESSION_COOKIE_HTTPONLY = True
# Configure Flask-Mail
if USERS:
MAIL_SERVER = 'smtp' # change for production
MAIL_PORT = 25
MAIL_USE_SSL = False
MAIL_DEFAULT_SENDER = 'sender' # change for production
# Configure Flask-User
if USERS:
USER_ENABLE_USERNAME = True
USER_ENABLE_CONFIRM_EMAIL = True
USER_ENABLE_CHANGE_USERNAME = True
USER_ENABLE_CHANGE_PASSWORD = True
USER_ENABLE_FORGOT_PASSWORD = True
USER_ENABLE_RETYPE_PASSWORD = True
USER_LOGIN_TEMPLATE = 'flask_user/login_or_register.html'
USER_REGISTER_TEMPLATE = 'flask_user/login_or_register.html'
def create_app():
# Setup Flask and read config from ConfigClass defined above
app = Flask(__name__)
app.config.from_object(__name__+'.ConfigClass')
# Initialize Flask extensions
if USERS:
babel = Babel(app)
db = SQLAlchemy(app)
mail = Mail(app)
@babel.localeselector
def get_locale():
translations = [str(translation) for translation in babel.list_translations()]
return request.accept_languages.best_match(translations)
# Define User model. Make sure to add flask.ext.user UserMixin!!
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
active = db.Column(db.Boolean(), nullable=False, default=False)
username = db.Column(db.String(50), nullable=False, unique=True)
password = db.Column(db.String(255), nullable=False, default='')
email = db.Column(db.String(255), nullable=False, unique=True)
confirmed_at = db.Column(db.DateTime())
reset_password_token = db.Column(db.String(100), nullable=False, default='')
# Create all database tables
db.create_all()
# Setup Flask-User
db_adapter = SQLAlchemyAdapter(db, User)
user_manager = UserManager(db_adapter, app)
# The '/profile' page requires a logged-in user
@app.route('/profile')
@login_required
def profile():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>{%trans%}Profile Page{%endtrans%}</h2>
<p> {%trans%}Hello{%endtrans%}
{{ current_user.username or current_user.email }},</p>
<p> <a href="{{ url_for('user.change_username') }}">
{%trans%}Change username{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.change_password') }}">
{%trans%}Change password{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.logout') }}?next={{ url_for('user.login') }}">
{%trans%}Sign out{%endtrans%}</a></p>
{% endblock %}
""")
def store_metadata(exposed_ports, container_id, container, image_name):
global BAD
urlport = ""
for exposed_port in exposed_ports:
container_port = c.port(container_id, exposed_port)
url = "%s:%s" % (DOMAIN, container_port)
urlport += url+","
hmap = {}
hmap['container_id'] = container_id
hmap['container'] = container
hmap['url'] = urlport[:-1]
hmap['timestamp'] = int(time.time())
hmap['expired'] = 0
hmap['image'] = image_name
data = json.dumps(hmap)
check_cookie()
# check cookie formatting, ensure that it exists in sessions
# also check that it doesn't already exist
if not BAD:
cookie = request.cookies.get(COOKIE)
if re.match(UUID4, cookie):
if r.sismember('sessions', cookie):
r.lpush(cookie, data)
else:
app.logger.info('invalid session')
BAD = True
else:
    app.logger.info('invalid uuid')
    BAD = True
return BAD
def get_url(request):
global BAD
# this is validated with check_cookie before_request
if not BAD:
uid = request.cookies.get(COOKIE)
container = r.lindex(uid, 0)
container = json.loads(container)
url = container['url']
if "," in url:
url_list = url.split(',')
url = url_list[-1]
return url
else:
return ""
def after_this_request(f):
if not hasattr(g, 'after_request_callbacks'):
g.after_request_callbacks = []
g.after_request_callbacks.append(f)
return f
@app.after_request
def call_after_request_callbacks(response):
for callback in getattr(g, 'after_request_callbacks', ()):
callback(response)
return response
@app.before_request
def check_cookie():
global BAD
uid = request.cookies.get(COOKIE)
if uid is None:
uid = str(uuid.uuid4())
@after_this_request
def save_cookie(response):
# validate uid formatting, and that it doesn't conflict
if re.match(UUID4, uid):
if r.sismember('sessions', uid):
app.logger.info('uuid already exists')
BAD = True
else:
r.sadd('sessions', uid)
g.uid = uid
BAD = False
response.set_cookie(COOKIE, uid, httponly=True)
else:
app.logger.info('invalid uuid')
BAD = True
BAD = False
@app.route('/')
def index():
return render_template("index.html")
@app.route('/github-buttons')
def github_buttons():
return render_template("github-btn.html")
@app.route('/details/wait')
def wait():
return render_template("wait.html")
@app.route('/details2/wait2')
def wait2():
return render_template("wait.html")
@app.route('/details3/wait3')
def wait3():
return render_template("wait.html")
@app.route('/new', methods=["POST"])
def new():
if not USERS or current_user.is_authenticated():
exposed_ports = [EXPOSED_PORT2, EXPOSED_PORT1]
cookie = request.cookies.get(COOKIE)
if re.match(UUID4, cookie):
spinup = 1
# check if this image has already been spun up for this session
if r.exists(cookie):
# !! TODO error check
data = r.lrange(cookie, 0, -1)
for record in data:
jrec = json.loads(record)
if jrec['image'] == "lab41/dendrite":
if jrec['expired'] == 0:
app.logger.info('a dendrite container is already running for this session')
spinup = 0
return jsonify(url="wait")
if spinup == 1:
if SSL:
container = c.create_container(IMAGE_NAME1, environment={'REMOTE_HOST': RSYSLOG_HOST, 'PARENT_HOST': PARENT_HOST, 'SSL': "True"})
else:
container = c.create_container(IMAGE_NAME1, environment={'REMOTE_HOST': RSYSLOG_HOST, 'PARENT_HOST': PARENT_HOST})
container_id = container["Id"]
c.start(container, publish_all_ports=True)
b = c.inspect_container(container)
bad = store_metadata(exposed_ports, container_id, container, IMAGE_NAME1)
if bad:
return render_template("index.html")
else:
return jsonify(url="launch")
else:
return jsonify(url="wait")
else:
return jsonify(url="login")
@app.route('/new2', methods=["POST"])
def new2():
if not USERS or current_user.is_authenticated():
exposed_ports = [EXPOSED_PORT3]
cookie = request.cookies.get(COOKIE)
if re.match(UUID4, cookie):
spinup = 1
# check if this image has already been spun up for this session
if r.exists(cookie):
# !! TODO error check
data = r.lrange(cookie, 0, -1)
for record in data:
jrec = json.loads(record)
if jrec['image'] == "lab41/redwood":
if jrec['expired'] == 0:
app.logger.info('a redwood container is already running for this session')
spinup = 0
return jsonify(url="wait2")
if spinup == 1:
container = c.create_container(IMAGE_NAME2, tty=True, environment={'REMOTE_HOST': RSYSLOG_HOST, 'PARENT_HOST': PARENT_HOST})
container_id = container["Id"]
c.start(container, publish_all_ports=True)
b = c.inspect_container(container)
bad = store_metadata(exposed_ports, container_id, container, IMAGE_NAME2)
if bad:
return render_template("index.html")
else:
return jsonify(url="launch")
else:
return jsonify(url="wait2")
else:
return jsonify(url="login")
@app.route('/new3', methods=["POST"])
def new3():
if not USERS or current_user.is_authenticated():
exposed_ports = [EXPOSED_PORT5, EXPOSED_PORT4]
cookie = request.cookies.get(COOKIE)
if re.match(UUID4, cookie):
spinup = 1
# check if this image has already been spun up for this session
if r.exists(cookie):
# !! TODO error check
data = r.lrange(cookie, 0, -1)
for record in data:
jrec = json.loads(record)
if jrec['image'] == "lab41/hemlock":
if jrec['expired'] == 0:
app.logger.info('a hemlock container is already running for this session')
spinup = 0
return jsonify(url="wait3")
if spinup == 1:
if SSL:
container = c.create_container(IMAGE_NAME3, tty=True, environment={'REMOTE_HOST': RSYSLOG_HOST, 'PARENT_HOST': PARENT_HOST, 'SSL': "True"})
else:
container = c.create_container(IMAGE_NAME3, tty=True, environment={'REMOTE_HOST': RSYSLOG_HOST, 'PARENT_HOST': PARENT_HOST})
container_id = container["Id"]
c.start(container, publish_all_ports=True)
b = c.inspect_container(container)
bad = store_metadata(exposed_ports, container_id, container, IMAGE_NAME3)
if bad:
return render_template("index.html")
else:
return jsonify(url="launch")
else:
return jsonify(url="wait3")
else:
return jsonify(url="login")
@app.route('/details/login')
def details_login():
return redirect(url_for('user.login'))
@app.route('/details2/login')
def details2_login():
return redirect(url_for('user.login'))
@app.route('/details3/login')
def details3_login():
return redirect(url_for('user.login'))
@app.route('/details/launch')
def details():
if not USERS or current_user.is_authenticated():
url = get_url(request)
return render_template("details.html",url=url, USERS=USERS, SSL=SSL, DOMAIN=DOMAIN)
else:
return jsonify(url="login")
@app.route('/details2/launch')
def details2():
if not USERS or current_user.is_authenticated():
url = get_url(request)
return render_template("details2.html",url=url, USERS=USERS, SSL=SSL, DOMAIN=DOMAIN)
else:
return jsonify(url="login")
@app.route('/details3/launch')
def details3():
if not USERS or current_user.is_authenticated():
url = get_url(request)
return render_template("details3.html",url=url, USERS=USERS, SSL=SSL, DOMAIN=DOMAIN)
else:
return jsonify(url="login")
@app.route('/robot.txt')
def robot():
return render_template("robot.html")
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
return app
if __name__ == '__main__':
app = create_app()
app.run(host="0.0.0.0")
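# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original app): the /new, /new2 and /new3
# routes above all repeat the same pattern -- validate the session cookie,
# skip spin-up when Redis already holds a non-expired record for the image,
# otherwise create/start a container and store its metadata. A helper along
# these lines could factor that out. It reuses names from this module
# (request, jsonify, render_template, re, json, r, c, COOKIE, UUID4,
# store_metadata); in the original those may live inside create_app(), in
# which case the helper would be nested there as well. Illustrative only.
def _spin_up_once(image_name, exposed_ports, wait_url, **create_kwargs):
    cookie = request.cookies.get(COOKIE)
    if not re.match(UUID4, cookie):
        return jsonify(url=wait_url)
    if r.exists(cookie):
        for record in r.lrange(cookie, 0, -1):
            jrec = json.loads(record)
            if jrec['image'] == image_name and jrec['expired'] == 0:
                # a container for this image is already running for this session
                return jsonify(url=wait_url)
    container = c.create_container(image_name, **create_kwargs)
    c.start(container, publish_all_ports=True)
    if store_metadata(exposed_ports, container["Id"], container, image_name):
        return render_template("index.html")
    return jsonify(url="launch")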
|
|
import datetime
import random
import re
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db import transaction
from django.template.loader import render_to_string
from django.utils.hashcompat import sha_constructor
from django.utils.translation import ugettext_lazy as _
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class RegistrationManager(models.Manager):
"""
Custom manager for the ``RegistrationProfile`` model.
The methods defined here provide shortcuts for account creation
and activation (including generation and emailing of activation
keys), and for cleaning out expired inactive accounts.
"""
def activate_user(self, activation_key):
"""
Validate an activation key and activate the corresponding
``User`` if valid.
If the key is valid and has not expired, return the ``User``
after activating.
If the key is not valid or has expired, return ``False``.
If the key is valid but the ``User`` is already active,
return ``False``.
To prevent reactivation of an account which has been
deactivated by site administrators, the activation key is
reset to the string constant ``RegistrationProfile.ACTIVATED``
after successful activation.
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if SHA1_RE.search(activation_key):
try:
profile = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not profile.activation_key_expired():
user = profile.user
user.is_active = True
user.save()
profile.activation_key = self.model.ACTIVATED
profile.save()
return user
return False
def create_inactive_user(self, username, email, password,
site, send_email=True):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
"""
new_user = User.objects.create_user(username, email, password)
new_user.is_active = False
new_user.save()
registration_profile = self.create_profile(new_user)
if send_email:
registration_profile.send_activation_email(site)
return new_user
create_inactive_user = transaction.commit_on_success(create_inactive_user)
def create_profile(self, user):
"""
Create a ``RegistrationProfile`` for a given
``User``, and return the ``RegistrationProfile``.
The activation key for the ``RegistrationProfile`` will be a
SHA1 hash, generated from a combination of the ``User``'s
username and a random salt.
"""
salt = sha_constructor(str(random.random())).hexdigest()[:5]
username = user.username
if isinstance(username, unicode):
username = username.encode('utf-8')
activation_key = sha_constructor(salt+username).hexdigest()
return self.create(user=user,
activation_key=activation_key)
def delete_expired_users(self):
"""
Remove expired instances of ``RegistrationProfile`` and their
associated ``User``s.
Accounts to be deleted are identified by searching for
instances of ``RegistrationProfile`` with expired activation
keys, and then checking to see if their associated ``User``
instances have the field ``is_active`` set to ``False``; any
``User`` who is both inactive and has an expired activation
key will be deleted.
It is recommended that this method be executed regularly as
part of your routine site maintenance; this application
provides a custom management command which will call this
method, accessible as ``manage.py cleanupregistration``.
Regularly clearing out accounts which have never been
activated serves two useful purposes:
        1. It alleviates the occasional need to reset a
``RegistrationProfile`` and/or re-send an activation email
when a user does not receive or does not act upon the
initial activation email; since the account will be
deleted, the user will be able to simply re-register and
receive a new activation key.
2. It prevents the possibility of a malicious user registering
one or more accounts and never activating them (thus
denying the use of those usernames to anyone else); since
those accounts will be deleted, the usernames will become
available for use again.
If you have a troublesome ``User`` and wish to disable their
account while keeping it in the database, simply delete the
associated ``RegistrationProfile``; an inactive ``User`` which
does not have an associated ``RegistrationProfile`` will not
be deleted.
"""
for profile in self.all():
if profile.activation_key_expired():
user = profile.user
if not user.is_active:
user.delete()
class RegistrationProfile(models.Model):
"""
A simple profile which stores an activation key for use during
    user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts, as well as for cleaning
out accounts which have never been activated.
While it is possible to use this model as the value of the
``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do
so. This model's sole purpose is to store data temporarily during
account registration and activation.
"""
ACTIVATED = u"ALREADY_ACTIVATED"
user = models.ForeignKey(User, unique=True, verbose_name=_('user'))
activation_key = models.CharField(_('activation key'), max_length=40)
objects = RegistrationManager()
class Meta:
verbose_name = _('registration profile')
verbose_name_plural = _('registration profiles')
def __unicode__(self):
return u"Registration information for %s" % self.user
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by a two-step process:
1. If the user has already activated, the key will have been
reset to the string constant ``ACTIVATED``. Re-activating
is not permitted, and so this method returns ``True`` in
this case.
2. Otherwise, the date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
days after signup during which a user is allowed to
activate their account); if the result is less than or
equal to the current date, the key has expired and this
method returns ``True``.
"""
expiration_date = datetime.timedelta(days=int(settings.ACCOUNT_ACTIVATION_DAYS))
return self.activation_key == self.ACTIVATED or \
(self.user.date_joined + expiration_date <= datetime.datetime.now())
activation_key_expired.boolean = True
def send_activation_email(self, site):
"""
Send an activation email to the user associated with this
``RegistrationProfile``.
The activation email will make use of two templates:
``registration/activation_email_subject.txt``
This template will be used for the subject line of the
email. Because it is used as the subject line of an email,
this template's output **must** be only a single line of
text; output longer than one line will be forcibly joined
into only a single line.
``registration/activation_email.txt``
This template will be used for the body of the email.
These templates will each receive the following context
variables:
``activation_key``
The activation key for the new account.
``expiration_days``
The number of days remaining during which the account may
be activated.
``site``
An object representing the site on which the user
registered; depending on whether ``django.contrib.sites``
is installed, this may be an instance of either
``django.contrib.sites.models.Site`` (if the sites
application is installed) or
``django.contrib.sites.models.RequestSite`` (if
not). Consult the documentation for the Django sites
framework for details regarding these objects' interfaces.
"""
ctx_dict = { 'activation_key': self.activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': site }
# 20100118 RL
# Changed location of the activation email templates.
# These should never have been hard-coded!
subject = render_to_string('domain/user_registration/activation_email_subject.txt',
ctx_dict)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('domain/user_registration/activation_email.txt',
ctx_dict)
self.user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
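# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the typical lifecycle
# of the manager above inside a configured Django project. The username,
# email and password are placeholders, and the activation key would normally
# arrive via the link rendered into the activation email templates.
#
#   from django.contrib.sites.models import Site
#
#   site = Site.objects.get_current()
#   new_user = RegistrationProfile.objects.create_inactive_user(
#       'alice', 'alice@example.com', 's3cret', site)        # user starts inactive
#   profile = RegistrationProfile.objects.get(user=new_user)
#   user = RegistrationProfile.objects.activate_user(profile.activation_key)
#   # activate_user() returns the User on success, or False for a bad/expired key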
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################
#### author: wukai ####
#### License: MIT ####
#### v1.0 ####
##############################
import copy
__all__ = ["QS", "T", "F", "E"]
class Error(Exception):
pass
def quote(name):
return "`%s`" % name.replace("`", "``")
##################################################################
class MetaTable(type):
def __getattr__(cls, key):
temp = key.split("__")
name = quote(temp[0])
alias = None
if len(temp) > 1:
alias = quote(temp[1])
return cls(name, alias)
class Table(object):
__metaclass__ = MetaTable
def __init__(self, name, alias=None):
self._name = name
self._alias = alias
self._join = None
self._on = None
def __mul__(self, obj):
return TableSet(self).__mul__(obj)
def __add__(self, obj):
return TableSet(self).__add__(obj)
@property
def sql(self):
sql = [self._name]
if self._join:
sql.insert(0, self._join)
if self._alias:
sql.extend(["AS", self._alias])
if self._on:
sql.extend(["ON", "(%s)" % (self._on.sql,)])
return " ".join(sql)
@property
def params(self):
return self._on.params if self._on else []
class TableSet(object):
def __init__(self, join_obj):
self._join_list = [join_obj]
self._sub = False
self._join = None
self._on = None
def __mul__(self, obj):
return self._add_join("JOIN", obj)
def __add__(self, obj):
return self._add_join("LEFT JOIN", obj)
@property
def sql(self):
sql = [" ".join([k.sql for k in self._join_list])]
if self._join:
sql[0] = "(%s)" % (sql[0],)
sql.insert(0, self._join)
if self._on:
sql.extend(["ON", "(%s)" % (self._on.sql,)])
return " ".join(sql)
@property
def params(self):
params = []
for sql_obj in self._join_list:
params.extend(sql_obj.params)
return params
#public func
def on(self, c):
self._join_list[-1]._on = c
return self
#private func
def _add_join(self, join_type, obj):
obj._join = join_type
self._join_list.append(obj)
return self
##################################################################
class MetaField(type):
def __getattr__(cls, key):
temp = key.split("__")
name = quote(temp[0])
prefix = None
if len(temp) > 1:
prefix = quote(temp[0])
name = quote(temp[1])
return cls(name, prefix)
class Field(object):
__metaclass__ = MetaField
def __init__(self, name, prefix=None):
self._name = name
self._prefix = prefix
def __eq__(self, f):
if f is None:
return Condition("%s IS NULL" % (self.sql,))
if isinstance(f, Field):
return Condition("%s = %s" % (self.sql, f.sql))
if isinstance(f, Expr):
return Condition("%s = %s" % (self.sql, f.sql), f.params)
if isinstance(f, list) or isinstance(f, tuple) or isinstance(f, set):
if len(f) < 1:
return Condition("FALSE")
sql = ", ".join(["%s" for i in xrange(len(f))])
return Condition("%s IN (%s)" % (self.sql, sql), list(f))
return Condition(self.sql + " = %s", [f])
def __ne__(self, f):
if f is None:
return Condition("%s IS NOT NULL" % (self.sql,))
if isinstance(f, Field):
return Condition("%s <> %s" % (self.sql, f.sql))
if isinstance(f, Expr):
return Condition("%s <> %s" % (self.sql, f.sql), f.params)
if isinstance(f, list) or isinstance(f, tuple) or isinstance(f, set):
if len(f) < 1:
return Condition("TRUE")
sql = ", ".join(["%s" for i in xrange(len(f))])
return Condition("%s NOT IN (%s)" % (self.sql, sql), list(f))
return Condition(self.sql + " <> %s", [f])
def __gt__(self, f):
if isinstance(f, Field):
return Condition("%s > %s" % (self.sql, f.sql))
if isinstance(f, Expr):
return Condition("%s > %s" % (self.sql, f.sql), f.params)
return Condition(self.sql + " > %s", [f])
def __lt__(self, f):
if isinstance(f, Field):
return Condition("%s < %s" % (self.sql, f.sql))
if isinstance(f, Expr):
return Condition("%s < %s" % (self.sql, f.sql), f.params)
return Condition(self.sql + " < %s", [f])
def __ge__(self, f):
if isinstance(f, Field):
return Condition("%s >= %s" % (self.sql, f.sql))
if isinstance(f, Expr):
return Condition("%s >= %s" % (self.sql, f.sql), f.params)
return Condition(self.sql + " >= %s", [f])
def __le__(self, f):
if isinstance(f, Field):
return Condition("%s <= %s" % (self.sql, f.sql))
if isinstance(f, Expr):
return Condition("%s <= %s" % (self.sql, f.sql), f.params)
return Condition(self.sql + " <= %s", [f])
def __mod__(self, f):
if isinstance(f, Field):
return Condition("%s LIKE %s" % (self.sql, f.sql))
if isinstance(f, Expr):
return Condition("%s LIKE %s" % (self.sql, f.sql), f.params)
return Condition(self.sql + " LIKE %s", [f])
@property
def sql(self):
return ".".join([self._prefix, self._name]) if self._prefix else self._name
class Condition(object):
def __init__(self, sql, params=None):
self._sql = sql
self._params = params if params else []
def __and__(self, c):
if isinstance(c, str):
return self & Condition(c)
if isinstance(c, Condition):
return ConditionSet(self) & c
if isinstance(c, ConditionSet):
return c.__rand__(self)
raise TypeError("Can't do operation with %s" % str(type(c)))
def __or__(self, c):
if isinstance(c, str):
return self | Condition(c)
if isinstance(c, Condition):
return ConditionSet(self) | c
if isinstance(c, ConditionSet):
return c.__ror__(self)
raise TypeError("Can't do operation with %s" % str(type(c)))
@property
def sql(self):
return self._sql
@property
def params(self):
return self._params
class ConditionSet(object):
OP_AND = 0
OP_OR = 1
def __init__(self, c=None):
self._empty = True
self._last_op = None
if c:
self._init(c)
def _init(self, c):
self._sql = c.sql
self._params = c.params
if isinstance(c, ConditionSet):
self._last_op = c._last_op
self._empty = False
return self
def _pre_extend(self, array1, array2):
for item in array2:
array1.insert(0, item)
##################################
def __rand__(self, c):
return copy.deepcopy(self)._rand(c)
def _rand(self, c):
if isinstance(c, str):
return self._rand(Condition(c))
if not isinstance(c, Condition):
raise TypeError("Can't do operation with %s" % str(type(c)))
if self._empty:
return self._init(c)
if self._last_op is not None and self._last_op == ConditionSet.OP_OR:
self._sql = "(%s)" % (self._sql,)
self._sql = "%s AND %s" % (c.sql, self._sql)
self._pre_extend(self._params, c.params)
self._last_op = ConditionSet.OP_AND
return self
###################################
def __and__(self, c):
return copy.deepcopy(self)._and(c)
def _and(self, c):
if isinstance(c, str):
return self._and(Condition(c))
if not isinstance(c, Condition) and not isinstance(c, ConditionSet):
raise TypeError("Can't do operation with %s" % str(type(c)))
if self._empty:
return self._init(c)
if self._last_op is not None and self._last_op == ConditionSet.OP_OR:
self._sql = "(%s)" % (self._sql,)
if isinstance(c, ConditionSet) and c._last_op == ConditionSet.OP_OR:
self._sql = "%s AND (%s)" % (self._sql, c.sql)
else:
self._sql = "%s AND %s" % (self._sql, c.sql)
self._params.extend(c.params)
self._last_op = ConditionSet.OP_AND
return self
###################################
def __ror__(self, c):
return copy.deepcopy(self)._ror(c)
def _ror(self, c):
if isinstance(c, str):
return self._ror(Condition(c))
if not isinstance(c, Condition):
raise TypeError("Can't do operation with %s" % str(type(c)))
if self._empty:
return self._init(c)
self._sql = "%s OR %s" % (c.sql, self._sql)
self._pre_extend(self._params, c.params)
self._last_op = ConditionSet.OP_OR
return self
###################################
def __or__(self, c):
return copy.deepcopy(self)._or(c)
def _or(self, c):
if isinstance(c, str):
return self._or(Condition(c))
if not isinstance(c, Condition) and not isinstance(c, ConditionSet):
raise TypeError("Can't do operation with %s" % str(type(c)))
if self._empty:
return self._init(c)
self._sql = "%s OR %s" % (self._sql, c.sql)
self._params.extend(c.params)
self._last_op = ConditionSet.OP_OR
return self
@property
def sql(self):
return "" if self._empty else self._sql
@property
def params(self):
return [] if self._empty else self._params
################################################
class Expr(object):
def __init__(self, sql, *params):
self.sql = sql
self._params = params
@property
def params(self):
return self._params
################################################
def opt_checker(k_list):
def new_deco(func):
def new_func(self, *args, **opt):
for k, v in opt.items():
if k not in k_list:
raise TypeError("Not implemented option: %s" % (k,))
return func(self, *args, **opt)
new_func.__doc__ = func.__doc__
return new_func
return new_deco
def _gen_order_by_list(f_list, direct="ASC"):
return ", ".join(["%s %s" % ((f.sql if isinstance(f, Field) else f), direct) for f in f_list])
def _gen_f_list(f_list, default=None):
if len(f_list) < 1 and default is not None:
return default
return ", ".join([(f.sql if isinstance(f, Field) else f) for f in f_list])
def _gen_v_list(v_list, params):
values = []
for v in v_list:
values.append("%s")
params.append(v)
return "(%s)" % (", ".join(values),)
def _gen_v_list_set(v_list_set, params):
return ", ".join([_gen_v_list(v_list, params) for v_list in v_list_set])
def _gen_fv_dict(fv_dict, params):
sql = []
for f, v in fv_dict.items():
if isinstance(v, Expr):
sql.append("%s = %s" % (f, v.sql))
params.extend(v.params)
else:
sql.append("%s = %%s" % (f,))
params.append(v)
return ", ".join(sql)
class QuerySetDeepcopyHelper(object):
"""
    Used to avoid deep-copying the db handle when a QuerySet is cloned.
"""
def __init__(self, db):
self._db = db
def __deepcopy__(self, memo):
return self
def __getattr__(self, attr):
return getattr(self._db, attr)
class QuerySet(object):
def __init__(self, db_or_t):
# complex var
self._db = None
self.tables = None
self._wheres = None
self._havings = None
if isinstance(db_or_t, Table) or isinstance(db_or_t, TableSet):
self.tables = db_or_t
else:
self._db = QuerySetDeepcopyHelper(db_or_t)
# simple var
self._group_by = None
self._order_by = None
self._limit = None
# default var
self._default_count_field_list = ("*",)
self._default_count_distinct = False
@apply
def wheres():
def fget(self):
return self._wheres if self._wheres else ConditionSet()
def fset(self, cs):
self._wheres = cs
return property(**locals())
@apply
def havings():
def fget(self):
return self._havings if self._havings else ConditionSet()
def fset(self, cs):
self._havings = cs
return property(**locals())
# public function
def clone(self):
return copy.deepcopy(self)
def table(self, t):
self.tables = t
return self
def on(self, c):
if not isinstance(self.tables, TableSet):
raise Error("Can't set on without join table")
self.tables.on(c)
return self
def where(self, c):
self._wheres = c
return self
def group_by(self, *f_list):
self._group_by = "GROUP BY %s" % (_gen_f_list(f_list),)
self._default_count_field_list = f_list
self._default_count_distinct = True
return self
def having(self, c):
self._havings = c
return self
@opt_checker(["desc"])
def order_by(self, *f_list, **opt):
direct = "DESC" if opt.get("desc") else "ASC"
order_by_field = _gen_order_by_list(f_list, direct)
if self._order_by is None:
self._order_by = "ORDER BY %s" % (order_by_field,)
else:
self._order_by = "%s, %s" % (self._order_by, order_by_field)
return self
def limit(self, offset, limit):
self._limit = "LIMIT %u, %u" % (offset, limit)
return self
@opt_checker(["distinct", "for_update"])
def count(self, *f_list, **opt):
sql = ["SELECT"]
params = []
if len(f_list) == 0:
f_list = self._default_count_field_list
if opt.get("distinct", self._default_count_distinct):
sql.append("COUNT(DISTINCT %s)" % (_gen_f_list(f_list),))
else:
sql.append("COUNT(%s)" % (_gen_f_list(f_list, "*"),))
self._join_sql_part(sql, params, ["from", "where"])
if opt.get("for_update"):
sql.append("FOR UPDATE")
sql = " ".join(sql)
if self._db is None:
return sql, params
return self._db.select(sql, params)[0][0]
@opt_checker(["distinct", "for_update", "dict_cursor", "dry"])
def select(self, *f_list, **opt):
sql = ["SELECT"]
params = []
if opt.get("distinct"):
sql.append("DISTINCT")
sql.append(_gen_f_list(f_list, "*"))
self._join_sql_part(sql, params, ["from", "where", "group", "having", "order", "limit"])
if opt.get("for_update"):
sql.append("FOR UPDATE")
sql = " ".join(sql)
if self._db is None or opt.get("dry") == True:
return sql, params
return self._db.select(sql, params, dict_cursor=opt.get("dict_cursor", True))
@opt_checker(["distinct", "for_update"])
def select_one(self, *f_list, **opt):
sql = ["SELECT"]
params = []
if opt.get("distinct"):
sql.append("DISTINCT")
sql.append(_gen_f_list(f_list, "*"))
self._join_sql_part(sql, params, ["from", "where", "group", "having", "order"])
sql.append("LIMIT 0, 1")
if opt.get("for_update"):
sql.append("FOR UPDATE")
sql = " ".join(sql)
if self._db is None:
return sql, params
result = self._db.select(sql, params, dict_cursor=True)
return None if len(result) < 1 else result[0]
def select_for_union(self, *f_list, **opt):
return UnionPart(db=self._db, *self.select(dry=True, *f_list, **opt))
def insert(self, fv_dict, **opt):
sql, params = self.insert_many(
fv_dict.keys(), ([fv_dict[k] for k in fv_dict.keys()],), __dry_run__=True, **opt)
if self._db is None:
return sql, params
return self._db.insert(sql, params)
@opt_checker(["ignore", "replace", "on_duplicate_key_update", "__dry_run__"])
def insert_many(self, f_list, v_list_set, **opt):
sql = ["REPLACE"] if opt.get("replace") else ["INSERT"]
params = []
if opt.get("ignore"):
sql.append("IGNORE")
sql.append("INTO")
self._join_sql_part(sql, params, ["tables"])
sql.append("(%s) VALUES %s" % (_gen_f_list(f_list), _gen_v_list_set(v_list_set, params)))
fv_dict = opt.get("on_duplicate_key_update")
if fv_dict:
sql.append("ON DUPLICATE KEY UPDATE")
sql.append(_gen_fv_dict(fv_dict, params))
sql = " ".join(sql)
if self._db is None or opt.get("__dry_run__", False):
return sql, params
return self._db.execute(sql, params)
@opt_checker(["ignore"])
def update(self, fv_dict, **opt):
sql = ["UPDATE"]
params = []
if opt.get("ignore"):
sql.append("IGNORE")
self._join_sql_part(sql, params, ["tables"])
sql.append("SET")
sql.append(_gen_fv_dict(fv_dict, params))
self._join_sql_part(sql, params, ["where", "limit"])
sql = " ".join(sql)
if self._db is None:
return sql, params
return self._db.execute(sql, params)
def delete(self):
sql = ["DELETE"]
params = []
self._join_sql_part(sql, params, ["from", "where"])
sql = " ".join(sql)
if self._db is None:
return sql, params
return self._db.execute(sql, params)
# private function
def _join_sql_part(self, sql, params, join_list):
if "tables" in join_list and self.tables:
sql.append(self.tables.sql)
params.extend(self.tables.params)
if "from" in join_list and self.tables:
sql.extend(["FROM", self.tables.sql])
params.extend(self.tables.params)
if "where" in join_list and self._wheres:
sql.extend(["WHERE", self._wheres.sql])
params.extend(self._wheres.params)
if "group" in join_list and self._group_by:
sql.append(self._group_by)
if "having" in join_list and self._havings:
sql.extend(["HAVING", self._havings.sql])
params.extend(self._havings.params)
if "order" in join_list and self._order_by:
sql.append(self._order_by)
if "limit" in join_list and self._limit:
sql.append(self._limit)
class UnionPart(object):
def __init__(self, sql, params, db=None):
self.db = db
self.sql = sql
self.params = params
def __mul__(self, up):
if not isinstance(up, UnionPart):
raise TypeError("Can't do operation with %s" % str(type(up)))
return UnionQuerySet(self) * up
def __add__(self, up):
if not isinstance(up, UnionPart):
raise TypeError("Can't do operation with %s" % str(type(up)))
return UnionQuerySet(self) + up
class UnionQuerySet(object):
def __init__(self, up):
self._db = up.db
self._union_part_list = [(None, up)]
self._group_by = None
self._order_by = None
self._limit = None
def __mul__(self, up):
if not isinstance(up, UnionPart):
raise TypeError("Can't do operation with %s" % str(type(up)))
if self._db is None:
self._db = up.db
self._union_part_list.append(("UNION DISTINCT", up))
return self
def __add__(self, up):
if not isinstance(up, UnionPart):
raise TypeError("Can't do operation with %s" % str(type(up)))
if self._db is None:
self._db = up.db
self._union_part_list.append(("UNION ALL", up))
return self
@opt_checker(["desc"])
def order_by(self, *f_list, **opt):
direct = "DESC" if opt.get("desc") else "ASC"
order_by_field = _gen_order_by_list(f_list, direct)
if self._order_by is None:
self._order_by = "ORDER BY %s" % (order_by_field,)
else:
self._order_by = "%s, %s" % (self._order_by, order_by_field)
return self
def limit(self, offset, limit):
self._limit = "LIMIT %u, %u" % (offset, limit)
return self
def select(self, db=None):
sql = []
params = []
for union_type, part in self._union_part_list:
if union_type:
sql.append(union_type)
sql.append("(%s)" % (part.sql,))
params.extend(part.params)
if self._order_by:
sql.append(self._order_by)
if self._limit:
sql.append(self._limit)
sql = " ".join(sql)
if db is not None:
return db.select(sql, params, dict_cursor=True)
if self._db is not None:
return self._db.select(sql, params, dict_cursor=True)
return sql, params
############## alias ###############
QS, T, F, E = QuerySet, Table, Field, Expr
if __name__ == "__main__":
print
print "*******************************************"
print "************ Single Query *************"
print "*******************************************"
print QS((T.base + T.grade).on((F.base__type == F.grade__item_type) & (F.base__type == 1)) + T.lottery).on(
F.base__type == F.lottery__item_type
).where(
(F.name == "name") & (F.status == 0) | (F.name == None)
).group_by("base.type").having(F("count(*)") > 1).select(F.type, F.grade__grade, F.lottery__grade)
print
print "*******************************************"
print "********** Step by Step Query **********"
print "*******************************************"
t = T.grade
print QS(t).limit(0,100).select(F.name)
print "==========================================="
t = (t * T.base).on(F.grade__item_type == F.base__type)
print QS(t).order_by(F.grade__name, F.base__name, desc=True).select(F.grade__name, F.base__img)
print "==========================================="
t = (t + T.lottery).on(F.base__type == F.lottery__item_type)
print QS(t).group_by(F.grade__grade).having(F.grade__grade > 0).select(F.grade__name, F.base__img, F.lottery__price)
print "==========================================="
w = (F.base__type == 1)
print QS(t).where(w).select(F.grade__name, for_update=True)
print "==========================================="
w = w & (F.grade__status == [0,1])
print QS(t).where(w).group_by(F.grade__name, F.base__img).count()
print "==========================================="
from datetime import datetime
w = w | (F.lottery__add_time > "2009-01-01") & (F.lottery__add_time <= datetime.now())
print QS(t).where(w).select_one(F.grade__name, F.base__img, F.lottery__price)
print "==========================================="
w = w & (F.base__status != [1, 2])
print QS(t).where(w).select(F.grade__name, F.base__img, F.lottery__price, "CASE 1 WHEN 1")
print
print "*******************************************"
print "********** Step by Step Query2 **********"
print "*******************************************"
qs = QS(T.user)
print qs.select(F.name)
print "==========================================="
qs.tables = (qs.tables * T.address).on(F.user__id == F.address__user_id)
print qs.select(F.user__name, F.address__street)
print "==========================================="
qs.wheres = qs.wheres & (F.id == 1)
print qs.select(F.name, F.id)
print "==========================================="
qs.wheres = qs.wheres & ((F.address__city_id == [111, 112]) | "address.city_id IS NULL")
print qs.select(F.user__name, F.address__street, "COUNT(*) AS count")
print "==========================================="
print
print "*******************************************"
print "********** Union Query **********"
print "*******************************************"
a = QS(T.item).where(F.status != -1).select_for_union("type, name, img")
b = QS(T.gift).where(F.storage > 0).select_for_union("type, name, img")
print (a + b).order_by("type", "name", desc=True).limit(100, 10).select()
print
print "*******************************************"
print "********** Other Operation **********"
print "*******************************************"
print QS(T.user).insert({
"name": "garfield",
"gender": "male",
"status": 0
}, ignore=True)
print "==========================================="
fl = ("name", "gender", "status", "age")
vl = (("garfield", "male", 0, 1), ("superwoman", "female", 0, 10))
print QS(T.user).insert_many(fl, vl, on_duplicate_key_update={"age" : E("age + VALUES(age)")})
print "==========================================="
print QS(T.user).where(F.id == 100).update({"name": "nobody", "status": 1}, ignore=True)
print "==========================================="
print QS(T.user).where(F.status == 1).delete()
|
|
from datetime import datetime
from dateutil import rrule
from osgeo import gdal, ogr
import numpy as np
start, end = datetime(2000, 1, 1), datetime(2013, 12, 31)
sWin, eWin = datetime(start.year, 11, 1), datetime(end.year, 3, 30)
sMon, eMon = datetime(start.year, 6, 1), datetime(start.year, 10, 1)
shp_filename = 'C:\\Recharge_GIS\\qgis_layers\\sensitivity_points\\SA_pnts29APR16_UTM.shp'
ds = ogr.Open(shp_filename)
lyr = ds.GetLayer()
defs = lyr.GetLayerDefn()
x = 0
already_done = ['Bateman']
for feat in lyr:
name = feat.GetField("Name")
if name in already_done:
pass
else:
print name
x += 1
point_id_obj = x
geom = feat.GetGeometryRef()
mx, my = geom.GetX(), geom.GetY()
for month in rrule.rrule(rrule.MONTHLY, dtstart=start, until=end):
path = 'C:\\Recharge_GIS\\OSG_Data\\current_use'
raster = 'aws_mod_4_21_10_0'
aws_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
gt = aws_open.GetGeoTransform()
rb = aws_open.GetRasterBand(1)
px = abs(int((mx - gt[0]) / gt[1]))
py = int((my - gt[3]) / gt[5])
aws_obj = rb.ReadAsArray(px, py, 1, 1)
raster = 'nlcd_root_dpth_15apr'
nlcd_rt_z_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
rb = nlcd_rt_z_open.GetRasterBand(1)
nlcd_rt_obj = rb.ReadAsArray(px, py, 1, 1)
nlcd_rt_z_open = []
raster = 'nlcd_plnt_hgt1_250_m_degraded1'
nlcd_plt_hgt_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
rb = nlcd_plt_hgt_open.GetRasterBand(1)
nlcd_plt_hgt_obj = rb.ReadAsArray(px, py, 1, 1)
nlcd_plt_hgt_open = []
raster = 'Soil_Ksat_15apr' # convert from micrometer/sec to mm/day
ksat_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
rb = ksat_open.GetRasterBand(1)
ksat_obj = rb.ReadAsArray(px, py, 1, 1)
ksat_open = []
raster = 'tew_250_15apr'
tew_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
rb = tew_open.GetRasterBand(1)
tew_obj = rb.ReadAsArray(px, py, 1, 1)
tew_open = []
path = 'C:\\Recharge_GIS\\Array_Results\\initialize'
raster = 'dr_4_18_2_49'
dr_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
rb = dr_open.GetRasterBand(1)
dr_obj = rb.ReadAsArray(px, py, 1, 1)
dr_open = []
raster = 'de_4_18_2_49'
de_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
rb = de_open.GetRasterBand(1)
de_obj = rb.ReadAsArray(px, py, 1, 1)
de_open = []
raster = 'drew_4_19_23_11'
drew_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
rb = drew_open.GetRasterBand(1)
drew_obj = rb.ReadAsArray(px, py, 1, 1)
drew_open = []
path = 'C:\\Recharge_GIS\\OSG_Data\\not_in_use'
raster = 'FC_Ras_SSGO1'
fc_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
rb = fc_open.GetRasterBand(1)
fc_obj = rb.ReadAsArray(px, py, 1, 1)
fc_open = []
raster = 'WP_Ras_SSGO1'
wp_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
rb = wp_open.GetRasterBand(1)
wp_obj = rb.ReadAsArray(px, py, 1, 1)
wp_open = []
print ''
print point_id_obj
print name
print mx, my
point_id = []
date = []
ksat = []
soil_ksat = []
kcb = []
rlin = []
        rg = []
etrs_Pm = []
p_hgt = []
minTemp = []
maxTemp = []
temp = []
ppt = []
fc = []
wp = []
taw = []
aws = []
rt_z = []
for dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
# prDe.append(pDe)
# prDr.append(pDr)
date.append(dday)
taw.append(aws_obj)
aws.append(aws_obj)
fc.append(fc_obj)
wp.append(wp_obj)
point_id.append(point_id_obj)
p_hgt.append(nlcd_plt_hgt_obj)
rt_z.append(nlcd_rt_obj)
if dday in rrule.rrule(rrule.DAILY, dtstart=sMon, until=eMon):
ksat.append(ksat_obj * 2/24)
soil_ksat.append(ksat_obj * 2/24)
else:
ksat.append(ksat_obj * 6/24)
soil_ksat.append(ksat_obj * 6/24)
# Daily Values
# NDVI
for dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
doy = dday.timetuple().tm_yday
if dday.year == 2000:
path = 'F:\\NDVI\\NDVI_std_all'
obj = [1, 49, 81, 113, 145, 177, 209, 241, 273, 305, 337]
if doy < 49:
strt = 1
band = doy
nd = 48
raster = '{a}\\T{b}_{c}_2000_etrf_subset_001_048_ndvi_daily.tif'.format(a=path,
b=str(strt).rjust(3, '0'),
c=str(nd).rjust(3, '0'),
d=band)
kcb_open = gdal.Open(raster)
rb = kcb_open.GetRasterBand(band)
kcb_obj = rb.ReadAsArray(px, py, 1, 1) * 1.25
kcb.append(kcb_obj)
kcb_open = []
else:
for num in obj[1:]:
diff = doy - num
if 0 <= diff <= 31:
pos = obj.index(num)
strt = obj[pos]
band = diff + 1
if num == 337:
nd = num + 29
else:
nd = num + 31
raster = '{a}\\T{b}_{c}_2000_etrf_subset_001_048_ndvi_daily.tif'.format(a=path,
b=str(strt).rjust(3, '0'),
c=str(nd).rjust(3, '0'),
d=str(doy - num + 1))
kcb_open = gdal.Open(raster)
rb = kcb_open.GetRasterBand(band)
kcb_obj = rb.ReadAsArray(px, py, 1, 1) * 1.25
kcb.append(kcb_obj)
kcb_open = []
elif dday.year == 2001:
path = "F:\\NDVI\\NDVI_std_all"
pathyear = path + "\\" + str(dday.year)
obj = [1, 17, 33, 49, 65, 81, 97, 113, 129, 145, 161, 177, 193, 209,
225, 241, 257, 273, 289, 305, 321, 337, 353]
for num in obj:
diff = doy - num
if 0 <= diff <= 15:
pos = obj.index(num)
strt = obj[pos]
band = diff + 1
if num == 353:
nd = num + 12
else:
nd = num + 15
raster = '{a}\\{b}_{c}_{d}.tif'.format(a=path, b=dday.year, c=strt, d=nd, e=band)
kcb_open = gdal.Open(raster)
rb = kcb_open.GetRasterBand(band)
kcb_obj = rb.ReadAsArray(px, py, 1, 1) * 1.25
kcb.append(kcb_obj)
kcb_open = []
else:
path = "F:\\NDVI\\NDVI_std_all"
obj = [1, 17, 33, 49, 65, 81, 97, 113, 129, 145, 161, 177, 193, 209,
225, 241, 257, 273, 289, 305, 321, 337, 353]
for num in obj:
diff = doy - num
if 0 <= diff <= 15:
pos = obj.index(num)
strt = obj[pos]
band = diff + 1
if num == 353:
nd = num + 12
else:
nd = num + 15
raster = '{a}\\{b}_{c}.tif'.format(a=path, b=dday.year, c=pos+1, d=nd, e=band)
kcb_open = gdal.Open(raster)
rb = kcb_open.GetRasterBand(band)
kcb_obj = rb.ReadAsArray(px, py, 1, 1) * 1.25
kcb.append(kcb_obj)
kcb_open = []
x = 0
for element in kcb:
if element < 0.001 or element > 1.5:
kcb[x] = kcb[x - 1]
print 'found bad value'
x += 1
print 'NDVI point extract at {a} {b} done'.format(a=point_id_obj, b=name)
# RLIN net longwave radiation
for dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
doy = dday.timetuple().tm_yday
doy_str = str(doy)
path = "F:\\PM_RAD"
raster = '{a}\\PM{d}\\RLIN_NM_{b}_{c}.tif'.format(a=path, b=dday.year, c=str(doy).rjust(3, '0'), d=dday.year)
rlin_open = gdal.Open(raster)
rb = rlin_open.GetRasterBand(1)
rlin_obj = rb.ReadAsArray(px, py, 1, 1)
rlin.append(rlin_obj)
rlin_open = []
print 'RLIN extract at {a} {b} done'.format(a=point_id_obj, b=name)
# RTOT net shortwave radiation
for dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
doy = dday.timetuple().tm_yday
doy_str = str(doy)
path = "F:\\PM_RAD"
raster = '{a}\\rad{d}\\RTOT_{b}_{c}.tif'.format(a=path, b=dday.year, c=str(doy).rjust(3, '0'), d=dday.year)
rg_open = gdal.Open(raster)
rb = rg_open.GetRasterBand(1)
rg_obj = rb.ReadAsArray(px, py, 1, 1)
rg.append(rg_obj)
rg_open = []
print 'RG extract at {a} {b} done'.format(a=point_id_obj, b=name)
# refET PM
for dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
doy = dday.timetuple().tm_yday
doy_str = str(doy)
raster = '{a}\\PM{d}\\PM_NM_{b}_{c}.tif'.format(a=path, b=dday.year,c=str(doy).rjust(3, '0'), d=dday.year)
etrs_open = gdal.Open(raster)
rb = etrs_open.GetRasterBand(1)
etrs_obj = rb.ReadAsArray(px, py, 1, 1)
etrs_Pm.append(etrs_obj)
etrs_open = []
    print 'refET PM extract at {a} {b} done'.format(a=point_id_obj, b=name)
# TEMP
for dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
path = "F:\\PRISM\\Temp\\Minimum_standard"
month_str = str(dday.month)
day_str = str(dday.day)
if dday.year in [2002, 2004, 2005]:
raster = '{a}\\TempMin_NMHW2Buff_{b}{c}{d}.tif'.format(a=path, b=dday.year, c=month_str.rjust(2, '0'),
d=day_str.rjust(2, '0'))
else:
raster = '{a}\\cai_tmin_us_us_30s_{b}{c}{d}.tif'.format(a=path, b=dday.year, c=month_str.rjust(2, '0'),
d=day_str.rjust(2, '0'))
min_temp_open = gdal.Open(raster)
rb = min_temp_open.GetRasterBand(1)
min_temp_obj = rb.ReadAsArray(px, py, 1, 1)
minTemp.append(min_temp_obj)
min_temp_open = []
path = "F:\\PRISM\\Temp\\Maximum_standard"
raster = '{a}\\TempMax_NMHW2Buff_{b}{c}{d}.tif'.format(a=path,b=dday.year, c=month_str.rjust(2, '0'),
d=day_str.rjust(2, '0'))
max_temp_open = gdal.Open(raster)
rb = max_temp_open.GetRasterBand(1)
max_temp_obj = rb.ReadAsArray(px, py, 1, 1)
maxTemp.append(max_temp_obj)
max_temp_open = []
rslt = (max_temp_obj + min_temp_obj)/2
temp.append(rslt)
    print 'TEMP extract at {a} {b} done'.format(a=point_id_obj, b=name)
# Precipitation
for dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
path = 'F:\\PRISM\\Precip\\800m_std_all'
month_str = str(dday.month)
day_str = str(dday.day)
raster = '{a}\\PRISMD2_NMHW2mi_{b}{c}{d}.tif'.format(a=path, b=dday.year, c=month_str.rjust(2, '0'),
d=day_str.rjust(2, '0'))
ppt_open = gdal.Open(raster)
rb = ppt_open.GetRasterBand(1)
ppt_obj = rb.ReadAsArray(px, py, 1, 1)
ppt.append(ppt_obj)
ppt_open = []
    print 'Precip extract at {a} {b} done'.format(a=point_id_obj, b=name)
point_id = np.array(point_id).squeeze()
date = [rec.strftime('%Y/%m/%d') for rec in date]
date = np.array(date, object)
ksat = np.array(ksat, dtype=float).squeeze()
soil_ksat = np.array(soil_ksat, dtype=float).squeeze()
kcb = np.array(kcb, dtype=float).squeeze()
etrs_Pm = np.array(etrs_Pm, dtype=float).squeeze()
rlin = np.array(rlin, dtype=float).squeeze()
rg = np.array(rg, dtype=float).squeeze()
p_hgt = np.array(p_hgt, dtype=float).squeeze()
minTemp = np.array(minTemp, dtype=float).squeeze()
maxTemp = np.array(maxTemp, dtype=float).squeeze()
temp = np.array(temp, dtype=float).squeeze()
ppt = np.array(ppt, dtype=float).squeeze()
taw = np.array(taw, dtype=float).squeeze()
aws = np.array(aws, dtype=float).squeeze()
fc = np.array(fc, dtype=float).squeeze()
wp = np.array(wp, dtype=float).squeeze()
rt_z = np.array(rt_z, dtype=float).squeeze()
# b = np.array([['date', 'ksat', 'soil_ksat', 'kcb', 'rlin', 'rg', 'etrs_Pm', 'plant height', 'min temp',
# 'max temp', 'temp', 'precip', 'fc', 'wp', 'taw', 'aws', 'root_z']])
recs = np.column_stack((date, ksat, soil_ksat, kcb, rlin, rg, etrs_Pm, p_hgt, minTemp,
maxTemp, temp, ppt, fc, wp, taw, aws, rt_z))
# data = np.concatenate((b, recs), axis=0)
name = name.replace(' ', '_')
    path = 'C:\\Users\\David\\Documents\\Recharge\\Sensitivity_analysis\\SA_extracts'
np.savetxt('{f}\\{g}_extract.csv'.format(f=path, g=name),
recs, fmt=['%s', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f',
'%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f'],
delimiter=',')
print "You have been saved!"
|
|
#!/usr/bin/env python2.6
"""Classes and functions for UnofficialDDNSnix.
https://github.com/Robpol86/UnofficialDDNSnix
"""
from __future__ import division
from __future__ import print_function
import StringIO
import functools
import logging
import logging.handlers
import os
import re
import sys
import yaml
import yaml.parser
import yaml.reader
import yaml.scanner
# noinspection PyCallingNonCallable
class Color(str):
"""Converts {color} tags to Bash color codes. However len() will show the length of visible colors and not include \
the invisible color codes."""
_codes = dict(b=1, i=3, u=4, flash=5, outline=6, negative=7, invis=8, strike=9, black=30, red=31, green=32,
brown=33, blue=34, purple=35, cyan=36, gray=37, bgblack=40, bgred=41, bggreen=42, bgbrown=43,
bgblue=44, bgpurple=45, bgcyan=46, bggray=47, hiblack=90, hired=91, higreen=92, hibrown=93, hiblue=94,
hipurple=95, hicyan=96, higray=97, hibgblack=100, hibgred=101, hibggreen=102, hibgbrown=103,
hibgblue=104, hibgpurple=105, hibgcyan=106, hibggray=107, pink=95, yellow=93, white=97, bgyellow=103,
bgpink=105, bgwhite=107)
_codes.update({'/all': 0, '/attr': 10, '/b': 22, '/i': 23, '/u': 24, '/flash': 25, '/outline': 26, '/negative': 27,
'/strike': 29, '/fg': 39, '/bg': 49})
_codes_parsed = dict([(k, "\033[%sm" % v) for k, v in _codes.iteritems()])
def __new__(cls, value):
parsed = str(value.format(**cls._codes_parsed))
for p in [(sub, sub.replace("m\033[", ';')) for sub in re.compile(r"((?:\033\[[\d;]+m){2,})").findall(parsed)]:
parsed = str.replace(parsed, p[0], p[1]) # Merge consecutive formatting.
obj = str.__new__(cls, parsed)
obj.stripped = str(re.compile(r"\033\[[\d;]+m").sub('', parsed))
return obj
def __len__(self):
return str.__len__(self.stripped)
def _case(self):
"""Fix bash color code casing."""
@functools.wraps(self)
def wrapped(inst, *args, **kwargs):
return re.sub(r"\033\[([\d;]+)M", r"\033\[\1m", self(inst, *args, **kwargs))
return wrapped
def _stp(self):
"""String to parsed conversion."""
@functools.wraps(self)
def wrapped(inst, *args, **kwargs):
return str.replace(self(inst, *args, **kwargs), inst.stripped, inst)
return wrapped
def _color(self):
"""Converts string type outputs to Color type."""
@functools.wraps(self)
def wrapped(inst, *args, **kwargs):
return Color(self(inst, *args, **kwargs))
return wrapped
for f in ['center', 'ljust', 'rjust', 'zfill']:
exec("@_stp\n@_color\ndef {0}(self, *args, **kwargs): return str.{0}(self.stripped, *args, **kwargs)".format(f))
for f in ['join', 'lower', 'lstrip', 'replace', 'rstrip', 'strip']:
exec("@_color\ndef {0}(self, *args, **kwargs): return str.{0}(self, *args, **kwargs)".format(f))
for f in ['swapcase', 'upper']:
exec("@_case\n@_color\ndef {0}(self, *args, **kwargs): return str.{0}(self, *args, **kwargs)".format(f))
for f in ['rsplit', 'split']:
exec("def {0}(self, *args, **kwargs): return [Color(s) for s in str.{0}(self, *args, **kwargs)]".format(f))
def title(self, *args, **kwargs):
"""Don't use: Can't figure out how to implement this properly."""
raise NotImplementedError
class LoggingSetup(object):
"""Generates a StringIO pseudo file handler to be passed to logging.config.fileConfig. Use it with "with"."""
def __init__(self, verbose=False, log_file='', console_quiet=False):
self.level = "INFO" if not verbose else "DEBUG"
self.log_file = log_file
self.console_quiet = console_quiet
self.draft = StringIO.StringIO()
self.config = StringIO.StringIO()
def __enter__(self):
self.generate_draft()
self.draft_to_config()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.draft.close()
self.config.close()
class ConsoleHandler(logging.StreamHandler):
"""A handler that logs to console in the sensible way.
StreamHandler can log to *one of* sys.stdout or sys.stderr.
It is more sensible to log to sys.stdout by default with only error
(logging.WARNING and above) messages going to sys.stderr. This is how
ConsoleHandler behaves.
http://code.activestate.com/recipes/576819-logging-to-console-without-surprises/
Modified by @Robpol86.
"""
def __init__(self):
logging.StreamHandler.__init__(self)
def emit(self, record):
self.stream = sys.stderr if record.levelno >= logging.WARNING else sys.stdout
logging.StreamHandler.emit(self, record)
def flush(self):
# Workaround a bug in logging module
# See:
# http://bugs.python.org/issue6333
if self.stream and hasattr(self.stream, 'flush') and not self.stream.closed:
logging.StreamHandler.flush(self)
class NullHandler(logging.Handler):
def emit(self, record):
pass
class TimedRotatingFileHandler(logging.handlers.TimedRotatingFileHandler, object):
"""Overrides TimedRotatingFileHandler to support the Color class. Gets rid of colors from file logging."""
def emit(self, record):
if isinstance(record, Color):
record = record.stripped
super(LoggingSetup.TimedRotatingFileHandler, self).emit(record)
def generate_draft(self):
"""Create a first draft of the pseudo config file for logging."""
# Write static data to the pseudo config file.
self.draft.write(
"""
[formatters]
keys=console,file
[formatter_console]
format=%(message)s
[formatter_file]
format=%(asctime)s %(levelname)-8s %(name)-30s %(message)s
datefmt=%Y-%m-%dT%H:%M:%S
[loggers]
keys=root
[handler_null]
class=libs.LoggingSetup.NullHandler
args=()
"""
)
# Add handlers.
handlers = []
if not self.console_quiet:
handlers.append('console')
self.draft.write(
"""
[handler_console]
class=libs.LoggingSetup.ConsoleHandler
level=DEBUG
formatter=console
args=()
"""
)
if self.log_file:
handlers.append('file')
self.draft.write(
"""
[handler_file]
class=libs.LoggingSetup.TimedRotatingFileHandler
level=DEBUG
formatter=file
args=('%s','D',30,5)
""" % self.log_file
)
if not handlers:
handlers.append('null')
self.draft.write(
"""
[logger_root]
level={level}
handlers={handlers}
[handlers]
keys={handlers}
""".format(level=self.level, handlers=','.join(handlers))
)
def draft_to_config(self):
self.draft.seek(0)
self.config.writelines(("%s\n" % line for line in (l.strip() for l in self.draft) if line))
self.config.seek(0)
class MultipleConfigSources(object):
"""Handles configuration options from command line and YAML config file."""
def __init__(self, docopt_parsed, config_file):
self.docopt_parsed = dict([(o[2:], v) for o, v in docopt_parsed.iteritems()])
if not config_file:
self.config_file_parsed = dict()
return
if not os.path.isfile(config_file):
raise self.ConfigError("Config file %s does not exist, not a file, or no permission." % config_file)
try:
with open(config_file, 'rb') as f:
self.config_file_parsed = yaml.load(f)
except IOError:
raise self.ConfigError("Unable to read config file %s." % config_file)
except yaml.reader.ReaderError:
raise self.ConfigError("Unable to read config file %s, invalid data." % config_file)
except (yaml.scanner.ScannerError, yaml.parser.ParserError) as e:
if r"found character '\t' that cannot start any token" in str(e):
raise self.ConfigError("Tab character found in config file %s. Must use spaces only!" % config_file)
raise self.ConfigError("Config file %s contents not YAML formatted: %s" % (config_file, e))
if not isinstance(self.config_file_parsed, dict):
raise self.ConfigError(
"Config file %s contents didn't yield dict or not YAML: %s" % (config_file, self.config_file_parsed)
)
for key in self.config_file_parsed:
if key not in self.docopt_parsed:
raise self.ConfigError("Unknown option %s in config file %s." % (key, config_file))
if isinstance(self.docopt_parsed[key], bool) and not isinstance(self.config_file_parsed[key], bool):
raise self.ConfigError("Config file option %s must be True or False." % key)
class ConfigError(Exception):
"""Raised when insufficient/invalid config file or CLI options are given."""
pass
def merge(self):
"""Merges command line options and config file options, config file taking precedence."""
config = self.docopt_parsed.copy()
for key, value in config.iteritems():
if isinstance(value, str) and value.isdigit():
config[key] = int(value)
for key, value in self.config_file_parsed.iteritems():
config[key] = value
return config
def get_config(cli_args, test=False):
"""Verifies all the required config options for UnofficialDDNS are satisfied."""
# Read from multiple sources and get final config.
multi_config = MultipleConfigSources(cli_args, cli_args.get('--config', ''))
config = multi_config.merge()
if test:
# Skip checks if testing.
return config
config['registrar'] = 'name.com' # In the future I might support other registrars.
# Validate interval.
if not isinstance(config['interval'], int):
raise multi_config.ConfigError("Config option 'interval' must be a number.")
if not config['interval']:
raise multi_config.ConfigError("Config option 'interval' must be greater than 0.")
# Validate pid and log.
for option in ('log', 'pid'):
if not config[option]:
continue
parent = os.path.dirname(config[option])
if not os.path.exists(parent):
raise multi_config.ConfigError("Parent directory %s of %s file does not exist." % (parent, option))
if not os.access(parent, os.W_OK):
raise multi_config.ConfigError("Parent directory %s of %s file not writable." % (parent, option))
# Now make sure we got everything we need.
if not all([config.get(o, None) for o in ('domain', 'user', 'passwd')]):
raise multi_config.ConfigError("A domain, username, and password must be specified.")
# Done.
return config
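# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): exercises the two
# helpers above. Color strips its escape codes for len(), and LoggingSetup
# builds an INI-style pseudo config file in memory; per the class docstring,
# the generated text is what gets handed to logging.config.fileConfig().
if __name__ == '__main__':
    banner = Color("{red}error:{/all} something {b}bold{/all}")
    print(len(banner), str.__len__(banner))   # visible length vs. raw length with codes
    with LoggingSetup(verbose=True, log_file='') as setup:
        print(setup.config.getvalue())        # INI text suitable for fileConfig()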
|
|
import copy
from haystack.exceptions import AlreadyRegistered, NotRegistered, SearchFieldError
class SearchSite(object):
"""
Encapsulates all the indexes that should be available.
This allows you to register indexes on models you don't control (reusable
apps, django.contrib, etc.) as well as customize on a per-site basis what
indexes should be available (different indexes for different sites, same
codebase).
A SearchSite instance should be instantiated in your URLconf, since all
models will have been loaded by that point.
The API intentionally follows that of django.contrib.admin's AdminSite as
much as it makes sense to do.
"""
def __init__(self):
self._registry = {}
self._cached_field_mapping = None
def register(self, model, index_class=None):
"""
Registers a model with the site.
The model should be a Model class, not instances.
If no custom index is provided, a generic SearchIndex will be applied
to the model.
"""
if not index_class:
from haystack.indexes import BasicSearchIndex
index_class = BasicSearchIndex
if not hasattr(model, '_meta'):
raise AttributeError('The model being registered must derive from Model.')
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__class__)
self._registry[model] = index_class(model)
self._setup(model, self._registry[model])
def unregister(self, model):
"""
Unregisters a model from the site.
"""
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__class__)
self._teardown(model, self._registry[model])
del(self._registry[model])
def _setup(self, model, index):
index._setup_save(model)
index._setup_delete(model)
def _teardown(self, model, index):
index._teardown_save(model)
index._teardown_delete(model)
def get_index(self, model):
"""Provide the index that're being used for a particular model."""
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__class__)
return self._registry[model]
def get_indexes(self):
"""Provide a dict of all indexes that're being used."""
return self._registry
def get_indexed_models(self):
"""Provide a list of all models being indexed."""
return self._registry.keys()
def all_searchfields(self):
"""
Builds a dictionary of all fields appearing in any of the `SearchIndex`
instances registered with a site.
This is useful when building a schema for an engine. A dictionary is
returned, with each key being a fieldname (or index_fieldname) and the
value being the `SearchField` class assigned to it.
"""
content_field_name = ''
fields = {}
for model, index in self.get_indexes().items():
for field_name, field_object in index.fields.items():
if field_object.document is True:
if content_field_name != '' and content_field_name != field_object.index_fieldname:
raise SearchFieldError("All SearchIndex fields with 'document=True' must use the same fieldname.")
content_field_name = field_object.index_fieldname
if not field_object.index_fieldname in fields:
fields[field_object.index_fieldname] = field_object
fields[field_object.index_fieldname] = copy.copy(field_object)
else:
# If the field types are different, we can mostly
# safely ignore this. The exception is ``MultiValueField``,
# in which case we'll use it instead, copying over the
# values.
if field_object.is_multivalued == True:
old_field = fields[field_object.index_fieldname]
fields[field_object.index_fieldname] = field_object
fields[field_object.index_fieldname] = copy.copy(field_object)
# Switch it so we don't have to dupe the remaining
# checks.
field_object = old_field
# We've already got this field in the list. Ensure that
# what we hand back is a superset of all options that
# affect the schema.
if field_object.indexed is True:
fields[field_object.index_fieldname].indexed = True
if field_object.stored is True:
fields[field_object.index_fieldname].stored = True
if field_object.faceted is True:
fields[field_object.index_fieldname].faceted = True
if field_object.use_template is True:
fields[field_object.index_fieldname].use_template = True
if field_object.null is True:
fields[field_object.index_fieldname].null = True
return fields
def get_index_fieldname(self, fieldname):
"""
Returns the actual name of the field in the index.
If not found, returns the fieldname provided.
This is useful because it handles the case where a ``index_fieldname``
was provided, allowing the user to use the variable name from their
``SearchIndex`` instead of having to remember & use the overridden
name.
"""
if fieldname in self._field_mapping():
return self._field_mapping()[fieldname]['index_fieldname']
else:
return fieldname
def get_facet_field_name(self, fieldname):
"""
Returns the actual name of the facet field in the index.
If not found, returns the fieldname provided.
"""
facet_fieldname = None
reverse_map = {}
for field, info in self._field_mapping().items():
if info['facet_fieldname'] and info['facet_fieldname'] == fieldname:
return info['index_fieldname']
return self.get_index_fieldname(fieldname)
def _field_mapping(self):
mapping = {}
if self._cached_field_mapping:
return self._cached_field_mapping
for model, index in self.get_indexes().items():
for field_name, field_object in index.fields.items():
if field_name in mapping and field_object.index_fieldname != mapping[field_name]['index_fieldname']:
# We've already seen this field in the list. Raise an exception if index_fieldname differs.
raise SearchFieldError("All uses of the '%s' field need to use the same 'index_fieldname' attribute." % field_name)
facet_fieldname = None
if hasattr(field_object, 'facet_for'):
if field_object.facet_for:
facet_fieldname = field_object.facet_for
else:
facet_fieldname = field_object.instance_name
mapping[field_name] = {
'index_fieldname': field_object.index_fieldname,
'facet_fieldname': facet_fieldname,
}
self._cached_field_mapping = mapping
return mapping
def update_object(self, instance):
"""
Updates the instance's data in the index.
A shortcut for updating on the instance's index. Errors from `get_index`
        and `update_object` will be allowed to propagate.
"""
return self.get_index(type(instance)).update_object(instance)
def remove_object(self, instance):
"""
Removes the instance's data in the index.
A shortcut for removing on the instance's index. Errors from `get_index`
        and `remove_object` will be allowed to propagate.
"""
return self.get_index(type(instance)).remove_object(instance)
# The common case. Feel free to override/replace/define your own in your URLconfs.
site = SearchSite()
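# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): how a project would
# typically wire up the default site above from its URLconf, once models are
# loaded. "myapp", Note and NoteIndex are placeholder names; omitting the
# index class falls back to BasicSearchIndex, as described in register().
#
#   from haystack.sites import site    # the default instance defined above (import path may vary)
#   from myapp.models import Note
#   from myapp.search_indexes import NoteIndex
#
#   site.register(Note, NoteIndex)     # or site.register(Note) for BasicSearchIndex
#   site.get_index(Note).update_object(note_instance)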
|
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to store, generate, and manipulate material interfaces.
"""
from itertools import chain, combinations, product
from typing import Dict, List, Tuple, Optional
import numpy as np
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial.distance import squareform
from pymatgen.analysis.adsorption import AdsorbateSiteFinder
from pymatgen.core import Lattice, Site, Structure
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.surface import Slab
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
class Interface(Structure):
"""
This class stores data for defining an interface between two structures.
It is a subclass of pymatgen.core.structure.Structure.
"""
def __init__(
self,
lattice,
species,
coords,
site_properties,
validate_proximity=False,
to_unit_cell=False,
coords_are_cartesian=False,
in_plane_offset: Tuple[float, float] = (0, 0),
gap: float = 0,
vacuum_over_film: float = 0.0,
interface_properties: Optional[Dict] = None,
):
"""
Makes an interface structure, a structure object with additional information
and methods pertaining to interfaces
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Species]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / species specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Species objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
            in_plane_offset: fractional shift in plane for the film with respect
                to the substrate
            gap: gap between substrate and film in Angstroms; zero corresponds to
                the original distance between substrate and film sites
            vacuum_over_film: vacuum space above the film in Angstroms
            to_unit_cell (bool): Whether to translate sites into the unit cell.
                Defaults to False.
            interface_properties (dict): Misc properties to attach to the
                interface. Defaults to None.
        """
assert "interface_label" in site_properties, ValueError(
"Must provide labeling of substrate and film sites in site properties"
)
self._in_plane_offset = np.array(in_plane_offset, dtype="float")
self._gap = gap
self._vacuum_over_film = vacuum_over_film
self.interface_properties = interface_properties or {}
super().__init__(
lattice,
species,
coords,
validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties,
)
self.sort()
@property
def in_plane_offset(self) -> np.ndarray:
"""
The shift between the film and substrate in fractional
coordinates
"""
return self._in_plane_offset
@in_plane_offset.setter
def in_plane_offset(self, new_shift: np.ndarray) -> None:
if len(new_shift) != 2:
raise ValueError("In-plane shifts require two floats for a and b vectors")
new_shift = np.mod(new_shift, 1)
delta = new_shift - np.array(self.in_plane_offset)
self._in_plane_offset = new_shift
self.translate_sites(self.film_indices, [delta[0], delta[1], 0], to_unit_cell=True)
@property
def gap(self) -> float:
"""
The gap in cartesian units between the film and the substrate
"""
return self._gap
@gap.setter
def gap(self, new_gap: float) -> None:
if new_gap < 0:
raise ValueError("Can't reduce interface gap below 0")
delta = new_gap - self.gap
self._gap = new_gap
self.__update_c(self.lattice.c + delta)
self.translate_sites(self.film_indices, [0, 0, delta], frac_coords=False, to_unit_cell=True)
@property
def vacuum_over_film(self) -> float:
"""
The vacuum space over the film in cartesian units
"""
return self._vacuum_over_film
@vacuum_over_film.setter
def vacuum_over_film(self, new_vacuum: float) -> None:
if new_vacuum < 0:
raise ValueError("The vacuum over the film can not be less then 0")
delta = new_vacuum - self.vacuum_over_film
self._vacuum_over_film = new_vacuum
self.__update_c(self.lattice.c + delta)
@property
def substrate_indicies(self) -> List[int]:
"""
        Site indices for the substrate atoms
"""
sub_indicies = [i for i, tag in enumerate(self.site_properties["interface_label"]) if "substrate" in tag]
return sub_indicies
@property
def substrate_sites(self) -> List[Site]:
"""
The site objects in the substrate
"""
sub_sites = [site for site, tag in zip(self, self.site_properties["interface_label"]) if "substrate" in tag]
return sub_sites
@property
def substrate(self) -> Structure:
"""
A pymatgen Structure for just the substrate
"""
return Structure.from_sites(self.substrate_sites)
@property
def film_indices(self) -> List[int]:
"""
Site indices of the film sites
"""
f_indicies = [i for i, tag in enumerate(self.site_properties["interface_label"]) if "film" in tag]
return f_indicies
@property
def film_sites(self) -> List[Site]:
"""
Return the film sites of the interface.
"""
film_sites = [site for site, tag in zip(self, self.site_properties["interface_label"]) if "film" in tag]
return film_sites
@property
def film(self) -> Structure:
"""
A pymatgen Structure for just the film
"""
return Structure.from_sites(self.film_sites)
def copy(self) -> "Interface": # type:ignore
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Returns:
A copy of the Interface.
"""
return Interface.from_dict(self.as_dict())
def get_sorted_structure(self, key=None, reverse=False) -> Structure:
"""
Get a sorted structure for the interface. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
electronegativity of the species.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
struct_copy = Structure.from_sites(self)
struct_copy.sort(key=key, reverse=reverse)
return struct_copy
def get_shifts_based_on_adsorbate_sites(self, tolerance: float = 0.1) -> List[Tuple[float, float]]:
"""
Computes possible in-plane shifts based on an adsorbate site algorithm
Args:
            tolerance: tolerance for "uniqueness" of shifts, in Cartesian units
                (usually Angstroms).
"""
substrate = self.substrate
film = self.film
substrate_surface_sites = np.dot(
list(chain.from_iterable(AdsorbateSiteFinder(substrate).find_adsorption_sites().values())),
substrate.lattice.inv_matrix,
)
# Film gets forced into substrate lattice anyways, so shifts can be computed in fractional coords
film_surface_sites = np.dot(
list(chain.from_iterable(AdsorbateSiteFinder(film).find_adsorption_sites().values())),
film.lattice.inv_matrix,
)
pos_shift = np.array(
[
np.add(np.multiply(-1, film_shift), sub_shift)
for film_shift, sub_shift in product(film_surface_sites, substrate_surface_sites)
]
)
def _base_round(x, base=0.05):
return base * (np.array(x) / base).round()
# Round shifts to tolerance
pos_shift[:, 0] = _base_round(pos_shift[:, 0], base=tolerance / substrate.lattice.a)
pos_shift[:, 1] = _base_round(pos_shift[:, 1], base=tolerance / substrate.lattice.b)
        # The c-axis component is not useful
pos_shift = pos_shift[:, 0:2]
return list(np.unique(pos_shift, axis=0))
@property
def film_termination(self) -> str:
"""Label for the film termination chemistry"""
return label_termination(self.film)
@property
def substrate_termination(self) -> str:
"""Label for the substrate termination chemistry"""
return label_termination(self.substrate)
@property
def film_layers(self) -> int:
"""Number of layers of the minimum element in the film composition"""
sorted_element_list = sorted(
self.film.composition.element_composition.items(), key=lambda x: x[1], reverse=True
)
return count_layers(self.film, sorted_element_list[0][0])
@property
def substrate_layers(self) -> int:
"""Number of layers of the minimum element in the substrate composition"""
sorted_element_list = sorted(
self.substrate.composition.element_composition.items(), key=lambda x: x[1], reverse=True
)
return count_layers(self.substrate, sorted_element_list[0][0])
def __update_c(self, new_c: float) -> None:
"""
        Modifies the c-direction of the lattice without changing the site cartesian coordinates.
        Be careful: you can mess up the interface by setting a c-length that cannot accommodate all the sites.
"""
if new_c <= 0:
raise ValueError("New c-length must be greater than 0")
new_latt_matrix = self.lattice.matrix[:2].tolist() + [[0, 0, new_c]]
        new_lattice = Lattice(new_latt_matrix)
        self._lattice = new_lattice
        for site, c_coords in zip(self, self.cart_coords):
            site._lattice = new_lattice  # Update the lattice
            site.coords = c_coords  # Put back into original cartesian space
def as_dict(self):
"""
:return: MSONAble dict
"""
d = super().as_dict()
d["in_plane_offset"] = self.in_plane_offset.tolist()
d["gap"] = self.gap
d["vacuum_over_film"] = self.vacuum_over_film
d["interface_properties"] = self.interface_properties
return d
@classmethod
def from_dict(cls, d):
"""
:param d: dict
:return: Creates slab from dict.
"""
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
s = Structure.from_sites(sites)
optional = dict(
in_plane_offset=d.get("in_plane_offset"),
gap=d.get("gap"),
vacuum_over_film=d.get("vacuum_over_film"),
interface_properties=d.get("interface_properties"),
)
return Interface(
lattice=lattice,
species=s.species_and_occu,
coords=s.frac_coords,
site_properties=s.site_properties,
**{k: v for k, v in optional.items() if v is not None},
)
@classmethod
def from_slabs(
cls,
substrate_slab: Slab,
film_slab: Slab,
in_plane_offset: Tuple[float, float] = (0, 0),
gap: float = 1.6,
vacuum_over_film: float = 0.0,
interface_properties: Optional[Dict] = None,
center_slab: bool = True,
) -> "Interface":
"""
Makes an interface structure by merging a substrate and film slabs
The film a- and b-vectors will be forced to be the substrate slab's
a- and b-vectors.
For now, it's suggested to use a factory method that will ensure the
appropriate interface structure is already met.
Args:
sub_slab: slab for the substrate
film_slab: slab for the film
in_plane_offset: fractional shift in plane
for the film with respect to the substrate
gap: gap between substrate and film in Angstroms
vacuum_over_film: vacuum space above the film in Angstroms
structure_properties: dictionary of misc properties for this structure
center_slab: center the slab
"""
interface_properties = interface_properties or {}
# Ensure c-axis is orthogonal to a/b plane
if isinstance(substrate_slab, Slab):
substrate_slab = substrate_slab.get_orthogonal_c_slab()
if isinstance(film_slab, Slab):
film_slab = film_slab.get_orthogonal_c_slab()
assert np.allclose(film_slab.lattice.alpha, 90, 0.1)
assert np.allclose(film_slab.lattice.beta, 90, 0.1)
assert np.allclose(substrate_slab.lattice.alpha, 90, 0.1)
assert np.allclose(substrate_slab.lattice.beta, 90, 0.1)
# Ensure sub is right-handed
        # i.e. the substrate has its surface facing "up"
sub_vecs = substrate_slab.lattice.matrix.copy()
if np.dot(np.cross(*sub_vecs[:2]), sub_vecs[2]) < 0:
sub_vecs[2] *= -1.0
substrate_slab.lattice = Lattice(sub_vecs)
# Find the limits of C-coords
sub_coords = substrate_slab.frac_coords
film_coords = film_slab.frac_coords
sub_min_c = np.min(sub_coords[:, 2]) * substrate_slab.lattice.c
sub_max_c = np.max(sub_coords[:, 2]) * substrate_slab.lattice.c
film_min_c = np.min(film_coords[:, 2]) * film_slab.lattice.c
film_max_c = np.max(film_coords[:, 2]) * film_slab.lattice.c
min_height = np.abs(film_max_c - film_min_c) + np.abs(sub_max_c - sub_min_c)
# construct new lattice
abc = substrate_slab.lattice.abc[:2] + (min_height + gap + vacuum_over_film,)
angles = substrate_slab.lattice.angles
lattice = Lattice.from_parameters(*abc, *angles)
# Get the species
species = substrate_slab.species + film_slab.species
# Get the coords
# Shift substrate to bottom in new lattice
sub_coords = np.subtract(sub_coords, [0, 0, np.min(sub_coords[:, 2])])
sub_coords[:, 2] *= substrate_slab.lattice.c / lattice.c
# Flip the film over
film_coords[:, 2] *= -1.0
film_coords[:, 2] *= film_slab.lattice.c / lattice.c
# Shift the film coords to right over the substrate + gap
film_coords = np.subtract(film_coords, [0, 0, np.min(film_coords[:, 2])])
film_coords = np.add(film_coords, [0, 0, gap / lattice.c + np.max(sub_coords[:, 2])])
# Build coords
coords = np.concatenate([sub_coords, film_coords])
# Shift coords to center
if center_slab:
coords = np.add(coords, [0, 0, 0.5 - np.average(coords[:, 2])])
# Only merge site properties in both slabs
site_properties = {}
site_props_in_both = set(substrate_slab.site_properties.keys()) & set(film_slab.site_properties.keys())
for key in site_props_in_both:
site_properties[key] = [
*substrate_slab.site_properties[key],
*film_slab.site_properties[key],
]
site_properties["interface_label"] = ["substrate"] * len(substrate_slab) + ["film"] * len(film_slab)
iface = cls(
lattice=lattice,
species=species,
coords=coords,
to_unit_cell=False,
coords_are_cartesian=False,
site_properties=site_properties,
validate_proximity=False,
in_plane_offset=in_plane_offset,
gap=gap,
vacuum_over_film=vacuum_over_film,
interface_properties=interface_properties,
)
iface.sort()
return iface
def label_termination(slab: Structure) -> str:
"""Labels the slab surface termination"""
frac_coords = slab.frac_coords
n = len(frac_coords)
if n == 1:
# Clustering does not work when there is only one data point.
form = slab.composition.reduced_formula
sp_symbol = SpacegroupAnalyzer(slab, symprec=0.1).get_space_group_symbol()
return f"{form}_{sp_symbol}_{len(slab)}"
dist_matrix = np.zeros((n, n))
h = slab.lattice.c
# Projection of c lattice vector in
# direction of surface normal.
for i, j in combinations(list(range(n)), 2):
if i != j:
cdist = frac_coords[i][2] - frac_coords[j][2]
cdist = abs(cdist - round(cdist)) * h
dist_matrix[i, j] = cdist
dist_matrix[j, i] = cdist
condensed_m = squareform(dist_matrix)
z = linkage(condensed_m)
clusters = fcluster(z, 0.25, criterion="distance")
clustered_sites: Dict[int, List[Site]] = {c: [] for c in clusters}
for i, c in enumerate(clusters):
clustered_sites[c].append(slab[i])
plane_heights = {
np.average(np.mod([s.frac_coords[2] for s in sites], 1)): c for c, sites in clustered_sites.items()
}
top_plane_cluster = sorted(plane_heights.items(), key=lambda x: x[0])[-1][1]
top_plane_sites = clustered_sites[top_plane_cluster]
top_plane = Structure.from_sites(top_plane_sites)
sp_symbol = SpacegroupAnalyzer(top_plane, symprec=0.1).get_space_group_symbol()
form = top_plane.composition.reduced_formula
return f"{form}_{sp_symbol}_{len(top_plane)}"
def count_layers(struc: Structure, el=None) -> int:
"""
Counts the number of 'layers' along the c-axis
"""
el = el if el else struc.composition.elements[0]
frac_coords = [site.frac_coords for site in struc if site.species_string == str(el)]
n = len(frac_coords)
if n == 1:
return 1
dist_matrix = np.zeros((n, n))
h = struc.lattice.c
# Projection of c lattice vector in
# direction of surface normal.
for i, j in combinations(list(range(n)), 2):
if i != j:
cdist = frac_coords[i][2] - frac_coords[j][2]
cdist = abs(cdist - round(cdist)) * h
dist_matrix[i, j] = cdist
dist_matrix[j, i] = cdist
condensed_m = squareform(dist_matrix)
z = linkage(condensed_m)
clusters = fcluster(z, 0.25, criterion="distance")
clustered_sites: Dict[int, List[Site]] = {c: [] for c in clusters}
for i, c in enumerate(clusters):
clustered_sites[c].append(struc[i])
plane_heights = {
np.average(np.mod([s.frac_coords[2] for s in sites], 1)): c for c, sites in clustered_sites.items()
}
return len(plane_heights)
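if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): build a toy Cu/Cu
    # interface from two identical slabs with Interface.from_slabs. The lattice
    # constant, slab sizes, gap and vacuum values below are illustrative only.
    from pymatgen.core.surface import SlabGenerator

    bulk = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.6), ["Cu"], [[0, 0, 0]])
    slab = SlabGenerator(bulk, (1, 1, 1), min_slab_size=5.0, min_vacuum_size=10.0).get_slab()
    iface = Interface.from_slabs(substrate_slab=slab, film_slab=slab, gap=2.0, vacuum_over_film=10.0)
    print(iface.substrate_termination, iface.film_termination)
    print(len(iface.substrate_indicies), "substrate sites,", len(iface.film_indices), "film sites")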
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.orchestration.airflow.service_v1beta1.types import environments
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-orchestration-airflow-service",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class EnvironmentsTransport(abc.ABC):
"""Abstract transport class for Environments."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "composer.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_environment: gapic_v1.method.wrap_method(
self.create_environment, default_timeout=None, client_info=client_info,
),
self.get_environment: gapic_v1.method.wrap_method(
self.get_environment, default_timeout=None, client_info=client_info,
),
self.list_environments: gapic_v1.method.wrap_method(
self.list_environments, default_timeout=None, client_info=client_info,
),
self.update_environment: gapic_v1.method.wrap_method(
self.update_environment, default_timeout=None, client_info=client_info,
),
self.delete_environment: gapic_v1.method.wrap_method(
self.delete_environment, default_timeout=None, client_info=client_info,
),
self.restart_web_server: gapic_v1.method.wrap_method(
self.restart_web_server, default_timeout=None, client_info=client_info,
),
self.check_upgrade: gapic_v1.method.wrap_method(
self.check_upgrade, default_timeout=None, client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_environment(
self,
) -> Callable[
[environments.CreateEnvironmentRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_environment(
self,
) -> Callable[
[environments.GetEnvironmentRequest],
Union[environments.Environment, Awaitable[environments.Environment]],
]:
raise NotImplementedError()
@property
def list_environments(
self,
) -> Callable[
[environments.ListEnvironmentsRequest],
Union[
environments.ListEnvironmentsResponse,
Awaitable[environments.ListEnvironmentsResponse],
],
]:
raise NotImplementedError()
@property
def update_environment(
self,
) -> Callable[
[environments.UpdateEnvironmentRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def delete_environment(
self,
) -> Callable[
[environments.DeleteEnvironmentRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def restart_web_server(
self,
) -> Callable[
[environments.RestartWebServerRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def check_upgrade(
self,
) -> Callable[
[environments.CheckUpgradeRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
__all__ = ("EnvironmentsTransport",)
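# Illustrative note (not part of the generated file): the concrete gRPC and
# gRPC-asyncio transports in this package subclass EnvironmentsTransport and
# return real callables from each property. A bare-bones stand-in, shown only
# to clarify the contract (``FakeEnvironmentsTransport`` is hypothetical):
#
#     class FakeEnvironmentsTransport(EnvironmentsTransport):
#         def __init__(self, **kwargs):
#             kwargs.setdefault("credentials", ga_credentials.AnonymousCredentials())
#             super().__init__(**kwargs)
#
#         def close(self):
#             pass
#
#         @property
#         def get_environment(self):
#             return lambda request, **_: environments.Environment(name=request.name)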
|
|
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import re
import socket
import sys
import types
import uuid
import eventlet
import greenlet
from oslo.config import cfg
import six
from six import moves
from designate.openstack.common import excutils
from designate.openstack.common.gettextutils import _
from designate.openstack.common import importutils
from designate.openstack.common import jsonutils
from designate.openstack.common.rpc import common as rpc_common
zmq = importutils.try_import('eventlet.green.zmq')
# Module-level aliases, kept here for convenience; they are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
zmq_opts = [
cfg.StrOpt('rpc_zmq_bind_address', default='*',
help='ZeroMQ bind address. Should be a wildcard (*), '
'an ethernet interface, or IP. '
'The "host" option should point or resolve to this '
'address.'),
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('designate.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
# The following port is unassigned by IANA as of 2012-05-21
cfg.IntOpt('rpc_zmq_port', default=9501,
help='ZeroMQ receiver listening port'),
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
help='Name of this node. Must be a valid hostname, FQDN, or '
'IP address. Must match "host" option, if running Nova.')
]
CONF = cfg.CONF
CONF.register_opts(zmq_opts)
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memorized matchmaker object
def _serialize(data):
"""Serialization wrapper.
We prefer using JSON, but it cannot encode all types.
Error if a developer passes us bad data.
"""
try:
return jsonutils.dumps(data, ensure_ascii=True)
except TypeError:
with excutils.save_and_reraise_exception():
LOG.error(_("JSON serialization failed."))
def _deserialize(data):
"""Deserialization wrapper."""
LOG.debug(_("Deserializing: %s"), data)
return jsonutils.loads(data)
class ZmqSocket(object):
"""A tiny wrapper around ZeroMQ.
Simplifies the send/recv protocol and connection management.
Can be used as a Context (supports the 'with' statement).
"""
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
self.sock = _get_ctxt().socket(zmq_type)
self.addr = addr
self.type = zmq_type
self.subscriptions = []
# Support failures on sending/receiving on wrong socket type.
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
self.can_sub = zmq_type in (zmq.SUB, )
# Support list, str, & None for subscribe arg (cast to list)
do_sub = {
list: subscribe,
str: [subscribe],
type(None): []
}[type(subscribe)]
for f in do_sub:
self.subscribe(f)
str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind}
LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
LOG.debug(_("-> bind: %(bind)s"), str_data)
try:
if bind:
self.sock.bind(addr)
else:
self.sock.connect(addr)
except Exception:
raise RPCException(_("Could not open socket."))
def socket_s(self):
"""Get socket type as string."""
t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
'DEALER')
return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
def subscribe(self, msg_filter):
"""Subscribe."""
if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.")
LOG.debug(_("Subscribing to %s"), msg_filter)
try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
except Exception:
return
self.subscriptions.append(msg_filter)
def unsubscribe(self, msg_filter):
"""Unsubscribe."""
if msg_filter not in self.subscriptions:
return
self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
self.subscriptions.remove(msg_filter)
def close(self):
if self.sock is None or self.sock.closed:
return
# We must unsubscribe, or we'll leak descriptors.
if self.subscriptions:
for f in self.subscriptions:
try:
self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
except Exception:
pass
self.subscriptions = []
try:
# Default is to linger
self.sock.close()
except Exception:
# While this is a bad thing to happen,
# it would be much worse if some of the code calling this
            # were to fail. For now, let's log, and later evaluate
# if we can safely raise here.
LOG.error(_("ZeroMQ socket could not be closed."))
self.sock = None
def recv(self, **kwargs):
if not self.can_recv:
raise RPCException(_("You cannot recv on this socket."))
return self.sock.recv_multipart(**kwargs)
def send(self, data, **kwargs):
if not self.can_send:
raise RPCException(_("You cannot send on this socket."))
self.sock.send_multipart(data, **kwargs)
class ZmqClient(object):
"""Client for ZMQ sockets."""
def __init__(self, addr):
self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)
def cast(self, msg_id, topic, data, envelope):
msg_id = msg_id or 0
if not envelope:
self.outq.send(map(bytes,
(msg_id, topic, 'cast', _serialize(data))))
return
rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items())
self.outq.send(map(bytes,
(msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
def close(self):
self.outq.close()
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.replies = []
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['replies'] = self.replies
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False):
if ending:
return
self.replies.append(reply)
@classmethod
def marshal(self, ctx):
ctx_data = ctx.to_dict()
return _serialize(ctx_data)
@classmethod
def unmarshal(self, data):
return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
"""Used by ConsumerBase as a private context for - methods."""
def __init__(self, proxy):
self.proxy = proxy
self.msg_waiter = None
def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', {})
try:
result = proxy.dispatch(
ctx, data['version'], data['method'],
data.get('namespace'), **data['args'])
return ConsumerBase.normalize_reply(result, ctx.replies)
except greenlet.GreenletExit:
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException as e:
LOG.debug(_("Expected exception during message handling (%s)") %
e._exc_info[1])
return {'exc':
rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)}
except Exception:
LOG.error(_("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
def reply(self, ctx, proxy,
msg_id=None, context=None, topic=None, msg=None):
"""Reply to a casted call."""
# NOTE(ewindisch): context kwarg exists for Grizzly compat.
# this may be able to be removed earlier than
# 'I' if ConsumerBase.process were refactored.
if type(msg) is list:
payload = msg[-1]
else:
payload = msg
response = ConsumerBase.normalize_reply(
self._get_response(ctx, proxy, topic, payload),
ctx.replies)
LOG.debug(_("Sending reply"))
_multi_send(_cast, ctx, topic, {
'method': '-process_reply',
'args': {
'msg_id': msg_id, # Include for Folsom compat.
'response': response
}
}, _msg_id=msg_id)
class ConsumerBase(object):
"""Base Consumer."""
def __init__(self):
self.private_ctx = InternalContext(None)
@classmethod
def normalize_reply(self, result, replies):
#TODO(ewindisch): re-evaluate and document this method.
if isinstance(result, types.GeneratorType):
return list(result)
elif replies:
return replies
else:
return [result]
def process(self, proxy, ctx, data):
data.setdefault('version', None)
data.setdefault('args', {})
        # Methods whose names start with '-' are
        # processed internally (not a valid public method name).
method = data.get('method')
if not method:
LOG.error(_("RPC message did not include method."))
return
# Internal method
# uses internal context for safety.
if method == '-reply':
self.private_ctx.reply(ctx, proxy, **data['args'])
return
proxy.dispatch(ctx, data['version'],
data['method'], data.get('namespace'), **data['args'])
class ZmqBaseReactor(ConsumerBase):
"""A consumer class implementing a centralized casting broker (PULL-PUSH).
Used for RoundRobin requests.
"""
def __init__(self, conf):
super(ZmqBaseReactor, self).__init__()
self.proxies = {}
self.threads = []
self.sockets = []
self.subscribe = {}
self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
def register(self, proxy, in_addr, zmq_type_in,
in_bind=True, subscribe=None):
LOG.info(_("Registering reactor"))
if zmq_type_in not in (zmq.PULL, zmq.SUB):
raise RPCException("Bad input socktype")
# Items push in.
inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
subscribe=subscribe)
self.proxies[inq] = proxy
self.sockets.append(inq)
LOG.info(_("In reactor registered"))
def consume_in_thread(self):
@excutils.forever_retry_uncaught_exceptions
def _consume(sock):
LOG.info(_("Consuming socket"))
while True:
self.consume(sock)
for k in self.proxies.keys():
self.threads.append(
self.pool.spawn(_consume, k)
)
def wait(self):
for t in self.threads:
t.wait()
def close(self):
for s in self.sockets:
s.close()
for t in self.threads:
t.kill()
class ZmqProxy(ZmqBaseReactor):
"""A consumer class implementing a topic-based proxy.
Forwards to IPC sockets.
"""
def __init__(self, conf):
super(ZmqProxy, self).__init__(conf)
pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))
self.topic_proxy = {}
def consume(self, sock):
ipc_dir = CONF.rpc_zmq_ipc_dir
data = sock.recv(copy=False)
topic = data[1].bytes
if topic.startswith('fanout~'):
sock_type = zmq.PUB
topic = topic.split('.', 1)[0]
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
else:
sock_type = zmq.PUSH
if topic not in self.topic_proxy:
def publisher(waiter):
LOG.info(_("Creating proxy for topic: %s"), topic)
try:
# The topic is received over the network,
# don't trust this input.
if self.badchars.search(topic) is not None:
emsg = _("Topic contained dangerous characters.")
LOG.warn(emsg)
raise RPCException(emsg)
out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
(ipc_dir, topic),
sock_type, bind=True)
except RPCException:
waiter.send_exception(*sys.exc_info())
return
self.topic_proxy[topic] = eventlet.queue.LightQueue(
CONF.rpc_zmq_topic_backlog)
self.sockets.append(out_sock)
# It takes some time for a pub socket to open,
# before we can have any faith in doing a send() to it.
if sock_type == zmq.PUB:
eventlet.sleep(.5)
waiter.send(True)
while(True):
data = self.topic_proxy[topic].get()
out_sock.send(data, copy=False)
wait_sock_creation = eventlet.event.Event()
eventlet.spawn(publisher, wait_sock_creation)
try:
wait_sock_creation.wait()
except RPCException:
LOG.error(_("Topic socket file creation failed."))
return
try:
self.topic_proxy[topic].put_nowait(data)
except eventlet.queue.Full:
LOG.error(_("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self):
"""Runs the ZmqProxy service."""
ipc_dir = CONF.rpc_zmq_ipc_dir
consume_in = "tcp://%s:%s" % \
(CONF.rpc_zmq_bind_address,
CONF.rpc_zmq_port)
consumption_proxy = InternalContext(None)
try:
os.makedirs(ipc_dir)
except os.error:
if not os.path.isdir(ipc_dir):
with excutils.save_and_reraise_exception():
LOG.error(_("Required IPC directory does not exist at"
" %s") % (ipc_dir, ))
try:
self.register(consumption_proxy,
consume_in,
zmq.PULL)
except zmq.ZMQError:
if os.access(ipc_dir, os.X_OK):
with excutils.save_and_reraise_exception():
LOG.error(_("Permission denied to IPC directory at"
" %s") % (ipc_dir, ))
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
super(ZmqProxy, self).consume_in_thread()
def unflatten_envelope(packenv):
"""Unflattens the RPC envelope.
Takes a list and returns a dictionary.
i.e. [1,2,3,4] => {1: 2, 3: 4}
"""
i = iter(packenv)
h = {}
try:
while True:
k = six.next(i)
h[k] = six.next(i)
except StopIteration:
return h
class ZmqReactor(ZmqBaseReactor):
"""A consumer class implementing a consumer for messages.
Can also be used as a 1:1 proxy
"""
def __init__(self, conf):
super(ZmqReactor, self).__init__(conf)
def consume(self, sock):
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
proxy = self.proxies[sock]
if data[2] == 'cast': # Legacy protocol
packenv = data[3]
ctx, msg = _deserialize(packenv)
request = rpc_common.deserialize_msg(msg)
ctx = RpcContext.unmarshal(ctx)
elif data[2] == 'impl_zmq_v2':
packenv = data[4:]
msg = unflatten_envelope(packenv)
request = rpc_common.deserialize_msg(msg)
# Unmarshal only after verifying the message.
ctx = RpcContext.unmarshal(data[3])
else:
LOG.error(_("ZMQ Envelope version unsupported or unknown."))
return
self.pool.spawn_n(self.process, proxy, ctx, request)
class Connection(rpc_common.Connection):
"""Manages connections and threads."""
def __init__(self, conf):
self.topics = []
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
# Register with matchmaker.
_get_matchmaker().register(topic, CONF.rpc_zmq_host)
# Subscription scenarios
if fanout:
sock_type = zmq.SUB
subscribe = ('', fanout)[type(fanout) == str]
topic = 'fanout~' + topic.split('.', 1)[0]
else:
sock_type = zmq.PULL
subscribe = None
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
if topic in self.topics:
LOG.info(_("Skipping topic registration. Already registered."))
return
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
(CONF.rpc_zmq_ipc_dir, topic)
LOG.debug(_("Consumer is a zmq.%s"),
['PULL', 'SUB'][sock_type == zmq.SUB])
self.reactor.register(proxy, inaddr, sock_type,
subscribe=subscribe, in_bind=False)
self.topics.append(topic)
def close(self):
_get_matchmaker().stop_heartbeat()
for topic in self.topics:
_get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
self.reactor.close()
self.topics = []
def wait(self):
self.reactor.wait()
def consume_in_thread(self):
_get_matchmaker().start_heartbeat()
self.reactor.consume_in_thread()
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
_msg_id=None):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
with Timeout(timeout_cast, exception=rpc_common.Timeout):
try:
conn = ZmqClient(addr)
# assumes cast can't return an exception
conn.cast(_msg_id, topic, payload, envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
if 'conn' in vars():
conn.close()
def _call(addr, context, topic, msg, timeout=None,
envelope=False):
# timeout_response is how long we wait for a response
timeout = timeout or CONF.rpc_response_timeout
# The msg_id is used to track replies.
msg_id = uuid.uuid4().hex
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug(_("Creating payload"))
# Curry the original request into a reply method.
mcontext = RpcContext.marshal(context)
payload = {
'method': '-reply',
'args': {
'msg_id': msg_id,
'topic': reply_topic,
# TODO(ewindisch): safe to remove mcontext in I.
'msg': [mcontext, msg]
}
}
LOG.debug(_("Creating queue socket for reply waiter"))
# Messages arriving async.
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
with Timeout(timeout, exception=rpc_common.Timeout):
try:
msg_waiter = ZmqSocket(
"ipc://%s/zmq_topic_zmq_replies.%s" %
(CONF.rpc_zmq_ipc_dir,
CONF.rpc_zmq_host),
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug(_("Sending cast"))
_cast(addr, context, topic, payload, envelope)
LOG.debug(_("Cast sent; Waiting reply"))
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug(_("Received message: %s"), msg)
LOG.debug(_("Unpacking response"))
if msg[2] == 'cast': # Legacy version
raw_msg = _deserialize(msg[-1])[-1]
elif msg[2] == 'impl_zmq_v2':
rpc_envelope = unflatten_envelope(msg[4:])
raw_msg = rpc_common.deserialize_msg(rpc_envelope)
else:
raise rpc_common.UnsupportedRpcEnvelopeVersion(
_("Unsupported or unknown ZMQ envelope returned."))
responses = raw_msg['args']['response']
# ZMQError trumps the Timeout error.
except zmq.ZMQError:
raise RPCException("ZMQ Socket Error")
except (IndexError, KeyError):
raise RPCException(_("RPC Message Invalid."))
finally:
if 'msg_waiter' in vars():
msg_waiter.close()
# It seems we don't need to do all of the following,
# but perhaps it would be useful for multicall?
# One effect of this is that we're checking all
# responses for Exceptions.
for resp in responses:
if isinstance(resp, types.DictType) and 'exc' in resp:
raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None,
envelope=False, _msg_id=None):
"""Wraps the sending of messages.
Dispatches to the matchmaker and sends message to all relevant hosts.
"""
conf = CONF
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
queues = _get_matchmaker().queues(topic)
LOG.debug(_("Sending message(s) to: %s"), queues)
# Don't stack if we have no matchmaker results
if not queues:
LOG.warn(_("No matchmaker results. Not casting."))
# While not strictly a timeout, callers know how to handle
# this exception and a timeout isn't too big a lie.
raise rpc_common.Timeout(_("No match from matchmaker."))
# This supports brokerless fanout (addresses > 1)
for queue in queues:
(_topic, ip_addr) = queue
_addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
_topic, msg, timeout, envelope,
_msg_id)
return
return method(_addr, context, _topic, msg, timeout,
envelope)
def create_connection(conf, new=True):
return Connection(conf)
def multicall(conf, *args, **kwargs):
"""Multiple calls."""
return _multi_send(_call, *args, **kwargs)
def call(conf, *args, **kwargs):
"""Send a message, expect a response."""
data = _multi_send(_call, *args, **kwargs)
return data[-1]
def cast(conf, *args, **kwargs):
"""Send a message expecting no reply."""
_multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
"""Send a message to all listening and expect no reply."""
    # NOTE(ewindisch): fanout~ is used because it avoids splitting on .
# and acts as a non-subtle hint to the matchmaker and ZmqProxy.
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
def notify(conf, context, topic, msg, envelope):
"""Send notification event.
Notifications are sent to topic-priority.
This differs from the AMQP drivers which send to topic.priority.
"""
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
topic = topic.replace('.', '-')
cast(conf, context, topic, msg, envelope=envelope)
def cleanup():
"""Clean up resources in use by implementation."""
global ZMQ_CTX
if ZMQ_CTX:
ZMQ_CTX.term()
ZMQ_CTX = None
global matchmaker
matchmaker = None
def _get_ctxt():
if not zmq:
raise ImportError("Failed to import eventlet.green.zmq")
global ZMQ_CTX
if not ZMQ_CTX:
ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
return ZMQ_CTX
def _get_matchmaker(*args, **kwargs):
global matchmaker
if not matchmaker:
mm = CONF.rpc_zmq_matchmaker
if mm.endswith('matchmaker.MatchMakerRing'):
            mm = mm.replace('matchmaker', 'matchmaker_ring')
LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
' %(new)s instead') % dict(
orig=CONF.rpc_zmq_matchmaker, new=mm))
matchmaker = importutils.import_object(mm, *args, **kwargs)
return matchmaker
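if __name__ == '__main__':
    # Small illustrative check (not part of the original module): ZmqClient.cast
    # flattens the RPC envelope dict into a flat [k1, v1, k2, v2, ...] sequence for
    # the wire, and unflatten_envelope reverses that. The envelope keys below are
    # examples only.
    example_envelope = {'oslo.version': '2.0',
                        'oslo.message': _serialize({'method': 'ping', 'args': {}})}
    flat = moves.reduce(lambda x, y: x + y, example_envelope.items())
    assert unflatten_envelope(flat) == example_envelope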
|
|
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library of losses for disentanglement learning.
Implementation of VAE based models for unsupervised learning of disentangled
representations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from disentanglement_lib.methods.shared import architectures # pylint: disable=unused-import
from disentanglement_lib.methods.shared import losses # pylint: disable=unused-import
from disentanglement_lib.methods.shared import optimizers # pylint: disable=unused-import
from disentanglement_lib.methods.unsupervised import gaussian_encoder_model
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
import gin.tf
from tensorflow.contrib import tpu as contrib_tpu
class BaseVAE(gaussian_encoder_model.GaussianEncoderModel):
"""Abstract base class of a basic Gaussian encoder model."""
def model_fn(self, features, labels, mode, params):
"""TPUEstimator compatible model function."""
del labels
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
data_shape = features.get_shape().as_list()[1:]
z_mean, z_logvar = self.gaussian_encoder(features, is_training=is_training)
z_sampled = self.sample_from_latent_distribution(z_mean, z_logvar)
reconstructions = self.decode(z_sampled, data_shape, is_training)
per_sample_loss = losses.make_reconstruction_loss(features, reconstructions)
reconstruction_loss = tf.reduce_mean(per_sample_loss)
kl_loss = compute_gaussian_kl(z_mean, z_logvar)
regularizer = self.regularizer(kl_loss, z_mean, z_logvar, z_sampled)
loss = tf.add(reconstruction_loss, regularizer, name="loss")
elbo = tf.add(reconstruction_loss, kl_loss, name="elbo")
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = optimizers.make_vae_optimizer()
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = optimizer.minimize(
loss=loss, global_step=tf.train.get_global_step())
train_op = tf.group([train_op, update_ops])
tf.summary.scalar("reconstruction_loss", reconstruction_loss)
tf.summary.scalar("elbo", -elbo)
logging_hook = tf.train.LoggingTensorHook({
"loss": loss,
"reconstruction_loss": reconstruction_loss,
"elbo": -elbo
},
every_n_iter=100)
return contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
training_hooks=[logging_hook])
elif mode == tf.estimator.ModeKeys.EVAL:
return contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(make_metric_fn("reconstruction_loss", "elbo",
"regularizer", "kl_loss"),
[reconstruction_loss, -elbo, regularizer, kl_loss]))
else:
raise NotImplementedError("Eval mode not supported.")
def gaussian_encoder(self, input_tensor, is_training):
"""Applies the Gaussian encoder to images.
Args:
input_tensor: Tensor with the observations to be encoded.
is_training: Boolean indicating whether in training mode.
Returns:
Tuple of tensors with the mean and log variance of the Gaussian encoder.
"""
return architectures.make_gaussian_encoder(
input_tensor, is_training=is_training)
def decode(self, latent_tensor, observation_shape, is_training):
"""Decodes the latent_tensor to an observation."""
return architectures.make_decoder(
latent_tensor, observation_shape, is_training=is_training)
def shuffle_codes(z):
"""Shuffles latent variables across the batch.
Args:
z: [batch_size, num_latent] representation.
Returns:
shuffled: [batch_size, num_latent] shuffled representation across the batch.
"""
z_shuffle = []
for i in range(z.get_shape()[1]):
z_shuffle.append(tf.random_shuffle(z[:, i]))
shuffled = tf.stack(z_shuffle, 1, name="latent_shuffled")
return shuffled
def compute_gaussian_kl(z_mean, z_logvar):
"""Compute KL divergence between input Gaussian and Standard Normal."""
return tf.reduce_mean(
0.5 * tf.reduce_sum(
tf.square(z_mean) + tf.exp(z_logvar) - z_logvar - 1, [1]),
name="kl_loss")
def make_metric_fn(*names):
"""Utility function to report tf.metrics in model functions."""
def metric_fn(*args):
return {name: tf.metrics.mean(vec) for name, vec in zip(names, args)}
return metric_fn
@gin.configurable("vae")
class BetaVAE(BaseVAE):
"""BetaVAE model."""
def __init__(self, beta=gin.REQUIRED):
"""Creates a beta-VAE model.
Implementing Eq. 4 of "beta-VAE: Learning Basic Visual Concepts with a
Constrained Variational Framework"
(https://openreview.net/forum?id=Sy2fzU9gl).
Args:
beta: Hyperparameter for the regularizer.
Returns:
model_fn: Model function for TPUEstimator.
"""
self.beta = beta
def regularizer(self, kl_loss, z_mean, z_logvar, z_sampled):
del z_mean, z_logvar, z_sampled
return self.beta * kl_loss
def anneal(c_max, step, iteration_threshold):
"""Anneal function for anneal_vae (https://arxiv.org/abs/1804.03599).
Args:
c_max: Maximum capacity.
step: Current step.
iteration_threshold: How many iterations to reach c_max.
Returns:
Capacity annealed linearly until c_max.
"""
return tf.math.minimum(c_max * 1.,
c_max * 1. * tf.to_float(step) / iteration_threshold)
@gin.configurable("annealed_vae")
class AnnealedVAE(BaseVAE):
"""AnnealedVAE model."""
def __init__(self,
gamma=gin.REQUIRED,
c_max=gin.REQUIRED,
iteration_threshold=gin.REQUIRED):
"""Creates an AnnealedVAE model.
Implementing Eq. 8 of "Understanding disentangling in beta-VAE"
(https://arxiv.org/abs/1804.03599).
Args:
gamma: Hyperparameter for the regularizer.
c_max: Maximum capacity of the bottleneck.
iteration_threshold: How many iterations to reach c_max.
"""
self.gamma = gamma
self.c_max = c_max
self.iteration_threshold = iteration_threshold
def regularizer(self, kl_loss, z_mean, z_logvar, z_sampled):
del z_mean, z_logvar, z_sampled
c = anneal(self.c_max, tf.train.get_global_step(), self.iteration_threshold)
return self.gamma * tf.math.abs(kl_loss - c)
@gin.configurable("factor_vae")
class FactorVAE(BaseVAE):
"""FactorVAE model."""
def __init__(self, gamma=gin.REQUIRED):
"""Creates a FactorVAE model.
Implementing Eq. 2 of "Disentangling by Factorizing"
(https://arxiv.org/pdf/1802.05983).
Args:
gamma: Hyperparameter for the regularizer.
"""
self.gamma = gamma
def model_fn(self, features, labels, mode, params):
"""TPUEstimator compatible model function."""
del labels
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
data_shape = features.get_shape().as_list()[1:]
z_mean, z_logvar = self.gaussian_encoder(features, is_training=is_training)
z_sampled = self.sample_from_latent_distribution(z_mean, z_logvar)
z_shuffle = shuffle_codes(z_sampled)
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
logits_z, probs_z = architectures.make_discriminator(
z_sampled, is_training=is_training)
_, probs_z_shuffle = architectures.make_discriminator(
z_shuffle, is_training=is_training)
reconstructions = self.decode(z_sampled, data_shape, is_training)
per_sample_loss = losses.make_reconstruction_loss(
features, reconstructions)
reconstruction_loss = tf.reduce_mean(per_sample_loss)
kl_loss = compute_gaussian_kl(z_mean, z_logvar)
standard_vae_loss = tf.add(reconstruction_loss, kl_loss, name="VAE_loss")
# tc = E[log(p_real)-log(p_fake)] = E[logit_real - logit_fake]
tc_loss_per_sample = logits_z[:, 0] - logits_z[:, 1]
tc_loss = tf.reduce_mean(tc_loss_per_sample, axis=0)
regularizer = kl_loss + self.gamma * tc_loss
factor_vae_loss = tf.add(
standard_vae_loss, self.gamma * tc_loss, name="factor_VAE_loss")
discr_loss = tf.add(
0.5 * tf.reduce_mean(tf.log(probs_z[:, 0])),
0.5 * tf.reduce_mean(tf.log(probs_z_shuffle[:, 1])),
name="discriminator_loss")
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer_vae = optimizers.make_vae_optimizer()
optimizer_discriminator = optimizers.make_discriminator_optimizer()
all_variables = tf.trainable_variables()
encoder_vars = [var for var in all_variables if "encoder" in var.name]
decoder_vars = [var for var in all_variables if "decoder" in var.name]
discriminator_vars = [var for var in all_variables \
if "discriminator" in var.name]
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op_vae = optimizer_vae.minimize(
loss=factor_vae_loss,
global_step=tf.train.get_global_step(),
var_list=encoder_vars + decoder_vars)
train_op_discr = optimizer_discriminator.minimize(
loss=-discr_loss,
global_step=tf.train.get_global_step(),
var_list=discriminator_vars)
train_op = tf.group(train_op_vae, train_op_discr, update_ops)
tf.summary.scalar("reconstruction_loss", reconstruction_loss)
logging_hook = tf.train.LoggingTensorHook({
"loss": factor_vae_loss,
"reconstruction_loss": reconstruction_loss
},
every_n_iter=50)
return contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=factor_vae_loss,
train_op=train_op,
training_hooks=[logging_hook])
elif mode == tf.estimator.ModeKeys.EVAL:
return contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=factor_vae_loss,
eval_metrics=(make_metric_fn("reconstruction_loss", "regularizer",
"kl_loss"),
[reconstruction_loss, regularizer, kl_loss]))
else:
raise NotImplementedError("Eval mode not supported.")
def compute_covariance_z_mean(z_mean):
"""Computes the covariance of z_mean.
Uses cov(z_mean) = E[z_mean*z_mean^T] - E[z_mean]E[z_mean]^T.
Args:
z_mean: Encoder mean, tensor of size [batch_size, num_latent].
Returns:
cov_z_mean: Covariance of encoder mean, tensor of size [num_latent,
num_latent].
"""
expectation_z_mean_z_mean_t = tf.reduce_mean(
tf.expand_dims(z_mean, 2) * tf.expand_dims(z_mean, 1), axis=0)
expectation_z_mean = tf.reduce_mean(z_mean, axis=0)
cov_z_mean = tf.subtract(
expectation_z_mean_z_mean_t,
tf.expand_dims(expectation_z_mean, 1) * tf.expand_dims(
expectation_z_mean, 0))
return cov_z_mean
def regularize_diag_off_diag_dip(covariance_matrix, lambda_od, lambda_d):
"""Compute on and off diagonal regularizers for DIP-VAE models.
Penalize deviations of covariance_matrix from the identity matrix. Uses
different weights for the deviations of the diagonal and off diagonal entries.
Args:
covariance_matrix: Tensor of size [num_latent, num_latent] to regularize.
lambda_od: Weight of penalty for off diagonal elements.
lambda_d: Weight of penalty for diagonal elements.
Returns:
dip_regularizer: Regularized deviation from diagonal of covariance_matrix.
"""
covariance_matrix_diagonal = tf.diag_part(covariance_matrix)
covariance_matrix_off_diagonal = covariance_matrix - tf.diag(
covariance_matrix_diagonal)
dip_regularizer = tf.add(
lambda_od * tf.reduce_sum(covariance_matrix_off_diagonal**2),
lambda_d * tf.reduce_sum((covariance_matrix_diagonal - 1)**2))
return dip_regularizer
@gin.configurable("dip_vae")
class DIPVAE(BaseVAE):
"""DIPVAE model."""
def __init__(self,
lambda_od=gin.REQUIRED,
lambda_d_factor=gin.REQUIRED,
dip_type="i"):
"""Creates a DIP-VAE model.
Based on Equation 6 and 7 of "Variational Inference of Disentangled Latent
Concepts from Unlabeled Observations"
(https://openreview.net/pdf?id=H1kG7GZAW).
Args:
lambda_od: Hyperparameter for off diagonal values of covariance matrix.
lambda_d_factor: Hyperparameter for diagonal values of covariance matrix
lambda_d = lambda_d_factor*lambda_od.
dip_type: "i" or "ii".
"""
self.lambda_od = lambda_od
self.lambda_d_factor = lambda_d_factor
self.dip_type = dip_type
def regularizer(self, kl_loss, z_mean, z_logvar, z_sampled):
cov_z_mean = compute_covariance_z_mean(z_mean)
lambda_d = self.lambda_d_factor * self.lambda_od
if self.dip_type == "i": # Eq 6 page 4
# mu = z_mean is [batch_size, num_latent]
      # Compute cov_p(x) [mu(x)] = E[mu*mu^T] - E[mu]E[mu]^T
cov_dip_regularizer = regularize_diag_off_diag_dip(
cov_z_mean, self.lambda_od, lambda_d)
elif self.dip_type == "ii":
cov_enc = tf.matrix_diag(tf.exp(z_logvar))
expectation_cov_enc = tf.reduce_mean(cov_enc, axis=0)
cov_z = expectation_cov_enc + cov_z_mean
cov_dip_regularizer = regularize_diag_off_diag_dip(
cov_z, self.lambda_od, lambda_d)
else:
raise NotImplementedError("DIP variant not supported.")
return kl_loss + cov_dip_regularizer
def gaussian_log_density(samples, mean, log_var):
pi = tf.constant(math.pi)
normalization = tf.log(2. * pi)
inv_sigma = tf.exp(-log_var)
tmp = (samples - mean)
return -0.5 * (tmp * tmp * inv_sigma + log_var + normalization)
def total_correlation(z, z_mean, z_logvar):
"""Estimate of total correlation on a batch.
We need to compute the expectation over a batch of: E_j [log(q(z(x_j))) -
log(prod_l q(z(x_j)_l))]. We ignore the constants as they do not matter
for the minimization. The constant should be equal to (num_latents - 1) *
log(batch_size * dataset_size)
Args:
z: [batch_size, num_latents]-tensor with sampled representation.
z_mean: [batch_size, num_latents]-tensor with mean of the encoder.
z_logvar: [batch_size, num_latents]-tensor with log variance of the encoder.
Returns:
Total correlation estimated on a batch.
"""
# Compute log(q(z(x_j)|x_i)) for every sample in the batch, which is a
# tensor of size [batch_size, batch_size, num_latents]. In the following
# comments, [batch_size, batch_size, num_latents] are indexed by [j, i, l].
log_qz_prob = gaussian_log_density(
tf.expand_dims(z, 1), tf.expand_dims(z_mean, 0),
tf.expand_dims(z_logvar, 0))
  # Compute log prod_l p(z(x_j)_l) = sum_l(log(sum_i(q(z(x_j)_l|x_i)))
# + constant) for each sample in the batch, which is a vector of size
# [batch_size,].
log_qz_product = tf.reduce_sum(
tf.reduce_logsumexp(log_qz_prob, axis=1, keepdims=False),
axis=1,
keepdims=False)
# Compute log(q(z(x_j))) as log(sum_i(q(z(x_j)|x_i))) + constant =
# log(sum_i(prod_l q(z(x_j)_l|x_i))) + constant.
log_qz = tf.reduce_logsumexp(
tf.reduce_sum(log_qz_prob, axis=2, keepdims=False),
axis=1,
keepdims=False)
return tf.reduce_mean(log_qz - log_qz_product)
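# Editorial sketch (shapes inferred from the code above): log_qz_prob has
# shape [batch_size, batch_size, num_latents], indexed [j, i, l]; the
# logsumexp over the i axis marginalises over the batch, and the returned
# scalar is
#   mean_j [ log q(z(x_j)) - sum_l log q(z(x_j)_l) ]
# up to the additive constants mentioned in the docstring.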
@gin.configurable("beta_tc_vae")
class BetaTCVAE(BaseVAE):
"""BetaTCVAE model."""
def __init__(self, beta=gin.REQUIRED):
"""Creates a beta-TC-VAE model.
Based on Equation 4 with alpha = gamma = 1 of "Isolating Sources of
Disentanglement in Variational Autoencoders"
(https://arxiv.org/pdf/1802.04942).
If alpha = gamma = 1, Eq. 4 can be written as ELBO + (1 - beta) * TC.
Args:
beta: Hyperparameter for the total correlation penalty.
"""
self.beta = beta
def regularizer(self, kl_loss, z_mean, z_logvar, z_sampled):
tc = (self.beta - 1.) * total_correlation(z_sampled, z_mean, z_logvar)
return tc + kl_loss
|
|
from dateutil.parser import parse as parse_date
import pytest
from future.moves.urllib.parse import urlparse
from api.base.settings.defaults import API_BASE
from api.nodes.serializers import NodeSerializer
from api.sparse.serializers import SparseNodeSerializer, SparseRegistrationSerializer
from api.registrations.serializers import RegistrationSerializer
from framework.auth import Auth
from osf_tests.factories import (
AuthUserFactory,
UserFactory,
NodeFactory,
RegistrationFactory,
ProjectFactory
)
from tests.base import assert_datetime_equal
from tests.utils import make_drf_request_with_version
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
class TestNodeSerializer:
def test_node_serializer(self, user):
# test_node_serialization
parent = ProjectFactory(creator=user)
node = NodeFactory(creator=user, parent=parent)
req = make_drf_request_with_version(version='2.0')
result = NodeSerializer(node, context={'request': req}).data
data = result['data']
assert data['id'] == node._id
assert data['type'] == 'nodes'
# Attributes
attributes = data['attributes']
assert attributes['title'] == node.title
assert attributes['description'] == node.description
assert attributes['public'] == node.is_public
assert set(attributes['tags']) == set(node.tags.values_list('name', flat=True))
assert not attributes['current_user_can_comment']
assert attributes['category'] == node.category
assert attributes['registration'] == node.is_registration
assert attributes['fork'] == node.is_fork
assert attributes['collection'] == node.is_collection
assert attributes['analytics_key'] == node.keenio_read_key
assert attributes['wiki_enabled'] == node.has_addon('wiki')
# Relationships
relationships = data['relationships']
assert 'region' in relationships
assert 'children' in relationships
assert 'contributors' in relationships
assert 'files' in relationships
assert 'parent' in relationships
assert 'affiliated_institutions' in relationships
assert 'registrations' in relationships
assert 'forked_from' not in relationships
parent_link = relationships['parent']['links']['related']['href']
assert urlparse(
parent_link).path == '/{}nodes/{}/'.format(API_BASE, parent._id)
# test_fork_serialization
node = NodeFactory(creator=user)
fork = node.fork_node(auth=Auth(user))
req = make_drf_request_with_version(version='2.0')
result = NodeSerializer(fork, context={'request': req}).data
data = result['data']
# Relationships
relationships = data['relationships']
forked_from = relationships['forked_from']['links']['related']['href']
assert urlparse(
forked_from).path == '/{}nodes/{}/'.format(API_BASE, node._id)
# test_template_serialization
node = NodeFactory(creator=user)
fork = node.use_as_template(auth=Auth(user))
req = make_drf_request_with_version(version='2.0')
result = NodeSerializer(fork, context={'request': req}).data
data = result['data']
# Relationships
relationships = data['relationships']
templated_from = relationships['template_node']['links']['related']['href']
assert urlparse(
templated_from).path == '/{}nodes/{}/'.format(API_BASE, node._id)
@pytest.mark.django_db
class TestSparseNodeSerializer:
def test_sparse_node_serializer(self, user):
# test_node_serialization
parent = ProjectFactory(creator=user)
node = NodeFactory(creator=user, parent=parent)
req = make_drf_request_with_version(version='2.15')
result = SparseNodeSerializer(node, context={'request': req}).data
data = result['data']
assert data['id'] == node._id
assert data['type'] == 'sparse-nodes'
# Attributes
attributes = data['attributes']
assert attributes['title'] == node.title
assert attributes['description'] == node.description
assert attributes['public'] == node.is_public
assert set(attributes['tags']) == set(node.tags.values_list('name', flat=True))
assert 'current_user_can_comment' not in attributes
assert 'license' not in attributes
assert attributes['category'] == node.category
assert 'registration' not in attributes
assert attributes['fork'] == node.is_fork
# Relationships
relationships = data['relationships']
assert 'region' not in relationships
assert 'children' in relationships
assert 'detail' in relationships
assert 'contributors' in relationships
assert 'files' not in relationships
assert 'parent' in relationships
assert 'affiliated_institutions' not in relationships
assert 'registrations' not in relationships
assert 'forked_from' not in relationships
parent_link = relationships['parent']['links']['related']['href']
assert urlparse(parent_link).path == '/{}sparse/nodes/{}/'.format(API_BASE, parent._id)
assert 'sparse' not in relationships['detail']['links']['related']['href']
sparse_children_path = urlparse(relationships['children']['links']['related']['href']).path
assert sparse_children_path == '/{}sparse/nodes/{}/children/'.format(API_BASE, node._id)
@pytest.mark.django_db
class TestNodeRegistrationSerializer:
def test_serialization(self):
user = UserFactory()
versioned_request = make_drf_request_with_version(version='2.2')
registration = RegistrationFactory(creator=user)
result = RegistrationSerializer(
registration, context={
'request': versioned_request}).data
data = result['data']
assert data['id'] == registration._id
assert data['type'] == 'registrations'
should_not_relate_to_registrations = [
'registered_from',
'registered_by',
'registration_schema',
'region',
'provider',
'groups',
]
# Attributes
attributes = data['attributes']
assert_datetime_equal(
parse_date(attributes['date_registered']),
registration.registered_date
)
assert attributes['withdrawn'] == registration.is_retracted
# Relationships
relationships = data['relationships']
# Relationships with data
relationship_urls = {
k: v['links']['related']['href'] for k, v
in relationships.items()}
assert 'registered_by' in relationships
registered_by = relationships['registered_by']['links']['related']['href']
assert urlparse(
registered_by).path == '/{}users/{}/'.format(API_BASE, user._id)
assert 'registered_from' in relationships
registered_from = relationships['registered_from']['links']['related']['href']
assert urlparse(registered_from).path == '/{}nodes/{}/'.format(
API_BASE, registration.registered_from._id)
api_registrations_url = '/{}registrations/'.format(API_BASE)
for relationship in relationship_urls:
if relationship in should_not_relate_to_registrations:
assert api_registrations_url not in relationship_urls[relationship]
else:
assert api_registrations_url in relationship_urls[relationship], 'For key {}'.format(
relationship)
@pytest.mark.django_db
class TestSparseRegistrationSerializer:
def test_sparse_registration_serializer(self, user):
user = UserFactory()
versioned_request = make_drf_request_with_version(version='2.2')
registration = RegistrationFactory(creator=user)
result = SparseRegistrationSerializer(
registration, context={
'request': versioned_request}).data
data = result['data']
assert data['id'] == registration._id
assert data['type'] == 'sparse-registrations'
# Attributes
attributes = data['attributes']
assert attributes['withdrawn'] == registration.is_retracted
assert attributes['title'] == registration.title
assert attributes['description'] == registration.description
assert attributes['public'] == registration.is_public
assert set(attributes['tags']) == set(registration.tags.values_list('name', flat=True))
assert 'current_user_can_comment' not in attributes
assert 'license' not in attributes
assert attributes['category'] == registration.category
assert attributes['fork'] == registration.is_fork
# Relationships
relationships = data['relationships']
assert 'registered_by' not in relationships
assert 'registered_from' not in relationships
assert 'region' not in relationships
assert 'children' in relationships
assert 'detail' in relationships
assert 'contributors' in relationships
assert 'files' not in relationships
assert 'affiliated_institutions' not in relationships
assert 'registrations' not in relationships
assert 'forked_from' not in relationships
assert 'sparse' not in relationships['detail']['links']['related']['href']
assert 'sparse' in relationships['children']['links']['related']['href']
|
|
from app import app, mdb
from flask import request, json
from config import *
import os
import datetime, time
def connect_database():
"""Returns a connection to database"""
try:
con = mdb.connect(os.environ.get('SQL_DATABASE_URI'), SQL_DATABASE_USER, \
SQL_DATABASE_PASS, SQL_DATABASE_SCHEMA, \
use_unicode=True, charset='utf8')
return con
except Exception as e:
print(e)
return None
@app.route('/', methods=['GET', 'POST'])
def index():
"""Returns home page of API server"""
return '<h1>Howdy, Ags!</h1><h3>API server is running normally. Refer to API doc on Google Drive for usage.</h3>'
@app.route('/search', methods=['GET'])
def search():
"""Returns search results of doctors for given query"""
result = dict()
result['success'] = False
city = request.args.get('city', None)
doctor_type = request.args.get('type', None)
# Check for null data
if city is None or doctor_type is None:
result['error'] = 'Either city or type is null.'
return json.dumps(result)
try:
# Connect to database
con = connect_database()
cursor = con.cursor()
# Get search results from doctor, user_profile and review tables
sql_query = '''select D.profile_id, U.photo_url, U.full_name, D.qualification, D.experience, D.type,
D.address, U.city, U.state, U.country,
CASE WHEN AVG(R.score) IS NULL THEN 5
ELSE AVG(R.score) END
FROM user_profile AS U
INNER JOIN doctor AS D ON U.profile_id=D.profile_id
LEFT OUTER JOIN reviews AS R ON D.doctor_id=R.doctor_id
WHERE D.type='{}' AND U.city='{}'
GROUP BY D.doctor_id''' \
.format(doctor_type, city)
print(sql_query)
cursor.execute(sql_query)
search_iterator = cursor.fetchall()
result['search'] = list()
for search_result in search_iterator:
search_dict = dict()
search_dict['doctor_id'] = int(search_result[0])
search_dict['photo_url'] = str(search_result[1])
search_dict['name'] = str(search_result[2])
search_dict['qualification'] = str(search_result[3])
search_dict['experience'] = int(search_result[4])
search_dict['type'] = str(search_result[5])
search_dict['address'] = str(search_result[6]) + ', ' + str(search_result[7]) + ', ' + str(search_result[8])\
+ ', ' + str(search_result[9])
search_dict['rating'] = int(round(float(search_result[10])))
result['search'].append(search_dict)
# Close connections
cursor.close()
con.commit()
result['success'] = True
return json.dumps(result)
except Exception as e:
con.rollback()
result['error'] = str(e)
return json.dumps(result)
finally:
con.close()
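# Editorial sketch (hypothetical, not wired into any route): the queries in
# this module interpolate user input directly into SQL strings. With a
# MySQLdb-compatible cursor the same lookup can be parameterized, which
# avoids SQL injection, e.g.:
#     cursor.execute(
#         "SELECT D.profile_id, U.full_name "
#         "FROM user_profile AS U "
#         "INNER JOIN doctor AS D ON U.profile_id = D.profile_id "
#         "WHERE D.type = %s AND U.city = %s",
#         (doctor_type, city))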
@app.route('/appointment', methods=['GET'])
def read_appointment():
"""Returns a list of appointments booked by a customer"""
result = dict()
result['success'] = False
profile_id = request.args.get('id', None)
user_type = request.args.get('user_type', None)
# Check for null data
if profile_id is None or user_type is None:
result['error'] = 'Either profile_id or user_type is null.'
return json.dumps(result)
try:
# Connect to database
con = connect_database()
cursor = con.cursor()
# Get appointments for doctor or customer
if user_type == 'doctor':
sql_query = '''select a.appointment_id, c.profile_id, u.full_name, a.date, a.time, u.phone
from appointment a, user_profile u, doctor d, customer c
where d.profile_id = {}
and a.doctor_id = d.doctor_id
and a.customer_id = c.customer_id
and u.profile_id = c.profile_id'''.format(profile_id)
else:
sql_query = '''select a.appointment_id, d.profile_id, u.full_name, a.date, a.time, u.phone, d.address
from appointment a, user_profile u, doctor d, customer c
where c.profile_id = {}
and a.customer_id = c.customer_id
and d.doctor_id = a.doctor_id
and u.profile_id = d.profile_id'''.format(profile_id)
cursor.execute(sql_query)
appointment_iterator = cursor.fetchall()
result['appointments'] = list()
# Build list of appointments
for appointment in appointment_iterator:
appointment_dict = dict()
appointment_dict['appointment_id'] = int(appointment[0])
appointment_dict['name'] = str(appointment[2])
appointment_dict['date'] = str(appointment[3])
appointment_dict['time'] = str(appointment[4])
appointment_dict['phone'] = str(appointment[5])
if user_type != 'doctor':
appointment_dict['doctor_id'] = str(appointment[1])
appointment_dict['address'] = str(appointment[6])
else:
appointment_dict['customer_id'] = str(appointment[1])
result['appointments'].append(appointment_dict)
# Close connections
cursor.close()
con.commit()
result['success'] = True
return json.dumps(result)
except Exception as e:
con.rollback()
result['error'] = str(e)
return json.dumps(result)
finally:
con.close()
@app.route('/appointment', methods=['POST'])
def create_appointment():
"""Creates a new appointment for given time and date"""
result = dict()
result['success'] = False
customer_id = request.form.get('customer_id')
doctor_id = request.form.get('doctor_id')
date = request.form.get('date')
time = request.form.get('time')
# Check for null data
if doctor_id is None or customer_id is None:
result['error'] = 'Either doctor_id or customer_id is null.'
return json.dumps(result)
elif date is None or time is None:
result['error'] = 'Both date and time are required; check the parameters.'
return json.dumps(result)
try:
# Connect to database
con = connect_database()
cursor = con.cursor()
# Insert appointment for the given customer and doctor
sql_query = '''INSERT IGNORE INTO appointment(customer_id, doctor_id, date, time)
SELECT c.customer_id, d.doctor_id, '{}', '{}'
FROM customer c, doctor d
WHERE c.profile_id = {} AND d.profile_id = {}'''\
.format(date, time, customer_id, doctor_id)
cursor.execute(sql_query)
sql_query = '''DELETE FROM availability
WHERE date = '{}' AND time = '{}'
AND doctor_id = (SELECT doctor_id FROM doctor WHERE profile_id = {})'''.\
format(date, time, doctor_id)
cursor.execute(sql_query)
sql_query = 'SELECT MAX(appointment_id) FROM appointment'
cursor.execute(sql_query)
appointment_id = cursor.fetchone()[0]
result['appointment_id'] = appointment_id
# Close connections
cursor.close()
con.commit()
result['success'] = True
return json.dumps(result)
except Exception as e:
con.rollback()
result['error'] = str(e)
return json.dumps(result)
finally:
con.close()
@app.route('/appointment/<int:id>', methods=['DELETE'])
def delete_appointment(id):
"""Deletes a previously created appointment"""
result = dict()
result['success'] = False
appointment_id = id
if appointment_id is None:
result['error'] = 'appointment_id is not provided'
return json.dumps(result)
try:
# Connect to database
con = connect_database()
cursor = con.cursor()
# Add slot to availability
sql_query = '''INSERT IGNORE INTO availability(doctor_id, date, time)
SELECT doctor_id, date, time FROM appointment WHERE appointment_id = {}'''\
.format(appointment_id)
cursor.execute(sql_query)
# Delete the appointment record
sql_query = 'DELETE FROM appointment WHERE appointment_id = {}'.format(appointment_id)
cursor.execute(sql_query)
cursor.close()
con.commit()
result['success'] = True
return json.dumps(result)
except Exception as e:
con.rollback()
result['error'] = str(e)
return json.dumps(result)
finally:
con.close()
@app.route('/login', methods=['POST'])
def user_login():
"""Returns log in information of user"""
result = dict()
result['success'] = False
username = request.form.get('username')
password = request.form.get('password')
# Check for null data
if username is None:
result['error'] = 'username is null'
return json.dumps(result)
elif password is None:
result['error'] = 'password is null'
return json.dumps(result)
try:
con = connect_database()
cursor = con.cursor()
# Validate credentials against user_profile
sql_query = "SELECT profile_id, full_name as id FROM user_profile WHERE username='{}' and password = '{}'".format(username, password)
cursor.execute(sql_query)
login_success = cursor.fetchone()
if login_success is None:
return json.dumps(result)
else:
profile_id = login_success[0]
full_name = login_success[1]
sql_query = "SELECT 1 FROM customer WHERE profile_id={}".format(profile_id)
cursor.execute(sql_query)
if cursor.fetchone() is not None:
result['user_type'] = 'customer'
else:
result['user_type'] = 'doctor'
result['profile_id'] = profile_id
result['full_name'] = full_name
result['success'] = True
return json.dumps(result)
except Exception as e:
con.rollback()
result['error'] = str(e)
return json.dumps(result)
finally:
con.close()
@app.route('/user', methods=['GET'])
def read_user():
"""Returns information pertaining to a user"""
result = dict()
result['success'] = False
id = request.args.get('id', None)
user_type = request.args.get('user_type', None)
# Check for null data
if id is None or user_type is None:
result['error'] = 'Either id or user_type is null.'
return json.dumps(result)
try:
# Connect to database
con = connect_database()
cursor = con.cursor()
# get doctor or customer info
if user_type == 'doctor':
sql_query = '''SELECT u.full_name, u.city, u.state, u.country, u.phone, u.email, u.photo_url,
u.address, d.experience, d.qualification,
(CASE WHEN AVG(r.score) IS NULL THEN 5
ELSE ROUND(AVG(r.score),0) END) AS score
FROM user_profile AS u
INNER JOIN doctor AS d ON u.profile_id=d.profile_id
LEFT OUTER JOIN reviews AS r ON d.doctor_id=r.doctor_id
WHERE d.profile_id = {}
GROUP BY d.doctor_id'''.format(int(id))
else:
sql_query = '''SELECT full_name, city, state, country, phone, email, photo_url, address
FROM user_profile
WHERE profile_id = {}'''\
.format(int(id))
cursor.execute(sql_query)
info = cursor.fetchone()
info_dict = dict()
info_dict['name'] = str(info[0])
info_dict['city'] = str(info[1])
info_dict['state'] = str(info[2])
info_dict['country'] = str(info[3])
info_dict['phone'] = str(info[4])
info_dict['email'] = str(info[5])
info_dict['photo_url'] = str(info[6])
info_dict['address'] = str(info[7])
if user_type == 'doctor':
info_dict['experience'] = int(info[8])
info_dict['qualification'] = str(info[9])
info_dict['rating'] = int(info[10])
result['info'] = info_dict
# Close connections
cursor.close()
con.commit()
result['success'] = True
return json.dumps(result)
except Exception as e:
con.rollback()
result['error'] = str(e)
return json.dumps(result)
finally:
con.close()
@app.route('/user', methods=['POST'])
def create_user():
"""Creates a new user account"""
result = dict()
result['success'] = False
full_name = request.form.get('name')
username = request.form.get('username')
password = request.form.get('password')
email = request.form.get('email')
phone = request.form.get('phone')
address = request.form.get('address')
city = request.form.get('city')
state = request.form.get('state')
country = request.form.get('country')
user_type = request.form.get('user_type')
# Check for null data
if username is None or password is None or email is None:
result['error'] = 'Either username, password or email is null.'
return json.dumps(result)
elif user_type is None:
result['error'] = 'User_Type is required'
return json.dumps(result)
try:
# Connect to database
con = connect_database()
cursor = con.cursor()
# check if user exists
sql_query = "SELECT 1 FROM user_profile WHERE username='{}' or email = '{}'".format(username, email)
cursor.execute(sql_query)
print(sql_query)
existing_user = cursor.fetchone()
if existing_user is not None:
result['error'] = 'User already exists'
return json.dumps(result)
# add record to user_profile
sql_query = "INSERT INTO user_profile(username, password, email, phone, full_name, address, state, city, country) " \
"VALUES('{}','{}','{}','{}','{}','{}','{}','{}','{}')"\
.format(username, password, email, phone, full_name, address, state, city, country)
print(sql_query)
cursor.execute(sql_query)
sql_query = "SELECT max(profile_id) FROM user_profile"
cursor.execute(sql_query)
profile_id = int(cursor.fetchone()[0])
if user_type == 'doctor':
sql_query = "INSERT INTO doctor(profile_id, address) VALUES ({}, {})".format(profile_id, address)
else:
sql_query = "INSERT INTO customer(profile_id) VALUES ({})".format(profile_id)
cursor.execute(sql_query)
result['profile_id'] = profile_id
# Close connections
cursor.close()
con.commit()
result['success'] = True
return json.dumps(result)
except Exception as e:
con.rollback()
result['error'] = str(e)
return json.dumps(result)
finally:
con.close()
@app.route('/user/patient/<int:id>', methods=['DELETE'])
def delete_patient(id):
"""Delete a patient record from database"""
result = dict()
result['success'] = False
profile_id = id
try:
# Connect to database
con = connect_database()
cursor = con.cursor()
# Get id from customer table
sql_query = 'SELECT customer_id FROM customer WHERE profile_id={}'.format(profile_id)
cursor.execute(sql_query)
customer_id = cursor.fetchone()
customer_id = int(customer_id[0])
# Delete all appointments for patient
sql_query = 'DELETE FROM appointment WHERE customer_id={}'.format(customer_id)
cursor.execute(sql_query)
# Delete from customer table
sql_query = 'DELETE FROM customer WHERE profile_id={}'.format(profile_id)
cursor.execute(sql_query)
# Delete from user_profile table
sql_query = 'DELETE FROM user_profile WHERE profile_id={}'.format(profile_id)
cursor.execute(sql_query)
# Close connections
cursor.close()
con.commit()
result['success'] = True
return json.dumps(result)
except Exception as e:
con.rollback()
result['error'] = str(e)
return json.dumps(result)
finally:
con.close()
@app.route('/user/doctor/<int:id>', methods=['DELETE'])
def delete_doctor(id):
"""Delete a doctor record from database"""
result = dict()
result['success'] = False
profile_id = id
try:
# Connect to database
con = connect_database()
cursor = con.cursor()
# Get id from doctor table
sql_query = 'SELECT doctor_id FROM doctor WHERE profile_id={}'.format(profile_id)
cursor.execute(sql_query)
doctor_id = cursor.fetchone()
doctor_id = int(doctor_id[0])
# Delete all appointments for patient
sql_query = 'DELETE FROM appointment WHERE doctor_id={}'.format(doctor_id)
print(sql_query)
cursor.execute(sql_query)
# Delete from doctor table
sql_query = 'DELETE FROM doctor WHERE profile_id={}'.format(profile_id)
print(sql_query)
cursor.execute(sql_query)
# Delete from user_profile table
sql_query = 'DELETE FROM user_profile WHERE profile_id={}'.format(profile_id)
print(sql_query)
cursor.execute(sql_query)
# Close connections
cursor.close()
con.commit()
result['success'] = True
return json.dumps(result)
except Exception as e:
con.rollback()
result['error'] = str(e)
return json.dumps(result)
finally:
con.close()
@app.route('/logout/<user_type>/<int:id>', methods=['POST'])
def user_logout(user_type, id):
"""Logs out a user from application"""
result = dict()
result['success'] = True
return json.dumps(result)
@app.route('/review', methods=['GET'])
def read_review():
"""Reads list of reviews for a doctor"""
result = dict()
result['success'] = False
profile_id = request.args.get('id', None)
user_type = request.args.get('user_type', None)
# Check for null data
if profile_id is None or user_type is None:
result['error'] = 'Either profile_id or user_type is null.'
return json.dumps(result)
elif user_type != 'doctor':
result['error'] = 'Request should contain doctor as user_type.'
return json.dumps(result)
try:
# Connect to database
con = connect_database()
cursor = con.cursor()
# Get doctor_id from doctor table
sql_query = 'SELECT doctor_id FROM doctor WHERE profile_id={}'.format(profile_id)
cursor.execute(sql_query)
doctor_id = cursor.fetchone()
doctor_id = int(doctor_id[0])
# Get list of reviews from reviews table
sql_query = "SELECT R.review_id, R.score, R.comment, U.full_name " \
"FROM reviews as R " \
"INNER JOIN customer as C ON R.customer_id=C.customer_id " \
"INNER JOIN user_profile as U ON C.profile_id=U.profile_id "\
"WHERE R.doctor_id={}".format(doctor_id)
print(sql_query)
cursor.execute(sql_query)
reviews_iterator = cursor.fetchall()
result['reviews'] = list()
# Return list of reviews
for review in reviews_iterator:
review_dict = dict()
review_dict['review_id'] = int(review[0])
review_dict['score'] = int(review[1])
review_dict['comment'] = str(review[2])
review_dict['full_name'] = str(review[3])
result['reviews'].append(review_dict)
# Close connections
cursor.close()
con.commit()
result['success'] = True
return json.dumps(result)
except Exception as e:
con.rollback()
result['error'] = str(e)
return json.dumps(result)
finally:
con.close()
@app.route('/review', methods=['POST'])
def create_review():
"""Creates a new review for a doctor"""
result = dict()
result['success'] = False
customer_id = request.form.get('id')
doctor_id = request.form.get('doctor_id')
score = request.form.get('score')
comment = request.form.get('comment')
print(comment)
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
# Check for null data
if customer_id is None:
result['error'] = 'Customer id is null.'
return json.dumps(result)
elif score is None:
result['error'] = '[Missing Score] Score for review is compulsory field.'
return json.dumps(result)
try:
# Connect to database
con = connect_database()
cursor = con.cursor()
# Insert review in reviews table
sql_query = '''INSERT INTO reviews(score, comment, customer_id, doctor_id, date)
SELECT {}, '{}', c.customer_id, d.doctor_id, '{}'
FROM doctor d, customer c
WHERE c.profile_id = {} AND d.profile_id={}'''\
.format(score, comment, timestamp, customer_id, doctor_id)
cursor.execute(sql_query)
# Close connections
cursor.close()
con.commit()
result['success'] = True
return json.dumps(result)
except Exception as e:
con.rollback()
result['error'] = str(e)
return json.dumps(result)
finally:
con.close()
@app.route('/availability', methods=['GET'])
def read_availability():
"""Read list of available slots for a doctor"""
result = dict()
result['success'] = False
profile_id = request.args.get('id', None)
user_type = request.args.get('user_type', None)
# Check for null data
if profile_id is None or user_type is None:
result['error'] = 'Either profile_id or user_type is null.'
return json.dumps(result)
elif user_type != 'doctor':
result['error'] = 'Request should contain doctor as user_type.'
return json.dumps(result)
try:
# Connect to database
con = connect_database()
cursor = con.cursor()
# Get doctor_id from doctor table
sql_query = 'SELECT doctor_id FROM doctor WHERE profile_id={}'.format(profile_id)
cursor.execute(sql_query)
doctor_id = int(cursor.fetchone()[0])
# Get list of available slots from availability table
sql_query ='''SELECT date, time FROM availability
WHERE doctor_id={} and DATE(date) > CURDATE()'''.format(doctor_id)
cursor.execute(sql_query)
availability_iterator = cursor.fetchall()
result['available_slots'] = list()
# Build list of available slots
for slot in availability_iterator:
available_slot = dict()
available_slot['date'] = str(slot[0])
available_slot['time'] = str(slot[1])
result['available_slots'].append(available_slot)
# Close connections
cursor.close()
con.commit()
result['success'] = True
return json.dumps(result)
except Exception as e:
con.rollback()
result['error'] = str(e)
return json.dumps(result)
finally:
con.close()
@app.route('/availability', methods=['POST'])
def create_availability():
"""Create available slots for a doctor"""
result = dict()
result['success'] = False
profile_id = request.form.get('id')
available_slots = request.form.get('available_slots')
# Check for null data
if profile_id is None or available_slots is None:
result['error'] = 'Either profile_id or available_slots is null.'
return json.dumps(result)
try:
# Connect to database
con = connect_database()
cursor = con.cursor()
# Unpack JSON from request
availability_list = json.loads(available_slots)
# Get doctor_id from doctor table
sql_query = 'SELECT doctor_id FROM doctor WHERE profile_id={}'.format(profile_id)
cursor.execute(sql_query)
doctor_id = cursor.fetchone()
doctor_id = int(doctor_id[0])
# Insert each slot; INSERT IGNORE skips slots that already exist
for slot in availability_list:
date = slot['date']
time = slot['time']
sql_query = '''INSERT IGNORE INTO availability(doctor_id, date, time) VALUES({},'{}','{}')'''\
.format(doctor_id, date, time)
cursor.execute(sql_query)
# Close connections
cursor.close()
con.commit()
result['success'] = True
return json.dumps(result)
except Exception as e:
con.rollback()
result['error'] = str(e)
return json.dumps(result)
finally:
con.close()
|
|
"""
Stochastic Gradient Descent.
TODO: write more documentation
"""
__docformat__ = 'restructuredtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import time
import logging
import theano
import theano.tensor as TT
from theano.sandbox.scan import scan
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog.utils import print_time, print_mem, const
logger = logging.getLogger(__name__)
class SGD(object):
def __init__(self,
model,
state,
data):
"""
Parameters:
:param model:
Class describing the model used. It should provide the
computational graph to evaluate the model, and have a
similar structure to classes on the models folder
:param state:
Dictionary containing the current state of your job. This
includes configuration of the job, specifically the seed,
the starting damping factor, batch size, etc. See main.py
for details
:param data:
Class describing the dataset used by the model
"""
if 'adarho' not in state:
state['adarho'] = 0.96
if 'adaeps' not in state:
state['adaeps'] = 1e-6
#####################################
# Step 0. Constructs shared variables
#####################################
bs = state['bs']
self.model = model
self.rng = numpy.random.RandomState(state['seed'])
srng = RandomStreams(self.rng.randint(213))
self.gs = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
dtype=theano.config.floatX),
name=p.name)
for p in model.params]
self.gnorm2 = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
dtype=theano.config.floatX),
name=p.name+'_g2')
for p in model.params]
self.dnorm2 = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
dtype=theano.config.floatX),
name=p.name+'_d2')
for p in model.params]
self.step = 0
self.bs = bs
self.state = state
self.data = data
self.step_timer = time.time()
self.gdata = [theano.shared(numpy.zeros( (2,)*x.ndim,
dtype=x.dtype),
name=x.name) for x in model.inputs]
if 'profile' not in self.state:
self.state['profile'] = 0
###################################
# Step 1. Compile training function
###################################
logger.debug('Constructing grad function')
loc_data = self.gdata
self.prop_exprs = [x[1] for x in model.properties]
self.prop_names = [x[0] for x in model.properties]
self.update_rules = [x[1] for x in model.updates]
rval = theano.clone(model.param_grads + self.update_rules + \
self.prop_exprs + [model.train_cost],
replace=zip(model.inputs, loc_data))
nparams = len(model.params)
nouts = len(self.prop_exprs)
nrules = len(self.update_rules)
gs = rval[:nparams]
rules = rval[nparams:nparams + nrules]
outs = rval[nparams + nrules:]
norm_gs = TT.sqrt(sum(TT.sum(x**2)
for x,p in zip(gs, self.model.params) if p not in self.model.exclude_params_for_norm))
if 'cutoff' in state and state['cutoff'] > 0:
c = numpy.float32(state['cutoff'])
if state['cutoff_rescale_length']:
c = c * TT.cast(loc_data[0].shape[0], 'float32')
notfinite = TT.or_(TT.isnan(norm_gs), TT.isinf(norm_gs))
_gs = []
for g,p in zip(gs,self.model.params):
if p not in self.model.exclude_params_for_norm:
tmpg = TT.switch(TT.ge(norm_gs, c), g*c/norm_gs, g)
_gs.append(
TT.switch(notfinite, numpy.float32(.1)*p, tmpg))
else:
_gs.append(g)
gs = _gs
store_gs = [(s,g) for s,g in zip(self.gs, gs)]
updates = store_gs + [(s[0], r) for s,r in zip(model.updates, rules)]
rho = self.state['adarho']
eps = self.state['adaeps']
# grad2
gnorm2_up = [rho * gn2 + (1. - rho) * (g ** 2.) for gn2,g in zip(self.gnorm2, gs)]
updates = updates + zip(self.gnorm2, gnorm2_up)
logger.debug('Compiling grad function')
st = time.time()
self.train_fn = theano.function(
[], outs, name='train_function',
updates = updates,
givens = zip(model.inputs, loc_data))
logger.debug('took {}'.format(time.time() - st))
self.lr = numpy.float32(1.)
new_params = [p - (TT.sqrt(dn2 + eps) / TT.sqrt(gn2 + eps)) * g
for p, g, gn2, dn2 in
zip(model.params, self.gs, self.gnorm2, self.dnorm2)]
updates = zip(model.params, new_params)
# d2
d2_up = [(dn2, rho * dn2 + (1. - rho) *
(((TT.sqrt(dn2 + eps) / TT.sqrt(gn2 + eps)) * g) ** 2.))
for dn2, gn2, g in zip(self.dnorm2, self.gnorm2, self.gs)]
updates = updates + d2_up
self.update_fn = theano.function(
[], [], name='update_function',
allow_input_downcast=True,
updates = updates)
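# Editorial note: with rho = state['adarho'] and eps = state['adaeps'], the
# two compiled functions follow the AdaDelta update rule (Zeiler, 2012):
#   E[g^2]_t  = rho * E[g^2]_{t-1}  + (1 - rho) * g_t^2         (self.gnorm2)
#   dx_t      = -sqrt(E[dx^2]_{t-1} + eps) / sqrt(E[g^2]_t + eps) * g_t
#   E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * dx_t^2        (self.dnorm2)
# train_fn stores the (clipped) gradients and refreshes E[g^2]; update_fn
# applies dx to the parameters and refreshes E[dx^2].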
self.old_cost = 1e20
self.schedules = model.get_schedules()
self.return_names = self.prop_names + \
['cost',
'error',
'time_step',
'whole_time', 'lr']
self.prev_batch = None
def __call__(self):
df_st = time.time()
batch = self.data.next()
df_et = time.time()
assert batch
# Perturb the data (! and the model)
if isinstance(batch, dict):
batch = self.model.perturb(**batch)
else:
batch = self.model.perturb(*batch)
# Load the dataset into GPU
# Note: not the most efficient approach in general, as each batch is
# copied to the GPU individually
if isinstance(batch, dict):
for gdata in self.gdata:
gdata.set_value(batch[gdata.name], borrow=True)
else:
for gdata, data in zip(self.gdata, batch):
gdata.set_value(data, borrow=True)
# Run the training function
g_st = time.time()
rvals = self.train_fn()
for schedule in self.schedules:
schedule(self, rvals[-1])
self.update_fn()
g_ed = time.time()
self.state['lr'] = float(self.lr)
cost = rvals[-1]
self.old_cost = cost
whole_time = time.time() - self.step_timer
if self.step % self.state['trainFreq'] == 0:
msg = '.. iter %4d cost %.3f'
vals = [self.step, cost]
for dx, prop in enumerate(self.prop_names):
msg += ' '+prop+' %.2e'
vals += [float(numpy.array(rvals[dx]))]
msg += ' dload %s step time %s whole time %s lr %.2e'
vals += [print_time(df_et-df_st),
print_time(g_ed - g_st),
print_time(time.time() - self.step_timer),
float(self.lr)]
print msg % tuple(vals)
self.step += 1
ret = dict([('cost', float(cost)),
('error', float(cost)),
('lr', float(self.lr)),
('time_step', float(g_ed - g_st)),
('whole_time', float(whole_time))]+zip(self.prop_names, rvals))
return ret
|
|
from base64 import b64decode
import copy
import importlib
from django.contrib.auth import (get_user_model, authenticate)
from django.core.files.base import ContentFile
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
from rest_framework import serializers
from rest_framework_jwt.compat import (Serializer, get_username_field,
PasswordField)
from rest_framework_jwt.settings import api_settings
from rest_framework.relations import RelatedField
from rest_framework.settings import APISettings
from .oauth2_client import get_local_host
from .models import SocialUserData
User = get_user_model()
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
jwt_decode_handler = api_settings.JWT_DECODE_HANDLER
jwt_get_username_from_payload = api_settings.JWT_PAYLOAD_GET_USERNAME_HANDLER
class ForeignKeyRelatedField(RelatedField):
"""you can customize the field name to filter the result
"""
def __init__(self, **kwargs):
self.fname = kwargs.pop('fname', None)
super().__init__(**kwargs)
def to_internal_value(self, data):
try:
if self.fname:
filter_kwargs = {self.fname: data}
return self.get_queryset().get(**filter_kwargs)
else:
return self.get_queryset().get(pk=data)
except ObjectDoesNotExist:
self.fail('does_not_exist', pk_value=data)
except (TypeError, ValueError):
self.fail('incorrect_type', data_type=type(data).__name__)
def to_representation(self, value):
if self.fname:
return getattr(value, self.fname)
else:
return value.pk
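# Editorial sketch (hypothetical usage; the serializer and field names are
# illustrative only):
#
#     class ArticleSerializer(serializers.ModelSerializer):
#         # look up and render the related user by username instead of pk
#         author = ForeignKeyRelatedField(fname='username',
#                                         queryset=User.objects.all())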
class JSONWebTokenSerializerWithEmail(Serializer):
"""a customize jwt serializer use email and password.
"""
def __init__(self, *args, **kwargs):
"""
Dynamically add the USERNAME_FIELD to self.fields.
"""
super().__init__(*args, **kwargs)
self.fields[self.username_field] = serializers.CharField()
self.fields['password'] = PasswordField(write_only=True)
@property
def username_field(self):
return get_username_field()
def validate(self, attrs):
credentials = {
'username': attrs.get(self.username_field),
'password': attrs.get('password')
}
if all(credentials.values()):
user = authenticate(**credentials)
if user:
if not user.is_active:
msg = _('User account is disabled.')
raise serializers.ValidationError(msg, 1000)
payload = jwt_payload_handler(user)
return {'token': jwt_encode_handler(payload), 'user': user}
else:
msg = _('Unable to login with provided credentials.')
raise serializers.ValidationError(msg)
else:
msg = _('Must include "{username_field}" and "password".')
msg = msg.format(username_field=self.username_field)
raise serializers.ValidationError(msg, 1000)
class UserCreateSerializer(serializers.ModelSerializer):
"""handle user creation validation
"""
password = serializers.CharField(max_length=20,
min_length=6,
error_messages={
'blank': 'password can not be empty',
'min_length': 'password is too short'
})
password2 = serializers.CharField()
class Meta:
model = get_user_model()
fields = ('id', 'email', 'password', 'password2')
def validate_email(self, value):
if User.objects.filter(email=value).exists():
raise serializers.ValidationError('this email has been registered',
1004)
return value
def validate_password2(self, value):
if value != self.initial_data['password']:
raise serializers.ValidationError('password is not consistent',
1001)
return value
def create(self, validated_data):
validated_data.pop('password2')
instance = User.objects.create_user(**validated_data)
return instance
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = (
'token',
'username',
'email',
)
class SocialAuthSerializer(serializers.Serializer):
"""encapsulate social auth process
"""
service = serializers.CharField(max_length=256)
access_token = serializers.CharField(max_length=1024)
def _service_factory(self, name):
try:
module = importlib.import_module('member.oauth2_client')
service = getattr(module, name.capitalize() + 'Client')
return service
except (ImportError, AttributeError):
return None
def validate_access_token(self, value):
"""
"""
service_class = self.validate_service(self.initial_data['service'])
self.service_client = service_class(
local_host=get_local_host(self.context['request']))
self.service_client.set_access_token(value)
try:
self._social_data = self.service_client.get_user_info()
except Exception as e:
raise serializers.ValidationError(str(e), 1003)
user = User.objects.filter(email=self._social_data['email'])
if user.exists():
social_id = SocialUserData.objects.filter(user=user.first())
if social_id.exists():
raise serializers.ValidationError(
'this email has been registered in social auth', 1002)
return value
def validate_service(self, value):
"""check whether the service is supported or not.
return the service if it's supported
"""
service = self._service_factory(value)
if not service:
raise serializers.ValidationError(
'{} social auth not supported currently'.format(value), 1008)
return service
def create(self, validated_data):
"""maybe user is registered but not create its own social auth account so
we need to do a check first
return an instance with social data and user
"""
user = User.objects.filter(email=self._social_data['email'])
if user.exists():
user = user.first()
else:
user = User.objects.create_user(email=self._social_data['email'])
instance = SocialUserData.objects.create(
user=user,
service=validated_data['service'].service.lower(),
username=self._social_data['id'])
self._social_data.update({'user': user})
return self._social_data
|
|
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Routines and classes for working with Portage overlays and ebuilds."""
import collections
import filecmp
import fileinput
import glob
import logging
import multiprocessing
import os
import re
import shutil
import sys
from chromite.buildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import gerrit
from chromite.lib import git
from chromite.lib import osutils
_PRIVATE_PREFIX = '%(buildroot)s/src/private-overlays'
_GLOBAL_OVERLAYS = [
'%s/chromeos-overlay' % _PRIVATE_PREFIX,
'%s/chromeos-partner-overlay' % _PRIVATE_PREFIX,
'%(buildroot)s/src/third_party/chromiumos-overlay',
'%(buildroot)s/src/third_party/portage-stable',
]
# Define datastructures for holding PV and CPV objects.
_PV_FIELDS = ['pv', 'package', 'version', 'version_no_rev', 'rev']
PV = collections.namedtuple('PV', _PV_FIELDS)
CPV = collections.namedtuple('CPV', ['category'] + _PV_FIELDS)
# Package matching regexp, as dictated by package manager specification:
# http://www.gentoo.org/proj/en/qa/pms.xml
_pkg = r'(?P<package>' + r'[\w+][\w+-]*)'
_ver = r'(?P<version>' + \
r'(?P<version_no_rev>(\d+)((\.\d+)*)([a-z]?)' + \
r'((_(pre|p|beta|alpha|rc)\d*)*))' + \
r'(-(?P<rev>r(\d+)))?)'
_pvr_re = re.compile(r'^(?P<pv>%s-%s)$' % (_pkg, _ver), re.VERBOSE)
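# Editorial example: for the string 'chromeos-chrome-25.0.1364.0-r1' the
# regexp above yields package='chromeos-chrome', version='25.0.1364.0-r1',
# version_no_rev='25.0.1364.0' and rev='r1'.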
# This regex matches blank lines, commented lines, and the EAPI line.
_blank_or_eapi_re = re.compile(r'^\s*(?:#|EAPI=|$)')
def _ListOverlays(board=None, buildroot=constants.SOURCE_ROOT):
"""Return the list of overlays to use for a given buildbot.
Always returns all overlays, and does not perform any filtering.
Args:
board: Board to look at.
buildroot: Source root to find overlays.
"""
overlays, patterns = [], []
if board is None:
patterns += ['overlay*']
else:
board_no_variant, _, variant = board.partition('_')
patterns += ['overlay-%s' % board_no_variant]
if variant:
patterns += ['overlay-variant-%s' % board.replace('_', '-')]
for d in _GLOBAL_OVERLAYS:
d %= dict(buildroot=buildroot)
if os.path.isdir(d):
overlays.append(d)
for p in patterns:
overlays += glob.glob('%s/src/overlays/%s' % (buildroot, p))
overlays += glob.glob('%s/src/private-overlays/%s-private' % (buildroot, p))
return overlays
def FindOverlays(overlay_type, board=None, buildroot=constants.SOURCE_ROOT):
"""Return the list of overlays to use for a given buildbot.
Args:
board: Board to look at.
buildroot: Source root to find overlays.
overlay_type: A string describing which overlays you want.
'private': Just the private overlays.
'public': Just the public overlays.
'both': Both the public and private overlays.
"""
overlays = _ListOverlays(board=board, buildroot=buildroot)
private_prefix = _PRIVATE_PREFIX % dict(buildroot=buildroot)
if overlay_type == constants.PRIVATE_OVERLAYS:
return [x for x in overlays if x.startswith(private_prefix)]
elif overlay_type == constants.PUBLIC_OVERLAYS:
return [x for x in overlays if not x.startswith(private_prefix)]
elif overlay_type == constants.BOTH_OVERLAYS:
return overlays
else:
assert overlay_type is None
return []
class MissingOverlayException(Exception):
"""This exception indicates that a needed overlay is missing."""
def FindPrimaryOverlay(overlay_type, board, buildroot=constants.SOURCE_ROOT):
"""Return the primary overlay to use for a given buildbot.
An overlay is only considered a primary overlay if it has a make.conf and a
toolchain.conf. If multiple primary overlays are found, the first primary
overlay is returned.
Args:
overlay_type: A string describing which overlays you want.
'private': Just the private overlays.
'public': Just the public overlays.
'both': Both the public and private overlays.
board: Board to look at.
Raises:
MissingOverlayException: No primary overlay found.
"""
for overlay in FindOverlays(overlay_type, board, buildroot):
if (os.path.exists(os.path.join(overlay, 'make.conf')) and
os.path.exists(os.path.join(overlay, 'toolchain.conf'))):
return overlay
raise MissingOverlayException('No primary overlay found for board=%r' % board)
def GetOverlayName(overlay):
try:
return open('%s/profiles/repo_name' % overlay).readline().rstrip()
except IOError:
# Not all overlays have a repo_name, so don't make a fuss.
return None
class EBuildVersionFormatException(Exception):
def __init__(self, filename):
self.filename = filename
message = ('Ebuild file name %s '
'does not match expected format.' % filename)
super(EBuildVersionFormatException, self).__init__(message)
class EbuildFormatIncorrectException(Exception):
def __init__(self, filename, message):
message = 'Ebuild %s has invalid format: %s ' % (filename, message)
super(EbuildFormatIncorrectException, self).__init__(message)
class EBuild(object):
"""Wrapper class for information about an ebuild."""
VERBOSE = False
_PACKAGE_VERSION_PATTERN = re.compile(
r'.*-(([0-9][0-9a-z_.]*)(-r[0-9]+)?)[.]ebuild')
_WORKON_COMMIT_PATTERN = re.compile(r'^CROS_WORKON_COMMIT="(.*)"$')
@classmethod
def _Print(cls, message):
"""Verbose print function."""
if cls.VERBOSE:
cros_build_lib.Info(message)
@classmethod
def _RunCommand(cls, command, **kwargs):
return cros_build_lib.RunCommandCaptureOutput(
command, print_cmd=cls.VERBOSE, **kwargs).output
def IsSticky(self):
"""Returns True if the ebuild is sticky."""
return self.is_stable and self.current_revision == 0
@classmethod
def UpdateEBuild(cls, ebuild_path, variables, redirect_file=None,
make_stable=True):
"""Static function that updates WORKON information in the ebuild.
This function takes an ebuild_path and updates WORKON information.
Args:
ebuild_path: The path of the ebuild.
variables: Dictionary of variables to update in ebuild.
redirect_file: Optionally redirect output of new ebuild somewhere else.
make_stable: Actually make the ebuild stable.
"""
written = False
for line in fileinput.input(ebuild_path, inplace=1):
# Has to be done here to get changes to sys.stdout from fileinput.input.
if not redirect_file:
redirect_file = sys.stdout
# Always add variables at the top of the ebuild, before the first
# nonblank line other than the EAPI line.
if not written and not _blank_or_eapi_re.match(line):
for key, value in sorted(variables.items()):
assert key is not None and value is not None
redirect_file.write('%s=%s\n' % (key, value))
written = True
# Mark KEYWORDS as stable by removing ~'s.
if line.startswith('KEYWORDS=') and make_stable:
line = line.replace('~', '')
varname, eq, _ = line.partition('=')
if not (eq == '=' and varname.strip() in variables):
# Don't write out the old value of the variable.
redirect_file.write(line)
fileinput.close()
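# Editorial example (placeholder values): RevWorkOnEBuild below calls
# UpdateEBuild with a dict such as
#     {'CROS_WORKON_COMMIT': '"<commit sha1>"',
#      'CROS_WORKON_TREE': '"<tree sha1>"'}
# so the KEY=value lines are written at the top of the ebuild, right after
# any EAPI line, and stale assignments of the same keys are dropped.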
@classmethod
def MarkAsStable(cls, unstable_ebuild_path, new_stable_ebuild_path,
variables, redirect_file=None, make_stable=True):
"""Static function that creates a revved stable ebuild.
This function assumes you have already figured out the name of the new
stable ebuild path and then creates that file from the given unstable
ebuild and marks it as stable. If the commit_value is set, it also
sets the commit_keyword=commit_value pair in the ebuild.
Args:
unstable_ebuild_path: The path to the unstable ebuild.
new_stable_ebuild_path: The path you want to use for the new stable
ebuild.
variables: Dictionary of variables to update in ebuild.
redirect_file: Optionally redirect output of new ebuild somewhere else.
make_stable: Actually make the ebuild stable.
"""
shutil.copyfile(unstable_ebuild_path, new_stable_ebuild_path)
EBuild.UpdateEBuild(new_stable_ebuild_path, variables, redirect_file,
make_stable)
@classmethod
def CommitChange(cls, message, overlay):
"""Commits current changes in git locally with given commit message.
Args:
message: the commit string to write when committing to git.
overlay: directory in which to commit the changes.
Raises:
RunCommandError: Error occurred while committing.
"""
logging.info('Committing changes with commit message: %s', message)
git_commit_cmd = ['git', 'commit', '-a', '-m', message]
cros_build_lib.RunCommand(git_commit_cmd, cwd=overlay,
print_cmd=cls.VERBOSE)
def __init__(self, path):
"""Sets up data about an ebuild from its path.
Args:
path: Path to the ebuild.
"""
self._overlay, self._category, self._pkgname, filename = path.rsplit('/', 3)
m = self._PACKAGE_VERSION_PATTERN.match(filename)
if not m:
raise EBuildVersionFormatException(filename)
self.version, self.version_no_rev, revision = m.groups()
if revision is not None:
self.current_revision = int(revision.replace('-r', ''))
else:
self.current_revision = 0
self.package = '%s/%s' % (self._category, self._pkgname)
self._ebuild_path_no_version = os.path.join(
os.path.dirname(path), self._pkgname)
self.ebuild_path_no_revision = '%s-%s' % (
self._ebuild_path_no_version, self.version_no_rev)
self._unstable_ebuild_path = '%s-9999.ebuild' % (
self._ebuild_path_no_version)
self.ebuild_path = path
self.is_workon = False
self.is_stable = False
self.is_blacklisted = False
self._ReadEBuild(path)
def _ReadEBuild(self, path):
"""Determine the settings of `is_workon` and `is_stable`.
`is_workon` is determined by whether the ebuild inherits from
the 'cros-workon' eclass. `is_stable` is determined by whether
there's a '~' in the KEYWORDS setting in the ebuild.
This function is separate from __init__() to allow unit tests to
stub it out.
"""
for line in fileinput.input(path):
if line.startswith('inherit ') and 'cros-workon' in line:
self.is_workon = True
elif line.startswith('KEYWORDS='):
for keyword in line.split('=', 1)[1].strip("\"'").split():
if not keyword.startswith('~') and keyword != '-*':
self.is_stable = True
elif line.startswith('CROS_WORKON_BLACKLIST='):
self.is_blacklisted = True
fileinput.close()
def GetGitProjectName(self, manifest, path):
"""Read the project variable from a git repository at given path."""
return manifest.FindProjectFromPath(path)
def GetSourcePath(self, srcroot, manifest):
"""Get the project and path for this ebuild.
The path is guaranteed to exist, be a directory, and be absolute.
"""
workon_vars = (
'CROS_WORKON_LOCALNAME',
'CROS_WORKON_PROJECT',
'CROS_WORKON_SUBDIR',
)
env = {
'CROS_WORKON_LOCALNAME': self._pkgname,
'CROS_WORKON_PROJECT': self._pkgname,
'CROS_WORKON_SUBDIR': '',
}
settings = osutils.SourceEnvironment(self._unstable_ebuild_path,
workon_vars, env=env)
localnames = settings['CROS_WORKON_LOCALNAME'].split(',')
projects = settings['CROS_WORKON_PROJECT'].split(',')
subdirs = settings['CROS_WORKON_SUBDIR'].split(',')
# Sanity checks and completion.
# Each project specification has to have the same amount of items.
if len(projects) != len(localnames):
raise EbuildFormatIncorrectException(self._unstable_ebuild_path,
'Number of _PROJECT and _LOCALNAME items does not match.')
# The number of subdirs must be either 0, 1, or len(projects).
if len(projects) != len(subdirs) and len(subdirs) > 1:
raise EbuildFormatIncorrectException(self._unstable_ebuild_path,
'Incorrect number of _SUBDIR items.')
# If there's one, apply it to all.
if len(subdirs) == 1:
subdirs = subdirs * len(projects)
# If there is none, make an empty list to avoid exceptions later.
if len(subdirs) == 0:
subdirs = [''] * len(projects)
# Calculate srcdir.
if self._category == 'chromeos-base':
dir_ = 'platform'
else:
dir_ = 'third_party'
subdir_paths = [os.path.realpath(os.path.join(srcroot, dir_, l, s))
for l, s in zip(localnames, subdirs)]
for subdir_path, project in zip(subdir_paths, projects):
if not os.path.isdir(subdir_path):
cros_build_lib.Die('Source repository %s '
'for project %s does not exist.' % (subdir_path,
self._pkgname))
# Verify that we're grabbing the commit id from the right project name.
real_project = self.GetGitProjectName(manifest, subdir_path)
if project != real_project:
cros_build_lib.Die('Project name mismatch for %s '
'(found %s, expected %s)' % (subdir_path,
real_project,
project))
return projects, subdir_paths
def GetCommitId(self, srcdir):
"""Get the commit id for this ebuild."""
output = self._RunCommand(['git', 'rev-parse', 'HEAD'], cwd=srcdir)
if not output:
cros_build_lib.Die('Cannot determine HEAD commit for %s' % srcdir)
return output.rstrip()
def GetTreeId(self, srcdir):
"""Get the SHA1 of the source tree for this ebuild.
Unlike the commit hash, the SHA1 of the source tree is unaffected by the
history of the repository, or by commit messages.
"""
output = self._RunCommand(['git', 'log', '-1', '--format=%T'], cwd=srcdir)
if not output:
cros_build_lib.Die('Cannot determine HEAD tree hash for %s' % srcdir)
return output.rstrip()
def GetVersion(self, srcroot, manifest, default):
"""Get the base version number for this ebuild.
The version is provided by the ebuild through a specific script in
the $FILESDIR (chromeos-version.sh).
"""
vers_script = os.path.join(os.path.dirname(self._ebuild_path_no_version),
'files', 'chromeos-version.sh')
if not os.path.exists(vers_script):
return default
srcdirs = self.GetSourcePath(srcroot, manifest)[1]
# The chromeos-version script will output a usable raw version number,
# or nothing in case of error or no available version
try:
output = self._RunCommand([vers_script] + srcdirs).strip()
except cros_build_lib.RunCommandError as e:
cros_build_lib.Die('Package %s chromeos-version.sh failed: %s' %
(self._pkgname, e))
if not output:
cros_build_lib.Die('Package %s has a chromeos-version.sh script but '
'it returned no valid version for "%s"' %
(self._pkgname, ' '.join(srcdirs)))
return output
@staticmethod
def FormatBashArray(unformatted_list):
"""Returns a python list in a bash array format.
If the list only has one item, format as simple quoted value.
That is both backwards-compatible and more readable.
Args:
unformatted_list: an iterable to format as a bash array. This variable
has to be sanitized first, as no quoting or escaping is done here.
Returns:
A text string that can be used by bash as array declaration.
"""
if len(unformatted_list) > 1:
return '("%s")' % '" "'.join(unformatted_list)
else:
return '"%s"' % unformatted_list[0]
def RevWorkOnEBuild(self, srcroot, manifest, redirect_file=None):
"""Revs a workon ebuild given the git commit hash.
By default this class overwrites a new ebuild given the normal
ebuild rev'ing logic. However, a user can specify a redirect_file
to redirect the new stable ebuild to another file.
Args:
srcroot: full path to the 'src' subdirectory in the source
repository.
manifest: git.ManifestCheckout object.
redirect_file: Optional file to write the new ebuild. By default
it is written using the standard rev'ing logic. This file must be
opened and closed by the caller.
Raises:
OSError: Error occurred while creating a new ebuild.
IOError: Error occurred while writing to the new revved ebuild file.
Returns:
If the revved package is different than the old ebuild, return the full
revved package name, including the version number. Otherwise, return None.
"""
if self.is_stable:
stable_version_no_rev = self.GetVersion(srcroot, manifest,
self.version_no_rev)
else:
# If given unstable ebuild, use preferred version rather than 9999.
stable_version_no_rev = self.GetVersion(srcroot, manifest, '0.0.1')
new_version = '%s-r%d' % (
stable_version_no_rev, self.current_revision + 1)
new_stable_ebuild_path = '%s-%s.ebuild' % (
self._ebuild_path_no_version, new_version)
self._Print('Creating new stable ebuild %s' % new_stable_ebuild_path)
if not os.path.exists(self._unstable_ebuild_path):
cros_build_lib.Die('Missing unstable ebuild: %s' %
self._unstable_ebuild_path)
srcdirs = self.GetSourcePath(srcroot, manifest)[1]
commit_ids = map(self.GetCommitId, srcdirs)
tree_ids = map(self.GetTreeId, srcdirs)
variables = dict(CROS_WORKON_COMMIT=self.FormatBashArray(commit_ids),
CROS_WORKON_TREE=self.FormatBashArray(tree_ids))
self.MarkAsStable(self._unstable_ebuild_path, new_stable_ebuild_path,
variables, redirect_file)
old_ebuild_path = self.ebuild_path
if filecmp.cmp(old_ebuild_path, new_stable_ebuild_path, shallow=False):
os.unlink(new_stable_ebuild_path)
return None
else:
self._Print('Adding new stable ebuild to git')
self._RunCommand(['git', 'add', new_stable_ebuild_path],
cwd=self._overlay)
if self.is_stable:
self._Print('Removing old ebuild from git')
self._RunCommand(['git', 'rm', old_ebuild_path],
cwd=self._overlay)
return '%s-%s' % (self.package, new_version)
@classmethod
def GitRepoHasChanges(cls, directory):
"""Returns True if there are changes in the given directory."""
# Refresh the index first. This squashes just metadata changes.
cros_build_lib.RunCommand(['git', 'update-index', '-q', '--refresh'],
cwd=directory, print_cmd=cls.VERBOSE)
ret_obj = cros_build_lib.RunCommand(
['git', 'diff-index', '--name-only', 'HEAD'], cwd=directory,
print_cmd=cls.VERBOSE, redirect_stdout=True)
return ret_obj.output not in [None, '']
@staticmethod
def _GetSHA1ForProject(manifest, project):
"""Get the latest SHA1 for a given project from Gerrit.
This function looks up the remote and branch for a given project in the
manifest, and uses this to look up the SHA1 from Gerrit. This only makes
sense for unpinned manifests.
Args:
manifest: git.ManifestCheckout object.
project: Project to look up.
Raises:
Exception if the manifest is pinned.
"""
helper = gerrit.GerritHelper.FromManifestProject(manifest, project)
manifest_branch = manifest.GetAttributeForProject(project, 'revision')
branch = git.StripRefsHeads(manifest_branch)
return helper.GetLatestSHA1ForBranch(project, branch)
@staticmethod
def _GetEBuildProjects(buildroot, manifest, overlay_list, changes):
"""Calculate ebuild->project map for changed ebuilds.
Args:
buildroot: Path to root of build directory.
manifest: git.ManifestCheckout object.
overlay_list: List of all overlays.
changes: Changes from Gerrit that are being pushed.
Returns:
A dictionary mapping changed ebuilds to lists of associated projects.
"""
directory_src = os.path.join(buildroot, 'src')
overlay_dict = dict((o, []) for o in overlay_list)
BuildEBuildDictionary(overlay_dict, True, None)
changed_projects = set(c.project for c in changes)
ebuild_projects = {}
for ebuilds in overlay_dict.itervalues():
for ebuild in ebuilds:
projects = ebuild.GetSourcePath(directory_src, manifest)[0]
if changed_projects.intersection(projects):
ebuild_projects[ebuild] = projects
return ebuild_projects
@classmethod
def UpdateCommitHashesForChanges(cls, changes, buildroot, manifest):
"""Updates the commit hashes for the EBuilds uprevved in changes.
Args:
changes: Changes from Gerrit that are being pushed.
buildroot: Path to root of build directory.
manifest: git.ManifestCheckout object.
"""
project_sha1s = {}
overlay_list = FindOverlays(constants.BOTH_OVERLAYS, buildroot=buildroot)
ebuild_projects = cls._GetEBuildProjects(buildroot, manifest, overlay_list,
changes)
for ebuild, projects in ebuild_projects.iteritems():
for project in set(projects).difference(project_sha1s):
project_sha1s[project] = cls._GetSHA1ForProject(manifest, project)
sha1s = [project_sha1s[project] for project in projects]
logging.info('Updating ebuild for project %s with commit hashes %r',
ebuild.package, sha1s)
updates = dict(CROS_WORKON_COMMIT=cls.FormatBashArray(sha1s))
EBuild.UpdateEBuild(ebuild.ebuild_path, updates)
# Commit any changes to all overlays.
for overlay in overlay_list:
if EBuild.GitRepoHasChanges(overlay):
EBuild.CommitChange('Updating commit hashes in ebuilds '
'to match remote repository.', overlay=overlay)
def BestEBuild(ebuilds):
"""Returns the newest EBuild from a list of EBuild objects."""
from portage.versions import vercmp
winner = ebuilds[0]
for ebuild in ebuilds[1:]:
if vercmp(winner.version, ebuild.version) < 0:
winner = ebuild
return winner
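# Illustrative example (assumes portage's vercmp semantics; package names are
# hypothetical): given EBuild objects for foo-0.0.1-r5 and foo-0.0.2-r1,
# BestEBuild returns the foo-0.0.2-r1 instance, since
# vercmp('0.0.1-r5', '0.0.2-r1') < 0.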
def _FindUprevCandidates(files):
"""Return the uprev candidate ebuild from a specified list of files.
Usually an uprev candidate is the stable ebuild in a cros_workon
directory. However, if no such stable ebuild exists (someone just
checked in the 9999 ebuild), this is the unstable ebuild.
If the package isn't a cros_workon package, return None.
Args:
files: List of files in a package directory.
"""
stable_ebuilds = []
unstable_ebuilds = []
for path in files:
if not path.endswith('.ebuild') or os.path.islink(path):
continue
ebuild = EBuild(path)
if not ebuild.is_workon or ebuild.is_blacklisted:
continue
if ebuild.is_stable:
if ebuild.version == '9999':
cros_build_lib.Die('KEYWORDS in 9999 ebuild should not be stable %s'
% path)
stable_ebuilds.append(ebuild)
else:
unstable_ebuilds.append(ebuild)
# If both ebuild lists are empty, the passed in file list was for
# a non-workon package.
if not unstable_ebuilds:
if stable_ebuilds:
path = os.path.dirname(stable_ebuilds[0].ebuild_path)
cros_build_lib.Die('Missing 9999 ebuild in %s' % path)
return None
path = os.path.dirname(unstable_ebuilds[0].ebuild_path)
if len(unstable_ebuilds) > 1:
cros_build_lib.Die('Found multiple unstable ebuilds in %s' % path)
if not stable_ebuilds:
cros_build_lib.Warning('Missing stable ebuild in %s' % path)
return unstable_ebuilds[0]
if len(stable_ebuilds) == 1:
return stable_ebuilds[0]
stable_versions = set(ebuild.version_no_rev for ebuild in stable_ebuilds)
if len(stable_versions) > 1:
package = stable_ebuilds[0].package
message = 'Found multiple stable ebuild versions in %s:' % path
for version in stable_versions:
message += '\n %s-%s' % (package, version)
cros_build_lib.Die(message)
uprev_ebuild = max(stable_ebuilds, key=lambda eb: eb.current_revision)
for ebuild in stable_ebuilds:
if ebuild != uprev_ebuild:
cros_build_lib.Warning('Ignoring stable ebuild revision %s in %s' %
(ebuild.version, path))
return uprev_ebuild
def BuildEBuildDictionary(overlays, use_all, packages):
"""Build a dictionary of the ebuilds in the specified overlays.
overlays: A map which maps overlay directories to arrays of stable EBuilds
inside said directories.
use_all: Whether to include all ebuilds in the specified directories.
If true, then we gather all packages in the directories regardless
of whether they are in our set of packages.
packages: A set of the packages we want to gather. If use_all is
True, this argument is ignored, and should be None.
"""
for overlay in overlays:
for package_dir, _dirs, files in os.walk(overlay):
# Add stable ebuilds to overlays[overlay].
paths = [os.path.join(package_dir, path) for path in files]
ebuild = _FindUprevCandidates(paths)
# If the --all option isn't used, we only want to update packages that
# are in packages.
if ebuild and (use_all or ebuild.package in packages):
overlays[overlay].append(ebuild)
def RegenCache(overlay):
"""Regenerate the cache of the specified overlay.
overlay: The tree to regenerate the cache for.
"""
repo_name = GetOverlayName(overlay)
if not repo_name:
return
layout = cros_build_lib.LoadKeyValueFile('%s/metadata/layout.conf' % overlay,
ignore_missing=True)
if layout.get('cache-format') != 'md5-dict':
return
# Regen for the whole repo.
cros_build_lib.RunCommand(['egencache', '--update', '--repo', repo_name,
'--jobs', str(multiprocessing.cpu_count())])
# If there was nothing new generated, then let's just bail.
result = cros_build_lib.RunCommand(['git', 'status', '-s', 'metadata/'],
cwd=overlay, redirect_stdout=True)
if not result.output:
return
# Explicitly add any new files to the index.
cros_build_lib.RunCommand(['git', 'add', 'metadata/'], cwd=overlay)
# Explicitly tell git to also include rm-ed files.
cros_build_lib.RunCommand(['git', 'commit', '-m', 'regen cache',
'metadata/'], cwd=overlay)
def ParseBashArray(value):
"""Parse a valid bash array into python list."""
# The syntax for bash arrays is nontrivial, so let's use bash to do the
# heavy lifting for us.
sep = ','
# Because %s may contain bash comments (#), put a clever newline in the way.
cmd = 'ARR=%s\nIFS=%s; echo -n "${ARR[*]}"' % (value, sep)
return cros_build_lib.RunCommandCaptureOutput(
cmd, print_cmd=False, shell=True).output.split(sep)
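# Illustrative examples (values are hypothetical), assuming bash is available:
#   ParseBashArray('"abc"')          -> ['abc']
#   ParseBashArray('("abc" "def")')  -> ['abc', 'def']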
def GetWorkonProjectMap(overlay, subdirectories):
"""Get the project -> ebuild mapping for cros_workon ebuilds.
Args:
overlay: Overlay to look at.
subdirectories: List of subdirectories to look in on the overlay.
Returns:
A list of (filename, projects) tuples for cros-workon ebuilds in the
given overlay under the given subdirectories.
"""
# Search ebuilds for project names, ignoring non-existent directories.
cmd = ['grep', '^CROS_WORKON_PROJECT=', '--include', '*-9999.ebuild',
'-Hsr'] + list(subdirectories)
result = cros_build_lib.RunCommandCaptureOutput(
cmd, cwd=overlay, error_code_ok=True, print_cmd=False)
for grep_line in result.output.splitlines():
filename, _, line = grep_line.partition(':')
value = line.partition('=')[2]
projects = ParseBashArray(value)
yield filename, projects
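# Illustrative grep output line consumed by the loop above (path and project
# are hypothetical):
#   chromeos-base/foo/foo-9999.ebuild:CROS_WORKON_PROJECT="chromiumos/platform/foo"
# which yields ('chromeos-base/foo/foo-9999.ebuild', ['chromiumos/platform/foo']).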
def SplitEbuildPath(path):
"""Split an ebuild path into its components.
Given a specified ebuild filename, returns $CATEGORY, $PN, $P. It does not
perform any check on ebuild name elements or their validity, merely splits
a filename, absolute or relative, and returns the last 3 components.
Example: For /any/path/chromeos-base/power_manager/power_manager-9999.ebuild,
returns ('chromeos-base', 'power_manager', 'power_manager-9999').
Returns:
$CATEGORY, $PN, $P
"""
return os.path.splitext(path)[0].rsplit('/', 3)[-3:]
def SplitPV(pv):
"""Takes a PV value and splits it into individual components.
Returns:
A collection with named members:
pv, package, version, version_no_rev, rev
"""
m = _pvr_re.match(pv)
if m is None:
return None
return PV(**m.groupdict())
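# Illustrative example (exact field values depend on the module-level _pvr_re
# pattern, which is defined elsewhere in this file; the package is hypothetical):
#   SplitPV('foo-0.0.1-r1') -> PV(pv='foo-0.0.1-r1', package='foo',
#                                 version='0.0.1-r1', version_no_rev='0.0.1',
#                                 rev='r1')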
def SplitCPV(cpv):
"""Splits a CPV value into components.
Returns:
A collection with named members:
category, pv, package, version, version_no_rev, rev
"""
(category, pv) = cpv.split('/', 1)
m = SplitPV(pv)
if m is None:
return None
# pylint: disable=W0212
return CPV(category=category, **m._asdict())
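# Illustrative example (same caveats as SplitPV above; names are hypothetical):
#   SplitCPV('chromeos-base/foo-0.0.1-r1')
#   -> CPV(category='chromeos-base', pv='foo-0.0.1-r1', package='foo', ...)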
def FindWorkonProjects(packages):
"""Find the projects associated with the specified cros_workon packages.
Args:
packages: List of cros_workon packages.
Returns:
The set of projects associated with the specified cros_workon packages.
"""
all_projects = set()
buildroot, both = constants.SOURCE_ROOT, constants.BOTH_OVERLAYS
for overlay in FindOverlays(both, buildroot=buildroot):
for _, projects in GetWorkonProjectMap(overlay, packages):
all_projects.update(projects)
return all_projects
def ListInstalledPackages(sysroot):
"""Lists all portage packages in a given portage-managed root.
Assumes the existence of a /var/db/pkg package database.
Args:
sysroot: The root being inspected.
Returns:
A list of (cp,v) tuples in the given sysroot.
"""
vdb_path = os.path.join(sysroot, 'var/db/pkg')
ebuild_pattern = os.path.join(vdb_path, '*/*/*.ebuild')
packages = []
for path in glob.glob(ebuild_pattern):
category, package, packagecheck = SplitEbuildPath(path)
pv = SplitPV(package)
if package == packagecheck and pv is not None:
packages.append(('%s/%s' % (category, pv.package), pv.version))
return packages
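# Illustrative mapping performed above (hypothetical sysroot and package):
#   <sysroot>/var/db/pkg/chromeos-base/foo-0.0.1-r1/foo-0.0.1-r1.ebuild
#   -> ('chromeos-base/foo', '0.0.1-r1')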
def BestVisible(atom, board=None, buildroot=constants.SOURCE_ROOT):
"""Get the best visible ebuild CPV for the given atom.
Args:
atom: Portage atom.
board: Board to look at. By default, look in chroot.
buildroot: Directory from which to run the command.
Returns:
A CPV object.
"""
portageq = 'portageq' if board is None else 'portageq-%s' % board
root = '/' if board is None else '/build/%s' % board
cmd = [portageq, 'best_visible', root, 'ebuild', atom]
result = cros_build_lib.RunCommandCaptureOutput(
cmd, cwd=buildroot, enter_chroot=True, debug_level=logging.DEBUG)
return SplitCPV(result.output.strip())
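# Illustrative command run above for board='lumpy' (hypothetical board name):
#   portageq-lumpy best_visible /build/lumpy ebuild <atom>
# and for board=None: portageq best_visible / ebuild <atom>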
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Juergen Brendel, Cisco Systems Inc.
# @author: Abhishek Raut, Cisco Systems Inc.
from mock import patch
from oslo.config import cfg
from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import attributes
from neutron.common.test_lib import test_config
from neutron import context
import neutron.db.api as db
from neutron.plugins.cisco.db import n1kv_db_v2
from neutron.plugins.cisco.db import network_db_v2 as cdb
from neutron.plugins.cisco import extensions
from neutron.plugins.cisco.extensions import n1kv_profile
from neutron.plugins.cisco.extensions import network_profile
from neutron.plugins.cisco.n1kv import n1kv_client
from neutron.plugins.cisco.n1kv import n1kv_neutron_plugin
from neutron.tests import base
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_db_plugin as test_plugin
class FakeResponse(object):
"""
This object is returned by mocked httplib instead of a normal response.
Initialize it with the status code, content type and buffer contents
you wish to return.
"""
def __init__(self, status, response_text, content_type):
self.buffer = response_text
self.status = status
def __getitem__(cls, val):
return "application/xml"
def read(self, *args, **kwargs):
return self.buffer
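# Illustrative usage of FakeResponse (mirrors how the mocked httplib connection
# is wired up in N1kvPluginTestCase.setUp below; values are hypothetical):
#   resp = FakeResponse(200, '<ok/>', 'application/xml')
#   resp.read()           -> '<ok/>'
#   resp['content-type']  -> 'application/xml' (any key returns this value)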
def _fake_setup_vsm(self):
"""Fake establish Communication with Cisco Nexus1000V VSM."""
self.agent_vsm = True
self._poll_policies(event_type="port_profile")
class NetworkProfileTestExtensionManager(object):
def get_resources(self):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
network_profile.RESOURCE_ATTRIBUTE_MAP)
return network_profile.Network_profile.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class N1kvPluginTestCase(test_plugin.NeutronDbPluginV2TestCase):
_plugin_name = ('neutron.plugins.cisco.n1kv.'
'n1kv_neutron_plugin.N1kvNeutronPluginV2')
tenant_id = "some_tenant"
DEFAULT_RESP_BODY = ""
DEFAULT_RESP_CODE = 200
DEFAULT_CONTENT_TYPE = ""
fmt = "json"
def _make_test_policy_profile(self, name='service_profile'):
"""
Create a policy profile record for testing purposes.
:param name: string representing the name of the policy profile to
create. Default argument value chosen to correspond to the
default name specified in config.py file.
"""
uuid = test_api_v2._uuid()
profile = {'id': uuid,
'name': name}
return n1kv_db_v2.create_policy_profile(profile)
def _make_test_profile(self, name='default_network_profile'):
"""
Create a profile record for testing purposes.
:param name: string representing the name of the network profile to
create. Default argument value chosen to correspond to the
default name specified in config.py file.
"""
db_session = db.get_session()
profile = {'name': name,
'segment_type': 'vlan',
'physical_network': 'phsy1',
'segment_range': '3968-4047'}
self.network_vlan_ranges = {profile[
'physical_network']: [(3968, 4047)]}
n1kv_db_v2.sync_vlan_allocations(db_session, self.network_vlan_ranges)
return n1kv_db_v2.create_network_profile(db_session, profile)
def setUp(self):
"""
Setup method for n1kv plugin tests.
First step is to define an acceptable response from the VSM to
our requests. This needs to be done BEFORE the setUp() function
of the super-class is called.
This default here works for many cases. If you need something
extra, please define your own setUp() function in your test class,
and set your DEFAULT_RESPONSE value also BEFORE calling the
setUp() of the super-function (this one here). If you have set
a value already, it will not be overwritten by this code.
"""
if not self.DEFAULT_RESP_BODY:
self.DEFAULT_RESP_BODY = (
"""<?xml version="1.0" encoding="utf-8"?>
<set name="events_set">
<instance name="1" url="/api/hyper-v/events/1">
<properties>
<cmd>configure terminal ; port-profile type vethernet grizzlyPP
(SUCCESS)
</cmd>
<id>42227269-e348-72ed-bdb7-7ce91cd1423c</id>
<time>1369223611</time>
<name>grizzlyPP</name>
</properties>
</instance>
<instance name="2" url="/api/hyper-v/events/2">
<properties>
<cmd>configure terminal ; port-profile type vethernet havanaPP
(SUCCESS)
</cmd>
<id>3fc83608-ae36-70e7-9d22-dec745623d06</id>
<time>1369223661</time>
<name>havanaPP</name>
</properties>
</instance>
</set>
""")
# Creating a mock HTTP connection object for httplib. The N1KV client
# interacts with the VSM via HTTP. Since we don't have a VSM running
# in the unit tests, we need to 'fake' it by patching the HTTP library
# itself. We install a patch for a fake HTTP connection class.
# Using __name__ to avoid having to enter the full module path.
http_patcher = patch(n1kv_client.httplib2.__name__ + ".Http")
FakeHttpConnection = http_patcher.start()
self.addCleanup(http_patcher.stop)
# Now define the return values for a few functions that may be called
# on any instance of the fake HTTP connection class.
instance = FakeHttpConnection.return_value
instance.getresponse.return_value = (FakeResponse(
self.DEFAULT_RESP_CODE,
self.DEFAULT_RESP_BODY,
'application/xml'))
instance.request.return_value = (instance.getresponse.return_value,
self.DEFAULT_RESP_BODY)
# Patch some internal functions in a few other parts of the system.
# These help us move along, without having to mock up even more systems
# in the background.
# Return a dummy VSM IP address
get_vsm_hosts_patcher = patch(n1kv_client.__name__ +
".Client._get_vsm_hosts")
fake_get_vsm_hosts = get_vsm_hosts_patcher.start()
self.addCleanup(get_vsm_hosts_patcher.stop)
fake_get_vsm_hosts.return_value = ["127.0.0.1"]
# Return dummy user profiles
get_cred_name_patcher = patch(cdb.__name__ + ".get_credential_name")
fake_get_cred_name = get_cred_name_patcher.start()
self.addCleanup(get_cred_name_patcher.stop)
fake_get_cred_name.return_value = {"user_name": "admin",
"password": "admin_password"}
n1kv_neutron_plugin.N1kvNeutronPluginV2._setup_vsm = _fake_setup_vsm
test_config['plugin_name_v2'] = self._plugin_name
neutron_extensions.append_api_extensions_path(extensions.__path__)
self.addCleanup(cfg.CONF.reset)
ext_mgr = NetworkProfileTestExtensionManager()
test_config['extension_manager'] = ext_mgr
self.addCleanup(self.restore_test_config)
# Save the original RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.items():
self.saved_attr_map[resource] = attrs.copy()
# Update the RESOURCE_ATTRIBUTE_MAP with n1kv specific extended attrs.
attributes.RESOURCE_ATTRIBUTE_MAP["networks"].update(
n1kv_profile.EXTENDED_ATTRIBUTES_2_0["networks"])
attributes.RESOURCE_ATTRIBUTE_MAP["ports"].update(
n1kv_profile.EXTENDED_ATTRIBUTES_2_0["ports"])
self.addCleanup(self.restore_resource_attribute_map)
self.addCleanup(db.clear_db)
super(N1kvPluginTestCase, self).setUp(self._plugin_name)
# Create some of the database entries that we require.
self._make_test_profile()
self._make_test_policy_profile()
def restore_resource_attribute_map(self):
# Restore the original RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
def restore_test_config(self):
# Restore the original test_config
del test_config['plugin_name_v2']
def test_plugin(self):
self._make_network('json',
'some_net',
True,
tenant_id=self.tenant_id,
set_context=True)
req = self.new_list_request('networks', params="fields=tenant_id")
req.environ['neutron.context'] = context.Context('', self.tenant_id)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
body = self.deserialize('json', res)
self.assertIn('tenant_id', body['networks'][0])
class TestN1kvNetworkProfiles(N1kvPluginTestCase):
def _prepare_net_profile_data(self, segment_type):
netp = {'network_profile': {'name': 'netp1',
'segment_type': segment_type,
'tenant_id': self.tenant_id}}
if segment_type == 'vlan':
netp['network_profile']['segment_range'] = '100-180'
netp['network_profile']['physical_network'] = 'phys1'
elif segment_type == 'overlay':
netp['network_profile']['segment_range'] = '10000-10010'
netp['network_profile']['sub_type'] = 'enhanced'
return netp
def test_create_network_profile_plugin(self):
data = self._prepare_net_profile_data('vlan')
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 201)
def test_update_network_profile_physical_network_fail(self):
net_p = self._make_test_profile(name='netp1')
data = {'network_profile': {'physical_network': 'some-phys-net'}}
net_p_req = self.new_update_request('network_profiles',
data,
net_p['id'])
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_update_network_profile_segment_type_fail(self):
net_p = self._make_test_profile(name='netp1')
data = {'network_profile': {'segment_type': 'overlay'}}
net_p_req = self.new_update_request('network_profiles',
data,
net_p['id'])
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_update_network_profile_sub_type_fail(self):
net_p_dict = self._prepare_net_profile_data('overlay')
net_p_req = self.new_create_request('network_profiles', net_p_dict)
net_p = self.deserialize(self.fmt,
net_p_req.get_response(self.ext_api))
data = {'network_profile': {'sub_type': 'vlan'}}
update_req = self.new_update_request('network_profiles',
data,
net_p['network_profile']['id'])
update_res = update_req.get_response(self.ext_api)
self.assertEqual(update_res.status_int, 400)
class TestN1kvBasicGet(test_plugin.TestBasicGet,
N1kvPluginTestCase):
pass
class TestN1kvHTTPResponse(test_plugin.TestV2HTTPResponse,
N1kvPluginTestCase):
pass
class TestN1kvPorts(test_plugin.TestPortsV2,
N1kvPluginTestCase):
def test_create_port_with_default_n1kv_profile_id(self):
"""Test port create without passing policy profile id."""
with self.port() as port:
db_session = db.get_session()
pp = n1kv_db_v2.get_policy_profile(
db_session, port['port'][n1kv_profile.PROFILE_ID])
self.assertEqual(pp['name'], 'service_profile')
def test_create_port_with_n1kv_profile_id(self):
"""Test port create with policy profile id."""
profile_obj = self._make_test_policy_profile(name='test_profile')
with self.network() as network:
data = {'port': {n1kv_profile.PROFILE_ID: profile_obj.id,
'tenant_id': self.tenant_id,
'network_id': network['network']['id']}}
port_req = self.new_create_request('ports', data)
port = self.deserialize(self.fmt,
port_req.get_response(self.api))
self.assertEqual(port['port'][n1kv_profile.PROFILE_ID],
profile_obj.id)
self._delete('ports', port['port']['id'])
def test_update_port_with_n1kv_profile_id(self):
"""Test port update failure while updating policy profile id."""
with self.port() as port:
data = {'port': {n1kv_profile.PROFILE_ID: 'some-profile-uuid'}}
port_req = self.new_update_request('ports',
data,
port['port']['id'])
res = port_req.get_response(self.api)
# Port update should fail to update policy profile id.
self.assertEqual(res.status_int, 400)
class TestN1kvNetworks(test_plugin.TestNetworksV2,
N1kvPluginTestCase):
def _prepare_net_data(self, net_profile_id):
return {'network': {'name': 'net1',
n1kv_profile.PROFILE_ID: net_profile_id,
'tenant_id': self.tenant_id}}
def test_create_network_with_default_n1kv_profile_id(self):
"""Test network create without passing network profile id."""
with self.network() as network:
db_session = db.get_session()
np = n1kv_db_v2.get_network_profile(
db_session, network['network'][n1kv_profile.PROFILE_ID])
self.assertEqual(np['name'], 'default_network_profile')
def test_create_network_with_n1kv_profile_id(self):
"""Test network create with network profile id."""
profile_obj = self._make_test_profile(name='test_profile')
data = self._prepare_net_data(profile_obj.id)
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual(network['network'][n1kv_profile.PROFILE_ID],
profile_obj.id)
def test_update_network_with_n1kv_profile_id(self):
"""Test network update failure while updating network profile id."""
with self.network() as network:
data = {'network': {n1kv_profile.PROFILE_ID: 'some-profile-uuid'}}
network_req = self.new_update_request('networks',
data,
network['network']['id'])
res = network_req.get_response(self.api)
# Network update should fail to update network profile id.
self.assertEqual(res.status_int, 400)
class TestN1kvNonDbTest(base.BaseTestCase):
"""
This test class here can be used to test the plugin directly,
without going through the DB plugin test cases.
None of the set-up done in N1kvPluginTestCase applies here.
"""
def test_db(self):
n1kv_db_v2.initialize()
|
|
import os, sys
sys.path.append(os.getcwd())
import time
import numpy as np
import tensorflow as tf
import tflib as lib
import tflib.ops.linear
import tflib.ops.conv2d
import tflib.ops.batchnorm
import tflib.ops.deconv2d
import tflib.save_images
import tflib.cifar10
import tflib.inception_score
import tflib.plot
# Download CIFAR-10 (Python version) at
# https://www.cs.toronto.edu/~kriz/cifar.html and fill in the path to the
# extracted files here!
DATA_DIR = ''
if len(DATA_DIR) == 0:
raise Exception('Please specify path to data directory in gan_cifar.py!')
MODE = 'wgan-gp' # Valid options are dcgan, wgan, or wgan-gp
DIM = 128 # This overfits substantially; you're probably better off with 64
LAMBDA = 10 # Gradient penalty lambda hyperparameter
CRITIC_ITERS = 5 # How many critic iterations per generator iteration
BATCH_SIZE = 64 # Batch size
ITERS = 200000 # How many generator iterations to train for
OUTPUT_DIM = 3072 # Number of pixels in CIFAR10 (3*32*32)
lib.print_model_settings(locals().copy())
def LeakyReLU(x, alpha=0.2):
return tf.maximum(alpha*x, x)
def ReLULayer(name, n_in, n_out, inputs):
output = lib.ops.linear.Linear(name+'.Linear', n_in, n_out, inputs)
return tf.nn.relu(output)
def LeakyReLULayer(name, n_in, n_out, inputs):
output = lib.ops.linear.Linear(name+'.Linear', n_in, n_out, inputs)
return LeakyReLU(output)
def Generator(n_samples, noise=None):
if noise is None:
noise = tf.random_normal([n_samples, 128])
output = lib.ops.linear.Linear('Generator.Input', 128, 4*4*4*DIM, noise)
output = lib.ops.batchnorm.Batchnorm('Generator.BN1', [0], output)
output = tf.nn.relu(output)
output = tf.reshape(output, [-1, 4*DIM, 4, 4])
output = lib.ops.deconv2d.Deconv2D('Generator.2', 4*DIM, 2*DIM, 5, output)
output = lib.ops.batchnorm.Batchnorm('Generator.BN2', [0,2,3], output)
output = tf.nn.relu(output)
output = lib.ops.deconv2d.Deconv2D('Generator.3', 2*DIM, DIM, 5, output)
output = lib.ops.batchnorm.Batchnorm('Generator.BN3', [0,2,3], output)
output = tf.nn.relu(output)
output = lib.ops.deconv2d.Deconv2D('Generator.5', DIM, 3, 5, output)
output = tf.tanh(output)
return tf.reshape(output, [-1, OUTPUT_DIM])
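# Shape walk-through of the Generator above (descriptive comment, assuming the
# tflib Deconv2D op upsamples by a factor of 2 per layer):
#   noise [N, 128] -> Linear -> [N, 4*4*4*DIM] -> reshape [N, 4*DIM, 4, 4]
#   -> Deconv2D [N, 2*DIM, 8, 8] -> Deconv2D [N, DIM, 16, 16]
#   -> Deconv2D [N, 3, 32, 32] -> tanh -> flattened to [N, OUTPUT_DIM]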
def Discriminator(inputs):
output = tf.reshape(inputs, [-1, 3, 32, 32])
output = lib.ops.conv2d.Conv2D('Discriminator.1', 3, DIM, 5, output, stride=2)
output = LeakyReLU(output)
output = lib.ops.conv2d.Conv2D('Discriminator.2', DIM, 2*DIM, 5, output, stride=2)
if MODE != 'wgan-gp':
output = lib.ops.batchnorm.Batchnorm('Discriminator.BN2', [0,2,3], output)
output = LeakyReLU(output)
output = lib.ops.conv2d.Conv2D('Discriminator.3', 2*DIM, 4*DIM, 5, output, stride=2)
if MODE != 'wgan-gp':
output = lib.ops.batchnorm.Batchnorm('Discriminator.BN3', [0,2,3], output)
output = LeakyReLU(output)
output = tf.reshape(output, [-1, 4*4*4*DIM])
output = lib.ops.linear.Linear('Discriminator.Output', 4*4*4*DIM, 1, output)
return tf.reshape(output, [-1])
real_data_int = tf.placeholder(tf.int32, shape=[BATCH_SIZE, OUTPUT_DIM])
real_data = 2*((tf.cast(real_data_int, tf.float32)/255.)-.5)
fake_data = Generator(BATCH_SIZE)
disc_real = Discriminator(real_data)
disc_fake = Discriminator(fake_data)
gen_params = lib.params_with_name('Generator')
disc_params = lib.params_with_name('Discriminator')
if MODE == 'wgan':
gen_cost = -tf.reduce_mean(disc_fake)
disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)
gen_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(gen_cost, var_list=gen_params)
disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(disc_cost, var_list=disc_params)
clip_ops = []
for var in disc_params:
clip_bounds = [-.01, .01]
clip_ops.append(
tf.assign(
var,
tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])
)
)
clip_disc_weights = tf.group(*clip_ops)
elif MODE == 'wgan-gp':
# Standard WGAN loss
gen_cost = -tf.reduce_mean(disc_fake)
disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)
# Gradient penalty
alpha = tf.random_uniform(
shape=[BATCH_SIZE,1],
minval=0.,
maxval=1.
)
differences = fake_data - real_data
interpolates = real_data + (alpha*differences)
gradients = tf.gradients(Discriminator(interpolates), [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
gradient_penalty = tf.reduce_mean((slopes-1.)**2)
disc_cost += LAMBDA*gradient_penalty
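    # The statements above implement the WGAN-GP penalty term
    #   LAMBDA * E_xhat[(||grad_xhat D(xhat)||_2 - 1)^2]
    # where xhat = real + alpha*(fake - real) is sampled uniformly along
    # straight lines between real and generated samples (the `interpolates`
    # tensor).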
gen_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(gen_cost, var_list=gen_params)
disc_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(disc_cost, var_list=disc_params)
elif MODE == 'dcgan':
gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(disc_fake, tf.ones_like(disc_fake)))
disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(disc_fake, tf.zeros_like(disc_fake)))
disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(disc_real, tf.ones_like(disc_real)))
disc_cost /= 2.
gen_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(gen_cost,
var_list=lib.params_with_name('Generator'))
disc_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(disc_cost,
var_list=lib.params_with_name('Discriminator.'))
# For generating samples
fixed_noise_128 = tf.constant(np.random.normal(size=(128, 128)).astype('float32'))
fixed_noise_samples_128 = Generator(128, noise=fixed_noise_128)
def generate_image(frame, true_dist):
samples = session.run(fixed_noise_samples_128)
samples = ((samples+1.)*(255./2)).astype('int32')
lib.save_images.save_images(samples.reshape((128, 3, 32, 32)), 'samples_{}.jpg'.format(frame))
# For calculating inception score
samples_100 = Generator(100)
def get_inception_score():
all_samples = []
for i in xrange(10):
all_samples.append(session.run(samples_100))
all_samples = np.concatenate(all_samples, axis=0)
all_samples = ((all_samples+1.)*(255./2)).astype('int32')
all_samples = all_samples.reshape((-1, 3, 32, 32)).transpose(0,2,3,1)
return lib.inception_score.get_inception_score(list(all_samples))
# Dataset iterators
train_gen, dev_gen = lib.cifar10.load(BATCH_SIZE, data_dir=DATA_DIR)
def inf_train_gen():
while True:
for images,_ in train_gen():
yield images
# Train loop
with tf.Session() as session:
session.run(tf.initialize_all_variables())
gen = inf_train_gen()
for iteration in xrange(ITERS):
start_time = time.time()
# Train generator
if iteration > 0:
_ = session.run(gen_train_op)
# Train critic
if MODE == 'dcgan':
disc_iters = 1
else:
disc_iters = CRITIC_ITERS
for i in xrange(disc_iters):
_data = gen.next()
_disc_cost, _ = session.run([disc_cost, disc_train_op], feed_dict={real_data_int: _data})
if MODE == 'wgan':
_ = session.run(clip_disc_weights)
lib.plot.plot('train disc cost', _disc_cost)
lib.plot.plot('time', time.time() - start_time)
# Calculate inception score every 1K iters
if iteration % 1000 == 999:
inception_score = get_inception_score()
lib.plot.plot('inception score', inception_score[0])
# Calculate dev loss and generate samples every 100 iters
if iteration % 100 == 99:
dev_disc_costs = []
for images,_ in dev_gen():
_dev_disc_cost = session.run(disc_cost, feed_dict={real_data_int: images})
dev_disc_costs.append(_dev_disc_cost)
lib.plot.plot('dev disc cost', np.mean(dev_disc_costs))
generate_image(iteration, _data)
# Save logs every 100 iters
if (iteration < 5) or (iteration % 100 == 99):
lib.plot.flush()
lib.plot.tick()
|
|
import sys
if sys.hexversion >= 0x02050000:
import xml.etree.ElementTree as ET
else:
import elementtree.ElementTree as ET
import socket
import vdebug.log
import base64
import time
import string
""" Response objects for the DBGP module."""
class Response:
"""Contains response data from a command made to the debugger."""
ns = '{urn:debugger_protocol_v1}'
def __init__(self,response,cmd,cmd_args,api):
self.response = response
self.cmd = cmd
self.cmd_args = cmd_args
self.xml = None
self.api = api
if "<error" in self.response:
self.__parse_error()
def __parse_error(self):
"""Parse an error message which has been returned
in the response, then raise it as a DBGPError."""
xml = self.as_xml()
err_el = xml.find('%serror' % self.ns)
if err_el is None:
raise DBGPError("Could not parse error from return XML",1)
else:
code = err_el.get("code")
if code is None:
raise ResponseError(
"Missing error code in response",
self.response)
elif int(code) == 4:
raise CmdNotImplementedError('Command not implemented')
msg_el = err_el.find('%smessage' % self.ns)
if msg_el is None:
raise ResponseError(
"Missing error message in response",
self.response)
raise DBGPError(msg_el.text,code)
def get_cmd(self):
"""Get the command that created this response."""
return self.cmd
def get_cmd_args(self):
"""Get the arguments to the command."""
return self.cmd_args
def as_string(self):
"""Return the full response as a string.
There is a __str__ method, which will render the
whole object as a string and should be used for
displaying.
"""
return self.response
def as_xml(self):
"""Get the response as element tree XML.
Returns an xml.etree.ElementTree.Element object.
"""
if self.xml is None:
self.xml = ET.fromstring(self.response)
self.__determine_ns()
return self.xml
def __determine_ns(self):
tag_repr = str(self.xml.tag)
if tag_repr[0] != '{':
raise DBGPError('Invalid or missing XML namespace',1)
else:
ns_parts = tag_repr.split('}')
self.ns = ns_parts[0] + '}'
def __str__(self):
return self.as_string()
class ContextNamesResponse(Response):
def names(self):
names = {}
for c in list(self.as_xml()):
names[int(c.get('id'))] = c.get('name')
return names
class TraceResponse(Response):
"""Response object returned by the trace command."""
def __str__(self):
return self.as_xml().get('trace')
class StatusResponse(Response):
"""Response object returned by the status command."""
def __str__(self):
return self.as_xml().get('status')
class StackGetResponse(Response):
"""Response object used by the stack_get command."""
def get_stack(self):
return list(self.as_xml())
class ContextGetResponse(Response):
"""Response object used by the context_get command.
The property nodes are converted into ContextProperty
objects, which are much easier to use."""
def __init__(self,response,cmd,cmd_args,api):
Response.__init__(self,response,cmd,cmd_args,api)
self.properties = []
def get_context(self):
for c in list(self.as_xml()):
self.create_properties(ContextProperty(c))
return self.properties
def create_properties(self,property):
self.properties.append(property)
for p in property.children:
self.create_properties(p)
class EvalResponse(ContextGetResponse):
"""Response object returned by the eval command."""
def __init__(self,response,cmd,cmd_args,api):
try:
ContextGetResponse.__init__(self,response,cmd,cmd_args,api)
except DBGPError, e:
if int(e.args[1]) == 206:
raise EvalError()
else:
raise e
def get_context(self):
code = self.get_code()
for c in list(self.as_xml()):
self.create_properties(EvalProperty(c,code,self.api.language))
return self.properties
def get_code(self):
cmd = self.get_cmd_args()
parts = cmd.split('-- ')
return base64.decodestring(parts[1])
class BreakpointSetResponse(Response):
"""Response object returned by the breakpoint_set command."""
def get_id(self):
return int(self.as_xml().get('id'))
def __str__(self):
return self.as_xml().get('id')
class FeatureGetResponse(Response):
"""Response object specifically for the feature_get command."""
def is_supported(self):
"""Whether the feature is supported or not."""
xml = self.as_xml()
return int(xml.get('supported'))
def __str__(self):
if self.is_supported():
xml = self.as_xml()
return xml.text
else:
return "* Feature not supported *"
class Api:
"""Api for eBGP commands.
Uses a Connection object to read and write with the debugger,
and builds commands and returns the results.
"""
conn = None
transID = 0
def __init__(self,connection):
"""Create a new Api using a Connection object.
The Connection object specifies the debugger connection,
and this Api provides an OO interface for interacting
with it.
connection -- The Connection object to use
"""
self.language = None
self.protocol = None
self.idekey = None
self.startfile = None
self.conn = connection
if self.conn.isconnected() == 0:
self.conn.open()
self.__parse_init_msg(self.conn.recv_msg())
def __parse_init_msg(self,msg):
"""Parse the init message from the debugger"""
xml = ET.fromstring(msg)
self.language = xml.get("language")
if self.language is None:
raise ResponseError(
"Invalid XML response from debugger",
msg)
self.language = self.language.lower()
self.idekey = xml.get("idekey")
self.version = xml.get("api_version")
self.startfile = xml.get("fileuri")
def send_cmd(self,cmd,args = '',
res_cls = Response):
"""Send a command to the debugger.
This method automatically adds a unique transaction
ID to the command which is required by the debugger.
Returns a Response object, which contains the
response message and command.
cmd -- the command name, e.g. 'status'
args -- arguments for the command, which is optional
for certain commands (default '')
"""
args = args.strip()
send = cmd.strip()
self.transID += 1
send += ' -i '+ str(self.transID)
if len(args) > 0:
send += ' ' + args
vdebug.log.Log("Command: "+send,\
vdebug.log.Logger.DEBUG)
self.conn.send_msg(send)
msg = self.conn.recv_msg()
vdebug.log.Log("Response: "+msg,\
vdebug.log.Logger.DEBUG)
return res_cls(msg,cmd,args,self)
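# Illustrative command string produced by send_cmd('feature_get', '-n encoding')
# on the first call of a session (transID becomes 1):
#   'feature_get -i 1 -n encoding'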
def status(self):
"""Get the debugger status.
Returns a Response object.
"""
return self.send_cmd('status','',StatusResponse)
def feature_get(self,name):
"""Get the value of a feature from the debugger.
See the DBGP documentation for a list of features.
Returns a FeatureGetResponse object.
name -- name of the feature, e.g. encoding
"""
return self.send_cmd(
'feature_get',
'-n '+str(name),
FeatureGetResponse)
def feature_set(self,name,value):
"""Set the value of a debugger feature.
See the DBGP documentation for a list of features.
Returns a Response object.
name -- name of the feature, e.g. encoding
value -- new value for the feature
"""
return self.send_cmd(
'feature_set',
'-n ' + str(name) + ' -v ' + str(value))
def run(self):
"""Tell the debugger to start or resume
execution."""
return self.send_cmd('run','',StatusResponse)
def eval(self,code):
"""Tell the debugger to start or resume
execution."""
code_enc = base64.encodestring(code)
args = '-- %s' % code_enc
""" The python engine incorrectly requires length.
if self.language == 'python':
args = ("-l %i " % len(code_enc) ) + args"""
return self.send_cmd('eval',args,EvalResponse)
def step_into(self):
"""Tell the debugger to step to the next
statement.
If there's a function call, the debugger engine
will break on the first statement in the function.
"""
return self.send_cmd('step_into','',StatusResponse)
def step_over(self):
"""Tell the debugger to step to the next
statement.
If there's a function call, the debugger engine
will stop at the next statement after the function call.
"""
return self.send_cmd('step_over','',StatusResponse)
def step_out(self):
"""Tell the debugger to step out of the statement.
The debugger will step out of the current scope.
"""
return self.send_cmd('step_out','',StatusResponse)
def stop(self):
"""Tell the debugger to stop execution.
The script is terminated immediately."""
return self.send_cmd('stop','',StatusResponse)
def stack_get(self):
"""Get the stack information.
"""
return self.send_cmd('stack_get','',StackGetResponse)
def context_get(self,context = 0):
"""Get the context variables.
"""
return self.send_cmd('context_get',\
'-c %i' % int(context),\
ContextGetResponse)
def context_names(self):
"""Get the context types.
"""
return self.send_cmd('context_names','',ContextNamesResponse)
def property_get(self,name):
"""Get a property.
"""
return self.send_cmd('property_get','-n %s -d 0' % name,ContextGetResponse)
def detach(self):
"""Tell the debugger to detach itself from this
client.
The script is not terminated, but runs as normal
from this point."""
ret = self.send_cmd('detach','',StatusResponse)
self.conn.close()
return ret
def breakpoint_set(self,cmd_args):
"""Set a breakpoint.
The breakpoint type is defined by the arguments, see the
Breakpoint class for more detail."""
return self.send_cmd('breakpoint_set',cmd_args,\
BreakpointSetResponse)
def breakpoint_list(self):
return self.send_cmd('breakpoint_list')
def breakpoint_remove(self,id):
"""Remove a breakpoint by ID.
The ID is that returned in the response from breakpoint_set."""
return self.send_cmd('breakpoint_remove','-d %i' % id,Response)
"""Connection module for managing a socket connection
between this client and the debugger."""
class Connection:
"""DBGP connection class, for managing the connection to the debugger.
The host, port and socket timeout are configurable on object construction.
"""
sock = None
address = None
isconned = 0
def __init__(self, host = '', port = 9000, timeout = 30, input_stream = None):
"""Create a new Connection.
The connection is not established until open() is called.
host -- host name where debugger is running (default '')
port -- port number which debugger is listening on (default 9000)
timeout -- time in seconds to wait for a debugger connection before giving up (default 30)
input_stream -- object for checking input stream and user interrupts (default None)
"""
self.port = port
self.host = host
self.timeout = timeout
self.input_stream = input_stream
def __del__(self):
"""Make sure the connection is closed."""
self.close()
def isconnected(self):
"""Whether the connection has been established."""
return self.isconned
def open(self):
"""Listen for a connection from the debugger. Listening for the actual
connection is handled by self.listen()."""
print 'Waiting for a connection (Ctrl-C to cancel, this message will self-destruct in ',self.timeout,' seconds...)'
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serv.setblocking(0)
serv.bind((self.host, self.port))
serv.listen(5)
(self.sock, self.address) = self.listen(serv, self.timeout)
self.sock.settimeout(None)
except socket.timeout:
serv.close()
raise TimeoutError("Timeout waiting for connection")
except:
serv.close()
raise
self.isconned = 1
serv.close()
def listen(self, serv, timeout):
"""Non-blocking listener. Provides support for keyboard interrupts from
the user. Although it's non-blocking, the user interface will still
block until the timeout is reached.
serv -- Socket server to listen to.
timeout -- Seconds before timeout.
"""
start = time.time()
while True:
if (time.time() - start) > timeout:
raise socket.timeout
try:
"""Check for user interrupts"""
if self.input_stream is not None:
self.input_stream.probe()
return serv.accept()
except socket.error:
pass
def close(self):
"""Close the connection."""
if self.sock is not None:
vdebug.log.Log("Closing the socket",\
vdebug.log.Logger.DEBUG)
self.sock.close()
self.sock = None
self.isconned = 0
def __recv_length(self):
"""Get the length of the proceeding message."""
length = ''
while 1:
c = self.sock.recv(1)
if c == '':
self.close()
raise EOFError('Socket Closed')
if c == '\0':
return int(length)
if c.isdigit():
length = length + c
def __recv_null(self):
"""Receive a null byte."""
while 1:
c = self.sock.recv(1)
if c == '':
self.close()
raise EOFError('Socket Closed')
if c == '\0':
return
def __recv_body(self, to_recv):
"""Receive a message of a given length.
to_recv -- length of the message to receive
"""
body = ''
while to_recv > 0:
buf = self.sock.recv(to_recv)
if buf == '':
self.close()
raise EOFError('Socket Closed')
to_recv -= len(buf)
body = body + buf
return body
def recv_msg(self):
"""Receive a message from the debugger.
Returns a string, which is expected to be XML.
"""
length = self.__recv_length()
body = self.__recv_body(length)
self.__recv_null()
return body
def send_msg(self, cmd):
"""Send a message to the debugger.
cmd -- command to send
"""
self.sock.send(cmd + '\0')
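# Illustrative DBGP wire exchange handled by send_msg/recv_msg above (values
# are hypothetical):
#   IDE -> engine:  'status -i 1\0'
#   engine -> IDE:  '<length>\0<?xml version="1.0" ...?><response .../>\0'
# i.e. responses are framed as a decimal length, a NULL byte, the XML body and
# a trailing NULL, which is what __recv_length, __recv_body and __recv_null
# parse.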
class ContextProperty:
ns = '{urn:debugger_protocol_v1}'
def __init__(self,node,parent = None,depth = 0):
self.parent = parent
self.__determine_type(node)
self._determine_displayname(node)
self.encoding = node.get('encoding')
self.depth = depth
self.size = node.get('size')
self.value = ""
self.is_last_child = False
self._determine_children(node)
self.__determine_value(node)
self.__init_children(node)
if self.type == 'scalar':
self.size = len(self.value) - 2
def __determine_value(self,node):
if self.has_children:
self.value = ""
return
self.value = self._get_enc_node_text(node,'value')
if self.value is None:
if self.encoding == 'base64':
if node.text is None:
self.value = ""
else:
self.value = base64.decodestring(node.text)
elif not self.is_uninitialized() \
and not self.has_children:
self.value = node.text
if self.value is None:
self.value = ""
self.num_crs = self.value.count('\n')
if self.type.lower() in ("string","str","scalar"):
self.value = '`%s`' % self.value.replace('`','\\`')
def __determine_type(self,node):
type = node.get('classname')
if type is None:
type = node.get('type')
if type is None:
type = 'unknown'
self.type = type
def _determine_displayname(self,node):
display_name = node.get('fullname')
if display_name is None:
display_name = self._get_enc_node_text(node,'fullname',"")
if display_name == '::':
display_name = self.type
self.display_name = display_name
def _get_enc_node_text(self,node,name,default =
None):
n = node.find('%s%s' %(self.ns, name))
if n is not None and n.text is not None:
if n.get('encoding') == 'base64':
val = base64.decodestring(n.text)
else:
val = n.text
else:
val = None
if val is None:
return default
else:
return val
def _determine_children(self,node):
children = node.get('numchildren')
if children is None:
children = node.get('children')
if children is None:
children = 0
else:
children = int(children)
self.num_declared_children = children
self.has_children = children > 0
self.children = []
def __init_children(self,node):
if self.has_children:
idx = 0
tagname = '%sproperty' % self.ns
children = list(node)
if children is not None:
for c in children:
if c.tag == tagname:
idx += 1
p = self._create_child(c,self,self.depth+1)
self.children.append(p)
if idx == self.num_declared_children:
p.mark_as_last_child()
def _create_child(self,node,parent,depth):
return ContextProperty(node,parent,depth)
def mark_as_last_child(self):
self.is_last_child = True
def is_uninitialized(self):
if self.type == 'uninitialized':
return True
else:
return False
def child_count(self):
return len(self.children)
def type_and_size(self):
size = None
if self.has_children:
size = self.num_declared_children
elif self.size is not None:
size = self.size
if size is None:
return self.type
else:
return "%s [%s]" %(self.type,size)
class EvalProperty(ContextProperty):
def __init__(self,node,code,language,parent=None,depth=0):
self.code = code
self.language = language.lower()
if parent is None:
self.is_parent = True
else:
self.is_parent = False
ContextProperty.__init__(self,node,parent,depth)
def _create_child(self,node,parent,depth):
return EvalProperty(node,self.code,self.language,parent,depth)
def _determine_displayname(self,node):
if self.is_parent:
self.display_name = self.code
else:
if self.language == 'php' or \
self.language == 'perl':
if self.parent.type == 'array':
self.display_name = self.parent.display_name + \
"['%s']" % node.get('name')
else:
self.display_name = self.parent.display_name + \
"->"+node.get('name')
else:
name = node.get('name')
if name is None:
name = "?"
name = self._get_enc_node_text(node,'name','?')
if self.parent.type == 'list':
self.display_name = self.parent.display_name + name
else:
self.display_name = self.parent.display_name + \
"." + name
""" Proxy """
class Proxy:
def __init__(self,host,port,local_port,ide_key):
self.host = host
self.port = int(port)
self.local_port = int(local_port)
self.ide_key = ide_key
self.register()
def register(self):
self.command(['proxyinit', '-p', str(self.local_port), '-k', self.ide_key, '-m 1'])
def stop(self):
self.command(['proxystop', '-k', self.ide_key])
def command(self,commands):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((self.host, self.port))
s.sendall(string.join(commands, ' '))
finally:
s.close()
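# Illustrative registration message sent by register() above (hypothetical
# port and IDE key): 'proxyinit -p 9000 -k my_ide_key -m 1'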
""" Errors/Exceptions """
class TimeoutError(Exception):
pass
class DBGPError(Exception):
"""Raised when the debugger returns an error message."""
pass
class CmdNotImplementedError(Exception):
"""Raised when the debugger returns an error message."""
pass
class EvalError(Exception):
"""Raised when some evaluated code is invalid."""
pass
class ResponseError(Exception):
"""An error caused by an unexpected response from the
debugger (e.g. invalid format XML)."""
pass
class TraceError(Exception):
"""Raised when trace is out of domain."""
pass
|
|
'''Splitter
======
.. versionadded:: 1.5.0
.. image:: images/splitter.jpg
:align: right
The :class:`Splitter` is a widget that helps you re-size its child
widget/layout by letting you re-size it via dragging the boundary or
double tapping the boundary. This widget is similar to the
:class:`~kivy.uix.scrollview.ScrollView` in that it allows only one
child widget.
Usage::
splitter = Splitter(sizable_from = 'right')
splitter.add_widget(layout_or_widget_instance)
splitter.min_size = 100
splitter.max_size = 250
To change the size of the strip/border used for resizing::
splitter.strip_size = '10pt'
To change its appearance::
splitter.strip_cls = your_custom_class
You can also change the appearance of the `strip_cls`, which defaults to
:class:`SplitterStrip`, by overriding the `kv` rule in your app::
<SplitterStrip>:
horizontal: True if self.parent and self.parent.sizable_from[0] \
in ('t', 'b') else False
background_normal: 'path to normal horizontal image' \
if self.horizontal else 'path to vertical normal image'
background_down: 'path to pressed horizontal image' \
if self.horizontal else 'path to vertical pressed image'
'''
__all__ = ('Splitter', )
from kivy.compat import string_types
from kivy.factory import Factory
from kivy.uix.button import Button
from kivy.properties import (OptionProperty, NumericProperty, ObjectProperty,
ListProperty, BooleanProperty)
from kivy.uix.boxlayout import BoxLayout
class SplitterStrip(Button):
'''Class used for the graphical representation of the resize strip of a
:class:`kivy.uix.splitter.Splitter`.
'''
pass
class Splitter(BoxLayout):
'''See module documentation.
:Events:
`on_press`:
Fired when the splitter is pressed.
`on_release`:
Fired when the splitter is released.
.. versionchanged:: 1.6.0
Added `on_press` and `on_release` events.
'''
border = ListProperty([4, 4, 4, 4])
'''Border used for the
:class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction.
This must be a list of four values: (top, right, bottom, left).
Read the BorderImage instructions for more information about how
to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and
defaults to (4, 4, 4, 4).
'''
strip_cls = ObjectProperty(SplitterStrip)
'''Specifies the class of the resize Strip.
:attr:`strip_cls` is an :class:`kivy.properties.ObjectProperty` and
defaults to :class:`~kivy.uix.splitter.SplitterStrip`, which is of type
:class:`~kivy.uix.button.Button`.
.. versionchanged:: 1.8.0
If you set a string, the :class:`~kivy.factory.Factory` will be used to
resolve the class.
'''
sizable_from = OptionProperty('left', options=(
'left', 'right', 'top', 'bottom'))
'''Specifies the side from which the widget is resizable. Options are::
`left`, `right`, `top` or `bottom`
:attr:`sizable_from` is an :class:`~kivy.properties.OptionProperty`
and defaults to `left`.
'''
strip_size = NumericProperty('10pt')
'''Specifies the size of the resize strip.
:attr:`strip_size` is a :class:`~kivy.properties.NumericProperty` and
defaults to `10pt`.
'''
min_size = NumericProperty('100pt')
'''Specifies the minimum size below which the widget is not resizable.
:attr:`min_size` is a :class:`~kivy.properties.NumericProperty` and
defaults to `100pt`.
'''
max_size = NumericProperty('500pt')
'''Specifies the maximum size beyond which the widget is not resizable.
:attr:`max_size` is a :class:`~kivy.properties.NumericProperty`
and defaults to `500pt`.
'''
_parent_proportion = NumericProperty(0.)
'''(internal) Specifies the distance that the splitter has travelled
across its parent, used to automatically maintain a sensible
position if the parent is resized.
:attr:`_parent_proportion` is a
:class:`~kivy.properties.NumericProperty` and defaults to 0.
.. versionadded:: 1.9.0
'''
_bound_parent = ObjectProperty(None, allownone=True)
'''(internal) References the widget whose size is currently being
tracked by :attr:`_parent_proportion`.
:attr:`_bound_parent` is a
:class:`~kivy.properties.ObjectProperty` and defaults to None.
.. versionadded:: 1.9.0
'''
keep_within_parent = BooleanProperty(False)
'''If True, will limit the splitter to stay within its parent widget.
:attr:`keep_within_parent` is a
:class:`~kivy.properties.BooleanProperty` and defaults to False.
.. versionadded:: 1.9.0
'''
rescale_with_parent = BooleanProperty(False)
'''If True, will automatically change size to take up the same
proportion of the parent widget when it is resized, while
staying within :attr:`min_size` and :attr:`max_size`. As long as
these attributes can be satisfied, this stops the
:class:`Splitter` from exceeding the parent size during rescaling.
:attr:`rescale_with_parent` is a
:class:`~kivy.properties.BooleanProperty` and defaults to False.
.. versionadded:: 1.9.0
'''
__events__ = ('on_press', 'on_release')
def __init__(self, **kwargs):
self._container = None
self._strip = None
super(Splitter, self).__init__(**kwargs)
do_size = self._do_size
fbind = self.fbind
fbind('max_size', do_size)
fbind('min_size', do_size)
fbind('parent', self._rebind_parent)
def on_sizable_from(self, instance, sizable_from):
if not instance._container:
return
sup = super(Splitter, instance)
_strp = instance._strip
if _strp:
# remove any previous binds
_strp.unbind(on_touch_down=instance.strip_down)
_strp.unbind(on_touch_move=instance.strip_move)
_strp.unbind(on_touch_up=instance.strip_up)
self.unbind(disabled=_strp.setter('disabled'))
sup.remove_widget(instance._strip)
else:
cls = instance.strip_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
instance._strip = _strp = cls()
sz_frm = instance.sizable_from[0]
if sz_frm in ('l', 'r'):
_strp.size_hint = None, 1
_strp.width = instance.strip_size
instance.orientation = 'horizontal'
instance.unbind(strip_size=_strp.setter('width'))
instance.bind(strip_size=_strp.setter('width'))
else:
_strp.size_hint = 1, None
_strp.height = instance.strip_size
instance.orientation = 'vertical'
instance.unbind(strip_size=_strp.setter('height'))
instance.bind(strip_size=_strp.setter('height'))
index = 1
if sz_frm in ('r', 'b'):
index = 0
sup.add_widget(_strp, index)
_strp.bind(on_touch_down=instance.strip_down)
_strp.bind(on_touch_move=instance.strip_move)
_strp.bind(on_touch_up=instance.strip_up)
_strp.disabled = self.disabled
self.bind(disabled=_strp.setter('disabled'))
def add_widget(self, widget, index=0):
if self._container or not widget:
raise Exception('Splitter accepts only one Child')
self._container = widget
sz_frm = self.sizable_from[0]
if sz_frm in ('l', 'r'):
widget.size_hint_x = 1
else:
widget.size_hint_y = 1
index = 0
if sz_frm in ('r', 'b'):
index = 1
super(Splitter, self).add_widget(widget, index)
self.on_sizable_from(self, self.sizable_from)
def remove_widget(self, widget, *largs):
super(Splitter, self).remove_widget(widget)
if widget == self._container:
self._container = None
def clear_widgets(self):
self.remove_widget(self._container)
def strip_down(self, instance, touch):
if not instance.collide_point(*touch.pos):
return False
touch.grab(self)
self.dispatch('on_press')
def on_press(self):
pass
def _rebind_parent(self, instance, new_parent):
if self._bound_parent is not None:
self._bound_parent.unbind(size=self.rescale_parent_proportion)
if self.parent is not None:
new_parent.bind(size=self.rescale_parent_proportion)
self._bound_parent = new_parent
self.rescale_parent_proportion()
def rescale_parent_proportion(self, *args):
if self.rescale_with_parent:
parent_proportion = self._parent_proportion
if self.sizable_from in ('top', 'bottom'):
new_height = parent_proportion * self.parent.height
self.height = max(self.min_size, min(new_height, self.max_size))
else:
new_width = parent_proportion * self.parent.width
self.width = max(self.min_size, min(new_width, self.max_size))
def _do_size(self, instance, value):
if self.sizable_from[0] in ('l', 'r'):
self.width = max(self.min_size, min(self.width, self.max_size))
else:
self.height = max(self.min_size, min(self.height, self.max_size))
def strip_move(self, instance, touch):
if touch.grab_current is not instance:
return False
max_size = self.max_size
min_size = self.min_size
sz_frm = self.sizable_from[0]
if sz_frm in ('t', 'b'):
diff_y = (touch.dy)
if self.keep_within_parent:
if sz_frm == 't' and (self.top + diff_y) > self.parent.top:
diff_y = self.parent.top - self.top
elif sz_frm == 'b' and (self.y + diff_y) < self.parent.y:
diff_y = self.parent.y - self.y
if sz_frm == 'b':
diff_y *= -1
if self.size_hint_y:
self.size_hint_y = None
if self.height > 0:
self.height += diff_y
else:
self.height = 1
height = self.height
self.height = max(min_size, min(height, max_size))
self._parent_proportion = self.height / self.parent.height
else:
diff_x = (touch.dx)
if self.keep_within_parent:
if sz_frm == 'l' and (self.x + diff_x) < self.parent.x:
diff_x = self.parent.x - self.x
elif (sz_frm == 'r' and
(self.right + diff_x) > self.parent.right):
diff_x = self.parent.right - self.right
if sz_frm == 'l':
diff_x *= -1
if self.size_hint_x:
self.size_hint_x = None
if self.width > 0:
self.width += diff_x
else:
self.width = 1
width = self.width
self.width = max(min_size, min(width, max_size))
self._parent_proportion = self.width / self.parent.width
def strip_up(self, instance, touch):
if touch.grab_current is not instance:
return
if touch.is_double_tap:
max_size = self.max_size
min_size = self.min_size
sz_frm = self.sizable_from[0]
s = self.size
if sz_frm in ('t', 'b'):
if self.size_hint_y:
self.size_hint_y = None
if s[1] - min_size <= max_size - s[1]:
self.height = max_size
else:
self.height = min_size
else:
if self.size_hint_x:
self.size_hint_x = None
if s[0] - min_size <= max_size - s[0]:
self.width = max_size
else:
self.width = min_size
touch.ungrab(instance)
self.dispatch('on_release')
def on_release(self):
pass
if __name__ == '__main__':
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
class SplitterApp(App):
def build(self):
root = FloatLayout()
bx = BoxLayout()
bx.add_widget(Button())
bx.add_widget(Button())
bx2 = BoxLayout()
bx2.add_widget(Button())
bx2.add_widget(Button())
bx2.add_widget(Button())
spl = Splitter(
size_hint=(1, .25),
                pos_hint={'top': 1},
                sizable_from='bottom')
spl1 = Splitter(
sizable_from='left',
size_hint=(None, 1), width=90)
spl1.add_widget(Button())
bx.add_widget(spl1)
spl.add_widget(bx)
spl2 = Splitter(size_hint=(.25, 1))
spl2.add_widget(bx2)
spl2.sizable_from = 'right'
root.add_widget(spl)
root.add_widget(spl2)
return root
SplitterApp().run()
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shlex
from st2common.exceptions import content
__all__ = [
'JsonValueParser',
'StringValueParser',
'DefaultParser',
'KeyValueActionAliasFormatParser',
'ActionAliasFormatParser'
]
class JsonValueParser(object):
"""
    Not a real JSON parser: it only cares about matching braces, which tell it
    where the value ends and the iteration stops.
"""
start = '{'
end = '}'
@staticmethod
def is_applicable(first_char):
return first_char == JsonValueParser.start
@staticmethod
def parse(start, stream):
end = 0
char_idx = start
message_depth = 0
while not end:
char = stream[char_idx]
if char == JsonValueParser.start:
message_depth += 1
elif char == JsonValueParser.end:
message_depth -= 1
if not message_depth:
end = char_idx
else:
char_idx += 1
if char_idx == len(stream):
raise content.ParseException('What sort of messed up stream did you provide!')
# preserve the start and end chars
return start, stream[start:end + 1], end + 1
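# Illustrative sketch (not part of the original module): JsonValueParser.parse
# is intended to return the brace-balanced slice of the stream starting at
# `start`, plus the index just past it. The sample stream below is hypothetical.
def _demo_json_value_parser():
    stream = '{"a": {"b": 1}} trailing text'
    start, value, next_idx = JsonValueParser.parse(0, stream)
    # value == '{"a": {"b": 1}}' and next_idx == 15
    return start, value, next_idx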
class StringValueParser(object):
def __init__(self, start, end, escape):
self.start = start
self.end = end
self.escape = escape
def is_applicable(self, first_char):
return first_char == self.start
def parse(self, start, stream):
end = 0
char_idx = start + 1
while not end:
char = stream[char_idx]
if char == self.end and stream[char_idx - 1] != self.escape:
end = char_idx
else:
char_idx += 1
if char_idx == len(stream):
raise content.ParseException('What sort of messed up stream did you provide!')
# skip the start and end chars
return start, stream[start + 1:end], end + 1
class DefaultParser(object):
end = ' '
@staticmethod
def is_applicable(first_char):
return True
@staticmethod
def parse(start, stream):
end = stream.find(DefaultParser.end, start)
        # if not found, read until the end of the stream. This makes the default
        # parser different from the others, which always require an end marker.
if end == -1:
end = len(stream)
try:
return start, stream[start:end], end
except IndexError:
raise content.ParseException('What sort of messed up stream did you provide!')
PARSERS = [
JsonValueParser,
StringValueParser(start='"', end='"', escape='\\'),
StringValueParser(start='\'', end='\'', escape='\\'),
DefaultParser
]
class KeyValueActionAliasFormatParser(object):
"""
Parser which parses action parameters in the format of "key=value" from the provided param
string.
"""
delimiter = '='
def __init__(self, alias_format, param_stream=None):
self._format_stream = alias_format
self._param_stream = param_stream or ''
def parse(self):
result = {}
try:
tokens = shlex.split(self._param_stream)
except ValueError:
return result
for token in tokens:
split = token.split('=', 1)
if len(split) != 2:
continue
key, value = split
result[key] = value
return result
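# Illustrative sketch (not part of the original module): the key=value parser
# simply shlex-splits the param stream and keeps tokens containing '=', so a
# hypothetical stream like the one below yields a plain dict of strings.
def _demo_key_value_parser():
    parser = KeyValueActionAliasFormatParser(
        alias_format='', param_stream='host=web01 count=3 note="hello world"')
    return parser.parse()  # {'host': 'web01', 'count': '3', 'note': 'hello world'}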
class ActionAliasFormatParser(object):
FORMAT_MARKER_START = '{{'
FORMAT_MARKER_END = '}}'
PARAM_DEFAULT_VALUE_SEPARATOR = '='
def __init__(self, alias_format=None, param_stream=None):
self._format = alias_format or ''
self._param_stream = param_stream or ''
self._alias_fmt_ptr = 0
self._param_strm_ptr = 0
def __iter__(self):
return self
def next(self):
try:
p_start, param_format, p_end = self._get_next_param_format()
param_name, default_value = self._get_param_name_default_value(param_format)
except ValueError:
# If we get back a ValueError then time to stop the iteration.
raise StopIteration()
# compute forward progress of the alias format pointer
v_start = p_start - self._alias_fmt_ptr + self._param_strm_ptr
value = None
# make sure v_start is within param_stream
if v_start < len(self._param_stream):
_, value, v_end = self._get_next_value(v_start)
            # move both pointers one past the end of the parsed format/value
self._alias_fmt_ptr = p_end
self._param_strm_ptr = v_end
elif v_start < len(self._format):
# Advance in the format string
# Note: We still want to advance in the format string even though
# there is nothing left in the param stream since we support default
# values and param_stream can be empty
self._alias_fmt_ptr = p_end
if not value and not default_value:
raise content.ParseException('No value supplied and no default value found.')
return param_name, value if value else default_value
def get_extracted_param_value(self):
result = {}
# First extract format params provided in the string (if any)
format_params = {name: value for name, value in self}
result.update(format_params)
# Second extract key=value params provided in the param string
kv_parser = KeyValueActionAliasFormatParser(alias_format=self._format,
param_stream=self._param_stream)
kv_params = kv_parser.parse()
result.update(kv_params)
return result
def _get_next_param_format(self):
mrkr_strt_ps = self._format.index(self.FORMAT_MARKER_START, self._alias_fmt_ptr)
try:
mrkr_end_ps = self._format.index(self.FORMAT_MARKER_END, mrkr_strt_ps)
except ValueError:
            # A start marker was found but no end marker, so this is a parse error.
raise content.ParseException('Expected end marker.')
param_format = self._format[mrkr_strt_ps + len(self.FORMAT_MARKER_START): mrkr_end_ps]
return mrkr_strt_ps, param_format.strip(), mrkr_end_ps + len(self.FORMAT_MARKER_END)
def _get_param_name_default_value(self, param_format):
if not param_format:
return None, None
values = param_format.split(self.PARAM_DEFAULT_VALUE_SEPARATOR)
return values[0], values[1] if len(values) > 1 else None
def _get_next_value(self, start):
parser = self._get_parser(self._param_stream[start])
return parser.parse(start, self._param_stream)
def _get_parser(self, first_char):
for parser in PARSERS:
if parser.is_applicable(first_char):
return parser
raise Exception('No parser found')
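# Illustrative sketch (not part of the original module): given a hypothetical
# alias format and a matching param stream, the parser yields one (name, value)
# pair per "{{ ... }}" placeholder and falls back to the declared default when
# the stream runs out. (Python-2 era code: iteration relies on next(), not
# __next__().)
def _demo_action_alias_format_parser():
    parser = ActionAliasFormatParser(
        alias_format='run {{cmd}} on {{host=localhost}}',
        param_stream='run date on web01')
    return parser.get_extracted_param_value()  # {'cmd': 'date', 'host': 'web01'}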
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import random
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import StringIO
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.utils.py3 import parse_qsl
from libcloud.utils.py3 import u
from libcloud.utils.py3 import unittest2_required
if unittest2_required:
import unittest2 as unittest
else:
import unittest
XML_HEADERS = {'content-type': 'application/xml'}
class LibcloudTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
self._visited_urls = []
self._executed_mock_methods = []
super(LibcloudTestCase, self).__init__(*args, **kwargs)
def setUp(self):
self._visited_urls = []
self._executed_mock_methods = []
def _add_visited_url(self, url):
self._visited_urls.append(url)
def _add_executed_mock_method(self, method_name):
self._executed_mock_methods.append(method_name)
def assertExecutedMethodCount(self, expected):
actual = len(self._executed_mock_methods)
self.assertEqual(actual, expected,
'expected %d, but %d mock methods were executed'
% (expected, actual))
class multipleresponse(object):
"""
    A decorator that allows MockHttp objects to return multiple responses
"""
count = 0
func = None
def __init__(self, f):
self.func = f
def __call__(self, *args, **kwargs):
ret = self.func(self.func.__class__, *args, **kwargs)
response = ret[self.count]
self.count = self.count + 1
return response
class MockResponse(object):
"""
A mock HTTPResponse
"""
headers = {}
body = StringIO()
status = 0
reason = ''
version = 11
def __init__(self, status, body=None, headers=None, reason=None):
self.status = status
self.body = StringIO(u(body)) if body else StringIO()
self.headers = headers or self.headers
self.reason = reason or self.reason
def read(self, *args, **kwargs):
return self.body.read(*args, **kwargs)
def next(self):
if sys.version_info >= (2, 5) and sys.version_info <= (2, 6):
return self.body.next()
else:
return next(self.body)
def __next__(self):
return self.next()
def getheader(self, name, *args, **kwargs):
return self.headers.get(name, *args, **kwargs)
def getheaders(self):
return list(self.headers.items())
def msg(self):
        raise NotImplementedError
class BaseMockHttpObject(object):
def _get_method_name(self, type, use_param, qs, path):
path = path.split('?')[0]
meth_name = path.replace('/', '_').replace('.', '_').replace('-', '_')
if type:
meth_name = '%s_%s' % (meth_name, self.type)
if use_param and use_param in qs:
param = qs[use_param][0].replace('.', '_').replace('-', '_')
meth_name = '%s_%s' % (meth_name, param)
if meth_name == '':
meth_name = 'root'
return meth_name
class MockHttp(BaseMockHttpObject):
"""
A mock HTTP client/server suitable for testing purposes. This replaces
`HTTPConnection` by implementing its API and returning a mock response.
Define methods by request path, replacing slashes (/) with underscores (_).
Each of these mock methods should return a tuple of:
(int status, str body, dict headers, str reason)
>>> mock = MockHttp('localhost', 8080)
>>> mock.request('GET', '/example/')
>>> response = mock.getresponse()
>>> response.body.read()
'Hello World!'
>>> response.status
200
>>> response.getheaders()
[('X-Foo', 'libcloud')]
>>> MockHttp.type = 'fail'
>>> mock.request('GET', '/example/')
>>> response = mock.getresponse()
>>> response.body.read()
'Oh Noes!'
>>> response.status
403
>>> response.getheaders()
[('X-Foo', 'fail')]
"""
responseCls = MockResponse
host = None
port = None
response = None
type = None
use_param = None # will use this param to namespace the request function
test = None # TestCase instance which is using this mock
proxy_url = None
def __init__(self, host, port, *args, **kwargs):
self.host = host
self.port = port
def request(self, method, url, body=None, headers=None, raw=False):
# Find a method we can use for this request
parsed = urlparse.urlparse(url)
scheme, netloc, path, params, query, fragment = parsed
qs = parse_qs(query)
if path.endswith('/'):
path = path[:-1]
meth_name = self._get_method_name(type=self.type,
use_param=self.use_param,
qs=qs, path=path)
meth = getattr(self, meth_name.replace('%', '_'))
if self.test and isinstance(self.test, LibcloudTestCase):
self.test._add_visited_url(url=url)
self.test._add_executed_mock_method(method_name=meth_name)
status, body, headers, reason = meth(method, url, body, headers)
self.response = self.responseCls(status, body, headers, reason)
def getresponse(self):
return self.response
def connect(self):
"""
Can't think of anything to mock here.
"""
pass
def close(self):
pass
def set_http_proxy(self, proxy_url):
self.proxy_url = proxy_url
# Mock request/response example
def _example(self, method, url, body, headers):
"""
Return a simple message and header, regardless of input.
"""
return (httplib.OK, 'Hello World!', {'X-Foo': 'libcloud'},
httplib.responses[httplib.OK])
def _example_fail(self, method, url, body, headers):
return (httplib.FORBIDDEN, 'Oh Noes!', {'X-Foo': 'fail'},
httplib.responses[httplib.FORBIDDEN])
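# Illustrative sketch (not part of the original module): a subclass serves a
# request for '/api/nodes' from a method literally named '_api_nodes', per the
# slash-to-underscore convention described in the MockHttp docstring. The path
# and payload below are hypothetical.
class _ExampleMockHttp(MockHttp):
    def _api_nodes(self, method, url, body, headers):
        return (httplib.OK, '[]', {}, httplib.responses[httplib.OK])
# Usage: _ExampleMockHttp('localhost', 80).request('GET', '/api/nodes') followed
# by getresponse() returns a MockResponse whose body reads '[]'.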
class MockHttpTestCase(MockHttp, unittest.TestCase):
# Same as the MockHttp class, but you can also use assertions in the
# classes which inherit from this one.
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self)
if kwargs.get('host', None) and kwargs.get('port', None):
MockHttp.__init__(self, *args, **kwargs)
def runTest(self):
pass
def assertUrlContainsQueryParams(self, url, expected_params, strict=False):
"""
Assert that provided url contains provided query parameters.
:param url: URL to assert.
:type url: ``str``
:param expected_params: Dictionary of expected query parameters.
:type expected_params: ``dict``
:param strict: Assert that provided url contains only expected_params.
(defaults to ``False``)
:type strict: ``bool``
"""
question_mark_index = url.find('?')
if question_mark_index != -1:
url = url[question_mark_index + 1:]
params = dict(parse_qsl(url))
if strict:
self.assertDictEqual(params, expected_params)
else:
for key, value in expected_params.items():
self.assertEqual(params[key], value)
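# Illustrative sketch (not part of the original module): in non-strict mode the
# helper above only checks that the expected keys/values are present in the
# URL's query string; the URL below is hypothetical.
def _demo_assert_url_query_params():
    tc = MockHttpTestCase()
    tc.assertUrlContainsQueryParams('https://example.com/a?x=1&y=2', {'x': '1'})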
class StorageMockHttp(MockHttp):
def putrequest(self, method, action, skip_host=0, skip_accept_encoding=0):
pass
def putheader(self, key, value):
pass
def endheaders(self):
pass
def send(self, data):
pass
class MockRawResponse(BaseMockHttpObject):
"""
Mock RawResponse object suitable for testing.
"""
type = None
responseCls = MockResponse
def __init__(self, connection):
super(MockRawResponse, self).__init__()
self._data = []
self._current_item = 0
self._status = None
self._response = None
self._headers = None
self._reason = None
self.connection = connection
def next(self):
if self._current_item == len(self._data):
raise StopIteration
value = self._data[self._current_item]
self._current_item += 1
return value
def __next__(self):
return self.next()
def _generate_random_data(self, size):
data = ''
current_size = 0
while current_size < size:
value = str(random.randint(0, 9))
value_size = len(value)
data += value
current_size += value_size
return data
@property
def response(self):
return self._get_response_if_not_available()
@property
def status(self):
self._get_response_if_not_available()
return self._status
@property
def headers(self):
self._get_response_if_not_available()
return self._headers
@property
def reason(self):
self._get_response_if_not_available()
return self._reason
def _get_response_if_not_available(self):
if not self._response:
meth_name = self._get_method_name(type=self.type,
use_param=False, qs=None,
path=self.connection.action)
meth = getattr(self, meth_name.replace('%', '_'))
result = meth(self.connection.method, None, None, None)
self._status, self._body, self._headers, self._reason = result
self._response = self.responseCls(self._status, self._body,
self._headers, self._reason)
return self._response
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Jack River'
from random import shuffle, randint
from itertools import cycle
CARD_2_VALUE = 13
CARD_A_VALUE = 12
class Card():
SUITS = ['Spades', 'Hearts', 'Diamonds', 'Clubs']
VALUES = range(3, 14) # value 12 = 'A' | value 13 = '2'
def __init__(self, suit, value):
if (suit in self.SUITS or suit == 'Joker') and value in self.VALUES:
self.suit = suit
self.value = value
else:
raise AttributeError()
def print_card(self):
print('%s %d\n' % (self.suit, self.value))
def cards_in_same_value(cards):
not_joker_cards = []
has_joker = False
for card in cards:
if card.suit == 'Joker':
has_joker = True
else:
not_joker_cards.append(card)
if has_joker and not_joker_cards[0].value == CARD_2_VALUE:
        # a joker can't replace a 2
raise AttributeError()
elif has_joker:
return cards_in_same_value(not_joker_cards)
else:
value = not_joker_cards[0].value
result = True
for card in cards:
if not card.value == value:
result = False
return result
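# Illustrative sketch (not part of the original script): jokers act as wild
# cards, so a pair plus a joker still counts as "same value", while a joker
# alongside a 2 (CARD_2_VALUE) raises AttributeError as noted above.
def _demo_same_value_check():
    assert cards_in_same_value([Card('Spades', 5), Card('Hearts', 5), Card('Joker', 3)])
    try:
        cards_in_same_value([Card('Spades', CARD_2_VALUE), Card('Joker', 3)])
    except AttributeError:
        pass  # expected: a joker cannot stand in for a 2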
def cards_in_sequence_pair_value(cards):
    # placeholder: validation of "sequence of pairs" hands is not implemented yet,
    # so any even-length hand passes this check
    return True
def compare_cards(a, b):
    # cmp-style comparator (used with sorted(..., cmp=...)): negative, zero or
    # positive depending on how the card values compare
    return a.value - b.value
def cards_in_sequence_value(cards):
not_joker_cards = []
has_joker = False
joker_count = 0
for card in cards:
if card.suit == 'Joker':
has_joker = True
joker_count += 1
else:
not_joker_cards.append(card)
if has_joker and not_joker_cards[0].value == CARD_2_VALUE:
        # a joker can't replace a 2
raise AttributeError()
elif has_joker:
not_joker_cards = sorted(not_joker_cards, cmp=compare_cards)
return ((not_joker_cards[-1].value - not_joker_cards[0].value) + 1) == (len(not_joker_cards) + joker_count)
else:
not_joker_cards = sorted(not_joker_cards, cmp=compare_cards)
return (not_joker_cards[-1].value - not_joker_cards[0].value + 1) == len(not_joker_cards)
def cards_in_allowed_patterns(cards):
if len(cards) == 1:
# single card
return 1
elif len(cards) == 2:
# pair of same cards
return cards[0].value == cards[1].value
elif len(cards) == 3 and cards_in_same_value(cards):
# three same cards
return True
elif len(cards) == 4 and cards_in_same_value(cards):
# bomb
return True
elif len(cards) >= 3 and cards_in_sequence_value(cards):
# sequence of cards >= 3
return True
elif len(cards) >= 4 and len(cards) % 2 == 0 and cards_in_sequence_pair_value(cards):
# sequence pair of cards >= 4
return True
class Deck():
def __init__(self):
self.cards = []
for suit in Card.SUITS:
for value in Card.VALUES:
self.cards.append(Card(suit, value))
# insert two joker cards
self.cards.extend([Card('Joker', 3), Card('Joker', 3)])
def shuffle(self):
shuffle(self.cards)
def deal_card(self):
return self.cards.pop()
class Player():
def __init__(self, nickname):
self.nickname = nickname
self.hand_cards = []
def take_card(self, card):
self.hand_cards.append(card)
def play(self, play_callback):
        print('Hello %s, you have the following cards:\n' % self.nickname)
index = 0
for card in self.hand_cards:
print('%d: ' % index)
index += 1
card.print_card()
played_cards = []
        input_numbers = [int(x) for x in raw_input('Choose some cards to play (indexes separated by spaces): ').split(' ')]
        # pop from the highest index first so the remaining indexes stay valid
        input_numbers = sorted(set(input_numbers), reverse=True)
for index in input_numbers:
if index < len(self.hand_cards):
played_cards.append(self.hand_cards.pop(index))
else:
raise IndexError('Index out of range!')
if not play_callback(played_cards, self):
# if played invalid cards, then try again
print('Invalid played cards, try again!')
self.hand_cards.extend(played_cards)
self.play(play_callback)
class Game():
def __init__(self):
self.deck = Deck()
self.dealer = None
self.all_players = []
self.players = []
self.previous_played_cards = None
self.winner = None
def valid_game(self):
""" if it is a valid game to start
"""
return len(self.all_players) >= 2
def join_game(self, player):
""" player join current game
"""
self.players.append(player)
self.all_players.append(player)
def decide_dealer(self):
""" choose a player become dealer randomly
"""
self.dealer = self.players.pop(randint(0, len(self.all_players) - 1))
def deal(self):
""" deal initial cards
"""
for i in range(0, 6):
self.dealer.take_card(self.deck.deal_card())
for player in self.players:
for i in range(0, 5):
player.take_card(self.deck.deal_card())
def decide(self, cards):
""" decide if player give out cards following the game rules
"""
if not self.previous_played_cards and cards_in_allowed_patterns(cards):
# initial
self.previous_played_cards = cards
return True
if len(cards) == 1 and len(self.previous_played_cards) == 1:
# single card
            if len(self.previous_played_cards) == 1 and cards[0].value == self.previous_played_cards[0].value + 1:
self.previous_played_cards = cards
return True
else:
return False
elif len(cards) == 2 and len(self.previous_played_cards) == 2:
if cards_in_same_value(cards) and cards_in_same_value(self.previous_played_cards) and cards[0].value == self.previous_played_cards[0].value + 1:
self.previous_played_cards = cards
return True
else:
return False
        if len(cards) == 3 and len(self.previous_played_cards) == 3 and cards_in_same_value(cards) and cards_in_same_value(self.previous_played_cards):
            if cards[0].value == self.previous_played_cards[0].value:
                self.previous_played_cards = cards
                return True
        return False
def is_game_ended(self):
""" decide if current game is ended
Condition:
deck is empty or one of the player played out all cards
"""
game_ended = False
for player in self.all_players:
if len(player.hand_cards) == 0:
# played all cards
game_ended = True
self.winner = player
if len(self.deck.cards) == 0:
game_ended = True
self.winner = None
return game_ended
def play_callback(self, cards, player):
""" play callback function after player give out cards
"""
return self.decide(cards)
def start(self):
""" start a game
"""
if not self.valid_game():
raise AttributeError('Invalid game!')
self.decide_dealer()
self.deal()
for player in cycle(self.all_players):
if self.is_game_ended():
break
player.play(self.play_callback)
if self.winner:
print('Winner is %s!' % self.winner.nickname)
else:
print('A tie!')
|
|
"""
Control the OpenBSD packet filter (PF).
:codeauthor: Jasper Lievisse Adriaanse <[email protected]>
.. versionadded:: 2019.2.0
"""
import logging
import re
import salt.utils.path
from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
def __virtual__():
"""
    Only works on OpenBSD and FreeBSD for now; other systems with pf (macOS,
    NetBSD, etc.) need to be tested before enabling them.
"""
tested_oses = ["FreeBSD", "OpenBSD"]
if __grains__["os"] in tested_oses and salt.utils.path.which("pfctl"):
return True
return (
False,
"The pf execution module cannot be loaded: either the OS ({}) is not "
"tested or the pfctl binary was not found".format(__grains__["os"]),
)
def enable():
"""
Enable the Packet Filter.
CLI Example:
.. code-block:: bash
salt '*' pf.enable
"""
ret = {}
result = __salt__["cmd.run_all"](
"pfctl -e", output_loglevel="trace", python_shell=False
)
if result["retcode"] == 0:
ret = {"comment": "pf enabled", "changes": True}
else:
# If pf was already enabled the return code is also non-zero.
# Don't raise an exception in that case.
if result["stderr"] == "pfctl: pf already enabled":
ret = {"comment": "pf already enabled", "changes": False}
else:
raise CommandExecutionError(
"Could not enable pf",
info={"errors": [result["stderr"]], "changes": False},
)
return ret
def disable():
"""
Disable the Packet Filter.
CLI Example:
.. code-block:: bash
salt '*' pf.disable
"""
ret = {}
result = __salt__["cmd.run_all"](
"pfctl -d", output_loglevel="trace", python_shell=False
)
if result["retcode"] == 0:
ret = {"comment": "pf disabled", "changes": True}
else:
# If pf was already disabled the return code is also non-zero.
# Don't raise an exception in that case.
if result["stderr"] == "pfctl: pf not enabled":
ret = {"comment": "pf already disabled", "changes": False}
else:
raise CommandExecutionError(
"Could not disable pf",
info={"errors": [result["stderr"]], "changes": False},
)
return ret
def loglevel(level):
"""
Set the debug level which limits the severity of log messages printed by ``pf(4)``.
level:
Log level. Should be one of the following: emerg, alert, crit, err, warning, notice,
info or debug (OpenBSD); or none, urgent, misc, loud (FreeBSD).
CLI Example:
.. code-block:: bash
salt '*' pf.loglevel emerg
"""
    # There's no way to get the previous loglevel, so assume we've always
    # made a change.
ret = {"changes": True}
myos = __grains__["os"]
if myos == "FreeBSD":
all_levels = ["none", "urgent", "misc", "loud"]
else:
all_levels = [
"emerg",
"alert",
"crit",
"err",
"warning",
"notice",
"info",
"debug",
]
if level not in all_levels:
raise SaltInvocationError("Unknown loglevel: {}".format(level))
result = __salt__["cmd.run_all"](
"pfctl -x {}".format(level), output_loglevel="trace", python_shell=False
)
if result["retcode"] != 0:
raise CommandExecutionError(
"Problem encountered setting loglevel",
info={"errors": [result["stderr"]], "changes": False},
)
return ret
def load(file="/etc/pf.conf", noop=False):
"""
Load a ruleset from the specific file, overwriting the currently loaded ruleset.
file:
Full path to the file containing the ruleset.
noop:
Don't actually load the rules, just parse them.
CLI Example:
.. code-block:: bash
salt '*' pf.load /etc/pf.conf.d/lockdown.conf
"""
# We cannot precisely determine if loading the ruleset implied
# any changes so assume it always does.
ret = {"changes": True}
cmd = ["pfctl", "-f", file]
if noop:
ret["changes"] = False
cmd.append("-n")
result = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
if result["retcode"] != 0:
raise CommandExecutionError(
"Problem loading the ruleset from {}".format(file),
info={"errors": [result["stderr"]], "changes": False},
)
return ret
def flush(modifier):
"""
Flush the specified packet filter parameters.
modifier:
Should be one of the following:
- all
- info
- osfp
- rules
- sources
- states
- tables
Please refer to the OpenBSD `pfctl(8) <https://man.openbsd.org/pfctl#T>`_
documentation for a detailed explanation of each command.
CLI Example:
.. code-block:: bash
salt '*' pf.flush states
"""
ret = {}
all_modifiers = ["rules", "states", "info", "osfp", "all", "sources", "tables"]
# Accept the following two modifiers to allow for a consistent interface between
# pfctl(8) and Salt.
capital_modifiers = ["Sources", "Tables"]
all_modifiers += capital_modifiers
if modifier.title() in capital_modifiers:
modifier = modifier.title()
if modifier not in all_modifiers:
raise SaltInvocationError("Unknown modifier: {}".format(modifier))
cmd = "pfctl -v -F {}".format(modifier)
result = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
if result["retcode"] == 0:
if re.match(r"^0.*", result["stderr"]):
ret["changes"] = False
else:
ret["changes"] = True
ret["comment"] = result["stderr"]
else:
raise CommandExecutionError(
"Could not flush {}".format(modifier),
info={"errors": [result["stderr"]], "changes": False},
)
return ret
def table(command, table, **kwargs):
"""
Apply a command on the specified table.
table:
Name of the table.
command:
Command to apply to the table. Supported commands are:
- add
- delete
- expire
- flush
- kill
- replace
- show
- test
- zero
Please refer to the OpenBSD `pfctl(8) <https://man.openbsd.org/pfctl#T>`_
documentation for a detailed explanation of each command.
CLI Example:
.. code-block:: bash
salt '*' pf.table expire table=spam_hosts number=300
salt '*' pf.table add table=local_hosts addresses='["127.0.0.1", "::1"]'
"""
ret = {}
all_commands = [
"kill",
"flush",
"add",
"delete",
"expire",
"replace",
"show",
"test",
"zero",
]
if command not in all_commands:
raise SaltInvocationError("Unknown table command: {}".format(command))
cmd = ["pfctl", "-t", table, "-T", command]
if command in ["add", "delete", "replace", "test"]:
cmd += kwargs.get("addresses", [])
elif command == "expire":
number = kwargs.get("number", None)
        if not number:
            raise SaltInvocationError("need 'number' argument for expire command")
        else:
            cmd.append(str(number))
    result = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
if result["retcode"] == 0:
if command == "show":
ret = {"comment": result["stdout"].split()}
elif command == "test":
ret = {"comment": result["stderr"], "matches": True}
else:
if re.match(r"^(0.*|no changes)", result["stderr"]):
ret["changes"] = False
else:
ret["changes"] = True
ret["comment"] = result["stderr"]
else:
# 'test' returns a non-zero code if the address didn't match, even if
# the command itself ran fine; also set 'matches' to False since not
# everything matched.
if command == "test" and re.match(
r"^\d+/\d+ addresses match.$", result["stderr"]
):
ret = {"comment": result["stderr"], "matches": False}
else:
raise CommandExecutionError(
"Could not apply {} on table {}".format(command, table),
info={"errors": [result["stderr"]], "changes": False},
)
return ret
def show(modifier):
"""
Show filter parameters.
modifier:
Modifier to apply for filtering. Only a useful subset of what pfctl supports
can be used with Salt.
- rules
- states
- tables
CLI Example:
.. code-block:: bash
salt '*' pf.show rules
"""
# By definition showing the parameters makes no changes.
ret = {"changes": False}
capital_modifiers = ["Tables"]
all_modifiers = ["rules", "states", "tables"]
all_modifiers += capital_modifiers
if modifier.title() in capital_modifiers:
modifier = modifier.title()
if modifier not in all_modifiers:
raise SaltInvocationError("Unknown modifier: {}".format(modifier))
cmd = "pfctl -s {}".format(modifier)
result = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
if result["retcode"] == 0:
ret["comment"] = result["stdout"].split("\n")
else:
raise CommandExecutionError(
"Could not show {}".format(modifier),
info={"errors": [result["stderr"]], "changes": False},
)
return ret
|
|
#9DOF.py
# script to acquire data from an I2C 9DOF sensor
#
# Oriol Sanchez, 9/19/2013
#
# Changelog:
# V0 9/17/2013
#
# Readme:
#
# Instructions: - execute python 9DOF.py from the shell on the ARM board
#               - view the data in the shell, or retrieve it afterwards
#                 from the txt file created by the script
#
import smbus #I2C library
import time #Time library
import datetime #Timestamp library
import ctypes #Conversion Type library
import os
addr=0x68
maddr=0x0C
#Define a new class called Accel
class Accel():
#Select the I2C device on /dev/i2c-0
b=smbus.SMBus(0)
def config(self):
#SET Internal clock to work as z axis gyro as a reference with a phase loop lock
self.b.write_byte_data(0x68,0x6B,0x03)
#CONFIG THE SYSTEM
#REGISTER 19, SAMPLE RATE
self.b.write_byte_data(addr,0x19,0x07)
#CONFIG REGISTER on 0x1A, data: [ExtSync][DLPF] : 0x010
self.b.write_byte_data(addr,0x1A,0x00)
#CONFIG GYRO on 0x1B, test disabled and scale selected 500 degrees/s
self.b.write_byte_data(addr,0x1B,0x08)
#CONFIG ACCEL on 0x1C, test disabled and scale +-2g without DHPF
self.b.write_byte_data(addr,0x1C,0x00)
#CONFIG Freefall threshold of 0mg
#self.b.write_byte_data(addr,0x1D,0x00)
#CONFIG Freefall duration limit of 0
#self.b.write_byte_data(addr,0x1E,0x00)
#CONFIG Motion threshold of 0mg
#self.b.write_byte_data(addr,0x1F,0x00)
#CONFIG Motion duration limit of 0
#self.b.write_byte_data(addr,0x20,0x00)
#CONFIG Zero Motion threshold
#self.b.write_byte_data(addr,0x21,0x00)
#CONFIG Zero Motion duration limit
#self.b.write_byte_data(addr,0x22,0x00)
#DISABLE Sensor output to FIFO buffer
#self.b.write_byte_data(addr,0x23,0x00)
#AUX I2C SETUP
#self.b.write_byte_data(addr,0x24,0x00)
#I2C SLAVES SETUP
#SLAVE0
self.b.write_byte_data(addr,0x25,0x00)
self.b.write_byte_data(addr,0x26,0x00)
self.b.write_byte_data(addr,0x27,0x00)
#SLAVE1
self.b.write_byte_data(addr,0x28,0x00)
self.b.write_byte_data(addr,0x29,0x00)
self.b.write_byte_data(addr,0x2A,0x00)
#SLAVE2
self.b.write_byte_data(addr,0x2B,0x00)
self.b.write_byte_data(addr,0x2C,0x00)
self.b.write_byte_data(addr,0x2D,0x00)
#SLAVE3
self.b.write_byte_data(addr,0x2E,0x00)
self.b.write_byte_data(addr,0x2F,0x00)
self.b.write_byte_data(addr,0x30,0x00)
#SLAVE4
self.b.write_byte_data(addr,0x31,0x00)
self.b.write_byte_data(addr,0x32,0x00)
self.b.write_byte_data(addr,0x33,0x00)
self.b.write_byte_data(addr,0x34,0x00)
self.b.write_byte_data(addr,0x35,0x00)
#INT pin
self.b.write_byte_data(addr,0x37,0x00)
#DATA Interrupt
self.b.write_byte_data(addr,0x38,0x00)
#SLAVE Out, don't care
self.b.write_byte_data(addr,0x63,0x00)
self.b.write_byte_data(addr,0x64,0x00)
self.b.write_byte_data(addr,0x65,0x00)
self.b.write_byte_data(addr,0x66,0x00)
time.sleep(0.002)
return 1
def getGValue(self):
gxh = self.b.read_byte_data(addr,0x43) #GyroxH
gxl = self.b.read_byte_data(addr,0x44) #GyroxL
#print gxh
#print gxl
gx = ((gxh << 8) | gxl)
gyh = self.b.read_byte_data(addr,0x45) #GyroyH
gyl = self.b.read_byte_data(addr,0x46) #GyroyL
gy = ((gyh << 8) | gyl)
gzh = self.b.read_byte_data(addr,0x47) #GyrozH
gzl = self.b.read_byte_data(addr,0x48) #GyrozL
gz = ((gzh << 8) | gzl)
        # convert from unsigned 16-bit to signed (two's complement)
        if gx > 32767:
            gx = gx - 65536
        if gy > 32767:
            gy = gy - 65536
        if gz > 32767:
            gz = gz - 65536
        # scale raw counts to degrees/s for the +-500 deg/s range
        gx = (gx * 500.0) / 32767
        gy = (gy * 500.0) / 32767
        gz = (gz * 500.0) / 32767
return (gx,gy,gz)
def getAValue(self):
axh = self.b.read_byte_data(addr,0x3B) #AccelH
axl = self.b.read_byte_data(addr,0x3C) #AccelL
#print axh
#print axl
ax = float((axh << 8) | axl)
ayh = self.b.read_byte_data(addr,0x3D) #AccelH
ayl = self.b.read_byte_data(addr,0x3E) #AccelL
ay = float((ayh << 8) | ayl)
azh = self.b.read_byte_data(addr,0x3F) #AccelH
azl = self.b.read_byte_data(addr,0x40) #AccelL
az = float((azh << 8) | azl)
        # convert from unsigned 16-bit to signed (two's complement)
        if ax > 32767:
            ax = ax - 65536
        if ay > 32767:
            ay = ay - 65536
        if az > 32767:
            az = az - 65536
        # scale raw counts to g for the +-2 g range
        ax = (ax * 2) / 32767
        ay = (ay * 2) / 32767
        az = (az * 2) / 32767
return (ax,ay,az)
def getMValue(self):
#BYPASS MAIN I2C to Aux I2C
self.b.write_byte_data(addr,0x37,0x02)
#CONTROL BYTE set to single measurement mode
self.b.write_byte_data(maddr,0x0A,0x01)
time.sleep(0.01)
mxh = self.b.read_byte_data(maddr,0x04) #XMagneH
mxl = self.b.read_byte_data(maddr,0x03) #XMagneL
myh = self.b.read_byte_data(maddr,0x06) #YMagneH
myl = self.b.read_byte_data(maddr,0x05) #YMagneL
mzh = self.b.read_byte_data(maddr,0x08) #ZMagneH
mzl = self.b.read_byte_data(maddr,0x07) #ZMagneL
#CONFIG System to acces the Fuse ROM, to get the sensitivity adjustment
# self.b.write_byte_data(maddr,0x0A,0x0F)
# asax= self.b.read_byte_data(maddr,0x10) #Sensitivity Adjustment
# asay= self.b.read_byte_data(maddr,0x11) #Sensitivity Adjustment
# asaz= self.b.read_byte_data(maddr,0x12) #Sensitivity Adjustment
# print asax
# print asay
# print asaz
#CONFIG System again in Single measurement mode
# self.b.write_byte_data(maddr,0x0A,0x01)
#UNDO THE BYPASS OF MAIN I2C to Aux I2C
# self.b.write_byte_data(addr,0x37,0x00)
#BUILD the 16bit data and adjust it throught the sensitivity as stated in the manual
mx = float((mxh << 8) | mxl)
my = float((myh << 8) | myl)
mz = float((mzh << 8) | mzl)
        # convert from unsigned 16-bit to signed (two's complement)
        if mx > 32767:
            mx = mx - 65536
        if my > 32767:
            my = my - 65536
        if mz > 32767:
            mz = mz - 65536
mx=mx*.3
my=my*.3
mz=mz*.3
return (mx,my,mz)
def getTValue(self):
temph = self.b.read_byte_data(addr,0x41) #TempH
templ = self.b.read_byte_data(addr,0x42) #TempL
temp = ((temph << 8) | templ)
        if temp > 32767:
            temp = temp - 65536
temp = ((float(temp)/340) +35)
return(temp)
#create an accel object called mpu9150
#mpu9150 = Accel()
mpu9150=Accel()
mpu9150.config()
i=0
(gx,gy,gz)=mpu9150.getGValue()
(ax,ay,az)=mpu9150.getAValue()
(mx,my,mz)=mpu9150.getMValue()
gx3=gx2=gx1=pitch=0
gy3=gy2=gy1=roll=0
gz3=gz2=gz1=yaw=0
while 1:
i=i+1
#Update integration values
gx3=gx2
gx2=gx1
gx1=gx
gy3=gy2
gy2=gy1
gy1=gy
gz3=gz2
gz2=gz1
gz1=gz
(gx,gy,gz)=mpu9150.getGValue()
(ax,ay,az)=mpu9150.getAValue()
(mx,my,mz)=mpu9150.getMValue()
tmp=mpu9150.getTValue()
    # Integrate the angular velocity (Runge-Kutta style weighted average)
pitch=pitch+(gx3+(gx2*2)+(gx1*2)+gx)/6
roll=roll+(gy3+(gy2*2)+(gy1*2)+gy)/6
yaw=yaw+(gz3+(gz2*2)+(gz1*2)+gz)/6
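    # Note (added for clarity, not in the original script): the (1, 2, 2, 1)/6
    # weights above are an RK4-style weighted average of the four most recent
    # gyro readings; since the loop sleeps about 1 s per iteration, the deg/s
    # values accumulate roughly into degrees.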
if i==1:
i=0
os.system('clear')
#filename = raw_input("Give name for the file: ")
filename="testdata.txt"
target = open (filename, 'a') ## a will append, w will over-write
ts=time.time()
st=datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print st
print "Pitch: %d degrees" % (pitch)
print "Roll: %d degrees" % (roll)
print "Yaw: %d degrees" % (yaw)
print "Gyroscope X:%d" % (gx)
print "Gyroscope Y:%d" % (gy)
print "Gyroscope Z:%d" % (gz)
print "Accelerometer X:%d" % (ax)
print "Accelerometer Y:%d" % (ay)
print "Accelerometer Z:%d" % (az)
print "Magnetometer X:%d" % (mx)
print "Magnetometer Y:%d" % (my)
print "Magnetometer Z:%d" % (mz)
target.write(st)
target.write(",")
target.write(str(gx))
target.write(",")
target.write(str(gy))
target.write(",")
target.write(str(gz))
target.write(",")
target.write(str(ax))
target.write(",")
target.write(str(ay))
target.write(",")
target.write(str(az))
target.write(",")
target.write(str(mx))
target.write(",")
target.write(str(my))
target.write(",")
target.write(str(mz))
target.write(",")
target.write(str(tmp))
target.write("\n")
target.close()
time.sleep(1)
|
|
__author__ = 'CwT'
from idaapi import *
from idautils import *
from idc import *
import struct
global baseAddr
def writefile(file, addr, len):
for i in range(len):
one = getByte(addr+i)
file.write(struct.pack("B", one))
def getDword(addr):
return Dword(addr)
def getByte(addr):
return Byte(addr)
def getWord(addr):
return Word(addr)
def dexGetStringData(dexfile, offset):
addr = dexfile.baseAddr + offset
while getByte(addr) > 0x7f: # skip uleb len
addr += 1
addr += 1
str = ""
one = getByte(addr)
while one != 0:
str += chr(one)
addr += 1
one = getByte(addr)
return str
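# Note (added for clarity, not in the original script): a DEX string_data_item
# is a ULEB128 utf16_size followed by MUTF-8 encoded bytes; the loop above skips
# the length prefix and then collects bytes up to the terminating NUL.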
def dexGetStringId(dexfile, idx):
return getDword(dexfile.pStringIds+4*idx)
def dexStringById(dexfile, idx):
offset = dexGetStringId(dexfile, idx)
return dexGetStringData(dexfile, offset)
def dexGetTypeId(dexfile, idx):
return getDword(dexfile.pTypeIds+4*idx)
def dexStringByTypeIdx(dexfile, idx):
return dexStringById(dexfile, dexGetTypeId(dexfile, idx))
def dexGetClassDescriptor(dexfile, classdef):
return dexStringByTypeIdx(dexfile, classdef.classIdx)
def slashtodot(str):
ret = ""
for i in str:
if i == '/':
ret += '.'
elif i == ';':
continue
else:
ret += i
return ret
def rightshift(value, n):
mask = 0x80000000
check = value & mask
if check != mask:
return value >> n
else:
submask = mask
for loop in range(0, n):
submask = (submask | (mask >> loop))
strdata = struct.pack("I", submask | (value >> n))
ret = struct.unpack("i", strdata)[0]
return ret
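# Illustrative sketch (not part of the original script): rightshift() emulates
# an arithmetic (sign-extending) right shift on a 32-bit value, which a plain
# Python ">>" on the unsigned representation would not do.
def _demo_rightshift():
    assert rightshift(0x00000010, 2) == 4    # positive value: same as >>
    assert rightshift(0xFFFFFFF0, 2) == -4   # negative 32-bit value: sign bits kept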
def readunsignedleb128(addr):
res = getByte(addr)
len = 1
if res > 0x7f:
cur = getByte(addr + 1)
res = (res & 0x7f) | ((cur & 0x7f) << 7)
len = 2
if cur > 0x7f:
cur = getByte(addr + 2)
res |= (cur & 0x7f) << 14
len = 3
if cur > 0x7f:
cur = getByte(addr + 3)
res |= (cur & 0x7f) << 21
len = 4
if cur > 0x7f:
cur = getByte(addr + 4)
res |= cur << 28
len = 5
return res, len
def readsignedleb128(addr):
res = getByte(addr)
len = 1
if res <= 0x7f:
res = rightshift((res << 25), 25)
else:
cur = getByte(addr + 1)
res = (res & 0x7f) | ((cur & 0x7f) << 7)
len = 2
if cur <= 0x7f:
res = rightshift((res << 18), 18)
else:
cur = getByte(addr + 2)
res |= (cur & 0x7f) << 14
len = 3
if cur <= 0x7f:
res = rightshift((res << 11), 11)
else:
cur = getByte(addr + 3)
res |= (cur & 0x7f) << 21
len = 4
if cur <= 0x7f:
res = rightshift((res << 4), 4)
else:
cur = getByte(addr + 4)
res |= cur << 28
len = 5
return res, len
def writesignedleb128(num, file):
if num >= 0:
writeunsignedleb128(num, file)
else:
mask = 0x80000000
for i in range(0, 32):
tmp = num & mask
mask >>= 1
if tmp == 0:
break
loop = 32 - i + 1
while loop > 7:
cur = num & 0x7f | 0x80
num >>= 7
file.write(struct.pack("B", cur))
loop -= 7
cur = num & 0x7f
file.write(struct.pack("B", cur))
def signedleb128forlen(num):
if num >= 0:
return unsignedleb128forlen(num)
else:
mask = 0x80000000
for i in range(0, 32):
tmp = num & mask
mask >>= 1
if tmp == 0:
break
loop = 32 - i + 1
if loop % 7 == 0:
return loop / 7
else:
return loop / 7 + 1
def writeunsignedleb128(num, file):
if num <= 0x7f:
file.write(struct.pack("B", num))
else:
cur = num & 0x7F | 0x80
file.write(struct.pack("B", cur))
num >>= 7
if num <= 0x7f:
file.write(struct.pack("B", num))
else:
cur = num & 0x7f | 0x80
file.write(struct.pack("B", cur))
num >>= 7
if num <= 0x7f:
file.write(struct.pack("B", num))
else:
cur = num & 0x7f | 0x80
file.write(struct.pack("B", cur))
num >>= 7
if num <= 0x7f:
file.write(struct.pack("B", num))
else:
cur = num & 0x7f | 0x80
file.write(struct.pack("B", cur))
num >>= 7
file.write(struct.pack("B", num))
def unsignedleb128forlen(num):
len = 1
temp = num
while num > 0x7f:
len += 1
num >>= 7
if len > 5:
print("error for unsignedleb128forlen", temp)
return len
def readunsignedleb128p1(addr):
res, len = readunsignedleb128(addr)
return res - 1, len
def writeunsignedleb128p1(num, file):
writeunsignedleb128(num+1, file)
def unsignedleb128p1forlen(num):
return unsignedleb128forlen(num+1)
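# Illustrative sketch (not part of the original script): ULEB128 stores 7 bits
# per byte, least-significant group first, with the high bit set on every byte
# except the last. This hypothetical round-check writes into an in-memory
# buffer instead of a dex file.
def _demo_uleb128_write():
    import io
    buf = io.BytesIO()
    writeunsignedleb128(300, buf)  # 300 -> 0xAC 0x02
    assert len(buf.getvalue()) == unsignedleb128forlen(300) == 2
    return buf.getvalue()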
class DvmDex:
def __init__(self):
self.pDexFile = 0
self.pHeader = 0 # it is a clone of dex file
# just for now
def dump(self, addr):
self.pDexFile = getDword(addr)
self.pHeader = getDword(addr + 4)
def printf(self):
# i wanna see the diff between the pDexFile.dexfile and pheader
print("dexfile addr is: ", hex(self.pDexFile))
print("header addr is: ", hex(self.pHeader))
class DexFile:
def __init__(self):
self.pOptHeader = 0
self.pHeader = 0
self.pStringIds = 0
self.pTypeIds = 0
self.pFieldIds = 0
self.pMethodIds = 0
self.pProtoIds = 0
self.pClassDefs = 0
self.pLinkData = 0
self.baseAddr = 0
self.OptHeader = OptHeader()
self.dexHeader = DexHeader()
def dump(self, addr):
global baseAddr
self.pOptHeader = getDword(addr)
self.pHeader = getDword(addr + 4)
self.pStringIds = getDword(addr + 8)
self.pTypeIds = getDword(addr + 12)
self.pFieldIds = getDword(addr + 16)
self.pMethodIds = getDword(addr + 20)
self.pProtoIds = getDword(addr + 24)
self.pClassDefs = getDword(addr + 28)
self.pLinkData = getDword(addr + 32)
self.baseAddr = getDword(addr + 44)
baseAddr = self.baseAddr
self.OptHeader.dump(self.pOptHeader)
self.dexHeader.dump(self.pHeader)
self.fixDexHeader()
def fixDexHeader(self):
self.dexHeader.stringIdsOff = self.pStringIds - self.pHeader
self.dexHeader.typeIdsOff = self.pTypeIds - self.pHeader
self.dexHeader.fieldIdsOff = self.pFieldIds - self.pHeader
self.dexHeader.methodIdsOff = self.pMethodIds - self.pHeader
self.dexHeader.protoIdsOff = self.pProtoIds - self.pHeader
self.dexHeader.classDefsOff = self.pClassDefs - self.pHeader
if self.dexHeader.dataOff == 0:
self.dexHeader.dataOff = self.dexHeader.classDefsOff + self.dexHeader.classDefsSize*32
# We should figure out a new method to fix the data size
# self.dexHeader.dataSize = 0x5DD28000 - self.baseAddr - self.dexHeader.dataOff
def copytofile(self):
classfile = open("classdef", "wb+")
extra = open("extra", "wb+")
num_class_def = self.dexHeader.classDefsSize
total_point = self.dexHeader.dataOff + self.dexHeader.dataSize
start = self.dexHeader.dataOff
end = total_point
while total_point&3:
total_point += 1
print "num class def:", num_class_def
for i in range(num_class_def):
classdef = DexClassDef()
classdef.dump(self.pClassDefs+32*i)
descriptor = dexGetClassDescriptor(self, classdef)
need_extra = False
need_pass = False
if descriptor.startswith("Landroid") or classdef.classDataOff == 0:
need_pass = True
else:
tmp = slashtodot(descriptor)
if classdef.classDataOff < start or classdef.classDataOff > end:
need_extra = True
classdata = ClassdataItem()
classdata.dump(int(self.baseAddr+classdef.classDataOff) & 0xffffffff)
if classdata.direct_methods_size:
for j in range(classdata.direct_methods_size):
method = classdata.direct_methods[j]
if method.access_flags & 0x100: # native func
if method.code_off:
need_extra = True
method.code_off = 0
continue
if (method.code_off < start or method.code_off > end) and method.code_off:
need_extra = True
codeitem = CodeItem()
codeitem.dump(int(self.baseAddr+method.code_off) & 0xffffffff)
writefile(extra, int(self.baseAddr+method.code_off) & 0xffffffff, codeitem.len)
method.code_off = total_point
total_point += codeitem.len
while (total_point & 3):
extra.write(struct.pack("B", 0))
total_point += 1
if classdata.virtual_methods_size:
for j in range(classdata.virtual_methods_size):
method = classdata.virtual_methods[j]
if method.access_flags & 0x100:
if method.code_off:
need_extra = True
method.code_off = 0
continue
if (method.code_off < start or method.code_off > end) and method.code_off:
need_extra = True
codeitem = CodeItem()
codeitem.dump(int(self.baseAddr+method.code_off) & 0xffffffff)
writefile(extra, int(self.baseAddr+method.code_off) & 0xffffffff, codeitem.len)
method.code_off = total_point
total_point += codeitem.len
while (total_point & 3):
extra.write(struct.pack("B", 0))
total_point += 1
if need_extra:
classdef.classDataOff = total_point
classdata.copytofile(extra)
classdata.recallLength() # re-calculate the length of this structure
total_point += classdata.len
while (total_point & 3):
extra.write(struct.pack("B", 0))
total_point += 1
print "des", descriptor
if need_pass:
classdef.classDataOff = 0
classdef.copytofile(classfile)
optdex = self.pOptHeader + self.OptHeader.depsOffset
if optdex != 0:
writefile(extra, optdex, self.OptHeader.optOffset-self.OptHeader.depsOffset+self.OptHeader.optLength)
extra.close()
classfile.close()
self.OptHeader.optOffset = total_point + self.OptHeader.optOffset - self.OptHeader.depsOffset + 40
self.OptHeader.depsOffset = total_point + 40
self.saveHeaderandData()
whole = open("whole.dex", "wb+")
with open("header", "rb") as header:
whole.writelines(header.readlines())
with open("classdef", "rb") as classfile:
whole.writelines(classfile.readlines())
with open("data", "rb") as data:
whole.writelines(data.readlines())
with open("extra", "rb") as extra:
whole.writelines(extra.readlines())
whole.close()
print("DONE")
def saveHeaderandData(self):
header = open("header", "wb+")
if self.pOptHeader != 0:
self.OptHeader.copytofile(header)
self.dexHeader.copytofile(header)
writefile(header, self.pStringIds, self.pClassDefs-self.pStringIds)
header.close()
data = open("data", "wb+")
addr = self.baseAddr + self.dexHeader.classDefsOff + self.dexHeader.classDefsSize*32
length = self.dexHeader.dataSize + self.dexHeader.dataOff - (addr - self.baseAddr)
writefile(data, addr, length)
data.close()
def printf(self):
print("dex head addr: ", hex(self.pHeader))
print("dex head addr: ", hex(self.baseAddr))
class DexClassDef:
def __init__(self):
self.classIdx = 0
self.accessFlags = 0
self.superclassIdx = 0
self.interfacesOff = 0
self.sourceFileIdx = 0
self.annotationsOff = 0
self.classDataOff = 0
self.staticValuesOff = 0
def dump(self, addr):
self.classIdx = getDword(addr)
self.accessFlags = getDword(addr + 4)
self.superclassIdx = getDword(addr + 8)
self.interfacesOff = getDword(addr + 12)
self.sourceFileIdx = getDword(addr + 16)
self.annotationsOff = getDword(addr + 20)
self.classDataOff = getDword(addr + 24)
self.staticValuesOff = getDword(addr + 28)
def copytofile(self, file):
file.write(struct.pack("I", self.classIdx))
file.write(struct.pack("I", self.accessFlags))
file.write(struct.pack("I", self.superclassIdx))
file.write(struct.pack("I", self.interfacesOff))
file.write(struct.pack("I", self.sourceFileIdx))
file.write(struct.pack("I", self.annotationsOff))
file.write(struct.pack("I", self.classDataOff))
file.write(struct.pack("I", self.staticValuesOff))
class DexHeader:
def __init__(self):
self.magic = []
self.checksum = 0
self.signature = []
self.fileSize = 0
self.headerSize = 0
self.endianTag = 0
self.linkSize = 0
self.linkOff = 0
self.mapOff = 0
self.stringIdsSize = 0
self.stringIdsOff = 0
self.typeIdsSize = 0
self.typeIdsOff = 0
self.protoIdsSize = 0
self.protoIdsOff = 0
self.fieldIdsSize = 0
self.fieldIdsOff = 0
self.methodIdsSize = 0
self.methodIdsOff = 0
self.classDefsSize = 0
self.classDefsOff = 0
self.dataSize = 0 # have it
self.dataOff = 0 # have it
def dump(self, addr):
len = 0
while len < 8:
self.magic.append(getByte(addr + len))
len += 1
self.checksum = getDword(addr + 8)
len = 0
while len < 20:
self.signature.append(getByte(addr + 12 + len))
len += 1
self.fileSize = getDword(addr + 32)
self.headerSize = getDword(addr + 36)
self.endianTag = getDword(addr + 40)
self.linkSize = getDword(addr + 44)
self.linkOff = getDword(addr + 48)
self.mapOff = getDword(addr + 52)
self.stringIdsSize = getDword(addr + 56)
self.stringIdsOff = getDword(addr + 60)
self.typeIdsSize = getDword(addr + 64)
self.typeIdsOff = getDword(addr + 68)
self.protoIdsSize = getDword(addr + 72)
self.protoIdsOff = getDword(addr + 76)
self.fieldIdsSize = getDword(addr + 80)
self.fieldIdsOff = getDword(addr + 84)
self.methodIdsSize = getDword(addr + 88)
self.methodIdsOff = getDword(addr + 92)
self.classDefsSize = getDword(addr + 96)
self.classDefsOff = getDword(addr + 100)
self.dataSize = getDword(addr + 104)
self.dataOff = getDword(addr + 108)
def printf(self):
print "string off", self.stringIdsOff
print "type off", self.typeIdsOff
print "proto off", self.protoIdsOff
print "field off", self.fieldIdsOff
print "method off", self.methodIdsOff
print "classdef off", self.classDefsOff
print "classdef size:", self.classDefsSize
def copytofile(self, file):
len = 0
while len < 8:
file.write(struct.pack("B", self.magic[len]))
len += 1
file.write(struct.pack("I", self.checksum))
len = 0
while len < 20:
file.write(struct.pack("B", self.signature[len]))
len += 1
file.write(struct.pack("I", self.fileSize))
file.write(struct.pack("I", self.headerSize))
file.write(struct.pack("I", self.endianTag))
file.write(struct.pack("I", self.linkSize))
file.write(struct.pack("I", self.linkOff))
file.write(struct.pack("I", self.mapOff))
file.write(struct.pack("I", self.stringIdsSize))
file.write(struct.pack("I", self.stringIdsOff))
file.write(struct.pack("I", self.typeIdsSize))
file.write(struct.pack("I", self.typeIdsOff))
file.write(struct.pack("I", self.protoIdsSize))
file.write(struct.pack("I", self.protoIdsOff))
file.write(struct.pack("I", self.fieldIdsSize))
file.write(struct.pack("I", self.fieldIdsOff))
file.write(struct.pack("I", self.methodIdsSize))
file.write(struct.pack("I", self.methodIdsOff))
file.write(struct.pack("I", self.classDefsSize))
file.write(struct.pack("I", self.classDefsOff))
file.write(struct.pack("I", self.dataSize))
file.write(struct.pack("I", self.dataOff))
class OptHeader:
def __init__(self):
self.magic = [] # take 8 bytes
self.dexoffset = 0
self.dexLength = 0
self.depsOffset = 0
self.depsLength = 0
self.optOffset = 0
self.optLength = 0
self.flag = 0
self.checksum = 0
def dump(self, addr):
if addr == 0:
return
len = 0
while len < 8:
self.magic.append(getByte(addr + len))
len += 1
self.dexoffset = getDword(addr+8)
self.dexLength = getDword(addr+12)
self.depsOffset = getDword(addr+16)
self.depsLength = getDword(addr+20)
self.optOffset = getDword(addr+24)
self.optLength = getDword(addr+28)
self.flag = getDword(addr+32)
self.checksum = getDword(addr+36)
def copytofile(self, file):
len = 0
while len < 8:
file.write(struct.pack("B", self.magic[len]))
len += 1
file.write(struct.pack("I", self.dexoffset))
file.write(struct.pack("I", self.dexLength))
file.write(struct.pack("I", self.depsOffset))
file.write(struct.pack("I", self.depsLength))
file.write(struct.pack("I", self.optOffset))
file.write(struct.pack("I", self.optLength))
file.write(struct.pack("I", self.flag))
file.write(struct.pack("I", self.checksum))
class ClassdataItem:
def __init__(self):
self.len = 0
self.static_field_size = 0
self.instance_fields_size = 0
self.direct_methods_size = 0
self.virtual_methods_size = 0
self.static_fields = []
self.instance_fields = []
self.direct_methods = []
self.virtual_methods = []
def dump(self, addr):
self.static_field_size, length = readunsignedleb128(addr)
self.len += length
self.instance_fields_size, length = readunsignedleb128(addr + self.len)
self.len += length
self.direct_methods_size, length = readunsignedleb128(addr + self.len)
self.len += length
self.virtual_methods_size, length = readunsignedleb128(addr + self.len)
self.len += length
for i in range(0, self.static_field_size):
field = Encodedfield()
field.dump(addr + self.len)
self.len += field.len
self.static_fields.append(field)
for i in range(0, self.instance_fields_size):
field = Encodedfield()
field.dump(addr + self.len)
self.len += field.len
self.instance_fields.append(field)
for i in range(0, self.direct_methods_size):
method = Encodedmethod()
method.dump(addr + self.len)
self.len += method.len
self.direct_methods.append(method)
for i in range(0, self.virtual_methods_size):
method = Encodedmethod()
method.dump(addr + self.len)
self.len += method.len
self.virtual_methods.append(method)
def recallLength(self):
self.len = 0
self.len += unsignedleb128forlen(self.static_field_size)
self.len += unsignedleb128forlen(self.instance_fields_size)
self.len += unsignedleb128forlen(self.direct_methods_size)
self.len += unsignedleb128forlen(self.virtual_methods_size)
for i in range(0, self.static_field_size):
self.len += self.static_fields[i].len
for i in range(0, self.instance_fields_size):
self.len += self.instance_fields[i].len
for i in range(0, self.direct_methods_size):
self.len += self.direct_methods[i].recallLength()
for i in range(0, self.virtual_methods_size):
self.len += self.virtual_methods[i].recallLength()
return self.len
def copytofile(self, file):
writeunsignedleb128(self.static_field_size, file)
writeunsignedleb128(self.instance_fields_size, file)
writeunsignedleb128(self.direct_methods_size, file)
writeunsignedleb128(self.virtual_methods_size, file)
for i in range(0, self.static_field_size):
self.static_fields[i].copytofile(file)
for i in range(0, self.instance_fields_size):
self.instance_fields[i].copytofile(file)
for i in range(0, self.direct_methods_size):
self.direct_methods[i].copytofile(file)
for i in range(0, self.virtual_methods_size):
self.virtual_methods[i].copytofile(file)
class Encodedfield:
def __init__(self):
self.len = 0
self.field_idx_diff = 0
self.access_flags = 0
self.field_idx = 0 # need to set later
def dump(self, addr):
self.field_idx_diff, length = readunsignedleb128(addr)
self.len += length
self.access_flags, length = readunsignedleb128(addr + self.len)
self.len += length
def copytofile(self, file):
writeunsignedleb128(self.field_idx_diff, file)
writeunsignedleb128(self.access_flags, file)
class Encodedmethod:
def __init__(self):
self.len = 0
self.method_idx_diff = 0
self.access_flags = 0
self.code_off = 0
self.method_idx = 0
def dump(self, addr):
self.method_idx_diff, length = readunsignedleb128(addr)
self.len += length
self.access_flags, length = readunsignedleb128(addr + self.len)
self.len += length
self.code_off, length = readunsignedleb128(addr + self.len)
self.len += length
def recallLength(self):
self.len = 0
self.len += unsignedleb128forlen(self.method_idx_diff)
self.len += unsignedleb128forlen(self.access_flags)
self.len += unsignedleb128forlen(self.code_off)
return self.len
def copytofile(self, file):
writeunsignedleb128(self.method_idx_diff, file)
writeunsignedleb128(self.access_flags, file)
writeunsignedleb128(self.code_off, file)
# alignment: 4bytes
class CodeItem:
def __init__(self):
self.len = 0
self.register_size = 0
self.ins_size = 0
self.outs_size = 0
self.tries_size = 0
self.debug_info_off = 0
self.insns_size = 0
self.insns = []
self.debugRef = None
self.padding = 0
self.tries = []
self.handler = None
def dump(self, addr):
self.register_size = getWord(addr) # 2
self.ins_size = getWord(addr + 2) # 0
self.outs_size = getWord(addr + 4) # 0x4187
self.tries_size = getWord(addr + 6) # 0x13
self.debug_info_off = getDword(addr + 8) # 0xD
self.insns_size = getDword(addr + 12) # 0x22
self.len += 16
for i in range(0, self.insns_size):
self.insns.append(getWord(addr + self.len + 2 * i))
self.len += 2 * self.insns_size
if self.tries_size != 0 and self.insns_size % 2 == 1:
self.len += 2
for i in range(0, self.tries_size):
tryitem = TryItem()
tryitem.dump(addr + self.len + 8 * i)
self.tries.append(tryitem)
self.len += 8 * self.tries_size
if self.tries_size != 0:
self.handler = EncodedhandlerList()
self.handler.dump(addr + self.len)
self.len += self.handler.len
# align = self.len % 4
# if align != 0:
# self.len += (4 - align)
class TryItem:
def __init__(self):
self.start = 0
self.len = 8
self.start_addr = 0
self.insn_count = 0
self.handler_off = 0
def dump(self, addr):
self.start = addr
self.start_addr = getDword(addr)
self.insn_count = getWord(addr + 4)
self.handler_off = getWord(addr + 6)
class EncodedhandlerList:
def __init__(self):
self.start = 0
self.len = 0
self.size = 0
self.list = []
def dump(self, addr):
self.start = addr
self.size, length = readunsignedleb128(addr)
self.len += length
for i in range(0, self.size):
handler = EncodedhandlerItem()
handler.dump(addr + self.len)
self.len += handler.len
self.list.append(handler)
class EncodedhandlerItem:
def __init__(self):
self.start = 0
self.len = 0
self.size = 0
self.handlers = []
self.catch_all_addr = 0
def dump(self, addr):
self.start = addr
self.size, length = readsignedleb128(addr)
self.len += length
for i in range(0, abs(self.size)):
pair = EncodedTypeAddrPair()
pair.dump(addr + self.len)
self.len += pair.len
self.handlers.append(pair)
if self.size <= 0:
self.catch_all_addr, length = readunsignedleb128(addr + self.len)
self.len += length
class EncodedTypeAddrPair:
def __init__(self):
self.type_idx = 0
self.addr = 0
self.len = 0
def dump(self, addr):
self.type_idx, length = readunsignedleb128(addr)
self.len += length
self.addr, length = readunsignedleb128(addr + length)
self.len += length
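# Illustrative sketch (not part of the original script): how the unsigned
# LEB128 values consumed by the dump() methods above are decoded. The real
# helpers (readunsignedleb128, unsignedleb128forlen, getWord, getDword) are
# assumed to be defined earlier in this script; this standalone version works
# on a bytes object rather than a raw memory address.
def _uleb128_from_bytes(buf, offset=0):
    """Return (value, bytes_consumed) for the ULEB128 starting at buf[offset]."""
    value = 0
    shift = 0
    consumed = 0
    while True:
        byte = buf[offset + consumed]
        value |= (byte & 0x7F) << shift
        consumed += 1
        if byte & 0x80 == 0:
            return value, consumed
        shift += 7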
address = int(0x5d4e8020) # DexFile address
dexfile = DexFile()
dexfile.dump(address)
dexfile.dexHeader.printf()
dexfile.copytofile()
|
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import sys
from collections import OrderedDict
import opt_einsum
from pyro.distributions.score_parts import ScoreParts
from pyro.distributions.util import scale_and_mask
from pyro.ops.packed import pack
from pyro.poutine.util import is_validation_enabled
from pyro.util import warn_if_inf, warn_if_nan
class Trace:
"""
Graph data structure denoting the relationships amongst different pyro primitives
in the execution trace.
An execution trace of a Pyro program is a record of every call
to ``pyro.sample()`` and ``pyro.param()`` in a single execution of that program.
Traces are directed graphs whose nodes represent primitive calls or input/output,
and whose edges represent conditional dependence relationships
between those primitive calls. They are created and populated by ``poutine.trace``.
Each node (or site) in a trace contains the name, input and output value of the site,
as well as additional metadata added by inference algorithms or user annotation.
In the case of ``pyro.sample``, the trace also includes the stochastic function
at the site, and any observed data added by users.
Consider the following Pyro program:
>>> def model(x):
... s = pyro.param("s", torch.tensor(0.5))
... z = pyro.sample("z", dist.Normal(x, s))
... return z ** 2
We can record its execution using ``pyro.poutine.trace``
and use the resulting data structure to compute the log-joint probability
of all of the sample sites in the execution or extract all parameters.
>>> trace = pyro.poutine.trace(model).get_trace(0.0)
>>> logp = trace.log_prob_sum()
>>> params = [trace.nodes[name]["value"].unconstrained() for name in trace.param_nodes]
We can also inspect or manipulate individual nodes in the trace.
``trace.nodes`` contains a ``collections.OrderedDict``
of site names and metadata corresponding to ``x``, ``s``, ``z``, and the return value:
>>> list(name for name in trace.nodes.keys()) # doctest: +SKIP
["_INPUT", "s", "z", "_RETURN"]
Values of ``trace.nodes`` are dictionaries of node metadata:
>>> trace.nodes["z"] # doctest: +SKIP
{'type': 'sample', 'name': 'z', 'is_observed': False,
'fn': Normal(), 'value': tensor(0.6480), 'args': (), 'kwargs': {},
'infer': {}, 'scale': 1.0, 'cond_indep_stack': (),
'done': True, 'stop': False, 'continuation': None}
``'infer'`` is a dictionary of user- or algorithm-specified metadata.
``'args'`` and ``'kwargs'`` are the arguments passed via ``pyro.sample``
to ``fn.__call__`` or ``fn.log_prob``.
``'scale'`` is used to scale the log-probability of the site when computing the log-joint.
``'cond_indep_stack'`` contains data structures corresponding to ``pyro.plate`` contexts
appearing in the execution.
``'done'``, ``'stop'``, and ``'continuation'`` are only used by Pyro's internals.
:param string graph_type: string specifying the kind of trace graph to construct
"""
def __init__(self, graph_type="flat"):
assert graph_type in ("flat", "dense"), "{} not a valid graph type".format(
graph_type
)
self.graph_type = graph_type
self.nodes = OrderedDict()
self._succ = OrderedDict()
self._pred = OrderedDict()
def __contains__(self, name):
return name in self.nodes
def __iter__(self):
return iter(self.nodes.keys())
def __len__(self):
return len(self.nodes)
@property
def edges(self):
for site, adj_nodes in self._succ.items():
for adj_node in adj_nodes:
yield site, adj_node
def add_node(self, site_name, **kwargs):
"""
:param string site_name: the name of the site to be added
Adds a site to the trace.
Raises an error when attempting to add a duplicate node
instead of silently overwriting.
"""
if site_name in self:
site = self.nodes[site_name]
if site["type"] != kwargs["type"]:
# Cannot sample or observe after a param statement.
raise RuntimeError(
"{} is already in the trace as a {}".format(site_name, site["type"])
)
elif kwargs["type"] != "param":
# Cannot sample after a previous sample statement.
raise RuntimeError(
"Multiple {} sites named '{}'".format(kwargs["type"], site_name)
)
        # XXX should copy in case site gets mutated, or don't bother?
self.nodes[site_name] = kwargs
self._pred[site_name] = set()
self._succ[site_name] = set()
def add_edge(self, site1, site2):
for site in (site1, site2):
if site not in self.nodes:
self.add_node(site)
self._succ[site1].add(site2)
self._pred[site2].add(site1)
def remove_node(self, site_name):
self.nodes.pop(site_name)
for p in self._pred[site_name]:
self._succ[p].remove(site_name)
for s in self._succ[site_name]:
self._pred[s].remove(site_name)
self._pred.pop(site_name)
self._succ.pop(site_name)
def predecessors(self, site_name):
return self._pred[site_name]
def successors(self, site_name):
return self._succ[site_name]
def copy(self):
"""
Makes a shallow copy of self with nodes and edges preserved.
"""
new_tr = Trace(graph_type=self.graph_type)
new_tr.nodes.update(self.nodes)
new_tr._succ.update(self._succ)
new_tr._pred.update(self._pred)
return new_tr
def _dfs(self, site, visited):
if site in visited:
return
for s in self._succ[site]:
for node in self._dfs(s, visited):
yield node
visited.add(site)
yield site
def topological_sort(self, reverse=False):
"""
Return a list of nodes (site names) in topologically sorted order.
:param bool reverse: Return the list in reverse order.
:return: list of topologically sorted nodes (site names).
"""
visited = set()
top_sorted = []
for s in self._succ:
for node in self._dfs(s, visited):
top_sorted.append(node)
return top_sorted if reverse else list(reversed(top_sorted))
def log_prob_sum(self, site_filter=lambda name, site: True):
"""
Compute the site-wise log probabilities of the trace.
Each ``log_prob`` has shape equal to the corresponding ``batch_shape``.
Each ``log_prob_sum`` is a scalar.
The computation of ``log_prob_sum`` is memoized.
:returns: total log probability.
:rtype: torch.Tensor
"""
result = 0.0
for name, site in self.nodes.items():
if site["type"] == "sample" and site_filter(name, site):
if "log_prob_sum" in site:
log_p = site["log_prob_sum"]
else:
try:
log_p = site["fn"].log_prob(
site["value"], *site["args"], **site["kwargs"]
)
except ValueError as e:
_, exc_value, traceback = sys.exc_info()
shapes = self.format_shapes(last_site=site["name"])
raise ValueError(
"Error while computing log_prob_sum at site '{}':\n{}\n{}\n".format(
name, exc_value, shapes
)
).with_traceback(traceback) from e
log_p = scale_and_mask(log_p, site["scale"], site["mask"]).sum()
site["log_prob_sum"] = log_p
if is_validation_enabled():
warn_if_nan(log_p, "log_prob_sum at site '{}'".format(name))
warn_if_inf(
log_p,
"log_prob_sum at site '{}'".format(name),
allow_neginf=True,
)
result = result + log_p
return result
def compute_log_prob(self, site_filter=lambda name, site: True):
"""
Compute the site-wise log probabilities of the trace.
Each ``log_prob`` has shape equal to the corresponding ``batch_shape``.
Each ``log_prob_sum`` is a scalar.
Both computations are memoized.
"""
for name, site in self.nodes.items():
if site["type"] == "sample" and site_filter(name, site):
if "log_prob" not in site:
try:
log_p = site["fn"].log_prob(
site["value"], *site["args"], **site["kwargs"]
)
except ValueError as e:
_, exc_value, traceback = sys.exc_info()
shapes = self.format_shapes(last_site=site["name"])
raise ValueError(
"Error while computing log_prob at site '{}':\n{}\n{}".format(
name, exc_value, shapes
)
).with_traceback(traceback) from e
site["unscaled_log_prob"] = log_p
log_p = scale_and_mask(log_p, site["scale"], site["mask"])
site["log_prob"] = log_p
site["log_prob_sum"] = log_p.sum()
if is_validation_enabled():
warn_if_nan(
site["log_prob_sum"],
"log_prob_sum at site '{}'".format(name),
)
warn_if_inf(
site["log_prob_sum"],
"log_prob_sum at site '{}'".format(name),
allow_neginf=True,
)
def compute_score_parts(self):
"""
Compute the batched local score parts at each site of the trace.
Each ``log_prob`` has shape equal to the corresponding ``batch_shape``.
Each ``log_prob_sum`` is a scalar.
All computations are memoized.
"""
for name, site in self.nodes.items():
if site["type"] == "sample" and "score_parts" not in site:
# Note that ScoreParts overloads the multiplication operator
# to correctly scale each of its three parts.
try:
value = site["fn"].score_parts(
site["value"], *site["args"], **site["kwargs"]
)
except ValueError as e:
_, exc_value, traceback = sys.exc_info()
shapes = self.format_shapes(last_site=site["name"])
raise ValueError(
"Error while computing score_parts at site '{}':\n{}\n{}".format(
name, exc_value, shapes
)
).with_traceback(traceback) from e
site["unscaled_log_prob"] = value.log_prob
value = value.scale_and_mask(site["scale"], site["mask"])
site["score_parts"] = value
site["log_prob"] = value.log_prob
site["log_prob_sum"] = value.log_prob.sum()
if is_validation_enabled():
warn_if_nan(
site["log_prob_sum"], "log_prob_sum at site '{}'".format(name)
)
warn_if_inf(
site["log_prob_sum"],
"log_prob_sum at site '{}'".format(name),
allow_neginf=True,
)
def detach_(self):
"""
Detach values (in-place) at each sample site of the trace.
"""
for _, site in self.nodes.items():
if site["type"] == "sample":
site["value"] = site["value"].detach()
@property
def observation_nodes(self):
"""
:return: a list of names of observe sites
"""
return [
name
for name, node in self.nodes.items()
if node["type"] == "sample" and node["is_observed"]
]
@property
def param_nodes(self):
"""
:return: a list of names of param sites
"""
return [name for name, node in self.nodes.items() if node["type"] == "param"]
@property
def stochastic_nodes(self):
"""
:return: a list of names of sample sites
"""
return [
name
for name, node in self.nodes.items()
if node["type"] == "sample" and not node["is_observed"]
]
@property
def reparameterized_nodes(self):
"""
:return: a list of names of sample sites whose stochastic functions
are reparameterizable primitive distributions
"""
return [
name
for name, node in self.nodes.items()
if node["type"] == "sample"
and not node["is_observed"]
and getattr(node["fn"], "has_rsample", False)
]
@property
def nonreparam_stochastic_nodes(self):
"""
:return: a list of names of sample sites whose stochastic functions
are not reparameterizable primitive distributions
"""
return list(set(self.stochastic_nodes) - set(self.reparameterized_nodes))
def iter_stochastic_nodes(self):
"""
:return: an iterator over stochastic nodes in the trace.
"""
for name, node in self.nodes.items():
if node["type"] == "sample" and not node["is_observed"]:
yield name, node
def symbolize_dims(self, plate_to_symbol=None):
"""
Assign unique symbols to all tensor dimensions.
"""
plate_to_symbol = {} if plate_to_symbol is None else plate_to_symbol
symbol_to_dim = {}
for site in self.nodes.values():
if site["type"] != "sample":
continue
# allocate even symbols for plate dims
dim_to_symbol = {}
for frame in site["cond_indep_stack"]:
if frame.vectorized:
if frame.name in plate_to_symbol:
symbol = plate_to_symbol[frame.name]
else:
symbol = opt_einsum.get_symbol(2 * len(plate_to_symbol))
plate_to_symbol[frame.name] = symbol
symbol_to_dim[symbol] = frame.dim
dim_to_symbol[frame.dim] = symbol
# allocate odd symbols for enum dims
for dim, id_ in site["infer"].get("_dim_to_id", {}).items():
symbol = opt_einsum.get_symbol(1 + 2 * id_)
symbol_to_dim[symbol] = dim
dim_to_symbol[dim] = symbol
enum_dim = site["infer"].get("_enumerate_dim")
if enum_dim is not None:
site["infer"]["_enumerate_symbol"] = dim_to_symbol[enum_dim]
site["infer"]["_dim_to_symbol"] = dim_to_symbol
self.plate_to_symbol = plate_to_symbol
self.symbol_to_dim = symbol_to_dim
def pack_tensors(self, plate_to_symbol=None):
"""
Computes packed representations of tensors in the trace.
This should be called after :meth:`compute_log_prob` or :meth:`compute_score_parts`.
"""
self.symbolize_dims(plate_to_symbol)
for site in self.nodes.values():
if site["type"] != "sample":
continue
dim_to_symbol = site["infer"]["_dim_to_symbol"]
packed = site.setdefault("packed", {})
try:
packed["mask"] = pack(site["mask"], dim_to_symbol)
if "score_parts" in site:
log_prob, score_function, entropy_term = site["score_parts"]
log_prob = pack(log_prob, dim_to_symbol)
score_function = pack(score_function, dim_to_symbol)
entropy_term = pack(entropy_term, dim_to_symbol)
packed["score_parts"] = ScoreParts(
log_prob, score_function, entropy_term
)
packed["log_prob"] = log_prob
packed["unscaled_log_prob"] = pack(
site["unscaled_log_prob"], dim_to_symbol
)
elif "log_prob" in site:
packed["log_prob"] = pack(site["log_prob"], dim_to_symbol)
packed["unscaled_log_prob"] = pack(
site["unscaled_log_prob"], dim_to_symbol
)
except ValueError as e:
_, exc_value, traceback = sys.exc_info()
shapes = self.format_shapes(last_site=site["name"])
raise ValueError(
"Error while packing tensors at site '{}':\n {}\n{}".format(
site["name"], exc_value, shapes
)
).with_traceback(traceback) from e
def format_shapes(self, title="Trace Shapes:", last_site=None):
"""
Returns a string showing a table of the shapes of all sites in the
trace.
"""
if not self.nodes:
return title
rows = [[title]]
rows.append(["Param Sites:"])
for name, site in self.nodes.items():
if site["type"] == "param":
rows.append([name, None] + [str(size) for size in site["value"].shape])
if name == last_site:
break
rows.append(["Sample Sites:"])
for name, site in self.nodes.items():
if site["type"] == "sample":
# param shape
batch_shape = getattr(site["fn"], "batch_shape", ())
event_shape = getattr(site["fn"], "event_shape", ())
rows.append(
[name + " dist", None]
+ [str(size) for size in batch_shape]
+ ["|", None]
+ [str(size) for size in event_shape]
)
# value shape
event_dim = len(event_shape)
shape = getattr(site["value"], "shape", ())
batch_shape = shape[: len(shape) - event_dim]
event_shape = shape[len(shape) - event_dim :]
rows.append(
["value", None]
+ [str(size) for size in batch_shape]
+ ["|", None]
+ [str(size) for size in event_shape]
)
# log_prob shape
if "log_prob" in site:
batch_shape = getattr(site["log_prob"], "shape", ())
rows.append(
["log_prob", None]
+ [str(size) for size in batch_shape]
+ ["|", None]
)
if name == last_site:
break
return _format_table(rows)
def _format_table(rows):
"""
Formats a right justified table using None as column separator.
"""
# compute column widths
column_widths = [0, 0, 0]
for row in rows:
widths = [0, 0, 0]
j = 0
for cell in row:
if cell is None:
j += 1
else:
widths[j] += 1
for j in range(3):
column_widths[j] = max(column_widths[j], widths[j])
# justify columns
for i, row in enumerate(rows):
cols = [[], [], []]
j = 0
for cell in row:
if cell is None:
j += 1
else:
cols[j].append(cell)
cols = [
[""] * (width - len(col)) + col
if direction == "r"
else col + [""] * (width - len(col))
for width, col, direction in zip(column_widths, cols, "rrl")
]
rows[i] = sum(cols, [])
# compute cell widths
cell_widths = [0] * len(rows[0])
for row in rows:
for j, cell in enumerate(row):
cell_widths[j] = max(cell_widths[j], len(cell))
# justify cells
return "\n".join(
" ".join(cell.rjust(width) for cell, width in zip(row, cell_widths))
for row in rows
)
|
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import sys
import inspect
from twisted.python import log
from twisted.internet.defer import inlineCallbacks
from autobahn.wamp import protocol
from autobahn.wamp.types import ComponentConfig
from autobahn.websocket.protocol import parseWsUrl
from autobahn.twisted.websocket import WampWebSocketClientFactory
import six
import txaio
txaio.use_twisted()
__all__ = [
'ApplicationSession',
'ApplicationSessionFactory',
'ApplicationRunner',
'Application',
'Service'
]
try:
from twisted.application import service
except (ImportError, SyntaxError):
# Not on PY3 yet
service = None
__all__.pop(__all__.index('Service'))
class ApplicationSession(protocol.ApplicationSession):
"""
WAMP application session for Twisted-based applications.
"""
def onUserError(self, e, msg):
"""
Override of wamp.ApplicationSession
"""
# see docs; will print currently-active exception to the logs,
# which is just what we want.
log.err(e)
# also log the framework-provided error-message
log.err(msg)
class ApplicationSessionFactory(protocol.ApplicationSessionFactory):
"""
WAMP application session factory for Twisted-based applications.
"""
session = ApplicationSession
"""
The application session class this application session factory will use. Defaults to :class:`autobahn.twisted.wamp.ApplicationSession`.
"""
class ApplicationRunner(object):
"""
This class is a convenience tool mainly for development and quick hosting
of WAMP application components.
It can host a WAMP application component in a WAMP-over-WebSocket client
connecting to a WAMP router.
"""
def __init__(self, url, realm, extra=None, debug=False, debug_wamp=False, debug_app=False, ssl=None):
"""
:param url: The WebSocket URL of the WAMP router to connect to (e.g. `ws://somehost.com:8090/somepath`)
:type url: unicode
:param realm: The WAMP realm to join the application session to.
:type realm: unicode
:param extra: Optional extra configuration to forward to the application component.
:type extra: dict
:param debug: Turn on low-level debugging.
:type debug: bool
:param debug_wamp: Turn on WAMP-level debugging.
:type debug_wamp: bool
:param debug_app: Turn on app-level debugging.
:type debug_app: bool
:param ssl: (Optional). If specified this should be an
instance suitable to pass as ``sslContextFactory`` to
            :class:`twisted.internet.endpoints.SSL4ClientEndpoint` such
as :class:`twisted.internet.ssl.CertificateOptions`. Leaving
it as ``None`` will use the result of calling Twisted's
:meth:`twisted.internet.ssl.platformTrust` which tries to use
your distribution's CA certificates.
:type ssl: :class:`twisted.internet.ssl.CertificateOptions`
"""
self.url = url
self.realm = realm
self.extra = extra or dict()
self.debug = debug
self.debug_wamp = debug_wamp
self.debug_app = debug_app
self.make = None
self.ssl = ssl
def run(self, make, start_reactor=True):
"""
Run the application component.
:param make: A factory that produces instances of :class:`autobahn.asyncio.wamp.ApplicationSession`
when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
:type make: callable
:param start_reactor: if True (the default) this method starts
the Twisted reactor and doesn't return until the reactor
stops. If there are any problems starting the reactor or
connect()-ing, we stop the reactor and raise the exception
back to the caller.
:returns: None is returned, unless you specify
``start_reactor=False`` in which case the Deferred that
connect() returns is returned; this will callback() with
an IProtocol instance, which will actually be an instance
of :class:`WampWebSocketClientProtocol`
"""
from twisted.internet import reactor
txaio.use_twisted()
txaio.config.loop = reactor
isSecure, host, port, resource, path, params = parseWsUrl(self.url)
# start logging to console
if self.debug or self.debug_wamp or self.debug_app:
log.startLogging(sys.stdout)
        # factory for creating the ApplicationSession
def create():
cfg = ComponentConfig(self.realm, self.extra)
try:
session = make(cfg)
except Exception as e:
if start_reactor:
# the app component could not be created .. fatal
log.err(str(e))
reactor.stop()
else:
# if we didn't start the reactor, it's up to the
# caller to deal with errors
raise
else:
session.debug_app = self.debug_app
return session
# create a WAMP-over-WebSocket transport client factory
transport_factory = WampWebSocketClientFactory(create, url=self.url,
debug=self.debug, debug_wamp=self.debug_wamp)
# if user passed ssl= but isn't using isSecure, we'll never
# use the ssl argument which makes no sense.
context_factory = None
if self.ssl is not None:
if not isSecure:
raise RuntimeError(
'ssl= argument value passed to %s conflicts with the "ws:" '
'prefix of the url argument. Did you mean to use "wss:"?' %
self.__class__.__name__)
context_factory = self.ssl
elif isSecure:
from twisted.internet.ssl import optionsForClientTLS
context_factory = optionsForClientTLS(six.u(host))
if isSecure:
from twisted.internet.endpoints import SSL4ClientEndpoint
assert context_factory is not None
client = SSL4ClientEndpoint(reactor, host, port, context_factory)
else:
from twisted.internet.endpoints import TCP4ClientEndpoint
client = TCP4ClientEndpoint(reactor, host, port)
d = client.connect(transport_factory)
# as the reactor shuts down, we wish to wait until we've sent
# out our "Goodbye" message; leave() returns a Deferred that
# fires when the transport gets to STATE_CLOSED
def cleanup(proto):
if hasattr(proto, '_session') and proto._session is not None:
return proto._session.leave()
# if we connect successfully, the arg is a WampWebSocketClientProtocol
d.addCallback(lambda proto: reactor.addSystemEventTrigger(
'before', 'shutdown', cleanup, proto))
# if the user didn't ask us to start the reactor, then they
# get to deal with any connect errors themselves.
if start_reactor:
# if an error happens in the connect(), we save the underlying
# exception so that after the event-loop exits we can re-raise
# it to the caller.
class ErrorCollector(object):
exception = None
def __call__(self, failure):
self.exception = failure.value
# print(failure.getErrorMessage())
reactor.stop()
connect_error = ErrorCollector()
d.addErrback(connect_error)
# now enter the Twisted reactor loop
reactor.run()
# if we exited due to a connection error, raise that to the
# caller
if connect_error.exception:
raise connect_error.exception
else:
# let the caller handle any errors
return d
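# Usage sketch (added for illustration; "MyComponent" and the router URL are
# placeholders, not part of this module): a component is hosted by handing an
# ApplicationSession subclass to ApplicationRunner.run(), which connects to the
# router and starts the reactor.
#
#     class MyComponent(ApplicationSession):
#         def onJoin(self, details):
#             print("session joined")
#
#     runner = ApplicationRunner(u"ws://127.0.0.1:8080/ws", u"realm1")
#     runner.run(MyComponent)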
class _ApplicationSession(ApplicationSession):
"""
WAMP application session class used internally with :class:`autobahn.twisted.app.Application`.
"""
def __init__(self, config, app):
"""
:param config: The component configuration.
:type config: Instance of :class:`autobahn.wamp.types.ComponentConfig`
:param app: The application this session is for.
:type app: Instance of :class:`autobahn.twisted.wamp.Application`.
"""
# noinspection PyArgumentList
ApplicationSession.__init__(self, config)
self.app = app
@inlineCallbacks
def onConnect(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onConnect`
"""
yield self.app._fire_signal('onconnect')
self.join(self.config.realm)
@inlineCallbacks
def onJoin(self, details):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onJoin`
"""
for uri, proc in self.app._procs:
yield self.register(proc, uri)
for uri, handler in self.app._handlers:
yield self.subscribe(handler, uri)
yield self.app._fire_signal('onjoined')
@inlineCallbacks
def onLeave(self, details):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onLeave`
"""
yield self.app._fire_signal('onleave')
self.disconnect()
@inlineCallbacks
def onDisconnect(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onDisconnect`
"""
yield self.app._fire_signal('ondisconnect')
class Application(object):
"""
A WAMP application. The application object provides a simple way of
creating, debugging and running WAMP application components.
"""
def __init__(self, prefix=None):
"""
:param prefix: The application URI prefix to use for procedures and topics,
e.g. ``"com.example.myapp"``.
:type prefix: unicode
"""
self._prefix = prefix
# procedures to be registered once the app session has joined the router/realm
self._procs = []
# event handler to be subscribed once the app session has joined the router/realm
self._handlers = []
# app lifecycle signal handlers
self._signals = {}
# once an app session is connected, this will be here
self.session = None
def __call__(self, config):
"""
Factory creating a WAMP application session for the application.
:param config: Component configuration.
:type config: Instance of :class:`autobahn.wamp.types.ComponentConfig`
:returns: obj -- An object that derives of
:class:`autobahn.twisted.wamp.ApplicationSession`
"""
assert(self.session is None)
self.session = _ApplicationSession(config, self)
return self.session
def run(self, url=u"ws://localhost:8080/ws", realm=u"realm1",
debug=False, debug_wamp=False, debug_app=False,
start_reactor=True):
"""
Run the application.
:param url: The URL of the WAMP router to connect to.
:type url: unicode
:param realm: The realm on the WAMP router to join.
:type realm: unicode
:param debug: Turn on low-level debugging.
:type debug: bool
:param debug_wamp: Turn on WAMP-level debugging.
:type debug_wamp: bool
:param debug_app: Turn on app-level debugging.
:type debug_app: bool
"""
runner = ApplicationRunner(url, realm,
debug=debug, debug_wamp=debug_wamp, debug_app=debug_app)
runner.run(self.__call__, start_reactor)
def register(self, uri=None):
"""
Decorator exposing a function as a remote callable procedure.
The first argument of the decorator should be the URI of the procedure
to register under.
:Example:
.. code-block:: python
@app.register('com.myapp.add2')
def add2(a, b):
return a + b
        The above function can then be called remotely over WAMP using the URI
        `com.myapp.add2` under which it was registered.
If no URI is given, the URI is constructed from the application URI prefix
and the Python function name.
:Example:
.. code-block:: python
app = Application('com.myapp')
# implicit URI will be 'com.myapp.add2'
@app.register()
def add2(a, b):
return a + b
If the function `yields` (is a co-routine), the `@inlineCallbacks` decorator
will be applied automatically to it. In that case, if you wish to return something,
you should use `returnValue`:
:Example:
.. code-block:: python
from twisted.internet.defer import returnValue
@app.register('com.myapp.add2')
def add2(a, b):
res = yield stuff(a, b)
returnValue(res)
:param uri: The URI of the procedure to register under.
:type uri: unicode
"""
def decorator(func):
if uri:
_uri = uri
else:
assert(self._prefix is not None)
_uri = "{0}.{1}".format(self._prefix, func.__name__)
if inspect.isgeneratorfunction(func):
func = inlineCallbacks(func)
self._procs.append((_uri, func))
return func
return decorator
def subscribe(self, uri=None):
"""
Decorator attaching a function as an event handler.
The first argument of the decorator should be the URI of the topic
to subscribe to. If no URI is given, the URI is constructed from
the application URI prefix and the Python function name.
        If the function yields, it is assumed to be an asynchronous
        process and inlineCallbacks will be applied to it.
:Example:
.. code-block:: python
@app.subscribe('com.myapp.topic1')
def onevent1(x, y):
print("got event on topic1", x, y)
:param uri: The URI of the topic to subscribe to.
:type uri: unicode
"""
def decorator(func):
if uri:
_uri = uri
else:
assert(self._prefix is not None)
_uri = "{0}.{1}".format(self._prefix, func.__name__)
if inspect.isgeneratorfunction(func):
func = inlineCallbacks(func)
self._handlers.append((_uri, func))
return func
return decorator
def signal(self, name):
"""
Decorator attaching a function as handler for application signals.
Signals are local events triggered internally and exposed to the
developer to be able to react to the application lifecycle.
        If the function yields, it is assumed to be an asynchronous
        coroutine and inlineCallbacks will be applied to it.
        Current signals:
- `onjoined`: Triggered after the application session has joined the
realm on the router and registered/subscribed all procedures
and event handlers that were setup via decorators.
- `onleave`: Triggered when the application session leaves the realm.
.. code-block:: python
@app.signal('onjoined')
def _():
                # do something after the app has joined a realm
:param name: The name of the signal to watch.
:type name: unicode
"""
def decorator(func):
if inspect.isgeneratorfunction(func):
func = inlineCallbacks(func)
self._signals.setdefault(name, []).append(func)
return func
return decorator
@inlineCallbacks
def _fire_signal(self, name, *args, **kwargs):
"""
Utility method to call all signal handlers for a given signal.
:param name: The signal name.
:type name: str
"""
for handler in self._signals.get(name, []):
try:
# FIXME: what if the signal handler is not a coroutine?
# Why run signal handlers synchronously?
yield handler(*args, **kwargs)
except Exception as e:
# FIXME
log.msg("Warning: exception in signal handler swallowed", e)
if service:
# Don't define it if Twisted's service support isn't here
class Service(service.MultiService):
"""
A WAMP application as a twisted service.
        The application object provides a simple way of creating, debugging and running WAMP
        application components inside a traditional Twisted application.
        It manages the application lifecycle of the WAMP connection using startService and stopService.
        Using services also makes it possible to write integration tests that properly terminate their connections.
It can host a WAMP application component in a WAMP-over-WebSocket client
connecting to a WAMP router.
"""
factory = WampWebSocketClientFactory
def __init__(self, url, realm, make, extra=None,
debug=False, debug_wamp=False, debug_app=False):
"""
:param url: The WebSocket URL of the WAMP router to connect to (e.g. `ws://somehost.com:8090/somepath`)
:type url: unicode
:param realm: The WAMP realm to join the application session to.
:type realm: unicode
:param make: A factory that produces instances of :class:`autobahn.asyncio.wamp.ApplicationSession`
when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
:type make: callable
:param extra: Optional extra configuration to forward to the application component.
:type extra: dict
:param debug: Turn on low-level debugging.
:type debug: bool
:param debug_wamp: Turn on WAMP-level debugging.
:type debug_wamp: bool
:param debug_app: Turn on app-level debugging.
:type debug_app: bool
            You can replace the `factory` attribute in order to change connectionLost or connectionFailed behaviour.
            The factory attribute must return a WampWebSocketClientFactory object.
"""
self.url = url
self.realm = realm
self.extra = extra or dict()
self.debug = debug
self.debug_wamp = debug_wamp
self.debug_app = debug_app
self.make = make
service.MultiService.__init__(self)
self.setupService()
def setupService(self):
"""
Setup the application component.
"""
isSecure, host, port, resource, path, params = parseWsUrl(self.url)
            # factory for creating the ApplicationSession
def create():
cfg = ComponentConfig(self.realm, self.extra)
session = self.make(cfg)
session.debug_app = self.debug_app
return session
# create a WAMP-over-WebSocket transport client factory
transport_factory = self.factory(create, url=self.url,
debug=self.debug, debug_wamp=self.debug_wamp)
# setup the client from a Twisted endpoint
if isSecure:
from twisted.application.internet import SSLClient
clientClass = SSLClient
else:
from twisted.application.internet import TCPClient
clientClass = TCPClient
client = clientClass(host, port, transport_factory)
client.setServiceParent(self)
|
|
# -*- coding: utf-8 -*-
"""
cpsdirector.application
=======================
ConPaaS director: application support.
:copyright: (C) 2013 by Contrail Consortium.
"""
from flask import Blueprint
from flask import jsonify, request, g
import simplejson
import ConfigParser
from netaddr import IPNetwork
from cpsdirector import db
from cpsdirector.common import log
from cpsdirector.common import build_response
from cpsdirector.common import config_parser
application_page = Blueprint('application_page', __name__)
class Application(db.Model):
aid = db.Column(db.Integer, primary_key=True,
autoincrement=True)
name = db.Column(db.String(256))
user_id = db.Column(db.Integer, db.ForeignKey('user.uid'))
user = db.relationship('User', backref=db.backref('applications',
lazy="dynamic"))
def __init__(self, **kwargs):
# Default values
self.name = "New Application"
for key, val in kwargs.items():
setattr(self, key, val)
def to_dict(self):
return {
'aid': self.aid, 'name': self.name,
}
def get_available_vpn_subnet(self):
"""Find an available VPN subnet for the next service to be created in
this application."""
try:
network = config_parser.get('conpaas', 'VPN_BASE_NETWORK')
netmask = config_parser.get('conpaas', 'VPN_NETMASK')
srvbits = config_parser.get('conpaas', 'VPN_SERVICE_BITS')
except ConfigParser.NoOptionError:
return
# Split the given network into subnets.
base_net = IPNetwork(network + '/' + netmask)
vpn_subnets = base_net.subnet(32 - base_net.prefixlen - int(srvbits))
assigned_networks = [ service.subnet for service in self.services ]
for candidate_network in vpn_subnets:
candidate_network = str(candidate_network)
if candidate_network not in assigned_networks:
return candidate_network
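# Illustrative sketch (hypothetical values, not ConPaaS defaults): netaddr's
# IPNetwork.subnet() is what get_available_vpn_subnet() above relies on; it
# yields equally sized candidate subnets, and the first one not yet assigned
# to a service in this application is handed out.
#
#     >>> from netaddr import IPNetwork
#     >>> [str(net) for net in IPNetwork('172.16.0.0/16').subnet(24)][:3]
#     ['172.16.0.0/24', '172.16.1.0/24', '172.16.2.0/24']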
def get_app_by_id(user_id, app_id):
app = Application.query.filter_by(aid=app_id).first()
if not app:
log('Application %s does not exist' % app_id)
return
if app.user_id != user_id:
log('Application %s is not owned by user %s' % (app_id, user_id))
return
return app
def get_app_by_name(user_id, app_name):
app = Application.query.filter_by(name=app_name).first()
if not app:
log('Application %s does not exist' % app_name)
return
if app.user_id != user_id:
log('Application %s is not owned by user %s' % (app_name, user_id))
return
return app
def get_default_app(user_id):
return Application.query.filter_by(user_id=user_id).order_by(
Application.aid).first()
def check_app_exists(app_name):
if Application.query.filter_by(name=app_name, user_id=g.user.uid).first():
return True
return False
def _createapp(app_name):
log('User %s creating a new application %s' % (g.user.username, app_name))
# check if the application already exists
if check_app_exists(app_name):
log('Application name %s already exists' % app_name)
return jsonify({
'error': True,
'msg': 'Application name "%s" already taken' % app_name })
a = Application(name=app_name, user=g.user)
db.session.add(a)
    # flush() is needed to get the auto-incremented aid
db.session.flush()
db.session.commit()
log('Application %s created properly' % (a.aid))
return jsonify(a.to_dict())
def deleteapp(user_id, app_id):
app = get_app_by_id(user_id, app_id)
if not app:
return False
# If an application with id 'app_id' exists and user is the owner
for service in Service.query.filter_by(application_id=app_id):
callmanager(service.sid, "shutdown", True, {})
stop(service.sid)
db.session.delete(app)
db.session.commit()
return True
from cpsdirector.user import cert_required
@application_page.route("/createapp", methods=['POST'])
@cert_required(role='user')
def createapp():
app_name = request.values.get('name')
if not app_name:
log('"name" is a required argument')
return build_response(simplejson.dumps(False))
return build_response(_createapp(app_name))
from cpsdirector.service import Service
from cpsdirector.service import stop
from cpsdirector.service import callmanager
@application_page.route("/delete/<int:appid>", methods=['POST'])
@cert_required(role='user')
def delete(appid):
"""eg: POST /delete/3
POSTed values must contain username and password.
Returns a boolean value. True in case of successful authentication and
proper service termination. False otherwise.
"""
log('User %s attempting to delete application %s' % (g.user.uid, appid))
return build_response(simplejson.dumps(deleteapp(g.user.uid, appid)))
def _renameapp(appid, newname):
log('User %s attempting to rename application %s' % (g.user.uid, appid))
app = get_app_by_id(g.user.uid, appid)
if not app:
return build_response(simplejson.dumps(False))
app.name = newname
db.session.commit()
return simplejson.dumps(True)
@application_page.route("/renameapp/<int:appid>", methods=['POST'])
@cert_required(role='user')
def renameapp(appid):
newname = request.values.get('name')
if not newname:
log('"name" is a required argument')
return build_response(simplejson.dumps(False))
return _renameapp(appid, newname)
@application_page.route("/listapp", methods=['POST', 'GET'])
@cert_required(role='user')
def list_applications():
"""POST /listapp
List all the ConPaaS applications if the user is authenticated. Return False
otherwise.
"""
return build_response(simplejson.dumps([
app.to_dict() for app in g.user.applications.all()
]))
|
|
# helpers.py
from pyparsing.core import *
from pyparsing.util import _bslash, _flatten, _escapeRegexRangeChars
#
# global helpers
#
def delimitedList(expr, delim=",", combine=False):
"""Helper to define a delimited list of expressions - the delimiter
defaults to ','. By default, the list elements and delimiters can
have intervening whitespace, and comments, but this can be
overridden by passing ``combine=True`` in the constructor. If
``combine`` is set to ``True``, the matching tokens are
returned as a single token string, with the delimiters included;
otherwise, the matching tokens are returned as a list of tokens,
with the delimiters suppressed.
Example::
delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
"""
dlName = str(expr) + " [" + str(delim) + " " + str(expr) + "]..."
if combine:
return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName)
else:
return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName)
def countedArray(expr, intExpr=None):
"""Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
The matched tokens returns the array of expr tokens as a list - the
leading count token is suppressed.
If ``intExpr`` is specified, it should be a pyparsing expression
that produces an integer value.
Example::
countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
# in this parser, the leading integer value is given in binary,
# '10' indicating that 2 values are in the array
binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
"""
arrayExpr = Forward()
def countFieldParseAction(s, l, t):
n = t[0]
arrayExpr << (n and Group(And([expr] * n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t: int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return (intExpr + arrayExpr).setName("(len) " + str(expr) + "...")
def matchPreviousLiteral(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks for
a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match ``"1:1"``, but not ``"1:2"``. Because this
matches a previous literal, will also match the leading
``"1:1"`` in ``"1:10"``. If this is not desired, use
:class:`matchPreviousExpr`. Do *not* use with packrat parsing
enabled.
"""
rep = Forward()
def copyTokenToRepeater(s, l, t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And(Literal(tt) for tt in tflat)
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName("(prev) " + str(expr))
return rep
def matchPreviousExpr(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks for
a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match ``"1:1"``, but not ``"1:2"``. Because this
matches by expressions, will *not* match the leading ``"1:1"``
in ``"1:10"``; the expressions are evaluated first, and then
compared, so ``"1"`` is compared with ``"10"``. Do *not* use
with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s, l, t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s, l, t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("", 0, "")
rep.setParseAction(mustMatchTheseTokens, callDuringTry=True)
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName("(prev) " + str(expr))
return rep
def oneOf(strs, caseless=False, useRegex=True, asKeyword=False):
"""Helper to quickly define a set of alternative Literals, and makes
sure to do longest-first testing when there is a conflict,
regardless of the input order, but returns
a :class:`MatchFirst` for best performance.
Parameters:
- strs - a string of space-delimited literals, or a collection of
string literals
- caseless - (default= ``False``) - treat all literals as
caseless
- useRegex - (default= ``True``) - as an optimization, will
generate a Regex object; otherwise, will generate
a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if
creating a :class:`Regex` raises an exception)
- asKeyword - (default=``False``) - enforce Keyword-style matching on the
generated expressions
Example::
comp_oper = oneOf("< = > <= >= !=")
var = Word(alphas)
number = Word(nums)
term = var | number
comparison_expr = term + comp_oper + term
print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
prints::
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
"""
if isinstance(caseless, str_type):
warnings.warn(
"More than one string argument passed to oneOf, pass "
"choices as a list or space-delimited string",
stacklevel=2,
)
if caseless:
isequal = lambda a, b: a.upper() == b.upper()
masks = lambda a, b: b.upper().startswith(a.upper())
parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
else:
isequal = lambda a, b: a == b
masks = lambda a, b: b.startswith(a)
parseElementClass = Keyword if asKeyword else Literal
symbols = []
if isinstance(strs, str_type):
symbols = strs.split()
elif isinstance(strs, Iterable):
symbols = list(strs)
else:
warnings.warn(
"Invalid argument to oneOf, expected string or iterable",
SyntaxWarning,
stacklevel=2,
)
if not symbols:
return NoMatch()
if not asKeyword:
# if not producing keywords, need to reorder to take care to avoid masking
# longer choices with shorter ones
i = 0
while i < len(symbols) - 1:
cur = symbols[i]
for j, other in enumerate(symbols[i + 1 :]):
if isequal(other, cur):
del symbols[i + j + 1]
break
elif masks(cur, other):
del symbols[i + j + 1]
symbols.insert(i, other)
break
else:
i += 1
if not (caseless or asKeyword) and useRegex:
# ~ print(strs, "->", "|".join([_escapeRegexChars(sym) for sym in symbols]))
try:
if len(symbols) == len("".join(symbols)):
return Regex(
"[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols)
).setName(" | ".join(symbols))
else:
return Regex("|".join(re.escape(sym) for sym in symbols)).setName(
" | ".join(symbols)
)
except sre_constants.error:
warnings.warn(
"Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning,
stacklevel=2,
)
# last resort, just use MatchFirst
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(
" | ".join(symbols)
)
def dictOf(key, value):
"""Helper to easily and clearly define a dictionary by specifying
the respective patterns for the key and value. Takes care of
defining the :class:`Dict`, :class:`ZeroOrMore`, and
:class:`Group` tokens in the proper order. The key pattern
can include delimiting markers or punctuation, as long as they are
suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the :class:`Dict` results
can include named token fields.
Example::
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
print(OneOrMore(attr_expr).parseString(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
# similar to Dict, but simpler call format
result = dictOf(attr_label, attr_value).parseString(text)
print(result.dump())
print(result['shape'])
print(result.shape) # object attribute access works too
print(result.asDict())
prints::
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
SQUARE
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
"""
return Dict(OneOrMore(Group(key + value)))
def originalTextFor(expr, asString=True):
"""Helper to return the original, untokenized text for a given
expression. Useful to restore the parsed fields of an HTML start
tag into the raw tag text itself, or to revert separate tokens with
intervening whitespace back to the original matching input text. By
    default, returns a string containing the original parsed text.
If the optional ``asString`` argument is passed as
``False``, then the return value is
a :class:`ParseResults` containing any results names that
were originally matched, and a single token containing the original
matched text from the input string. So if the expression passed to
:class:`originalTextFor` contains expressions with defined
results names, you must set ``asString`` to ``False`` if you
want to preserve those results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b", "i"):
opener, closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s, loc, t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s, l, t: s[t._original_start : t._original_end]
else:
def extractText(s, l, t):
t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr
def ungroup(expr):
"""Helper to undo pyparsing's default grouping of And expressions,
even if all but one are non-empty.
"""
return TokenConverter(expr).addParseAction(lambda t: t[0])
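# A small illustrative example (added for clarity, not from the upstream
# docstring): ungroup() exposes the tokens of a Group at the top level.
#
#     >>> grouped = Group(Word(alphas) + Word(nums))
#     >>> grouped.parseString("abc 123").asList()
#     [['abc', '123']]
#     >>> ungroup(grouped).parseString("abc 123").asList()
#     ['abc', '123']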
def locatedExpr(expr):
"""Helper to decorate a returned token with its starting and ending
locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
Be careful if the input text contains ``<TAB>`` characters, you
may want to call :class:`ParserElement.parseWithTabs`
Example::
wd = Word(alphas)
for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[[0, 'ljsdf', 5]]
[[8, 'lksdjjf', 15]]
[[18, 'lkkjj', 23]]
"""
locator = Empty().setParseAction(lambda s, l, t: l)
return Group(
locator("locn_start")
+ expr("value")
+ locator.copy().leaveWhitespace()("locn_end")
)
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""Helper method for defining nested lists enclosed in opening and
closing delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list
(default= ``"("``); can also be a pyparsing expression
- closer - closing character for a nested list
(default= ``")"``); can also be a pyparsing expression
- content - expression for items within the nested lists
(default= ``None``)
- ignoreExpr - expression for ignoring opening and closing
delimiters (default= :class:`quotedString`)
If an expression is not provided for the content argument, the
nested expression will capture all whitespace-delimited content
between delimiters as a list of separate values.
Use the ``ignoreExpr`` argument to define expressions that may
contain opening or closing characters that should not be treated as
opening or closing characters for nesting, such as quotedString or
a comment expression. Specify multiple expressions using an
:class:`Or` or :class:`MatchFirst`. The default is
:class:`quotedString`, but if no expressions are to be ignored, then
pass ``None`` for this argument.
Example::
data_type = oneOf("void int short long char float double")
decl_data_type = Combine(data_type + Optional(Word('*')))
ident = Word(alphas+'_', alphanums+'_')
number = pyparsing_common.number
arg = Group(decl_data_type + ident)
LPAR, RPAR = map(Suppress, "()")
code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
c_function = (decl_data_type("type")
+ ident("name")
+ LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ code_body("body"))
c_function.ignore(cStyleComment)
source_code = '''
int is_odd(int x) {
return (x%2);
}
int dec_to_hex(char hchar) {
if (hchar >= '0' && hchar <= '9') {
return (ord(hchar)-ord('0'));
} else {
return (10+ord(hchar)-ord('A'));
}
}
'''
for func in c_function.searchString(source_code):
print("%(name)s (%(type)s) args: %(args)s" % func)
prints::
is_odd (int) args: [['int', 'x']]
dec_to_hex (int) args: [['char', 'hchar']]
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener, str_type) and isinstance(closer, str_type):
if len(opener) == 1 and len(closer) == 1:
if ignoreExpr is not None:
content = Combine(
OneOrMore(
~ignoreExpr
+ CharsNotIn(
opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
exact=1,
)
)
).setParseAction(lambda t: t[0].strip())
else:
content = empty.copy() + CharsNotIn(
opener + closer + ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t: t[0].strip())
else:
if ignoreExpr is not None:
content = Combine(
OneOrMore(
~ignoreExpr
+ ~Literal(opener)
+ ~Literal(closer)
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
)
).setParseAction(lambda t: t[0].strip())
else:
content = Combine(
OneOrMore(
~Literal(opener)
+ ~Literal(closer)
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
)
).setParseAction(lambda t: t[0].strip())
else:
raise ValueError(
"opening and closing arguments must be strings if no content expression is given"
)
ret = Forward()
if ignoreExpr is not None:
ret <<= Group(
Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
)
else:
ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
ret.setName("nested %s%s expression" % (opener, closer))
return ret
def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr, str_type):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas, alphanums + "_-:")
if xml:
tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes)
openTag = (
suppress_LT
+ tagStr("tag")
+ Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
+ Optional("/", default=[False])("empty").setParseAction(
lambda s, l, t: t[0] == "/"
)
+ suppress_GT
)
else:
tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(
printables, excludeChars=">"
)
openTag = (
suppress_LT
+ tagStr("tag")
+ Dict(
ZeroOrMore(
Group(
tagAttrName.setParseAction(lambda t: t[0].lower())
+ Optional(Suppress("=") + tagAttrValue)
)
)
)
+ Optional("/", default=[False])("empty").setParseAction(
lambda s, l, t: t[0] == "/"
)
+ suppress_GT
)
closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)
openTag.setName("<%s>" % resname)
# add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
openTag.addParseAction(
lambda t: t.__setitem__(
"start" + "".join(resname.replace(":", " ").title().split()), t.copy()
)
)
closeTag = closeTag(
"end" + "".join(resname.replace(":", " ").title().split())
).setName("</%s>" % resname)
openTag.tag = resname
closeTag.tag = resname
openTag.tag_body = SkipTo(closeTag())
return openTag, closeTag
def makeHTMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for HTML,
given a tag name. Matches tags in either upper or lower case,
attributes with namespaces and with quoted or unquoted values.
Example::
text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
# makeHTMLTags returns pyparsing expressions for the opening and
# closing tags as a 2-tuple
a, a_end = makeHTMLTags("A")
link_expr = a + SkipTo(a_end)("link_text") + a_end
for link in link_expr.searchString(text):
# attributes in the <A> tag (like "href" shown here) are
# also accessible as named results
print(link.link_text, '->', link.href)
prints::
pyparsing -> https://github.com/pyparsing/pyparsing/wiki
"""
return _makeTags(tagStr, False)
def makeXMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for XML,
given a tag name. Matches tags only in the given upper/lower case.
Example: similar to :class:`makeHTMLTags`
"""
return _makeTags(tagStr, True)
anyOpenTag, anyCloseTag = makeHTMLTags(
Word(alphas, alphanums + "_:").setName("any tag")
)
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(), "><& \"'"))
commonHTMLEntity = Regex(
"&(?P<entity>" + "|".join(_htmlEntityMap.keys()) + ");"
).setName("common HTML entity")
def replaceHTMLEntity(t):
"""Helper parser action to replace common HTML entities with their special characters"""
return _htmlEntityMap.get(t.entity)
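# Illustrative usage (mirrors the pattern shown in pyparsing's documentation):
# pairing commonHTMLEntity with replaceHTMLEntity to decode entities in text.
#
#     >>> text = "&lt;hello &amp; goodbye&gt;"
#     >>> commonHTMLEntity.setParseAction(replaceHTMLEntity).transformString(text)
#     '<hello & goodbye>'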
# Chad Vernon -- Add Python 2.x support for Maya
# opAssoc = types.SimpleNamespace()
class SimpleNamespace(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
keys = sorted(self.__dict__)
items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
return "{}({})".format(type(self).__name__, ", ".join(items))
def __eq__(self, other):
return self.__dict__ == other.__dict__
opAssoc = SimpleNamespace()
# End Python 2.x support for Maya
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation(baseExpr, opList, lpar=Suppress("("), rpar=Suppress(")")):
"""Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary
or binary, left- or right-associative. Parse actions can also be
attached to operator expressions. The generated parser will also
recognize the use of parentheses to override operator precedences
(see example below).
Note: if you define a deep operator list, you may see performance
issues when using infixNotation. See
:class:`ParserElement.enablePackrat` for a mechanism to potentially
improve your parser performance.
Parameters:
    - baseExpr - expression representing the most basic operand to be
      used within the nested expression
- opList - list of tuples, one for each operator precedence level
in the expression grammar; each tuple is of the form ``(opExpr,
numTerms, rightLeftAssoc, parseAction)``, where:
- opExpr is the pyparsing expression for the operator; may also
be a string, which will be converted to a Literal; if numTerms
is 3, opExpr is a tuple of two expressions, for the two
operators separating the 3 terms
- numTerms is the number of terms for this operator (must be 1,
2, or 3)
- rightLeftAssoc is the indicator whether the operator is right
or left associative, using the pyparsing-defined constants
``opAssoc.RIGHT`` and ``opAssoc.LEFT``.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the parse action
tuple member may be omitted); if the parse action is passed
a tuple or list of functions, this is equivalent to calling
``setParseAction(*fn)``
(:class:`ParserElement.setParseAction`)
- lpar - expression for matching left-parentheses
(default= ``Suppress('(')``)
- rpar - expression for matching right-parentheses
(default= ``Suppress(')')``)
Example::
# simple example of four-function arithmetic with ints and
# variable names
integer = pyparsing_common.signed_integer
varname = pyparsing_common.identifier
arith_expr = infixNotation(integer | varname,
[
('-', 1, opAssoc.RIGHT),
(oneOf('* /'), 2, opAssoc.LEFT),
(oneOf('+ -'), 2, opAssoc.LEFT),
])
arith_expr.runTests('''
5+3*6
(5+3)*6
-2--11
''', fullDump=False)
prints::
5+3*6
[[5, '+', [3, '*', 6]]]
(5+3)*6
[[[5, '+', 3], '*', 6]]
-2--11
[[['-', 2], '-', ['-', 11]]]
"""
# captive version of FollowedBy that does not do parse actions or capture results names
class _FB(FollowedBy):
def parseImpl(self, instring, loc, doActions=True):
self.expr.tryParse(instring, loc)
return loc, []
ret = Forward()
lastExpr = baseExpr | (lpar + ret + rpar)
for i, operDef in enumerate(opList):
opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]
termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError(
"if numterms=3, opExpr must be a tuple or list of two expressions"
)
opExpr1, opExpr2 = opExpr
thisExpr = Forward().setName(termName)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + OneOrMore(opExpr))
elif arity == 2:
if opExpr is not None:
matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(
lastExpr + OneOrMore(opExpr + lastExpr)
)
else:
matchExpr = _FB(lastExpr + lastExpr) + Group(
lastExpr + OneOrMore(lastExpr)
)
elif arity == 3:
matchExpr = _FB(
lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
) + Group(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr)
else:
raise ValueError(
"operator must be unary (1), binary (2), or ternary (3)"
)
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
elif arity == 2:
if opExpr is not None:
matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(
lastExpr + OneOrMore(opExpr + thisExpr)
)
else:
matchExpr = _FB(lastExpr + thisExpr) + Group(
lastExpr + OneOrMore(thisExpr)
)
elif arity == 3:
matchExpr = _FB(
lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
else:
raise ValueError(
"operator must be unary (1), binary (2), or ternary (3)"
)
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
if isinstance(pa, (tuple, list)):
matchExpr.setParseAction(*pa)
else:
matchExpr.setParseAction(pa)
thisExpr <<= matchExpr.setName(termName) | lastExpr
lastExpr = thisExpr
ret <<= lastExpr
return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]):
"""Helper method for defining space-delimited indentation blocks,
such as those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single
grammar should share a common indentStack)
- indent - boolean indicating whether block must be indented beyond
the current level; set to False for block of left-most
statements (default= ``True``)
A valid block must contain at least one ``blockStatement``.
Example::
data = '''
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
'''
indentStack = [1]
stmt = Forward()
identifier = Word(alphas, alphanums)
funcDecl = ("def" + identifier + Group("(" + Optional(delimitedList(identifier)) + ")") + ":")
func_body = indentedBlock(stmt, indentStack)
funcDef = Group(funcDecl + func_body)
rvalue = Forward()
funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << (funcDef | assignment | identifier)
module_body = OneOrMore(stmt)
parseTree = module_body.parseString(data)
parseTree.pprint()
prints::
[['def',
'A',
['(', 'z', ')'],
':',
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
'B',
['def',
'BB',
['(', 'a', 'b', 'c', ')'],
':',
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
'C',
'D',
['def',
'spam',
['(', 'x', 'y', ')'],
':',
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
"""
backup_stacks.append(indentStack[:])
def reset_stack():
indentStack[:] = backup_stacks[-1]
def checkPeerIndent(s, l, t):
if l >= len(s):
return
curCol = col(l, s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseException(s, l, "illegal nesting")
raise ParseException(s, l, "not a peer entry")
def checkSubIndent(s, l, t):
curCol = col(l, s)
if curCol > indentStack[-1]:
indentStack.append(curCol)
else:
raise ParseException(s, l, "not a subentry")
def checkUnindent(s, l, t):
if l >= len(s):
return
curCol = col(l, s)
if not (indentStack and curCol in indentStack):
raise ParseException(s, l, "not an unindent")
if curCol < indentStack[-1]:
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName("INDENT")
PEER = Empty().setParseAction(checkPeerIndent).setName("")
UNDENT = Empty().setParseAction(checkUnindent).setName("UNINDENT")
if indent:
smExpr = Group(
Optional(NL)
+ INDENT
+ OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL))
+ UNDENT
)
else:
smExpr = Group(
Optional(NL)
+ OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL))
+ Optional(UNDENT)
)
# add a parse action to remove backup_stack from list of backups
smExpr.addParseAction(
lambda: backup_stacks.pop(-1) and None if backup_stacks else None
)
smExpr.setFailAction(lambda a, b, c, d: reset_stack())
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr.setName("indented block")
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").setName(
"C style comment"
)
"Comment of the form ``/* ... */``"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form ``<!-- ... -->``"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form ``// ... (to end of line)``"
cppStyleComment = Combine(
Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dblSlashComment
).setName("C++ style comment")
"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`"
javaStyleComment = cppStyleComment
"Same as :class:`cppStyleComment`"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form ``# ... (to end of line)``"
# build list of built-in expressions, for future reference if a global default value
# gets updated
_builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)]
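# Hedged usage sketch (added for illustration, not in the original source): the
# comment expressions above are typically used either to scan a string for
# comments with scanString()/searchString() or to skip them via
# ParserElement.ignore(). Guarded so it only runs when executed directly.
if __name__ == "__main__":
    _sample = "int x = 1; // counter\n/* block\n   comment */ int y = 2;"
    for _tokens, _start, _end in cppStyleComment.scanString(_sample):
        print(_tokens[0])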
|
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
  should run the tests anyway (they are skipped on Windows by default).
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
    ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
    ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
    ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
    ENABLE_ZMQ=0
# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError:
print("WARNING: \"import zmq\" failed. Setting ENABLE_ZMQ=0. " \
"To run zmq tests, see dependency info in /qa/README.md.")
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")
bold = ("","")
if (os.name == 'posix'):
bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif (p.match(arg) or arg == "-h"):
passOn += " " + arg
else:
opts.add(arg)
#Set env vars
buildDir = BUILDDIR
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = buildDir + '/src/bitcoind' + EXEEXT
if "BITCOINCLI" not in os.environ:
os.environ["BITCOINCLI"] = buildDir + '/src/bitcoin-cli' + EXEEXT
#Disable Windows tests by default
if EXEEXT == ".exe" and "-win" not in opts:
print "Win tests currently disabled. Use -win option to enable"
sys.exit(0)
#Tests
testScripts = [
'wallet.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'walletbackup.py',
'nodehandling.py',
'reindex.py',
'decodescript.py',
'p2p-fullblocktest.py',
'blockchain.py',
'disablewallet.py',
'sendheaders.py',
'keypool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'abandonconflict.py',
]
testScriptsExt = [
'bip65-cltv.py',
'bip65-cltv-p2p.py',
'bip68-sequence.py',
'bipdersig-p2p.py',
'bipdersig.py',
'getblocktemplate_longpoll.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'pruning.py',
'forknotify.py',
'invalidateblock.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py',
'mempool_packages.py',
'maxuploadtarget.py',
'replace-by-fee.py',
]
#Enable ZMQ tests
if ENABLE_ZMQ == 1:
testScripts.append('zmq_test.py')
def runtests():
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
if(ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
rpcTestDir = buildDir + '/qa/rpc-tests/'
run_extended = '-extended' in opts
cov_flag = coverage.flag if coverage else ''
flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
#Run Tests
for i in range(len(testScripts)):
if (len(opts) == 0
or (len(opts) == 1 and "-win" in opts )
or run_extended
or testScripts[i] in opts
or re.sub(".py$", "", testScripts[i]) in opts ):
print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScripts[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
# exit if help is called so we print just one set of
# instructions
p = re.compile(" -h| --help")
if p.match(passOn):
sys.exit(0)
# Run Extended Tests
for i in range(len(testScriptsExt)):
if (run_extended or testScriptsExt[i] in opts
or re.sub(".py$", "", testScriptsExt[i]) in opts):
print(
"Running 2nd level testscript "
+ "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScriptsExt[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
else:
print "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled"
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir %s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `qa/rpc-tests/test_framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
|
|
#!/usr/bin/env python
from __future__ import print_function
__author__ = "Matija Amidzic <[email protected]>"
__copyright__ = "Copyright 2018, Deutsche Telekom AG"
__license__ = "BSD 3-Clause"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yang as ly
import unittest
import sys
import config
class UnexpectedError(Exception):
"""Exception raised for errors that are not expected.
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
class TestUM(unittest.TestCase):
def test_ly_ctx_new(self):
yang_folder1 = config.TESTS_DIR + "/data/files"
yang_folder2 = config.TESTS_DIR + "/data:" + config.TESTS_DIR + "/data/files"
try:
# Tests
ctx = ly.Context(yang_folder1)
self.assertIsNotNone(ctx)
list = ctx.get_searchdirs()
self.assertIsNotNone(list)
self.assertEqual(1, len(list))
ctx = ly.Context(yang_folder2)
self.assertIsNotNone(ctx)
list = ctx.get_searchdirs()
self.assertIsNotNone(list)
self.assertEqual(2, len(list))
except Exception as e:
self.fail(e)
def test_ly_ctx_new_invalid(self):
yang_folder = "INVALID_PATH"
try:
ctx = ly.Context(yang_folder)
raise UnexpectedError("exception not thrown")
except UnexpectedError as e:
self.fail(e)
except RuntimeError as e:
return
except Exception as e:
self.fail(e)
def test_ly_ctx_get_searchdirs(self):
yang_folder = config.TESTS_DIR + "/data/files"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
# Tests
list = ctx.get_searchdirs()
self.assertEqual(1, len(list))
self.assertEqual(yang_folder, list[0])
except Exception as e:
self.fail(e)
def test_ly_ctx_set_searchdir(self):
yang_folder = config.TESTS_DIR + "/data/files"
new_yang_folder = config.TESTS_DIR + "/schema/yin"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
# Tests
list = ctx.get_searchdirs()
self.assertEqual(1, len(list))
self.assertEqual(yang_folder, list[0])
ctx.set_searchdir(new_yang_folder)
list = ctx.get_searchdirs()
self.assertEqual(2, len(list))
self.assertEqual(new_yang_folder, list[1])
ctx.unset_searchdirs(0)
list = ctx.get_searchdirs()
self.assertEqual(1, len(list))
self.assertEqual(new_yang_folder, list[0])
except Exception as e:
self.fail(e)
def test_ly_ctx_set_searchdir_invalid(self):
yang_folder = config.TESTS_DIR + "/data/files"
new_yang_folder = "INVALID_PATH"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
# Tests
ctx.set_searchdir(new_yang_folder)
raise UnexpectedError("exception not thrown")
except UnexpectedError as e:
self.fail(e)
except RuntimeError as e:
return
except Exception as e:
self.fail(e)
def test_ly_ctx_info(self):
yang_folder = config.TESTS_DIR + "/api/files"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
# Tests
info = ctx.info()
self.assertIsNotNone(info)
self.assertEqual(ly.LYD_VAL_OK, info.validity())
except Exception as e:
self.fail(e)
def test_ly_ctx_load_module_invalid(self):
yang_folder = config.TESTS_DIR + "/api/files"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
# Tests
module = ctx.load_module("invalid", None)
raise UnexpectedError("exception not thrown")
except UnexpectedError as e:
self.fail(e)
except RuntimeError as e:
return
except Exception as e:
self.fail(e)
def test_ly_ctx_load_get_module(self):
yang_folder = config.TESTS_DIR + "/api/files"
name1 = "a"
name2 = "b"
revision = "2016-03-01"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
# Tests
module = ctx.get_module("invalid")
self.assertIsNone(module)
# module needs to be loaded first
module = ctx.get_module(name1)
self.assertIsNone(module)
module = ctx.load_module(name1)
self.assertIsNotNone(module)
self.assertEqual(name1, module.name(), "Module names don't match")
module = ctx.load_module(name2, revision)
self.assertIsNotNone(module)
self.assertEqual(name2, module.name(), "Module names don't match")
self.assertEqual(revision, module.rev().date(), "Module revisions don't match")
module = ctx.get_module(name2, "INVALID_REVISION")
self.assertIsNone(module)
module = ctx.get_module(name1)
self.assertIsNotNone(module)
self.assertEqual(name1, module.name(), "Module names don't match")
module = ctx.get_module(name2, revision)
self.assertIsNotNone(module)
self.assertEqual(name2, module.name(), "Module names don't match")
self.assertEqual(revision, module.rev().date(), "Module revisions don't match")
except Exception as e:
self.fail(e)
def test_ly_ctx_get_module_older(self):
yang_folder = config.TESTS_DIR + "/api/files"
name = "b"
revision = "2016-03-01"
revision_older = "2015-01-01"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
# Tests
module = ctx.load_module("c")
self.assertIsNotNone(module)
self.assertEqual("c", module.name(), "Module names don't match")
module = ctx.load_module(name, revision)
self.assertIsNotNone(module)
self.assertEqual(name, module.name(), "Module names don't match")
self.assertEqual(revision, module.rev().date(), "Module revisions don't match")
module_older = ctx.get_module_older(module)
self.assertIsNotNone(module_older)
self.assertEqual(name, module_older.name(), "Module names don't match")
self.assertEqual(revision_older, module_older.rev().date(), "Module revisions don't match")
except Exception as e:
self.fail(e)
def test_ly_ctx_get_module_by_ns(self):
yang_folder = config.TESTS_DIR + "/api/files"
module_name = "a"
ns = "urn:a"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
module = ctx.load_module(module_name)
self.assertIsNotNone(module)
self.assertEqual(module_name, module.name(), "Module names don't match")
# Tests
module = ctx.get_module_by_ns(ns)
self.assertIsNotNone(module)
self.assertEqual(module_name, module.name(), "Module names don't match")
except Exception as e:
self.fail(e)
def test_ly_ctx_clean(self):
yang_folder = config.TESTS_DIR + "/api/files"
module_name = "a"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
module = ctx.load_module(module_name)
self.assertIsNotNone(module)
self.assertEqual(module_name, module.name(), "Module names don't match")
# Tests
# confirm module is loaded
module = ctx.get_module(module_name)
self.assertIsNotNone(module)
self.assertEqual(module_name, module.name(), "Module names don't match")
ctx.clean()
# confirm ctx is cleaned
module = ctx.get_module(module_name)
self.assertIsNone(module)
except Exception as e:
self.fail(e)
def test_ly_ctx_parse_module_path(self):
yang_folder = config.TESTS_DIR + "/api/files"
yin_file = config.TESTS_DIR + "/api/files/a.yin"
yang_file = config.TESTS_DIR + "/api/files/b.yang"
module_name1 = "a"
module_name2 = "b"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
# Tests
module = ctx.parse_module_path(yin_file, ly.LYS_IN_YIN)
self.assertIsNotNone(module)
self.assertEqual(module_name1, module.name(), "Module names don't match")
module = ctx.parse_module_path(yang_file, ly.LYS_IN_YANG)
self.assertIsNotNone(module)
self.assertEqual(module_name2, module.name(), "Module names don't match")
except Exception as e:
self.fail(e)
def test_ly_ctx_parse_module_path_invalid(self):
yang_folder = config.TESTS_DIR + "/api/files"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
# Tests
module = ctx.parse_module_path("INVALID_YANG_FILE", ly.LYS_IN_YANG)
raise UnexpectedError("exception not thrown")
except UnexpectedError as e:
self.fail(e)
except RuntimeError as e:
return
except Exception as e:
self.fail(e)
def test_ly_ctx_get_submodule(self):
yang_folder = config.TESTS_DIR + "/api/files"
yin_file = config.TESTS_DIR + "/api/files/a.yin"
module_name = "a"
sub_name = "asub"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_path(yin_file, ly.LYS_IN_YIN)
# Tests
submodule = ctx.get_submodule(module_name, None, sub_name, None)
self.assertIsNotNone(submodule)
self.assertEqual(sub_name, submodule.name(), "Module names don't match")
except Exception as e:
self.fail(e)
def test_ly_ctx_get_submodule2(self):
yang_folder = config.TESTS_DIR + "/api/files"
yin_file = config.TESTS_DIR + "/api/files/a.yin"
config_file = config.TESTS_DIR + "/api/files/a.xml"
sub_name = "asub"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_path(yin_file, ly.LYS_IN_YIN)
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
self.assertIsNotNone(root.schema().module())
# Tests
submodule = ctx.get_submodule2(root.schema().module(), sub_name)
self.assertIsNotNone(submodule)
self.assertEqual(sub_name, submodule.name(), "Module names don't match")
except Exception as e:
self.fail(e)
def test_ly_ctx_find_path(self):
yang_folder = config.TESTS_DIR + "/api/files"
yin_file = config.TESTS_DIR + "/api/files/a.yin"
yang_file = config.TESTS_DIR + "/api/files/b.yang"
schema_path1 = "/b:x/b:bubba"
schema_path2 = "/a:x/a:bubba"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
# Tests
ctx.parse_module_path(yang_file, ly.LYS_IN_YANG)
set = ctx.find_path(schema_path1)
self.assertIsNotNone(set)
ctx.parse_module_path(yin_file, ly.LYS_IN_YIN)
set = ctx.find_path(schema_path2)
self.assertIsNotNone(set)
ly.Set()
except Exception as e:
self.fail(e)
def test_ly_set(self):
yang_folder = config.TESTS_DIR + "/api/files"
yin_file = config.TESTS_DIR + "/api/files/a.yin"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_path(yin_file, ly.LYS_IN_YIN)
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
# Tests
set = ly.Set()
self.assertIsNotNone(set)
self.assertEqual(0, set.number())
set.add(root.child().schema())
self.assertEqual(1, set.number())
set.add(root.schema())
self.assertEqual(2, set.number())
set.rm(root.schema())
self.assertEqual(1, set.number())
set.add(root.schema())
self.assertEqual(2, set.number())
set.rm_index(1)
self.assertEqual(1, set.number())
set.clean()
self.assertEqual(0, set.number())
except Exception as e:
self.fail(e)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2000-2007 Michael Hudson-Doyle <[email protected]>
# Bob Ippolito
# Maciek Fijalkowski
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# one impressive collections of imports:
from pyrepl.completing_reader import CompletingReader
from pyrepl.historical_reader import HistoricalReader
from pyrepl import completing_reader, reader
from pyrepl import copy_code, commands, completer
from pyrepl import module_lister
import new, sys, os, re, code, traceback
import atexit, warnings
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import imp
imp.find_module("twisted")
from twisted.internet import reactor
from twisted.internet.abstract import FileDescriptor
except ImportError:
default_interactmethod = "interact"
else:
default_interactmethod = "twistedinteract"
CommandCompiler = code.CommandCompiler
def eat_it(*args):
"""this function eats warnings, if you were wondering"""
pass
class maybe_accept(commands.Command):
def do(self):
r = self.reader
text = r.get_unicode()
try:
# ooh, look at the hack:
code = r.compiler("#coding:utf-8\n"+text.encode('utf-8'))
except (OverflowError, SyntaxError, ValueError):
self.finish = 1
else:
if code is None:
r.insert("\n")
else:
self.finish = 1
from_line_prog = re.compile(
    r"^from\s+(?P<mod>[A-Za-z_.0-9]*)\s+import\s+(?P<name>[A-Za-z_.0-9]*)")
import_line_prog = re.compile(
    r"^(?:import|from)\s+(?P<mod>[A-Za-z_.0-9]*)\s*$")
def mk_saver(reader):
def saver(reader=reader):
try:
file = open(os.path.expanduser("~/.pythoni.hist"), "w")
except IOError:
pass
else:
pickle.dump(reader.history, file)
file.close()
return saver
class PythonicReader(CompletingReader, HistoricalReader):
def collect_keymap(self):
return super(PythonicReader, self).collect_keymap() + (
(r'\n', 'maybe-accept'),
(r'\M-\n', 'insert-nl'))
def __init__(self, console, locals,
compiler=None):
super(PythonicReader, self).__init__(console)
self.completer = completer.Completer(locals)
st = self.syntax_table
for c in "._0123456789":
st[c] = reader.SYNTAX_WORD
self.locals = locals
if compiler is None:
self.compiler = CommandCompiler()
else:
self.compiler = compiler
try:
file = open(os.path.expanduser("~/.pythoni.hist"))
except IOError:
pass
else:
try:
self.history = pickle.load(file)
except:
self.history = []
self.historyi = len(self.history)
file.close()
atexit.register(mk_saver(self))
for c in [maybe_accept]:
self.commands[c.__name__] = c
self.commands[c.__name__.replace('_', '-')] = c
def get_completions(self, stem):
b = self.get_unicode()
m = import_line_prog.match(b)
if m:
if not self._module_list_ready:
module_lister._make_module_list()
self._module_list_ready = True
mod = m.group("mod")
try:
return module_lister.find_modules(mod)
except ImportError:
pass
m = from_line_prog.match(b)
if m:
mod, name = m.group("mod", "name")
try:
l = module_lister._packages[mod]
except KeyError:
try:
mod = __import__(mod, self.locals, self.locals, [''])
return [x for x in dir(mod) if x.startswith(name)]
except ImportError:
pass
else:
return [x[len(mod) + 1:]
for x in l if x.startswith(mod + '.' + name)]
try:
l = sorted(set(self.completer.complete(stem)))
return l
except (NameError, AttributeError):
return []
class ReaderConsole(code.InteractiveInterpreter):
II_init = code.InteractiveInterpreter.__init__
def __init__(self, console, locals=None):
if locals is None:
locals = {}
self.II_init(locals)
self.compiler = CommandCompiler()
self.compile = self.compiler.compiler
self.reader = PythonicReader(console, locals, self.compiler)
locals['Reader'] = self.reader
def run_user_init_file(self):
for key in "PYREPLSTARTUP", "PYTHONSTARTUP":
initfile = os.environ.get(key)
if initfile is not None and os.path.exists(initfile):
break
else:
return
try:
execfile(initfile, self.locals, self.locals)
except:
etype, value, tb = sys.exc_info()
traceback.print_exception(etype, value, tb.tb_next)
def execute(self, text):
try:
# ooh, look at the hack:
code = self.compile("# coding:utf8\n"+text.encode('utf-8'),
'<stdin>', 'single')
except (OverflowError, SyntaxError, ValueError):
self.showsyntaxerror('<stdin>')
else:
self.runcode(code)
if sys.stdout and not sys.stdout.closed:
sys.stdout.flush()
def interact(self):
while 1:
try: # catches EOFError's and KeyboardInterrupts during execution
try: # catches KeyboardInterrupts during editing
try: # warning saver
# can't have warnings spewed onto terminal
sv = warnings.showwarning
warnings.showwarning = eat_it
l = unicode(self.reader.readline(), 'utf-8')
finally:
warnings.showwarning = sv
except KeyboardInterrupt:
print "KeyboardInterrupt"
else:
if l:
self.execute(l)
except EOFError:
break
except KeyboardInterrupt:
continue
def prepare(self):
self.sv_sw = warnings.showwarning
warnings.showwarning = eat_it
self.reader.prepare()
self.reader.refresh() # we want :after methods...
def restore(self):
self.reader.restore()
warnings.showwarning = self.sv_sw
def handle1(self, block=1):
try:
r = 1
r = self.reader.handle1(block)
except KeyboardInterrupt:
self.restore()
print "KeyboardInterrupt"
self.prepare()
else:
if self.reader.finished:
text = self.reader.get_unicode()
self.restore()
if text:
self.execute(text)
self.prepare()
return r
def tkfilehandler(self, file, mask):
try:
self.handle1(block=0)
except:
self.exc_info = sys.exc_info()
# how the <expletive> do you get this to work on Windows (without
# createfilehandler)? threads, I guess
def really_tkinteract(self):
import _tkinter
_tkinter.createfilehandler(
self.reader.console.input_fd, _tkinter.READABLE,
self.tkfilehandler)
self.exc_info = None
while 1:
# dooneevent will return 0 without blocking if there are
# no Tk windows, 1 after blocking until an event otherwise
# so the following does what we want (this wasn't expected
# to be obvious).
if not _tkinter.dooneevent(_tkinter.ALL_EVENTS):
self.handle1(block=1)
if self.exc_info:
type, value, tb = self.exc_info
self.exc_info = None
raise type, value, tb
def tkinteract(self):
"""Run a Tk-aware Python interactive session.
This function simulates the Python top-level in a way that
allows Tk's mainloop to run."""
# attempting to understand the control flow of this function
# without help may cause internal injuries. so, some
# explanation.
# The outer while loop is there to restart the interaction if
# the user types control-c when execution is deep in our
# innards. I'm not sure this can't leave internals in an
# inconsistent state, but it's a good start.
# then the inside loop keeps calling self.handle1 until
# _tkinter gets imported; then control shifts to
# self.really_tkinteract, above.
# this function can only return via an exception; we mask
# EOFErrors (but they end the interaction) and
# KeyboardInterrupts cause a restart. All other exceptions
# are likely bugs in pyrepl (well, 'cept for SystemExit, of
# course).
while 1:
try:
try:
self.prepare()
try:
while 1:
if sys.modules.has_key("_tkinter"):
self.really_tkinteract()
# really_tkinteract is not expected to
# return except via an exception, but:
break
self.handle1()
except EOFError:
pass
finally:
self.restore()
except KeyboardInterrupt:
continue
else:
break
def twistedinteract(self):
from twisted.internet import reactor
from twisted.internet.abstract import FileDescriptor
import signal
outerself = self
class Me(FileDescriptor):
def fileno(self):
""" We want to select on FD 0 """
return 0
def doRead(self):
"""called when input is ready"""
try:
outerself.handle1()
except EOFError:
reactor.stop()
reactor.addReader(Me())
reactor.callWhenRunning(signal.signal,
signal.SIGINT,
signal.default_int_handler)
self.prepare()
try:
reactor.run()
finally:
self.restore()
def cocoainteract(self, inputfilehandle=None, outputfilehandle=None):
# only call this when there's a run loop already going!
# note that unlike the other *interact methods, this returns immediately
from cocoasupport import CocoaInteracter
self.cocoainteracter = CocoaInteracter.alloc().init(self, inputfilehandle, outputfilehandle)
def main(use_pygame_console=0, interactmethod=default_interactmethod, print_banner=True, clear_main=True):
si, se, so = sys.stdin, sys.stderr, sys.stdout
try:
if 0 and use_pygame_console: # pygame currently borked
from pyrepl.pygame_console import PyGameConsole, FakeStdin, FakeStdout
con = PyGameConsole()
sys.stderr = sys.stdout = FakeStdout(con)
sys.stdin = FakeStdin(con)
else:
from pyrepl.unix_console import UnixConsole
try:
import locale
except ImportError:
encoding = None
else:
if hasattr(locale, 'nl_langinfo') \
and hasattr(locale, 'CODESET'):
encoding = locale.nl_langinfo(locale.CODESET)
elif os.environ.get('TERM_PROGRAM') == 'Apple_Terminal':
# /me whistles innocently...
code = int(os.popen(
"defaults read com.apple.Terminal StringEncoding"
).read())
if code == 4:
encoding = 'utf-8'
# More could go here -- and what's here isn't
# bulletproof. What would be? AppleScript?
# Doesn't seem to be possible.
else:
encoding = None
else:
encoding = None # so you get ASCII...
con = UnixConsole(os.dup(0), os.dup(1), None, encoding)
if print_banner:
print "Python", sys.version, "on", sys.platform
print 'Type "help", "copyright", "credits" or "license" '\
'for more information.'
sys.path.insert(0, os.getcwd())
if clear_main and __name__ != '__main__':
mainmod = new.module('__main__')
sys.modules['__main__'] = mainmod
else:
mainmod = sys.modules['__main__']
rc = ReaderConsole(con, mainmod.__dict__)
rc.reader._module_list_ready = False
rc.run_user_init_file()
getattr(rc, interactmethod)()
finally:
sys.stdin, sys.stderr, sys.stdout = si, se, so
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
import os
import sys
import os.path
from gimpfu import *
from math import pow, sqrt, floor
from gimpcolor import RGB
def euclidean_distance(point_one, point_two):
""" Calculates the euclidean distance.
Args:
point_one (tuple)
point_two (tuple)
Returns:
float: the distance between the two points
"""
return sqrt(pow(point_two[0] - point_one[0], 2) + \
pow(point_two[1] - point_one[1], 2))
def get_maximum_distance(ref_list, dev_list):
""" Calculates the distance between two list of pixels
Args:
ref_list (list): the list of points of the reference layer
(i.e. the layer on which we do the sup inf)
dev_list (list): the list of points of the other level
Returns:
float: the distance between the two list
tuple: the pixel of the dev_list
tuple: the pixel of the ref_list
"""
gimp.progress_init("Calculating distance...")
point_one = (0, 0)
point_two = (0, 0)
maximum_distance = 0.0
for index, pixel_ref in enumerate(ref_list):
# Updates the progress bar
gimp.progress_update(float(index) / float(len(ref_list)))
minimum_pixel = None
minimum_distance = float("inf")
for pixel_dev in dev_list:
distance = euclidean_distance(pixel_ref, pixel_dev)
if distance < minimum_distance:
minimum_distance = distance
minimum_pixel = pixel_dev
# Updates the maximum distance
if minimum_distance > maximum_distance:
maximum_distance = minimum_distance
point_one = pixel_ref
point_two = minimum_pixel
return maximum_distance, point_one, point_two
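# Hedged sketch (added for illustration, not part of the original plugin): the
# function above computes the directed distance h(ref, dev) = max over ref of the
# minimum distance to dev. The full, symmetric Hausdorff distance the plugin
# reports later is simply the larger of the two directed distances; the helper
# name below is hypothetical.
def symmetric_hausdorff_distance(ref_list, dev_list):
    """Return max(h(ref, dev), h(dev, ref)) for two lists of (x, y) tuples."""
    forward, _, _ = get_maximum_distance(ref_list, dev_list)
    backward, _, _ = get_maximum_distance(dev_list, ref_list)
    return max(forward, backward)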
def search_outline_pixels(layer, color, pixel, from_pixel, outline_pixels):
""" Searches the outline pixels with a DFS
Args:
layer (gimp.Drawable): the layer over do the search
color (gimpcolor.RGB): the outline pixel's color
        pixel (tuple): the pixel to check and from which to continue the search
        from_pixel (tuple): the pixel the search came from (excluded from the new search)
        outline_pixels (list): the list of the outline pixels
Returns:
list: the list of the outline pixels
"""
# Uses a `try except` to avoid exceptions that can arise
# if the method goes through an illegal position in the
    # image (e.g. a pixel that does not exist)
try:
# Goes on in the search if the color encountered is the target color
if RGB(*layer.get_pixel(pixel)) == color:
outline_pixels.append(pixel)
target_pixels = [
(pixel[0], pixel[1] + 1), # Up
(pixel[0] + 1, pixel[1]), # Right
(pixel[0], pixel[1] - 1), # Down
(pixel[0] - 1, pixel[1]) # Left
]
# Removes the pixel from which the search comes
for p in target_pixels:
if p == from_pixel:
target_pixels.remove(p)
# Searching
for target_pixel in target_pixels:
if target_pixel not in outline_pixels:
outline_pixels = search_outline_pixels(
layer, color, target_pixel, pixel, outline_pixels)
return outline_pixels
else:
return outline_pixels
except Exception as e:
gimp.message("Raised exception while saving the outline pixels: " + \
str(e.message))
finally:
return outline_pixels
def are_pixels_connected(layer, color, pixel, from_pixel, starting_pixel,
target_pixel, already_controlled_pixels):
""" Checks if there is a path colored with `color` that connects
the `starting_pixel` to the `target_pixel` with a DFS.
Args:
layer (gimp.Drawable): the layer over do the search
color (gimpcolor.RGB): the outline pixel's color
pixel (tuple): the pixel to control now
from_pixel (tuple): the pixel from the search come
starting_pixel (tuple): the pixel from the search starts
target_pixel (tuple): the pixel to search
        already_controlled_pixels (list): the pixels already visited during the search
    Returns:
        `True` if the `target_pixel` is connected to the `starting_pixel`
        by an outline, otherwise `False`
"""
# Uses a `try except` to avoid exceptions that can arise
# if the method goes through an illegal position in the
    # image (e.g. a pixel that does not exist)
try:
# Goes on in the search if the color encountered is the target color
if RGB(*layer.get_pixel(pixel)) == color:
if pixel == target_pixel and from_pixel is not None:
return True
elif pixel == starting_pixel and from_pixel is not None:
return False
else:
already_controlled_pixels.append(pixel)
target_pixels = [
(pixel[0], pixel[1] + 1), # Up
(pixel[0] + 1, pixel[1]), # Right
(pixel[0], pixel[1] - 1), # Down
(pixel[0] - 1, pixel[1]) # Left
]
# Remove the pixel from which the search comes
for p in target_pixels:
if p == from_pixel:
target_pixels.remove(p)
# Searching
discovered = False
for pixel_to_control in target_pixels:
if pixel_to_control not in already_controlled_pixels:
discovered |= are_pixels_connected(
layer, color, pixel_to_control, pixel, starting_pixel,
target_pixel, already_controlled_pixels)
return discovered
else:
return False
except Exception as e:
gimp.message("Raised exception while saving the outline pixels: " + \
str(e.message))
def first_discovered_pixel(layer, color, x1, y1, x2, y2):
""" Discovers and returns the first pixel of an element
contained in the image.
Args:
layer (gimp.Drawable): the layer to analyze
color (gimpcolor.RGB): the color to discover
x1 (int): x coordinate
y1 (int): y coordinate
x2 (int): x coordinate
y2 (int): y coordinate
Returns:
A tuple containing the coordinates of the
first pixel discovered
"""
target_pixel = (0, 0)
found_pixel = False
# Calculates the direction on the abscissa.
    # If x1 < x2 then the direction is left to right, otherwise it is right to left
direction_on_abscissa = range(x1, x2 + 1) if x1 < x2 else range(x1, x2 - 1, -1)
    # Calculates the direction on the ordinate.
    # If y1 < y2 then the direction is up to down, otherwise it is down to up
direction_on_ordinate = range(y1, y2 + 1) if y1 < y2 else range(y1, y2 - 1, -1)
for x in direction_on_abscissa:
gimp.progress_update(float(x) / float(layer.width))
for y in direction_on_ordinate:
if RGB(*layer.get_pixel(x, y)) == color:
target_pixel = (x, y)
found_pixel = True
# If the target color is found, then stops the search
if found_pixel:
break
return target_pixel
def get_outline_pixels_positions(image, layer, color, fill_color):
""" Create the outline and search the pixels of the outline.
Args:
        image (gimp.Image): the image on which we perform the transformation
        layer (gimp.Drawable): the layer we transform
        color (gimpcolor.RGB): the outline's color
        fill_color (gimpcolor.RGB): the filling color
Returns:
list: the list of the outline pixels
"""
gimp.progress_init("Searching the outline pixels for the layer...")
# Firstly retrieves the bounding box of the area of interest
pdb.gimp_image_select_color(image, 0, layer, color)
no_null_selection, x1, y1, x2, y2 = pdb.gimp_selection_bounds(image)
# Initially searches the first pixel colored with the target color
target_pixels = []
# Searches left to right, up and down
target_pixels.append(first_discovered_pixel(layer, color, x1, y1, x2, y2))
# Searches right to left, up and down
target_pixels.append(first_discovered_pixel(layer, color, x2, y1, x1, y2))
# Searches left to right, down to up
target_pixels.append(first_discovered_pixel(layer, color, x1, y2, x2, y1))
# Searches right to left, down to up
target_pixels.append(first_discovered_pixel(layer, color, x2, y2, x1, y1))
for target_pixel in target_pixels:
# Selects the target area
pdb.gimp_image_select_contiguous_color(
image, 0, layer, target_pixel[0], target_pixel[1])
# Shrinks the selection
pdb.gimp_selection_shrink(image, 1)
# Sets the target color in the palette
pdb.gimp_context_set_foreground(RGB(
fill_color.r if fill_color.r < 1.0 else fill_color.r / 255.0,
fill_color.g if fill_color.g < 1.0 else fill_color.g / 255.0,
fill_color.b if fill_color.b < 1.0 else fill_color.b / 255.0,
fill_color.a if fill_color.a < 1.0 else fill_color.a / 255.0
))
# Fills the selection with the target color
pdb.gimp_edit_bucket_fill(layer, 0, 0, 100, 0, False, 0, 0)
    # Before returning the outline pixels, checks that there is only
    # one connected element in the image
for target_pixel in target_pixels:
for other_pixel in target_pixels:
if target_pixel != other_pixel:
if not are_pixels_connected( layer, color,
target_pixel, None, target_pixel,
other_pixel, []):
raise Exception("There are disconnected elements in the image.")
# Clears an eventual selection on the image
pdb.gimp_selection_clear(image)
gimp.progress_init("Saving the outline pixels...")
return search_outline_pixels(layer, color, target_pixels[0], None, [])
def draw_line(layer, target_points, other_points):
""" Draws a line in the layer between the two set of points
Args:
layer (gimp.Drawable): the layer that will be drawn
target_points (list): the points of the biggest distance
other_points (list): the points of the smallest distance
Returns:
Nothing
"""
    # Now draws the lines that point out the maximum distance
red = (1.0, 0.0, 0.0, 1.0)
green = (0.0, 1.0, 0.0, 1.0)
# Draws the line that connects the two couples of points
pdb.gimp_context_set_foreground(RGB(*green))
pdb.gimp_context_set_brush_size(2)
pdb.gimp_pencil(layer, 4,
[target_points[0][0], target_points[0][1], target_points[1][0], target_points[1][1]])
pdb.gimp_context_set_brush_size(1)
pdb.gimp_pencil(layer, 4,
[other_points[0][0], other_points[0][1], other_points[1][0], other_points[1][1]])
# Draws the points that are most distant between the two couples
pdb.gimp_context_set_foreground(RGB(*red))
pdb.gimp_context_set_brush_size(2)
pdb.gimp_pencil(layer, 2, [target_points[0][0], target_points[0][1]])
pdb.gimp_pencil(layer, 2, [target_points[1][0], target_points[1][1]])
# Draws the other two points
pdb.gimp_context_set_brush_size(1)
pdb.gimp_pencil(layer, 2, [other_points[0][0], other_points[0][1]])
pdb.gimp_pencil(layer, 2, [other_points[1][0], other_points[1][1]])
def hausdorff_distance(path, color, fill_color, path_to_result_file):
""" Calculate the hausdorff distance.
Args:
path (str): tha path where the images reside
color (gimpcolor.RGB): the outline color
fill_color (gimpcolor.RGB): the filling color
path_to_result_file (str): the path where the `results.csv` file will be saved
Returns:
Nothing
"""
    # Increases Python's recursion limit, since the default
    # limit of 1000 recursive calls is too low for the DFS
sys.setrecursionlimit(1000000)
# Indicates the start of the process
gimp.progress_init("Initializing Hausdorff distance...")
try:
# Calculates the numbers of images saved in the specified directory
numbers_of_images = len([name for name in os.listdir(path) \
if '.png' in name and 'a' in name])
with open("%s/results.csv" % path_to_result_file, 'w') as file:
file.write("Reference image;Deviated image;Distance\n")
for index in range(1, numbers_of_images + 1):
# Loads the reference image in memory
base_image = pdb.file_png_load('%s/a%d.png' % (path, index), '')
ref_layer = base_image.layers[0] # Retrieves the ref layer
# Loads the deviated image as layer to the image
dev_layer = pdb.gimp_file_load_layer(base_image, '%s/b%d.png' % (path, index))
pdb.gimp_image_insert_layer(base_image, dev_layer, None, 0)
# Creates the outline of the reference layer
try:
ref_layer_outline_pixels_positions = get_outline_pixels_positions(
base_image, ref_layer, color, fill_color)
except Exception as e:
# Writes the results
with open("%s/results.csv" % path_to_result_file, 'a') as file:
file.write("A%d;B%d;%s\n" % (index, index, e.message))
continue
try:
# Creates the outline of the deviated layer
dev_layer_outline_pixels_positions = get_outline_pixels_positions(
base_image, dev_layer, color, fill_color)
except Exception as e:
# Writes the results
with open("%s/results.csv" % path_to_result_file, 'a') as file:
file.write("A%d;B%d;%s\n" % (index, index, e.message))
continue
                # Retrieves the maxmin distance of the first layer, with the two points...
ref_layer_distance, ref_pixel_one, ref_pixel_two = get_maximum_distance(
ref_layer_outline_pixels_positions, dev_layer_outline_pixels_positions)
# ...and the maxmin distance and the points of the second layer.
dev_layer_distance, dev_pixel_one, dev_pixel_two = get_maximum_distance(
dev_layer_outline_pixels_positions, ref_layer_outline_pixels_positions)
# Merges the layers to point out the maximum distance
pdb.gimp_layer_set_mode(dev_layer, 7)
pdb.gimp_image_merge_down(base_image, dev_layer, 1)
merged_layer = base_image.layers[0]
distance = 0.0
if ref_layer_distance >= dev_layer_distance:
distance = ref_layer_distance
draw_line(merged_layer, [ref_pixel_one, ref_pixel_two], [dev_pixel_one, dev_pixel_two])
else:
distance = dev_layer_distance
draw_line(merged_layer, [dev_pixel_one, dev_pixel_two], [ref_pixel_one, ref_pixel_two])
# Inserts the text layer
pdb.gimp_context_set_foreground(RGB(1.0, 1.0, 1.0, 1.0))
text_layer = pdb.gimp_text_layer_new(
base_image, "Hausdorff distance: %f" % distance, "Verdana", 14, 0)
pdb.gimp_image_insert_layer(base_image, text_layer, None, 0)
pdb.gimp_layer_translate(text_layer, 5, 5)
# Merging the layers
pdb.gimp_layer_set_mode(text_layer, 7)
pdb.gimp_image_merge_down(base_image, text_layer, 1)
merged_layer = base_image.layers[0]
# Saves the merged image
pdb.gimp_file_save(base_image, merged_layer, '%s/c%d.png' % (path, index), '')
# Writes the results
with open("%s/results.csv" % path_to_result_file, 'a') as file:
file.write("A%d;B%d;%f\n" % (index, index, distance))
# Close the generated image
pdb.gimp_image_delete(base_image)
except Exception as e:
gimp.message("Unexpected error: %s." % e.message)
gimp.message("It was not possible to calculate the distance.")
register(
"python-fu-hausdorff-dd",
"AISP Hausdorff distance from directory",
"Calculate the Hausdorff distance between two images loaded from a directory",
"Valerio Belli",
"Valerio Belli",
"2017",
"Hausdorff distance from directory",
"",
[
(PF_DIRNAME, "path", """The path where the images to analyse are
saved.""", '/Users/valerio/PycharmProjects/Distanza di Hausdorff/images'),
(PF_COLOR, "color", "The outline's color.", gimpcolor.RGB(*(1.0, 1.0, 1.0, 1.0))),
(PF_COLOR, "fill_color", "The filling color", gimpcolor.RGB(*(0.0, 0.0, 0.0, 1.0))),
(PF_DIRNAME, "path_to_result_file", """The path of the CSV file how to
save the results distances""", '/Users/valerio/PycharmProjects/Distanza di Hausdorff')
],
[],
hausdorff_distance,
menu="<Image>/Filters/",
)
if "__main__" == __name__:
main()
|
|
# -*- coding: utf-8 -*-
"""SQLite parser plugin for Mozilla Firefox history database files."""
from __future__ import unicode_literals
from dfdatetime import posix_time as dfdatetime_posix_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
class FirefoxPlacesBookmarkAnnotationEventData(events.EventData):
"""Firefox bookmark annotation event data.
Attributes:
content (str): annotation content.
title (str): title of the bookmark folder.
url (str): bookmarked URL.
"""
DATA_TYPE = 'firefox:places:bookmark_annotation'
def __init__(self):
"""Initializes event data."""
super(FirefoxPlacesBookmarkAnnotationEventData, self).__init__(
data_type=self.DATA_TYPE)
self.content = None
self.title = None
self.url = None
class FirefoxPlacesBookmarkFolderEventData(events.EventData):
"""Firefox bookmark folder event data.
Attributes:
title (str): title of the bookmark folder.
"""
DATA_TYPE = 'firefox:places:bookmark_folder'
def __init__(self):
"""Initializes event data."""
super(FirefoxPlacesBookmarkFolderEventData, self).__init__(
data_type=self.DATA_TYPE)
self.title = None
class FirefoxPlacesBookmarkEventData(events.EventData):
"""Firefox bookmark event data.
Attributes:
host (str): visited hostname.
places_title (str): places title.
title (str): title of the bookmark folder.
type (int): bookmark type.
url (str): bookmarked URL.
visit_count (int): visit count.
"""
DATA_TYPE = 'firefox:places:bookmark'
def __init__(self):
"""Initializes event data."""
super(FirefoxPlacesBookmarkEventData, self).__init__(
data_type=self.DATA_TYPE)
self.host = None
self.places_title = None
self.title = None
self.type = None
self.url = None
self.visit_count = None
# TODO: refactor extra attribute.
class FirefoxPlacesPageVisitedEventData(events.EventData):
"""Firefox page visited event data.
Attributes:
extra (list[object]): extra event data.
host (str): visited hostname.
title (str): title of the visited page.
url (str): URL of the visited page.
visit_count (int): visit count.
visit_type (str): transition type for the event.
"""
DATA_TYPE = 'firefox:places:page_visited'
def __init__(self):
"""Initializes event data."""
super(FirefoxPlacesPageVisitedEventData, self).__init__(
data_type=self.DATA_TYPE)
self.extra = None
self.host = None
self.title = None
self.url = None
self.visit_count = None
self.visit_type = None
class FirefoxHistoryPlugin(interface.SQLitePlugin):
"""SQLite parser plugin for Mozilla Firefox history database files.
The Mozilla Firefox history database file is typically stored in:
places.sqlite
"""
NAME = 'firefox_history'
DATA_FORMAT = 'Mozilla Firefox history SQLite database (places.sqlite) file'
REQUIRED_STRUCTURE = {
'moz_places': frozenset([
'url', 'title', 'visit_count', 'rev_host', 'hidden', 'typed', 'id']),
'moz_historyvisits': frozenset([
'id', 'visit_date', 'from_visit', 'visit_type', 'place_id']),
'moz_bookmarks': frozenset([
'type', 'title', 'dateAdded', 'lastModified', 'id', 'fk']),
'moz_items_annos': frozenset([
'content', 'dateAdded', 'lastModified', 'id', 'item_id'])}
QUERIES = [
(('SELECT moz_historyvisits.id, moz_places.url, moz_places.title, '
'moz_places.visit_count, moz_historyvisits.visit_date, '
'moz_historyvisits.from_visit, moz_places.rev_host, '
'moz_places.hidden, moz_places.typed, moz_historyvisits.visit_type '
'FROM moz_places, moz_historyvisits '
'WHERE moz_places.id = moz_historyvisits.place_id'),
'ParsePageVisitedRow'),
(('SELECT moz_bookmarks.type, moz_bookmarks.title AS bookmark_title, '
'moz_bookmarks.dateAdded, moz_bookmarks.lastModified, '
'moz_places.url, moz_places.title AS places_title, '
'moz_places.rev_host, moz_places.visit_count, moz_bookmarks.id '
'FROM moz_places, moz_bookmarks '
'WHERE moz_bookmarks.fk = moz_places.id AND moz_bookmarks.type <> 3'),
'ParseBookmarkRow'),
(('SELECT moz_items_annos.content, moz_items_annos.dateAdded, '
'moz_items_annos.lastModified, moz_bookmarks.title, '
'moz_places.url, moz_places.rev_host, moz_items_annos.id '
'FROM moz_items_annos, moz_bookmarks, moz_places '
'WHERE moz_items_annos.item_id = moz_bookmarks.id '
'AND moz_bookmarks.fk = moz_places.id'),
'ParseBookmarkAnnotationRow'),
(('SELECT moz_bookmarks.id, moz_bookmarks.title,'
'moz_bookmarks.dateAdded, moz_bookmarks.lastModified '
'FROM moz_bookmarks WHERE moz_bookmarks.type = 2'),
'ParseBookmarkFolderRow')]
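  # Note (added comment): each (query, callback name) pair above is executed by
  # the SQLitePlugin base class, which then calls the named Parse*Row method
  # below once for every row returned by that query.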
_SCHEMA_V24 = {
'moz_anno_attributes': (
'CREATE TABLE moz_anno_attributes ( id INTEGER PRIMARY KEY, name '
'VARCHAR(32) UNIQUE NOT NULL)'),
'moz_annos': (
'CREATE TABLE moz_annos ( id INTEGER PRIMARY KEY, place_id INTEGER '
'NOT NULL, anno_attribute_id INTEGER, mime_type VARCHAR(32) DEFAULT '
'NULL, content LONGVARCHAR, flags INTEGER DEFAULT 0, expiration '
'INTEGER DEFAULT 0, type INTEGER DEFAULT 0, dateAdded INTEGER '
'DEFAULT 0, lastModified INTEGER DEFAULT 0)'),
'moz_bookmarks': (
'CREATE TABLE moz_bookmarks ( id INTEGER PRIMARY KEY, type INTEGER, '
'fk INTEGER DEFAULT NULL, parent INTEGER, position INTEGER, title '
'LONGVARCHAR, keyword_id INTEGER, folder_type TEXT, dateAdded '
'INTEGER, lastModified INTEGER)'),
'moz_bookmarks_roots': (
'CREATE TABLE moz_bookmarks_roots ( root_name VARCHAR(16) UNIQUE, '
'folder_id INTEGER)'),
'moz_favicons': (
'CREATE TABLE moz_favicons ( id INTEGER PRIMARY KEY, url '
'LONGVARCHAR UNIQUE, data BLOB, mime_type VARCHAR(32), expiration '
'LONG)'),
'moz_historyvisits': (
'CREATE TABLE moz_historyvisits ( id INTEGER PRIMARY KEY, '
'from_visit INTEGER, place_id INTEGER, visit_date INTEGER, '
'visit_type INTEGER, session INTEGER)'),
'moz_inputhistory': (
'CREATE TABLE moz_inputhistory ( place_id INTEGER NOT NULL, input '
'LONGVARCHAR NOT NULL, use_count INTEGER, PRIMARY KEY (place_id, '
'input))'),
'moz_items_annos': (
'CREATE TABLE moz_items_annos ( id INTEGER PRIMARY KEY, item_id '
'INTEGER NOT NULL, anno_attribute_id INTEGER, mime_type VARCHAR(32) '
'DEFAULT NULL, content LONGVARCHAR, flags INTEGER DEFAULT 0, '
'expiration INTEGER DEFAULT 0, type INTEGER DEFAULT 0, dateAdded '
'INTEGER DEFAULT 0, lastModified INTEGER DEFAULT 0)'),
'moz_keywords': (
'CREATE TABLE moz_keywords ( id INTEGER PRIMARY KEY AUTOINCREMENT, '
'keyword TEXT UNIQUE)'),
'moz_places': (
'CREATE TABLE moz_places ( id INTEGER PRIMARY KEY, url LONGVARCHAR, '
'title LONGVARCHAR, rev_host LONGVARCHAR, visit_count INTEGER '
'DEFAULT 0, hidden INTEGER DEFAULT 0 NOT NULL, typed INTEGER '
'DEFAULT 0 NOT NULL, favicon_id INTEGER, frecency INTEGER DEFAULT '
'-1 NOT NULL, last_visit_date INTEGER )')}
_SCHEMA_V25 = {
'moz_anno_attributes': (
'CREATE TABLE moz_anno_attributes ( id INTEGER PRIMARY KEY, name '
'VARCHAR(32) UNIQUE NOT NULL)'),
'moz_annos': (
'CREATE TABLE moz_annos ( id INTEGER PRIMARY KEY, place_id INTEGER '
'NOT NULL, anno_attribute_id INTEGER, mime_type VARCHAR(32) DEFAULT '
'NULL, content LONGVARCHAR, flags INTEGER DEFAULT 0, expiration '
'INTEGER DEFAULT 0, type INTEGER DEFAULT 0, dateAdded INTEGER '
'DEFAULT 0, lastModified INTEGER DEFAULT 0)'),
'moz_bookmarks': (
'CREATE TABLE moz_bookmarks ( id INTEGER PRIMARY KEY, type INTEGER, '
'fk INTEGER DEFAULT NULL, parent INTEGER, position INTEGER, title '
'LONGVARCHAR, keyword_id INTEGER, folder_type TEXT, dateAdded '
'INTEGER, lastModified INTEGER, guid TEXT)'),
'moz_bookmarks_roots': (
'CREATE TABLE moz_bookmarks_roots ( root_name VARCHAR(16) UNIQUE, '
'folder_id INTEGER)'),
'moz_favicons': (
'CREATE TABLE moz_favicons ( id INTEGER PRIMARY KEY, url '
'LONGVARCHAR UNIQUE, data BLOB, mime_type VARCHAR(32), expiration '
'LONG, guid TEXT)'),
'moz_historyvisits': (
'CREATE TABLE moz_historyvisits ( id INTEGER PRIMARY KEY, '
'from_visit INTEGER, place_id INTEGER, visit_date INTEGER, '
'visit_type INTEGER, session INTEGER)'),
'moz_hosts': (
'CREATE TABLE moz_hosts ( id INTEGER PRIMARY KEY, host TEXT NOT '
'NULL UNIQUE, frecency INTEGER, typed INTEGER NOT NULL DEFAULT 0, '
'prefix TEXT)'),
'moz_inputhistory': (
'CREATE TABLE moz_inputhistory ( place_id INTEGER NOT NULL, input '
'LONGVARCHAR NOT NULL, use_count INTEGER, PRIMARY KEY (place_id, '
'input))'),
'moz_items_annos': (
'CREATE TABLE moz_items_annos ( id INTEGER PRIMARY KEY, item_id '
'INTEGER NOT NULL, anno_attribute_id INTEGER, mime_type VARCHAR(32) '
'DEFAULT NULL, content LONGVARCHAR, flags INTEGER DEFAULT 0, '
'expiration INTEGER DEFAULT 0, type INTEGER DEFAULT 0, dateAdded '
'INTEGER DEFAULT 0, lastModified INTEGER DEFAULT 0)'),
'moz_keywords': (
'CREATE TABLE moz_keywords ( id INTEGER PRIMARY KEY AUTOINCREMENT, '
'keyword TEXT UNIQUE)'),
'moz_places': (
'CREATE TABLE moz_places ( id INTEGER PRIMARY KEY, url LONGVARCHAR, '
'title LONGVARCHAR, rev_host LONGVARCHAR, visit_count INTEGER '
'DEFAULT 0, hidden INTEGER DEFAULT 0 NOT NULL, typed INTEGER '
'DEFAULT 0 NOT NULL, favicon_id INTEGER, frecency INTEGER DEFAULT '
'-1 NOT NULL, last_visit_date INTEGER , guid TEXT)'),
'sqlite_stat1': (
'CREATE TABLE sqlite_stat1(tbl, idx, stat)')}
SCHEMAS = [_SCHEMA_V24, _SCHEMA_V25]
# Cache queries.
URL_CACHE_QUERY = (
'SELECT h.id AS id, p.url, p.rev_host FROM moz_places p, '
'moz_historyvisits h WHERE p.id = h.place_id')
# TODO: move to formatter.
_BOOKMARK_TYPES = {
1: 'URL',
2: 'Folder',
3: 'Separator',
}
def ParseBookmarkAnnotationRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a bookmark annotation row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = FirefoxPlacesBookmarkAnnotationEventData()
event_data.content = self._GetRowValue(query_hash, row, 'content')
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.title = self._GetRowValue(query_hash, row, 'title')
event_data.url = self._GetRowValue(query_hash, row, 'url')
timestamp = self._GetRowValue(query_hash, row, 'dateAdded')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'lastModified')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseBookmarkFolderRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a bookmark folder row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
title = self._GetRowValue(query_hash, row, 'title')
event_data = FirefoxPlacesBookmarkFolderEventData()
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.title = title or 'N/A'
timestamp = self._GetRowValue(query_hash, row, 'dateAdded')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'lastModified')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseBookmarkRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a bookmark row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
rev_host = self._GetRowValue(query_hash, row, 'rev_host')
bookmark_type = self._GetRowValue(query_hash, row, 'type')
event_data = FirefoxPlacesBookmarkEventData()
event_data.host = rev_host or 'N/A'
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.places_title = self._GetRowValue(query_hash, row, 'places_title')
event_data.query = query
event_data.title = self._GetRowValue(query_hash, row, 'bookmark_title')
event_data.type = self._BOOKMARK_TYPES.get(bookmark_type, 'N/A')
event_data.url = self._GetRowValue(query_hash, row, 'url')
event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')
timestamp = self._GetRowValue(query_hash, row, 'dateAdded')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'lastModified')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParsePageVisitedRow(
self, parser_mediator, query, row, cache=None, database=None,
**unused_kwargs):
"""Parses a page visited row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
cache (Optional[SQLiteCache]): cache.
database (Optional[SQLiteDatabase]): database.
"""
query_hash = hash(query)
from_visit = self._GetRowValue(query_hash, row, 'from_visit')
hidden = self._GetRowValue(query_hash, row, 'hidden')
rev_host = self._GetRowValue(query_hash, row, 'rev_host')
typed = self._GetRowValue(query_hash, row, 'typed')
# TODO: make extra conditional formatting.
extras = []
if from_visit:
extras.append('visited from: {0:s}'.format(
self._GetUrl(from_visit, cache, database)))
if hidden == '1':
extras.append('(url hidden)')
if typed == '1':
extras.append('(directly typed)')
else:
extras.append('(URL not typed directly)')
event_data = FirefoxPlacesPageVisitedEventData()
event_data.host = self._ReverseHostname(rev_host)
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.title = self._GetRowValue(query_hash, row, 'title')
event_data.url = self._GetRowValue(query_hash, row, 'url')
event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')
event_data.visit_type = self._GetRowValue(query_hash, row, 'visit_type')
if extras:
event_data.extra = extras
timestamp = self._GetRowValue(query_hash, row, 'visit_date')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
def _ReverseHostname(self, hostname):
"""Reverses the hostname and strips the leading dot.
The hostname entry is reversed:
moc.elgoog.www.
Should be:
www.google.com
Args:
hostname (str): reversed hostname.
Returns:
str: hostname without a leading dot.
"""
if not hostname:
return ''
if len(hostname) <= 1:
return hostname
if hostname[-1] == '.':
return hostname[::-1][1:]
return hostname[::-1][0:]
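# Illustrative sketch (not part of the original plugin): Firefox stores
# rev_host reversed with a trailing dot, so reversing the string and
# dropping the leading dot recovers the hostname, e.g. (values assumed):
#   'moc.elgoog.www.'[::-1]      -> '.www.google.com'
#   'moc.elgoog.www.'[::-1][1:]  -> 'www.google.com'
#   'gro.elpmaxe'[::-1][0:]      -> 'example.org'   (no trailing dot case)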
def _GetUrl(self, url_id, cache, database):
"""Retrieves an URL from a reference to an entry in the from_visit table.
Args:
url_id (str): identifier of the visited URL.
cache (SQLiteCache): cache.
database (SQLiteDatabase): database.
Returns:
str: URL and hostname.
"""
url_cache_results = cache.GetResults('url')
if not url_cache_results:
result_set = database.Query(self.URL_CACHE_QUERY)
cache.CacheQueryResults(
result_set, 'url', 'id', ('url', 'rev_host'))
url_cache_results = cache.GetResults('url')
url, reverse_host = url_cache_results.get(url_id, ['', ''])
if not url:
return ''
hostname = self._ReverseHostname(reverse_host)
return '{0:s} ({1:s})'.format(url, hostname)
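# Illustrative _GetUrl output (assuming the referenced visit is in the URL
# cache): 'https://www.google.com/search (www.google.com)', i.e. the
# originating URL followed by its un-reversed hostname.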
sqlite.SQLiteParser.RegisterPlugin(FirefoxHistoryPlugin)
|
|
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from future.utils import with_metaclass
__all__ = ('autoclass', 'ensureclass')
from .jnius import (
JavaClass, MetaJavaClass, JavaMethod, JavaStaticMethod,
JavaField, JavaStaticField, JavaMultipleMethod, find_javaclass
)
class Class(with_metaclass(MetaJavaClass, JavaClass)):
__javaclass__ = 'java/lang/Class'
desiredAssertionStatus = JavaMethod('()Z')
forName = JavaMultipleMethod([
('(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;', True, False),
('(Ljava/lang/String;)Ljava/lang/Class;', True, False), ])
getClassLoader = JavaMethod('()Ljava/lang/ClassLoader;')
getClasses = JavaMethod('()[Ljava/lang/Class;')
getComponentType = JavaMethod('()Ljava/lang/Class;')
getConstructor = JavaMethod('([Ljava/lang/Class;)Ljava/lang/reflect/Constructor;')
getConstructors = JavaMethod('()[Ljava/lang/reflect/Constructor;')
getDeclaredClasses = JavaMethod('()[Ljava/lang/Class;')
getDeclaredConstructor = JavaMethod('([Ljava/lang/Class;)Ljava/lang/reflect/Constructor;')
getDeclaredConstructors = JavaMethod('()[Ljava/lang/reflect/Constructor;')
getDeclaredField = JavaMethod('(Ljava/lang/String;)Ljava/lang/reflect/Field;')
getDeclaredFields = JavaMethod('()[Ljava/lang/reflect/Field;')
getDeclaredMethod = JavaMethod('(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;')
getDeclaredMethods = JavaMethod('()[Ljava/lang/reflect/Method;')
getDeclaringClass = JavaMethod('()Ljava/lang/Class;')
getField = JavaMethod('(Ljava/lang/String;)Ljava/lang/reflect/Field;')
getFields = JavaMethod('()[Ljava/lang/reflect/Field;')
getInterfaces = JavaMethod('()[Ljava/lang/Class;')
getMethod = JavaMethod('(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;')
getMethods = JavaMethod('()[Ljava/lang/reflect/Method;')
getModifiers = JavaMethod('()I')
getName = JavaMethod('()Ljava/lang/String;')
getPackage = JavaMethod('()Ljava/lang/Package;')
getProtectionDomain = JavaMethod('()Ljava/security/ProtectionDomain;')
getResource = JavaMethod('(Ljava/lang/String;)Ljava/net/URL;')
getResourceAsStream = JavaMethod('(Ljava/lang/String;)Ljava/io/InputStream;')
getSigners = JavaMethod('()[Ljava/lang/Object;')
getSuperclass = JavaMethod('()Ljava/lang/Class;')
isArray = JavaMethod('()Z')
isAssignableFrom = JavaMethod('(Ljava/lang/Class;)Z')
isInstance = JavaMethod('(Ljava/lang/Object;)Z')
isInterface = JavaMethod('()Z')
isPrimitive = JavaMethod('()Z')
newInstance = JavaMethod('()Ljava/lang/Object;')
toString = JavaMethod('()Ljava/lang/String;')
class Object(with_metaclass(MetaJavaClass, JavaClass)):
__javaclass__ = 'java/lang/Object'
getClass = JavaMethod('()Ljava/lang/Class;')
hashCode = JavaMethod('()I')
class Modifier(with_metaclass(MetaJavaClass, JavaClass)):
__javaclass__ = 'java/lang/reflect/Modifier'
isAbstract = JavaStaticMethod('(I)Z')
isFinal = JavaStaticMethod('(I)Z')
isInterface = JavaStaticMethod('(I)Z')
isNative = JavaStaticMethod('(I)Z')
isPrivate = JavaStaticMethod('(I)Z')
isProtected = JavaStaticMethod('(I)Z')
isPublic = JavaStaticMethod('(I)Z')
isStatic = JavaStaticMethod('(I)Z')
isStrict = JavaStaticMethod('(I)Z')
isSynchronized = JavaStaticMethod('(I)Z')
isTransient = JavaStaticMethod('(I)Z')
isVolatile = JavaStaticMethod('(I)Z')
class Method(with_metaclass(MetaJavaClass, JavaClass)):
__javaclass__ = 'java/lang/reflect/Method'
getName = JavaMethod('()Ljava/lang/String;')
toString = JavaMethod('()Ljava/lang/String;')
getParameterTypes = JavaMethod('()[Ljava/lang/Class;')
getReturnType = JavaMethod('()Ljava/lang/Class;')
getModifiers = JavaMethod('()I')
isVarArgs = JavaMethod('()Z')
class Field(with_metaclass(MetaJavaClass, JavaClass)):
__javaclass__ = 'java/lang/reflect/Field'
getName = JavaMethod('()Ljava/lang/String;')
toString = JavaMethod('()Ljava/lang/String;')
getType = JavaMethod('()Ljava/lang/Class;')
getModifiers = JavaMethod('()I')
class Constructor(with_metaclass(MetaJavaClass, JavaClass)):
__javaclass__ = 'java/lang/reflect/Constructor'
toString = JavaMethod('()Ljava/lang/String;')
getParameterTypes = JavaMethod('()[Ljava/lang/Class;')
getModifiers = JavaMethod('()I')
isVarArgs = JavaMethod('()Z')
def get_signature(cls_tp):
tp = cls_tp.getName()
if tp[0] == '[':
return tp.replace('.', '/')
signatures = {
'void': 'V', 'boolean': 'Z', 'byte': 'B',
'char': 'C', 'short': 'S', 'int': 'I',
'long': 'J', 'float': 'F', 'double': 'D'}
ret = signatures.get(tp)
if ret:
return ret
# don't do it in recursive way for the moment,
# error on the JNI/android: JNI ERROR (app bug): local reference table overflow (max=512)
#
#ensureclass(tp)
return 'L{0};'.format(tp.replace('.', '/'))
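# Illustrative mapping (sketch, standard JNI conventions assumed): a
# primitive such as int maps to 'I', java.lang.String maps to
# 'Ljava/lang/String;', and an array class whose getName() already starts
# with '[' (e.g. '[Ljava.lang.String;') only has its dots replaced,
# yielding '[Ljava/lang/String;'.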
registers = []
def ensureclass(clsname):
if clsname in registers:
return
jniname = clsname.replace('.', '/')
if MetaJavaClass.get_javaclass(jniname):
return
registers.append(clsname)
autoclass(clsname)
def lower_name(s):
return s[:1].lower() + s[1:] if s else ''
def bean_getter(s):
return (s.startswith('get') and len(s) > 3 and s[3].isupper()) or (s.startswith('is') and len(s) > 2 and s[2].isupper())
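# Quick sketch of the helpers above (illustrative values):
#   bean_getter('getName')  -> True   (autoclass exposes it as property 'name')
#   bean_getter('isEmpty')  -> True
#   bean_getter('get')      -> False  (no capitalized suffix after 'get')
#   lower_name('Name')      -> 'name'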
def autoclass(clsname):
jniname = clsname.replace('.', '/')
cls = MetaJavaClass.get_javaclass(jniname)
if cls:
return cls
classDict = {}
#c = Class.forName(clsname)
c = find_javaclass(clsname)
if c is None:
raise Exception('Java class {0} not found'.format(clsname))
constructors = []
for constructor in c.getConstructors():
sig = '({0})V'.format(
''.join([get_signature(x) for x in constructor.getParameterTypes()]))
constructors.append((sig, constructor.isVarArgs()))
classDict['__javaconstructor__'] = constructors
methods = c.getMethods()
methods_name = [x.getName() for x in methods]
for index, method in enumerate(methods):
name = methods_name[index]
if name in classDict:
continue
count = methods_name.count(name)
# only one method available
if count == 1:
static = Modifier.isStatic(method.getModifiers())
varargs = method.isVarArgs()
sig = '({0}){1}'.format(
''.join([get_signature(x) for x in method.getParameterTypes()]),
get_signature(method.getReturnType()))
cls = JavaStaticMethod if static else JavaMethod
classDict[name] = cls(sig, varargs=varargs)
if name != 'getClass' and bean_getter(name) and len(method.getParameterTypes()) == 0:
lowername = lower_name(name[3:])
classDict[lowername] = (lambda n: property(lambda self: getattr(self, n)()))(name)
continue
# multiple signatures
signatures = []
for index, subname in enumerate(methods_name):
if subname != name:
continue
method = methods[index]
sig = '({0}){1}'.format(
''.join([get_signature(x) for x in method.getParameterTypes()]),
get_signature(method.getReturnType()))
'''
print 'm', name, sig, method.getModifiers()
m = method.getModifiers()
print 'Public', Modifier.isPublic(m)
print 'Private', Modifier.isPrivate(m)
print 'Protected', Modifier.isProtected(m)
print 'Static', Modifier.isStatic(m)
print 'Final', Modifier.isFinal(m)
print 'Synchronized', Modifier.isSynchronized(m)
print 'Volatile', Modifier.isVolatile(m)
print 'Transient', Modifier.isTransient(m)
print 'Native', Modifier.isNative(m)
print 'Interface', Modifier.isInterface(m)
print 'Abstract', Modifier.isAbstract(m)
print 'Strict', Modifier.isStrict(m)
'''
signatures.append((sig, Modifier.isStatic(method.getModifiers()), method.isVarArgs()))
classDict[name] = JavaMultipleMethod(signatures)
for iclass in c.getInterfaces():
if iclass.getName() == 'java.util.List':
classDict['__getitem__'] = lambda self, index: self.get(index)
classDict['__len__'] = lambda self: self.size()
break
for field in c.getFields():
static = Modifier.isStatic(field.getModifiers())
sig = get_signature(field.getType())
cls = JavaStaticField if static else JavaField
classDict[field.getName()] = cls(sig)
classDict['__javaclass__'] = clsname.replace('.', '/')
return MetaJavaClass.__new__(
MetaJavaClass,
clsname, # .replace('.', '_'),
(JavaClass, ),
classDict)
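# Minimal usage sketch (assumes a JVM is reachable through pyjnius; the class
# name below is a standard JDK class, shown for illustration only):
if __name__ == '__main__':
    Stack = autoclass('java.util.Stack')
    stack = Stack()
    stack.push('hello')
    stack.push('world')
    print(stack.pop())  # -> 'world'
    print(stack.pop())  # -> 'hello'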
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from fate_arch.session import computing_session as session
from federatedml.model_base import Metric
from federatedml.model_base import MetricMeta
from federatedml.framework.hetero.procedure import batch_generator
from federatedml.nn.hetero_nn.backend.model_builder import model_builder
from federatedml.nn.hetero_nn.hetero_nn_base import HeteroNNBase
from federatedml.optim.convergence import converge_func_factory
from federatedml.param.evaluation_param import EvaluateParam
from federatedml.protobuf.generated.hetero_nn_model_meta_pb2 import HeteroNNMeta
from federatedml.protobuf.generated.hetero_nn_model_param_pb2 import HeteroNNParam
from federatedml.util import consts, LOGGER
from federatedml.util.io_check import assert_io_num_rows_equal
MODELMETA = "HeteroNNGuestMeta"
MODELPARAM = "HeteroNNGuestParam"
class HeteroNNGuest(HeteroNNBase):
def __init__(self):
super(HeteroNNGuest, self).__init__()
self.task_type = None
self.converge_func = None
self.batch_generator = batch_generator.Guest()
self.data_keys = []
self.model_builder = None
self.label_dict = {}
self.model = None
self.role = consts.GUEST
self.history_loss = []
self.num_label = 2
self.input_shape = None
self._summary_buf = {"history_loss": [],
"is_converged": False,
"best_iteration": -1}
def _init_model(self, hetero_nn_param):
super(HeteroNNGuest, self)._init_model(hetero_nn_param)
self.task_type = hetero_nn_param.task_type
self.converge_func = converge_func_factory(self.early_stop, self.tol)
def _build_model(self):
# build a hetero NN model (keras backend) and store it on self.model
self.model = model_builder("guest", self.hetero_nn_param)
self.model.set_transfer_variable(self.transfer_variable)
def _set_loss_callback_info(self):
self.callback_meta("loss",
"train",
MetricMeta(name="train",
metric_type="LOSS",
extra_metas={"unit_name": "iters"}))
def fit(self, data_inst, validate_data=None):
self.callback_list.on_train_begin(data_inst, validate_data)
# collect data from table to form data loader
if not self.component_properties.is_warm_start:
self._build_model()
cur_epoch = 0
else:
self.model.warm_start()
self.callback_warm_start_init_iter(self.history_iter_epoch)
cur_epoch = self.history_iter_epoch + 1
self.prepare_batch_data(self.batch_generator, data_inst)
if not self.input_shape:
self.model.set_empty()
self._set_loss_callback_info()
while cur_epoch < self.epochs:
self.iter_epoch = cur_epoch
LOGGER.debug("cur epoch is {}".format(cur_epoch))
self.callback_list.on_epoch_begin(cur_epoch)
epoch_loss = 0
for batch_idx in range(len(self.data_x)):
# hetero NN model
batch_loss = self.model.train(self.data_x[batch_idx], self.data_y[batch_idx], cur_epoch, batch_idx)
epoch_loss += batch_loss
epoch_loss /= len(self.data_x)
LOGGER.debug("epoch {}' loss is {}".format(cur_epoch, epoch_loss))
self.callback_metric("loss",
"train",
[Metric(cur_epoch, epoch_loss)])
self.history_loss.append(epoch_loss)
self.callback_list.on_epoch_end(cur_epoch)
if self.callback_variables.stop_training:
LOGGER.debug('early stopping triggered')
break
if self.hetero_nn_param.selector_param.method:
# when using selective bp, the loss convergence check is disabled
is_converge = False
else:
is_converge = self.converge_func.is_converge(epoch_loss)
self._summary_buf["is_converged"] = is_converge
self.transfer_variable.is_converge.remote(is_converge,
role=consts.HOST,
idx=0,
suffix=(cur_epoch,))
if is_converge:
LOGGER.debug("Training process is converged in epoch {}".format(cur_epoch))
break
cur_epoch += 1
if cur_epoch == self.epochs:
LOGGER.debug("Training process reach max training epochs {} and not converged".format(self.epochs))
self.callback_list.on_train_end()
# if self.validation_strategy and self.validation_strategy.has_saved_best_model():
# self.load_model(self.validation_strategy.cur_best_model)
self.set_summary(self._get_model_summary())
@assert_io_num_rows_equal
def predict(self, data_inst):
data_inst = self.align_data_header(data_inst, self._header)
keys, test_x, test_y = self._load_data(data_inst)
self.set_partition(data_inst)
preds = self.model.predict(test_x)
if self.task_type == "regression":
preds = [float(pred[0]) for pred in preds]
predict_tb = session.parallelize(zip(keys, preds), include_key=True, partition=data_inst.partitions)
result = self.predict_score_to_output(data_inst, predict_tb)
else:
if self.num_label > 2:
preds = [list(map(float, pred)) for pred in preds]
predict_tb = session.parallelize(zip(keys, preds), include_key=True, partition=data_inst.partitions)
result = self.predict_score_to_output(data_inst, predict_tb, classes=list(range(self.num_label)))
else:
preds = [float(pred[0]) for pred in preds]
predict_tb = session.parallelize(zip(keys, preds), include_key=True, partition=data_inst.partitions)
threshold = self.predict_param.threshold
result = self.predict_score_to_output(data_inst, predict_tb, classes=[0, 1], threshold=threshold)
return result
def export_model(self):
if self.model is None:
return
return {MODELMETA: self._get_model_meta(),
MODELPARAM: self._get_model_param()}
def load_model(self, model_dict):
model_dict = list(model_dict["model"].values())[0]
param = model_dict.get(MODELPARAM)
meta = model_dict.get(MODELMETA)
self._build_model()
self._restore_model_meta(meta)
self._restore_model_param(param)
def _get_model_summary(self):
# self._summary_buf["best_iteration"] = -1 if self.validation_strategy is None else self.validation_strategy.best_iteration
self._summary_buf["history_loss"] = self.history_loss
if self.callback_variables.validation_summary:
self._summary_buf["validation_metrics"] = self.callback_variables.validation_summary
"""
if self.validation_strategy:
validation_summary = self.validation_strategy.summary()
if validation_summary:
self._summary_buf["validation_metrics"] = validation_summary
"""
return self._summary_buf
def _get_model_meta(self):
model_meta = HeteroNNMeta()
model_meta.task_type = self.task_type
model_meta.batch_size = self.batch_size
model_meta.epochs = self.epochs
model_meta.early_stop = self.early_stop
model_meta.tol = self.tol
# model_meta.interactive_layer_lr = self.hetero_nn_param.interacitve_layer_lr
model_meta.hetero_nn_model_meta.CopyFrom(self.model.get_hetero_nn_model_meta())
return model_meta
def _get_model_param(self):
model_param = HeteroNNParam()
model_param.iter_epoch = self.iter_epoch
model_param.hetero_nn_model_param.CopyFrom(self.model.get_hetero_nn_model_param())
model_param.num_label = self.num_label
model_param.best_iteration = self.callback_variables.best_iteration
# model_param.best_iteration = -1 if self.validation_strategy is None else self.validation_strategy.best_iteration
model_param.header.extend(self._header)
for loss in self.history_loss:
model_param.history_loss.append(loss)
return model_param
def get_metrics_param(self):
if self.task_type == consts.CLASSIFICATION:
if self.num_label == 2:
return EvaluateParam(eval_type="binary",
pos_label=1, metrics=self.metrics)
else:
return EvaluateParam(eval_type="multi", metrics=self.metrics)
else:
return EvaluateParam(eval_type="regression", metrics=self.metrics)
def prepare_batch_data(self, batch_generator, data_inst):
self._header = data_inst.schema["header"]
batch_generator.initialize_batch_generator(data_inst, self.batch_size)
batch_data_generator = batch_generator.generate_batch_data()
for batch_data in batch_data_generator:
keys, batch_x, batch_y = self._load_data(batch_data)
self.data_x.append(batch_x)
self.data_y.append(batch_y)
self.data_keys.append(keys)
self._convert_label()
self.set_partition(data_inst)
def _load_data(self, data_inst):
data = list(data_inst.collect())
data_keys = [key for (key, val) in data]
data_keys_map = dict(zip(sorted(data_keys), range(len(data_keys))))
keys = [None for idx in range(len(data_keys))]
batch_x = [None for idx in range(len(data_keys))]
batch_y = [None for idx in range(len(data_keys))]
for (key, inst) in data:
idx = data_keys_map[key]
keys[idx] = key
batch_x[idx] = inst.features
batch_y[idx] = inst.label
if self.input_shape is None:
try:
self.input_shape = inst.features.shape[0]
except AttributeError:
self.input_shape = 0
batch_x = np.asarray(batch_x)
batch_y = np.asarray(batch_y)
return keys, batch_x, batch_y
def _convert_label(self):
diff_label = np.unique(np.concatenate(self.data_y))
self.label_dict = dict(zip(diff_label, range(diff_label.shape[0])))
transform_y = []
self.num_label = diff_label.shape[0]
if self.task_type == "regression" or self.num_label <= 2:
for batch_y in self.data_y:
new_batch_y = np.zeros((batch_y.shape[0], 1))
for idx in range(new_batch_y.shape[0]):
new_batch_y[idx] = batch_y[idx]
transform_y.append(new_batch_y)
self.data_y = transform_y
return
for batch_y in self.data_y:
new_batch_y = np.zeros((batch_y.shape[0], self.num_label))
for idx in range(new_batch_y.shape[0]):
y = batch_y[idx]
new_batch_y[idx][y] = 1
transform_y.append(new_batch_y)
self.data_y = transform_y
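# Label conversion sketch (illustrative): with raw integer labels {0, 1, 2}
# num_label becomes 3 and every batch_y row is one-hot encoded, e.g. label 1
# -> [0., 1., 0.]. For binary classification or regression the labels are
# kept as a single column instead.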
def _restore_model_param(self, param):
super(HeteroNNGuest, self)._restore_model_param(param)
self.num_label = param.num_label
|
|
from __future__ import absolute_import
import blinker
from collections import deque
from functools import wraps, partial
from threading import local
import sys
from .compat import reraise, iteritems, is_nextable
def noop(*_, **dummy):
pass
class StopIterationWithValue(StopIteration):
value = None
def __init__(self, value):
super(StopIterationWithValue, self).__init__()
self.value = value
class _PendingRunnable(object):
def __init__(self, it, parent=None, key=None, callback=None, callback_exc=None):
self.iterable = it
self.iteration = 0
self.parent = parent
self.key = key
self.callback = callback
self.callback_exc = callback_exc
self.dependency_results = None
self.dependencies_remaining = 0
self.exception_to_raise = None
self.result = None
self.result_exception = None
def step(self):
assert self.iteration >= 0
self.iteration += 1
if self.iteration == 1:
assert self.dependency_results is None and self.exception_to_raise is None
run_fn = partial(next, self.iterable)
elif self.exception_to_raise is not None:
exc, self.exception_to_raise = self.exception_to_raise, None
run_fn = partial(self.iterable.throw, *exc)
else:
run_fn = partial(self.iterable.send, self.dependency_results)
try:
requirements = run_fn()
except StopIteration as e:
self.result = getattr(e, 'value', None)
self.iteration = -1
return None
except Exception:
self.result_exception = sys.exc_info()
self.iteration = -1
return None
if requirements is None:
requirements = []
dependencies = None
if isinstance(requirements, dict):
dependencies = requirements
self.dependency_results = {}
self.dependency_completed = partial(self._dependency_completed_list_or_dict, self.iteration)
elif isinstance(requirements, (list, set, frozenset, tuple)):
dependencies = dict(enumerate(requirements))
self.dependency_results = [None] * len(dependencies)
self.dependency_completed = partial(self._dependency_completed_list_or_dict, self.iteration)
else:
dependencies = {'': requirements}
self.dependency_results = None
self.dependency_completed = partial(self._dependency_completed_single, self.iteration)
self.dependency_threw = partial(self._dependency_threw, self.iteration)
self.dependencies_remaining = len(dependencies)
return dependencies
def _dependency_completed_list_or_dict(self, iteration, loop, k, v):
if self.iteration != iteration:
return
self.dependency_results[k] = v
self.dependencies_remaining -= 1
if self.ready:
loop.runnable(self)
def _dependency_completed_single(self, iteration, loop, _, v):
if self.iteration != iteration:
return
self.dependency_results = v
self.dependencies_remaining -= 1
if self.ready:
loop.runnable(self)
def _dependency_threw(self, iteration, loop, _, type_, value, traceback):
if self.iteration != iteration:
return
self.exception_to_raise = (type_, value, traceback)
self.iteration += 1
self.dependencies_remaining = 0
if self.ready:
loop.runnable(self)
dependency_completed = None # dynamically changed.
dependency_threw = None
@property
def ready(self):
return self.dependencies_remaining == 0 and getattr(self.iterable, 'ready', True)
LOCAL_ID = 0
def new_local_id():
global LOCAL_ID
LOCAL_ID += 1
return LOCAL_ID
class RunLoop(object):
def __init__(self):
self.locals = dict()
self.run_queue = deque()
self.total_pending = 0
self.main_runnable = None
self.on_queue_exhausted = blinker.Signal()
self.on_runnable_added = blinker.Signal()
self.on_iteration = blinker.Signal()
def run(self, iterable):
self.main_runnable = self.add(iterable)
while self.total_pending:
assert self.run_queue
self.on_iteration.send()
self._run_all_runnables()
if self.total_pending:
self.on_queue_exhausted.send()
if self.main_runnable.result_exception:
reraise(*self.main_runnable.result_exception)
return self.main_runnable.result
def add(self, iterable, callback_ok=None, callback_exc=None):
callback_ok = callback_ok or noop
callback_exc = callback_exc or noop
obj = _PendingRunnable(iterable, callback=callback_ok, callback_exc=callback_exc)
self.total_pending += 1
if obj.ready:
self.run_queue.append(obj)
if hasattr(iterable, 'on_add_to_loop'):
iterable.on_add_to_loop(self, obj)
self.on_runnable_added.send(runnable=obj)
return obj
def runnable(self, runnable):
"""Notify the context that routine is runnable. This assumes that
.add() was already called with this iterable."""
assert isinstance(runnable, _PendingRunnable)
self.run_queue.append(runnable)
def _run_all_runnables(self):
while self.run_queue:
runnable = self.run_queue.popleft()
deps = runnable.step()
if deps is None:
if runnable.result_exception:
runnable.callback_exc(*runnable.result_exception)
elif runnable.callback is not None:
runnable.callback(runnable.result)
self.total_pending -= 1
continue
for k, v in iteritems(deps):
self.add(v,
partial(runnable.dependency_completed, self, k),
partial(runnable.dependency_threw, self, k))
if runnable.ready:
self.run_queue.append(runnable)
class _ThreadingLocalRunLoop(local):
loop = None
_CURRENT_RUN_LOOP = _ThreadingLocalRunLoop()
def current_run_loop():
return _CURRENT_RUN_LOOP.loop
def use_threading_local():
assert current_run_loop() is None
global _CURRENT_RUN_LOOP
_CURRENT_RUN_LOOP = _ThreadingLocalRunLoop()
try:
from gevent.local import local as gevent_local
except ImportError as ex:
def use_gevent_local():
raise ImportError("Gevent not present")
else:
class _GeventLocalRunLoop(gevent_local):
loop = None
def use_gevent_local():
assert current_run_loop() is None
global _CURRENT_RUN_LOOP
_CURRENT_RUN_LOOP = _GeventLocalRunLoop()
def runloop_coroutine():
"""Creates a coroutine that gets run in a run loop.
The run loop will be created if necessary."""
def wrap(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if _CURRENT_RUN_LOOP.loop:
it = fn(*args, **kwargs)
assert is_nextable(it), '%s did not return an iterator' % (fn)
return it
else:
_CURRENT_RUN_LOOP.loop = loop = RunLoop()
try:
it = fn(*args, **kwargs)
assert is_nextable(it), '%s did not return an iterator' % (fn)
return loop.run(it)
finally:
_CURRENT_RUN_LOOP.loop = None
return wrapper
return wrap
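# Usage sketch (illustrative; assumes the pre-PEP 479 StopIteration semantics
# this module targets). A decorated coroutine yields its dependencies and
# finishes through coro_return():
#
#   @runloop_coroutine()
#   def add(a, b):
#       coro_return(a + b)
#       yield
#
#   @runloop_coroutine()
#   def main():
#       x = yield add(1, 2)
#       y = yield add(x, 3)
#       coro_return(y)
#       yield
#
#   main()  # -> 6, both coroutines run inside a freshly created RunLoop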
def requires_runloop():
"""Same as @runloop_coroutine, but refuses to create a loop if one is not present."""
def wrap(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
assert current_run_loop()
return fn(*args, **kwargs)
return wrapper
return wrap
def coro_return(value):
raise StopIterationWithValue(value)
class _DeferredIterable(object):
def __init__(self):
self.value = None
self.exception = None
self.ready = False
self.batch_context = None
self.runnable = None
self.on_ready = blinker.Signal()
def on_add_to_loop(self, context, runnable):
assert self.batch_context is None
self.batch_context = context
self.runnable = runnable
def set_value(self, value):
assert not self.ready
self.ready = True
self.value = value
if self.batch_context:
self.batch_context.runnable(self.runnable)
self.on_ready.send()
def set_exception(self, type_, value=None, traceback=None):
assert not self.ready
self.ready = True
self.exception = (type_, value, traceback)
if self.batch_context:
self.batch_context.runnable(self.runnable)
self.on_ready.send()
def __next__(self):
coro_return(self.get())
next = __next__
def get(self):
if __debug__:
if not self.ready:
raise ValueError(".get() on non-ready deferred.")
if self.exception is not None:
reraise(*self.exception)
return self.value
@requires_runloop()
def deferred():
assert current_run_loop()
coro_return(_DeferredIterable())
yield # pragma: no cover
@requires_runloop()
def future(iterable):
"""Given an iterable, this returns an object that can be yielded again once
you want to use its value. This is useful to "front-load" some expensive
calls that you don't need the results of immediately.
Usage:
thing_later = yield future(thing_resolver())
... Do things ...
thing = yield thing_later
In addition, this may be used to catch exceptions when doing several actions in parallel:
a, b, c = yield future(get_a()), future(get_b()), future(get_c())
try:
a_thing = yield a
except ValueError:
a_thing = None # it's ok we don't need it anyway
b_thing, c_thing = yield b, c
"""
result = yield deferred()
current_run_loop().add(iterable, result.set_value, result.set_exception)
coro_return(result)
@requires_runloop()
def wait(deferreds, count=None):
"""iwait(deferreds_or_futures, count=None).
Waits until up to `count` (or all, if count is None) deferreds to complete. Returns
the objects that completed. Example:
a, b, c = yield future(get_a()), future(get_b()), future(get_c())
first, second = yield wait([a, b, c], count=2)
# At this point 2/3 of the above futures are complete."""
if count is None:
count = len(deferreds)
assert count <= len(deferreds), 'Waiting on too many deferreds: %s' % (count)
ready_list = [d for d in deferreds if d.ready]
# Check if any of the deferreds are ready.
if len(ready_list) < count:
wait_deferred = yield deferred()
for d in deferreds:
def on_ready(_, d=d):  # bind the current deferred to avoid the late-binding closure bug
if wait_deferred.ready:
return # This is mostly necessary for PyPy because weak refs
# aren't immediately removed there.
ready_list.append(d)
if len(ready_list) >= count:
wait_deferred.set_value(True)
d.on_ready.connect(on_ready, weak=True)
yield wait_deferred
assert len(ready_list) == count
coro_return(ready_list)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2014 Romain Dorgueil
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from sqlalchemy import MetaData, Table
from rdc.etl.error import ProhibitedOperationError
from rdc.etl.hash import Hash
from rdc.etl.io import STDIN, INSERT, UPDATE, SELECT, STDERR
from rdc.etl.transform import Transform
from rdc.etl.util import now, cached_property
class DatabaseLoad(Transform):
"""
TODO doc this !!! test this !!!!
"""
engine = None
table_name = None
fetch_columns = None
insert_only_fields = ()
discriminant = ('id', )
created_at_field = 'created_at'
updated_at_field = 'updated_at'
allowed_operations = (INSERT, UPDATE, )
def __init__(self, engine=None, table_name=None, fetch_columns=None, discriminant=None, created_at_field=None,
updated_at_field=None, insert_only_fields=None, allowed_operations=None):
super(DatabaseLoad, self).__init__()
self.engine = engine or self.engine
self.table_name = table_name or self.table_name
# XXX should take self.fetch_columns into account if provided
self.fetch_columns = {}
if isinstance(fetch_columns, (list, tuple, )):
self.add_fetch_column(*fetch_columns)
elif isinstance(fetch_columns, dict):
self.add_fetch_column(**fetch_columns)
self.discriminant = discriminant or self.discriminant
self.created_at_field = created_at_field or self.created_at_field
self.updated_at_field = updated_at_field or self.updated_at_field
self.insert_only_fields = insert_only_fields or self.insert_only_fields
self.allowed_operations = allowed_operations or self.allowed_operations
self._buffer = []
self._connection = None
self._max_buffer_size = 1000
self._last_duration = None
self._last_commit_at = None
self._query_count = 0
@property
def connection(self):
if self._connection is None:
self._connection = self.engine.connect()
return self._connection
def commit(self):
with self.connection.begin():
while len(self._buffer):
hash = self._buffer.pop(0)
try:
yield self.do_transform(copy(hash))
except Exception as e:
yield Hash((
('_input', hash, ),
('_transform', self, ),
('_error', e, ),
)), STDERR
def close_connection(self):
self._connection.close()
self._connection = None
def get_insert_columns_for(self, hash):
"""List of columns we can use for insert."""
return self.columns
def get_update_columns_for(self, hash, row):
"""List of columns we can use for update."""
return [
column for column in self.columns
if not column in self.insert_only_fields
]
def get_columns_for(self, hash, row=None):
"""Retrieve list of table column names for which we have a value in given hash.
"""
if row:
column_names = self.get_update_columns_for(hash, row)
else:
column_names = self.get_insert_columns_for(hash)
return [key for key in hash if key in column_names]
def find(self, dataset, connection=None):
query = '''SELECT * FROM {table} WHERE {criteria} LIMIT 1'''.format(
table=self.table_name,
criteria=' AND '.join([key_atom + ' = %s' for key_atom in self.discriminant]),
)
rp = (connection or self.connection).execute(query, [dataset.get(key_atom) for key_atom in self.discriminant])
# Increment stats
self._input._special_stats[SELECT] += 1
return rp.fetchone()
def initialize(self):
super(DatabaseLoad, self).initialize()
self._input._special_stats[SELECT] = 0
self._output._special_stats[INSERT] = 0
self._output._special_stats[UPDATE] = 0
def do_transform(self, hash):
"""Actual database load transformation logic, without the buffering / transaction logic.
"""
# find the existing row, if any
row = self.find(hash)
now = self.now
column_names = self.table.columns.keys()
# UpdatedAt field configured ? Let's set the value in source hash
if self.updated_at_field in column_names:
hash[self.updated_at_field] = now
# Otherwise, make sure there is no such field
else:
if self.updated_at_field in hash:
del hash[self.updated_at_field]
# UPDATE
if row:
if not UPDATE in self.allowed_operations:
raise ProhibitedOperationError('UPDATE operations are not allowed by this transformation.')
_columns = self.get_columns_for(hash, row)
query = '''UPDATE {table} SET {values} WHERE {criteria}'''.format(
table=self.table_name,
values=', '.join((
'{column} = %s'.format(column=_column) for _column in _columns
if not _column in self.discriminant
)),
criteria=' AND '.join((
'{key} = %s'.format(key=_key) for _key in self.discriminant
))
)
values = [hash[_column] for _column in _columns if not _column in self.discriminant] + \
[hash[_column] for _column in self.discriminant]
# INSERT
else:
if not INSERT in self.allowed_operations:
raise ProhibitedOperationError('INSERT operations are not allowed by this transformation.')
if self.created_at_field in column_names:
hash[self.created_at_field] = now
else:
if self.created_at_field in hash:
del hash[self.created_at_field]
_columns = self.get_columns_for(hash)
query = '''INSERT INTO {table} ({keys}) VALUES ({values})'''.format(
table=self.table_name,
keys=', '.join(_columns),
values=', '.join(['%s'] * len(_columns))
)
values = [hash[key] for key in _columns]
# Execute
self.connection.execute(query, values)
# Increment stats
if row:
self._output._special_stats[UPDATE] += 1
else:
self._output._special_stats[INSERT] += 1
# If user required us to fetch some columns, let's query again to get their actual values.
if self.fetch_columns and len(self.fetch_columns):
if not row:
row = self.find(hash)
if not row:
raise ValueError('Could not find matching row after load.')
for alias, column in self.fetch_columns.iteritems():
hash[alias] = row[column]
return hash
def transform(self, hash, channel=STDIN):
"""Transform method. Stores the input in a buffer, and only unstack buffer content if some limit has been
exceeded.
TODO for now buffer limit is hardcoded as 1000, but we may use a few criterias to add intelligence to this:
time since last commit, duration of last commit, buffer length ...
"""
self._buffer.append(hash)
if len(self._buffer) >= self._max_buffer_size:
for _out in self.commit():
yield _out
def finalize(self):
"""Transform's finalize method.
Empties the remaining lines in the buffer by loading them into the database and closes the database connection.
"""
super(DatabaseLoad, self).finalize()
for _out in self.commit():
yield _out
self.close_connection()
def add_fetch_column(self, *columns, **aliased_columns):
self.fetch_columns.update(aliased_columns)
for column in columns:
self.fetch_columns[column] = column
@cached_property
def columns(self):
return self.table.columns.keys()
@cached_property
def metadata(self):
"""SQLAlchemy metadata."""
return MetaData()
@cached_property
def table(self):
"""SQLAlchemy table object, using metadata autoloading from database to avoid the need of column definitions."""
return Table(self.table_name, self.metadata, autoload=True, autoload_with=self.engine)
@property
def now(self):
"""Current timestamp, used for created/updated at fields."""
return now()
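# Configuration sketch (illustrative names, not part of the original module):
#
#   from sqlalchemy import create_engine
#
#   load = DatabaseLoad(
#       engine=create_engine('postgresql://user:password@localhost/db'),
#       table_name='customers',
#       discriminant=('email', ),       # columns used to find existing rows
#       fetch_columns=('id', ),         # read back after INSERT/UPDATE
#       allowed_operations=(INSERT, ),  # e.g. forbid UPDATE operations
#   )
#
# Each incoming hash is buffered and flushed in batches of up to 1000 rows;
# a row matching the discriminant is UPDATEd, otherwise a new row is INSERTed.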
|
|
################################################################################
# The Sandbox Libraries (Python) Test Suite 0 (Dependencies) #
# #
# Copyright (C) 2004-2009, 2011-2013 LIU Yu, [email protected] #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# #
# 1. Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# #
# 2. Redistributions in binary form must reproduce the above copyright notice, #
# this list of conditions and the following disclaimer in the documentation #
# and/or other materials provided with the distribution. #
# #
# 3. Neither the name of the author(s) nor the names of its contributors may #
# be used to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" #
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE #
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
################################################################################
#
__all__ = ['TestPlatformToolChain', 'TestPackageIntegrity', 'TestBadPolicy', ]
try:
from . import config
from .config import unittest
except (ValueError, ImportError):
import config
from config import unittest
class TestPlatformToolChain(unittest.TestCase):
def test_platform(self):
from platform import system, machine
self.assertTrue(system() in ('Linux', ))
self.assertTrue(machine() in ('i686', 'x86_64', ))
from sys import version_info
self.assertTrue(version_info[0] == 2 and version_info[1] >= 6 or
version_info[0] >= 3)
pass
def test_toolchain(self):
from subprocess import Popen, PIPE
cmd = config.build("hello", config.CODE_HELLO_WORLD)
self.assertTrue(cmd is not None)
p = Popen(cmd, close_fds=True, stdout=PIPE)
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"Hello World!\n")
pass
pass
class TestPackageIntegrity(unittest.TestCase):
def test_package(self):
import sandbox
self.assertTrue(sandbox.__version__ >= "0.3.5-1")
self.assertTrue(isinstance(sandbox.SandboxEvent, object))
self.assertTrue(isinstance(sandbox.SandboxAction, object))
self.assertTrue(isinstance(sandbox.SandboxPolicy, object))
self.assertTrue(isinstance(sandbox.Sandbox, object))
pass
def test_policy(self):
import sandbox
e = sandbox.SandboxEvent(sandbox.S_EVENT_EXIT)
self.assertTrue(hasattr(e, 'type'))
self.assertTrue(hasattr(e, 'data'))
a = sandbox.SandboxAction(sandbox.S_ACTION_CONT)
self.assertTrue(hasattr(a, 'type'))
self.assertTrue(hasattr(a, 'data'))
p = sandbox.SandboxPolicy()
self.assertTrue(callable(p))
self.assertTrue(isinstance(p(e, a), sandbox.SandboxAction))
self.assertEqual(p(e, a).type, sandbox.S_ACTION_FINI)
pass
def test_policy_local(self):
import sandbox
try:
from . import policy
except (ValueError, ImportError):
try:
import policy
except:
policy = None
pass
self.assertTrue(hasattr(policy, "MinimalPolicy"))
self.assertTrue(issubclass(policy.MinimalPolicy, sandbox.SandboxPolicy))
p = policy.MinimalPolicy()
self.assertTrue(callable(p))
e = sandbox.SandboxEvent(sandbox.S_EVENT_EXIT)
a = sandbox.SandboxAction(sandbox.S_ACTION_CONT)
self.assertTrue(isinstance(p(e, a), sandbox.SandboxAction))
self.assertEqual(p(e, a).type, sandbox.S_ACTION_FINI)
pass
def test_sandbox(self):
import sandbox
echo = ("/bin/echo", "Hello", "World!", )
s = sandbox.Sandbox(echo)
self.assertTrue(hasattr(s, 'task'))
self.assertEqual(s.task, echo)
self.assertTrue(hasattr(s, 'jail'))
self.assertEqual(s.jail, "/")
self.assertTrue(hasattr(s, 'status'))
self.assertEqual(s.status, sandbox.S_STATUS_RDY)
self.assertTrue(hasattr(s, 'result'))
self.assertEqual(s.result, sandbox.S_RESULT_PD)
self.assertTrue(hasattr(s, 'policy'))
self.assertTrue(isinstance(s.policy, sandbox.SandboxPolicy))
self.assertTrue(hasattr(s, 'run'))
self.assertTrue(callable(s.run))
self.assertTrue(hasattr(s, 'probe'))
self.assertTrue(callable(s.probe))
pass
def test_sandbox_err(self):
import sandbox
self.assertRaises(ValueError, sandbox.Sandbox, "/non/existence")
pass
pass
class TestBadPolicy(unittest.TestCase):
def setUp(self):
self.task = ("/bin/echo", "Hello", "World!", )
pass
def test_non_policy(self):
# If the base class of the policy is not sandbox.SandboxPolicy,
# the sandbox should not pass self-test during initialization
import sandbox
class NonPolicy:
def __call__(self, e, a):
return a
pass
self.assertRaises(TypeError, sandbox.Sandbox, self.task, policy=NonPolicy())
s = sandbox.Sandbox(self.task)
self.assertRaises(TypeError, setattr, s, "policy", NonPolicy())
pass
def test_bad_policy(self):
# If the policy does not yield valid actions, the sandbox should
# terminate with BP
import sandbox
class BadPolicy(sandbox.SandboxPolicy):
def __call__(self, e, a):
return None
pass
s = sandbox.Sandbox(self.task, policy=BadPolicy())
s.run()
self.assertEqual(s.result, sandbox.S_RESULT_BP)
pass
def test_inf_policy(self):
# If the policy always returns CONT, the sandbox should terminate with
# BP after the sandboxed program has exited, leaving no further events
import sandbox
class InfPolicy(sandbox.SandboxPolicy):
def __call__(self, e, a):
return a
pass
s_wr = open("/dev/null", "wb")
s = sandbox.Sandbox(self.task, policy=InfPolicy(), stdout=s_wr)
s.run()
s_wr.close()
self.assertEqual(s.result, sandbox.S_RESULT_BP)
pass
pass
def test_suite():
return unittest.TestSuite([
unittest.TestLoader().loadTestsFromTestCase(eval(c)) for c in __all__])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module implements the base CCDData class."""
import numpy as np
from .compat import NDDataArray
from .nduncertainty import StdDevUncertainty, NDUncertainty
from ..io import fits, registry
from .. import units as u
from .. import log
from ..wcs import WCS
from ..utils.decorators import sharedmethod
__all__ = ['CCDData', 'fits_ccddata_reader', 'fits_ccddata_writer']
# Global value which can turn on/off the unit requirements when creating a
# CCDData. Should be used with care because several functions actually break
# if the unit is None!
_config_ccd_requires_unit = True
def _arithmetic(op):
"""Decorator factory which temporarly disables the need for a unit when
creating a new CCDData instance. The final result must have a unit.
Parameters
----------
op : function
The function to apply. Supported are:
- ``np.add``
- ``np.subtract``
- ``np.multiply``
- ``np.true_divide``
Notes
-----
Should only be used on CCDData ``add``, ``subtract``, ``divide`` or
``multiply`` because only these methods from NDArithmeticMixin are
overwritten.
"""
def decorator(func):
def inner(self, operand, operand2=None, **kwargs):
global _config_ccd_requires_unit
_config_ccd_requires_unit = False
result = self._prepare_then_do_arithmetic(op, operand,
operand2, **kwargs)
# Wrap it again as CCDData so it checks the final unit.
_config_ccd_requires_unit = True
return result.__class__(result)
inner.__doc__ = ("See `astropy.nddata.NDArithmeticMixin.{}`."
"".format(func.__name__))
return sharedmethod(inner)
return decorator
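# Effect of the decorator (illustrative): wrapping NDDataArray.add and friends
# below lets the intermediate arithmetic result be created without a unit, but
# re-wrapping it as CCDData re-enables the unit check, so the final result of
# e.g. ccd.add(other) must still carry a unit.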
class CCDData(NDDataArray):
"""A class describing basic CCD data.
The CCDData class is based on the NDData object and includes a data array,
uncertainty frame, mask frame, flag frame, meta data, units, and WCS
information for a single CCD image.
Parameters
----------
data : `~astropy.nddata.CCDData`-like or `numpy.ndarray`-like
The actual data contained in this `~astropy.nddata.CCDData` object.
Note that the data will always be saved by *reference*, so you should
make a copy of the ``data`` before passing it in if that's the desired
behavior.
uncertainty : `~astropy.nddata.StdDevUncertainty`, `numpy.ndarray` or \
None, optional
Uncertainties on the data.
Default is ``None``.
mask : `numpy.ndarray` or None, optional
Mask for the data, given as a boolean Numpy array with a shape
matching that of the data. The values must be `False` where
the data is *valid* and `True` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
``mask`` here will cause the mask from the masked array to be
ignored.
Default is ``None``.
flags : `numpy.ndarray` or `~astropy.nddata.FlagCollection` or None, \
optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
Default is ``None``.
wcs : `~astropy.wcs.WCS` or None, optional
WCS-object containing the world coordinate system for the data.
Default is ``None``.
meta : dict-like object or None, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object, e.g. creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.Unit` or str, optional
The units of the data.
Default is ``None``.
.. warning::
If the unit is ``None`` or not otherwise specified, it will raise a
``ValueError``.
Raises
------
ValueError
If the ``uncertainty`` or ``mask`` inputs cannot be broadcast (e.g.,
match shape) onto ``data``.
Methods
-------
read(\\*args, \\**kwargs)
``Classmethod`` to create a CCDData instance based on a ``FITS`` file.
This method uses :func:`fits_ccddata_reader` with the provided
parameters.
write(\\*args, \\**kwargs)
Writes the contents of the CCDData instance into a new ``FITS`` file.
This method uses :func:`fits_ccddata_writer` with the provided
parameters.
Notes
-----
`~astropy.nddata.CCDData` objects can be easily converted to a regular
Numpy array using `numpy.asarray`.
For example::
>>> from astropy.nddata import CCDData
>>> import numpy as np
>>> x = CCDData([1,2,3], unit='adu')
>>> np.asarray(x)
array([1, 2, 3])
This is useful, for example, when plotting a 2D image using
matplotlib.
>>> from astropy.nddata import CCDData
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> x = CCDData([[1,2,3], [4,5,6]], unit='adu')
>>> plt.imshow(x) # doctest: +SKIP
"""
def __init__(self, *args, **kwd):
if 'meta' not in kwd:
kwd['meta'] = kwd.pop('header', None)
if 'header' in kwd:
raise ValueError("can't have both header and meta.")
super().__init__(*args, **kwd)
# Check if a unit is set. This can be temporarily disabled by the
# _CCDDataUnit contextmanager.
if _config_ccd_requires_unit and self.unit is None:
raise ValueError("a unit for CCDData must be specified.")
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def wcs(self):
return self._wcs
@wcs.setter
def wcs(self, value):
self._wcs = value
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
self._unit = u.Unit(value)
@property
def header(self):
return self._meta
@header.setter
def header(self, value):
self.meta = value
@property
def uncertainty(self):
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
if isinstance(value, NDUncertainty):
if getattr(value, '_parent_nddata', None) is not None:
value = value.__class__(value, copy=False)
self._uncertainty = value
elif isinstance(value, np.ndarray):
if value.shape != self.shape:
raise ValueError("uncertainty must have same shape as "
"data.")
self._uncertainty = StdDevUncertainty(value)
log.info("array provided for uncertainty; assuming it is a "
"StdDevUncertainty.")
else:
raise TypeError("uncertainty must be an instance of a "
"NDUncertainty object or a numpy array.")
self._uncertainty.parent_nddata = self
else:
self._uncertainty = value
def to_hdu(self, hdu_mask='MASK', hdu_uncertainty='UNCERT',
hdu_flags=None, wcs_relax=True):
"""Creates an HDUList object from a CCDData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
``None`` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
``CTYPE`` ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` require
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
- If ``self.uncertainty`` is set but has another unit than
``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
if isinstance(self.header, fits.Header):
# Copy here so that we can modify the HDU header by adding WCS
# information without changing the header of the CCDData object.
header = self.header.copy()
else:
# Because _insert_in_metadata_fits_safe is written as a method
# we need to create a dummy CCDData instance to hold the FITS
# header we are constructing. This probably indicates that
# _insert_in_metadata_fits_safe should be rewritten in a more
# sensible way...
dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu")
for k, v in self.header.items():
dummy_ccd._insert_in_metadata_fits_safe(k, v)
header = dummy_ccd.header
if self.unit is not u.dimensionless_unscaled:
header['bunit'] = self.unit.to_string()
if self.wcs:
# Simply extending the FITS header with the WCS can lead to
# duplicates of the WCS keywords; iterating over the WCS
# header should be safer.
#
# Turns out if I had read the io.fits.Header.extend docs more
# carefully, I would have realized that the keywords exist to
# avoid duplicates and preserve, as much as possible, the
# structure of the commentary cards.
#
# Note that until astropy/astropy#3967 is closed, the extend
# will fail if there are comment cards in the WCS header but
# not header.
wcs_header = self.wcs.to_header(relax=wcs_relax)
header.extend(wcs_header, useblanks=False, update=True)
hdus = [fits.PrimaryHDU(self.data, header)]
if hdu_mask and self.mask is not None:
# Always assuming that the mask is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.mask, 'shape'):
raise ValueError('only a numpy.ndarray mask can be saved.')
# Convert boolean mask to uint since io.fits cannot handle bool.
hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
hdus.append(hduMask)
if hdu_uncertainty and self.uncertainty is not None:
# We need to save some kind of information which uncertainty was
# used so that loading the HDUList can infer the uncertainty type.
# No idea how this can be done so only allow StdDevUncertainty.
if self.uncertainty.__class__.__name__ != 'StdDevUncertainty':
raise ValueError('only StdDevUncertainty can be saved.')
# Assuming uncertainty is a StdDevUncertainty, save just the array.
# This might be problematic if the uncertainty has a unit differing
# from the data, so abort for different units. This is important for
# astropy > 1.2
if (hasattr(self.uncertainty, 'unit') and
self.uncertainty.unit is not None and
self.uncertainty.unit != self.unit):
raise ValueError('saving uncertainties with a unit differing '
'from the data unit is not supported.')
hduUncert = fits.ImageHDU(self.uncertainty.array,
name=hdu_uncertainty)
hdus.append(hduUncert)
if hdu_flags and self.flags:
raise NotImplementedError('adding the flags to a HDU is not '
'supported at this time.')
hdulist = fits.HDUList(hdus)
return hdulist
def copy(self):
"""
Return a copy of the CCDData object.
"""
return self.__class__(self, copy=True)
add = _arithmetic(np.add)(NDDataArray.add)
subtract = _arithmetic(np.subtract)(NDDataArray.subtract)
multiply = _arithmetic(np.multiply)(NDDataArray.multiply)
divide = _arithmetic(np.true_divide)(NDDataArray.divide)
def _insert_in_metadata_fits_safe(self, key, value):
"""
Insert key/value pair into metadata in a way that FITS can serialize.
Parameters
----------
key : str
Key to be inserted in dictionary.
value : str or None
Value to be inserted.
Notes
-----
This addresses a shortcoming of the FITS standard. There are length
restrictions on both the ``key`` (8 characters) and ``value`` (72
characters) in the FITS standard. There is a convention for handling
long keywords and a convention for handling long values, but the
two conventions cannot be used at the same time.
This addresses that case by checking the length of the ``key`` and
``value`` and, if necessary, shortening the key.
"""
if len(key) > 8 and len(value) > 72:
short_name = key[:8]
self.meta['HIERARCH {0}'.format(key.upper())] = (
short_name, "Shortened name for {}".format(key))
self.meta[short_name] = value
else:
self.meta[key] = value
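# A minimal sketch of the helper above, assuming a CCDData instance ``ccd``
# (the keyword name and value are illustrative): a key longer than 8
# characters whose value also exceeds 72 characters is recorded twice, once
# as a HIERARCH card pointing at the shortened 8-character key, and once
# under that short key holding the long value.
#
#     >>> ccd = CCDData([1], unit='adu')                                   # doctest: +SKIP
#     >>> ccd._insert_in_metadata_fits_safe('OBSERVATION_NOTES', 'x' * 80)  # doctest: +SKIP
#     >>> len(ccd.meta['OBSERVAT'])                                        # doctest: +SKIP
#     80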
# This needs to be importable by the tests...
_KEEP_THESE_KEYWORDS_IN_HEADER = [
'JD-OBS',
'MJD-OBS',
'DATE-OBS'
]
def _generate_wcs_and_update_header(hdr):
"""
Generate a WCS object from a header and remove the WCS-specific
keywords from the header.
Parameters
----------
hdr : astropy.io.fits.header or other dict-like
Returns
-------
new_header, wcs
"""
# Try constructing a WCS object.
try:
wcs = WCS(hdr)
except Exception as exc:
# Normally WCS only raises Warnings and doesn't fail but in rare
# cases (malformed header) it could fail...
log.info('An exception happened while extracting WCS information from '
'the Header.\n{}: {}'.format(type(exc).__name__, str(exc)))
return hdr, None
# Test for success by checking to see if the wcs ctype has a non-empty
# value, return None for wcs if ctype is empty.
if not wcs.wcs.ctype[0]:
return (hdr, None)
new_hdr = hdr.copy()
# If the keywords below are in the header they are also added to WCS.
# It seems like they should *not* be removed from the header, though.
wcs_header = wcs.to_header(relax=True)
for k in wcs_header:
if k not in _KEEP_THESE_KEYWORDS_IN_HEADER:
new_hdr.remove(k, ignore_missing=True)
return (new_hdr, wcs)
def fits_ccddata_reader(filename, hdu=0, unit=None, hdu_uncertainty='UNCERT',
hdu_mask='MASK', hdu_flags=None, **kwd):
"""
Generate a CCDData object from a FITS file.
Parameters
----------
filename : str
Name of fits file.
hdu : int, optional
FITS extension from which CCDData should be initialized. If zero and
there is no data in the primary extension, it will search for the first
extension with data. The header will be added to the primary header.
Default is ``0``.
unit : `~astropy.units.Unit`, optional
Units of the image data. If this argument is provided and the FITS header
also contains a unit for the image (the ``BUNIT`` keyword), this argument
takes precedence and is used for the unit.
Default is ``None``.
hdu_uncertainty : str or None, optional
FITS extension from which the uncertainty should be initialized. If the
extension does not exist the uncertainty of the CCDData is ``None``.
Default is ``'UNCERT'``.
hdu_mask : str or None, optional
FITS extension from which the mask should be initialized. If the
extension does not exist the mask of the CCDData is ``None``.
Default is ``'MASK'``.
hdu_flags : str or None, optional
Currently not implemented.
Default is ``None``.
kwd :
Any additional keyword parameters are passed through to the FITS reader
in :mod:`astropy.io.fits`; see Notes for additional discussion.
Notes
-----
FITS files that contain scaled data (e.g. unsigned integer images) will
be scaled and the keywords used to manage scaled data in
:mod:`astropy.io.fits` are disabled.
"""
unsupport_open_keywords = {
'do_not_scale_image_data': 'Image data must be scaled.',
'scale_back': 'Scale information is not preserved.'
}
for key, msg in unsupport_open_keywords.items():
if key in kwd:
prefix = 'unsupported keyword: {0}.'.format(key)
raise TypeError(' '.join([prefix, msg]))
with fits.open(filename, **kwd) as hdus:
hdr = hdus[hdu].header
if hdu_uncertainty is not None and hdu_uncertainty in hdus:
uncertainty = StdDevUncertainty(hdus[hdu_uncertainty].data)
else:
uncertainty = None
if hdu_mask is not None and hdu_mask in hdus:
# Mask is saved as uint but we want it to be boolean.
mask = hdus[hdu_mask].data.astype(np.bool_)
else:
mask = None
if hdu_flags is not None and hdu_flags in hdus:
raise NotImplementedError('loading flags is currently not '
'supported.')
# search for the first instance with data if
# the primary header is empty.
if hdu == 0 and hdus[hdu].data is None:
for i in range(len(hdus)):
if hdus.fileinfo(i)['datSpan'] > 0:
hdu = i
comb_hdr = hdus[hdu].header.copy()
# Add header values from the primary header that aren't
# present in the extension header.
comb_hdr.extend(hdr, unique=True)
hdr = comb_hdr
log.info("first HDU with data is extension "
"{0}.".format(hdu))
break
if 'bunit' in hdr:
fits_unit_string = hdr['bunit']
# patch to handle FITS files using ADU for the unit instead of the
# standard version of 'adu'
if fits_unit_string.strip().lower() == 'adu':
fits_unit_string = fits_unit_string.lower()
else:
fits_unit_string = None
if unit is not None and fits_unit_string:
log.info("using the unit {0} passed to the FITS reader instead of "
"the unit {1} in the FITS file.".format(unit,
fits_unit_string))
use_unit = unit or fits_unit_string
hdr, wcs = _generate_wcs_and_update_header(hdr)
ccd_data = CCDData(hdus[hdu].data, meta=hdr, unit=use_unit,
mask=mask, uncertainty=uncertainty, wcs=wcs)
return ccd_data
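# A hedged usage sketch for the reader above; 'image.fits' is a hypothetical
# file name. Once the reader/writer are registered with the I/O registry at
# the bottom of this module, the same call is normally spelled
# CCDData.read('image.fits', unit='adu').
#
#     ccd = fits_ccddata_reader('image.fits', unit='adu')
#     print(ccd.unit)   # adu
#     print(ccd.wcs)    # WCS built from the header, or None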
def fits_ccddata_writer(ccd_data, filename, hdu_mask='MASK',
hdu_uncertainty='UNCERT', hdu_flags=None, **kwd):
"""
Write CCDData object to FITS file.
Parameters
----------
filename : str
Name of file.
hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
``None`` for flags.
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
- If ``self.uncertainty`` is set but has a unit differing from
``self.data``.
NotImplementedError
Saving flags is not supported.
"""
hdu = ccd_data.to_hdu(hdu_mask=hdu_mask, hdu_uncertainty=hdu_uncertainty,
hdu_flags=hdu_flags)
hdu.writeto(filename, **kwd)
with registry.delay_doc_updates(CCDData):
registry.register_reader('fits', CCDData, fits_ccddata_reader)
registry.register_writer('fits', CCDData, fits_ccddata_writer)
registry.register_identifier('fits', CCDData, fits.connect.is_fits)
try:
CCDData.read.__doc__ = fits_ccddata_reader.__doc__
except AttributeError:
CCDData.read.__func__.__doc__ = fits_ccddata_reader.__doc__
try:
CCDData.write.__doc__ = fits_ccddata_writer.__doc__
except AttributeError:
CCDData.write.__func__.__doc__ = fits_ccddata_writer.__doc__
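# A minimal round-trip sketch using the registered reader/writer above; the
# file name is hypothetical and ``overwrite=True`` assumes that keyword is
# accepted by ``HDUList.writeto`` and forwarded through ``**kwd``.
#
#     ccd = CCDData(np.ones((10, 10)), unit='adu')
#     ccd.write('roundtrip.fits', overwrite=True)
#     restored = CCDData.read('roundtrip.fits')
#     assert restored.unit == ccd.unit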
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime as dt
import unittest
import pytest
from marshmallow import ValidationError
from parameterized import parameterized
from airflow.api_connexion.schemas.task_instance_schema import (
clear_task_instance_form,
set_task_instance_state_form,
task_instance_schema,
)
from airflow.models import SlaMiss, TaskInstance as TI
from airflow.operators.dummy import DummyOperator
from airflow.utils.platform import getuser
from airflow.utils.state import State
from airflow.utils.timezone import datetime
class TestTaskInstanceSchema:
@pytest.fixture(autouse=True)
def set_attrs(self, session, dag_maker):
self.default_time = datetime(2020, 1, 1)
with dag_maker(dag_id="TEST_DAG_ID", session=session):
self.task = DummyOperator(task_id="TEST_TASK_ID", start_date=self.default_time)
self.dr = dag_maker.create_dagrun(execution_date=self.default_time)
session.flush()
self.default_ti_init = {
"run_id": None,
"state": State.RUNNING,
}
self.default_ti_extras = {
"dag_run": self.dr,
"start_date": self.default_time + dt.timedelta(days=1),
"end_date": self.default_time + dt.timedelta(days=2),
"pid": 100,
"duration": 10000,
"pool": "default_pool",
"queue": "default_queue",
}
yield
session.rollback()
def test_task_instance_schema_without_sla(self, session):
ti = TI(task=self.task, **self.default_ti_init)
for key, value in self.default_ti_extras.items():
setattr(ti, key, value)
serialized_ti = task_instance_schema.dump((ti, None))
expected_json = {
"dag_id": "TEST_DAG_ID",
"duration": 10000.0,
"end_date": "2020-01-03T00:00:00+00:00",
"execution_date": "2020-01-01T00:00:00+00:00",
"executor_config": "{}",
"hostname": "",
"max_tries": 0,
"operator": "DummyOperator",
"pid": 100,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 1,
"queue": "default_queue",
"queued_when": None,
"sla_miss": None,
"start_date": "2020-01-02T00:00:00+00:00",
"state": "running",
"task_id": "TEST_TASK_ID",
"try_number": 0,
"unixname": getuser(),
"dag_run_id": None,
}
assert serialized_ti == expected_json
def test_task_instance_schema_with_sla(self, session):
sla_miss = SlaMiss(
task_id="TEST_TASK_ID",
dag_id="TEST_DAG_ID",
execution_date=self.default_time,
)
session.add(sla_miss)
session.flush()
ti = TI(task=self.task, **self.default_ti_init)
for key, value in self.default_ti_extras.items():
setattr(ti, key, value)
serialized_ti = task_instance_schema.dump((ti, sla_miss))
expected_json = {
"dag_id": "TEST_DAG_ID",
"duration": 10000.0,
"end_date": "2020-01-03T00:00:00+00:00",
"execution_date": "2020-01-01T00:00:00+00:00",
"executor_config": "{}",
"hostname": "",
"max_tries": 0,
"operator": "DummyOperator",
"pid": 100,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 1,
"queue": "default_queue",
"queued_when": None,
"sla_miss": {
"dag_id": "TEST_DAG_ID",
"description": None,
"email_sent": False,
"execution_date": "2020-01-01T00:00:00+00:00",
"notification_sent": False,
"task_id": "TEST_TASK_ID",
"timestamp": None,
},
"start_date": "2020-01-02T00:00:00+00:00",
"state": "running",
"task_id": "TEST_TASK_ID",
"try_number": 0,
"unixname": getuser(),
"dag_run_id": None,
}
assert serialized_ti == expected_json
class TestClearTaskInstanceFormSchema(unittest.TestCase):
@parameterized.expand(
[
(
[
{
"dry_run": False,
"reset_dag_runs": True,
"only_failed": True,
"only_running": True,
}
]
),
(
[
{
"dry_run": False,
"reset_dag_runs": True,
"end_date": "2020-01-01T00:00:00+00:00",
"start_date": "2020-01-02T00:00:00+00:00",
}
]
),
(
[
{
"dry_run": False,
"reset_dag_runs": True,
"task_ids": [],
}
]
),
]
)
def test_validation_error(self, payload):
with pytest.raises(ValidationError):
clear_task_instance_form.load(payload)
class TestSetTaskInstanceStateFormSchema:
current_input = {
"dry_run": True,
"task_id": "print_the_context",
"execution_date": "2020-01-01T00:00:00+00:00",
"include_upstream": True,
"include_downstream": True,
"include_future": True,
"include_past": True,
"new_state": "failed",
}
def test_success(self):
result = set_task_instance_state_form.load(self.current_input)
expected_result = {
'dry_run': True,
'execution_date': dt.datetime(2020, 1, 1, 0, 0, tzinfo=dt.timezone(dt.timedelta(0), '+0000')),
'include_downstream': True,
'include_future': True,
'include_past': True,
'include_upstream': True,
'new_state': 'failed',
'task_id': 'print_the_context',
}
assert expected_result == result
@parameterized.expand(
[
({"task_id": None},),
({"include_future": "foo"},),
({"execution_date": "NOW"},),
({"new_state": "INVALID_STATE"},),
({"execution_date": "2020-01-01T00:00:00+00:00", "dag_run_id": "dagrun_id"},),
]
)
def test_validation_error(self, override_data):
self.current_input.update(override_data)
with pytest.raises(ValidationError):
set_task_instance_state_form.load(self.current_input)
|
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline utilities for the ViT experiments."""
import math
from typing import Optional, Union, Callable
from absl import logging
from clu import deterministic_data
import flax
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
def _get_dataset_builder(
dataset: Union[str, tfds.core.DatasetBuilder],
data_dir: Optional[str] = None) -> tfds.core.DatasetBuilder:
"""Returns a dataset builder."""
if isinstance(dataset, str):
dataset_builder = tfds.builder(dataset, data_dir=data_dir)
elif isinstance(dataset, tfds.core.DatasetBuilder):
dataset_builder = dataset
else:
raise ValueError("`dataset` must be a string or tfds.core.DatasetBuilder. "
f"Received {dataset} instead.")
return dataset_builder
def _get_process_split(split, process_index, process_count, drop_remainder):
"""Returns the split for the given process given a multi-process setup."""
splits = tfds.even_splits(
split, n=process_count, drop_remainder=drop_remainder)
process_split = splits[process_index]
return process_split
def _get_process_num_examples(builder, split, process_batch_size, process_index,
process_count, drop_remainder):
"""Returns the number of examples in a given process's split."""
process_split = _get_process_split(
split,
process_index=process_index,
process_count=process_count,
drop_remainder=drop_remainder)
num_examples = builder.info.splits[process_split].num_examples
if drop_remainder:
device_batch_size = process_batch_size // jax.local_device_count()
num_examples = (
math.floor(num_examples / device_batch_size) * device_batch_size)
return num_examples
def get_num_examples(dataset: Union[str, tfds.core.DatasetBuilder],
split: str,
process_batch_size: int,
drop_remainder: bool = True,
process_count: Optional[int] = None,
data_dir: Optional[str] = None) -> int:
"""Returns the total number of examples in a (sharded) dataset split.
Args:
dataset: Either a dataset name or a dataset builder object.
split: Specifies which split of the data to load.
process_batch_size: Per process batch size.
drop_remainder: Whether to drop remainders when sharding across processes
and batching.
process_count: Number of global processes (over all "hosts") across
which the dataset will be sharded. If None, then the number of global
processes will be obtained from `jax.process_count()`.
data_dir: Directory for the dataset files.
Returns:
The number of examples in the dataset split that will be read when sharded
across available processes.
"""
dataset_builder = _get_dataset_builder(dataset, data_dir)
if process_count is None:
process_count = jax.process_count()
num_examples = 0
for i in range(process_count):
num_examples += _get_process_num_examples(
dataset_builder,
split=split,
process_batch_size=process_batch_size,
process_index=i,
process_count=process_count,
drop_remainder=drop_remainder)
remainder = dataset_builder.info.splits[split].num_examples - num_examples
if remainder:
warning = (f"Dropping {remainder} examples from the {split} split of the "
f"{dataset_builder.info.name} dataset.")
logging.warning(warning)
return num_examples
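# A hedged example of the helper above (the dataset name is illustrative):
# with two processes, a per-process batch size of 8 and drop_remainder=True,
# each process rounds its shard down to a multiple of its per-device batch
# size, so the returned count can be smaller than the raw split size.
#
#     n_train = get_num_examples(
#         "cifar10", split="train", process_batch_size=8, process_count=2)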
def _add_mask(batch, num_batch_dims):
"""Adds a mask to a dictionary of tensors."""
mask = tf.ones(tf.shape(list(batch.values())[0])[:num_batch_dims])
if "mask" in batch:
mask *= batch["mask"]
batch["mask"] = mask
return batch
def _pad_reshape_batch(batch, flat_batch_size, num_devices):
"""Pads and reshapes the tensors in a flattened batch."""
def f(x):
actual_batch_size = tf.shape(x)[0]
needed = flat_batch_size - actual_batch_size
zeros = tf.zeros(tf.concat([[needed], x.shape[1:]], axis=0), dtype=x.dtype)
new_x = tf.concat([x, zeros], axis=0)
new_x = tf.reshape(new_x, tf.concat([[num_devices, -1], x.shape[1:]],
axis=0))
return new_x
new_batch = {k: f(v) for k, v in batch.items()}
return new_batch
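# Worked example for the two helpers above (shapes are illustrative): a final
# partial batch of 5 images of shape [5, 32, 32, 3] with flat_batch_size=8
# and num_devices=2 is zero-padded to [8, 32, 32, 3] and reshaped to
# [2, 4, 32, 32, 3]; the "mask" entry added by _add_mask is padded the same
# way, so the 3 padded rows carry mask value 0 and can be ignored downstream.
#
#     batch = {"image": tf.zeros([5, 32, 32, 3]), "mask": tf.ones([5])}
#     out = _pad_reshape_batch(batch, flat_batch_size=8, num_devices=2)
#     # out["image"].shape == (2, 4, 32, 32, 3); out["mask"].shape == (2, 4)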
def get_data(
dataset: Union[str, tfds.core.DatasetBuilder],
split: str,
rng: Union[None, jnp.ndarray, tf.Tensor],
process_batch_size: int,
preprocess_fn: Optional[Callable[[deterministic_data.Features],
deterministic_data.Features]],
cache: bool = False,
num_epochs: Optional[int] = None,
repeat_after_batching: bool = False,
shuffle: bool = True,
shuffle_buffer_size: int = 10_000,
prefetch_size: int = 4,
drop_remainder: bool = True,
data_dir: Optional[str] = None,
process_index: Optional[int] = None,
process_count: Optional[int] = None,
) -> tf.data.Dataset:
"""Creates a standard input pipeline (shuffle, preprocess, batch).
Args:
dataset: Either a dataset name or a dataset builder object.
split: Specifies which split of the data to load. Will be sharded across all
available processes (globally over all "hosts"), and the unique sharded
subsplit corresponding to the current process will be returned.
rng: A jax.random.PRNG key or a tf.Tensor for TF stateless seeds to use for
seeding shuffle operations and preprocessing ops. Must be set if
shuffling.
process_batch_size: Per process batch size.
preprocess_fn: Function for preprocessing individual examples (which should
be Python dictionary of tensors).
cache: Whether to cache the unprocessed dataset in memory before
preprocessing and batching ("loaded"), after preprocessing and batching
("batched"), or not at all (False).
num_epochs: Number of epochs for which to repeat the dataset. None to repeat
forever.
repeat_after_batching: Whether to `repeat` the dataset before or after
batching.
shuffle: Whether to shuffle the dataset (both on file and example level).
shuffle_buffer_size: Number of examples in the shuffle buffer.
prefetch_size: The number of elements in the final dataset to prefetch in
the background. This should be a small (say <10) positive integer or
tf.data.AUTOTUNE.
drop_remainder: Whether to drop remainders when batching and splitting
across processes.
data_dir: Directory for the dataset files.
process_index: Integer id in the range [0, process_count) of the current
process in a multi-process setup. If None, then the index will be obtained
from `jax.process_index()`.
process_count: Number of global processes (over all "hosts") across which
the dataset will be sharded. If None, then the number of global processes
will be obtained from `jax.process_count()`.
Returns:
The dataset with preprocessed, masked, padded, and batched examples for the
unique sharded subset of `split` corresponding to the current process in a
multi-process setup.
"""
assert cache in ("loaded", "batched", False, None)
if process_index is None:
process_index = jax.process_index()
if process_count is None:
process_count = jax.process_count()
dataset_builder = _get_dataset_builder(dataset, data_dir)
if rng is not None:
rng = jax.random.fold_in(rng, process_index) # Derive RNG for this process.
process_split = _get_process_split(
split,
process_index=process_index,
process_count=process_count,
drop_remainder=drop_remainder)
dataset = deterministic_data.create_dataset(
dataset_builder,
split=process_split,
batch_dims=(),
rng=rng,
filter_fn=None,
preprocess_fn=preprocess_fn,
decoders={"image": tfds.decode.SkipDecoding()},
cache=cache == "loaded",
num_epochs=num_epochs if not repeat_after_batching else 1,
shuffle=shuffle,
shuffle_buffer_size=shuffle_buffer_size,
prefetch_size=0,
pad_up_to_batches=None,
drop_remainder=drop_remainder,
)
num_devices = jax.local_device_count()
if drop_remainder:
# If we're dropping the remainder, we can take the fast path of double
# batching to [num_devices, batch_size_per_device] and then adding a mask of
# ones for the two batch dimensions.
batch_size_per_device = process_batch_size // num_devices
batch_dims = [num_devices, batch_size_per_device]
for batch_size in reversed(batch_dims):
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.map(
lambda xs: _add_mask(xs, 2), num_parallel_calls=tf.data.AUTOTUNE)
else:
# If we're not dropping the remainder, then we define a flattened batch size
# that would divide evenly across devices, and then batch to that size with
# drop_remainder=False. Then we add a mask of ones for the examples given,
# pad each flattened batch with zeros (including the mask) to ensure all
# batches have the same number of examples, and then reshape to
# [num_devices, batch_size_per_device].
batch_size_per_device = math.ceil(process_batch_size / num_devices)
flat_batch_size = batch_size_per_device * num_devices
dataset = dataset.batch(flat_batch_size, drop_remainder=drop_remainder)
def f(xs):
return _pad_reshape_batch(_add_mask(xs, 1), flat_batch_size, num_devices)
dataset = dataset.map(f, num_parallel_calls=tf.data.AUTOTUNE)
if cache == "batched":
dataset = dataset.cache()
if repeat_after_batching:
dataset = dataset.repeat(num_epochs)
return dataset.prefetch(prefetch_size)
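# A hedged usage sketch of get_data together with start_input_pipeline
# (defined below); the dataset name and the identity preprocess_fn are
# illustrative only.
#
#     train_ds = get_data(
#         dataset="cifar10",
#         split="train",
#         rng=jax.random.PRNGKey(0),
#         process_batch_size=128,
#         preprocess_fn=lambda features: features,  # identity preprocessing
#         shuffle_buffer_size=1_000)
#     train_iter = start_input_pipeline(train_ds, n_prefetch=1)
#     first_batch = next(train_iter)  # dict of per-device arrays incl. "mask"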
def start_input_pipeline(dataset, n_prefetch, devices=None):
"""Creates a data iterator with optional prefetching and padding."""
it = iter(dataset)
def _prepare(x):
# Transforms x into read-only numpy array without copy if possible, see:
# https://github.com/tensorflow/tensorflow/issues/33254#issuecomment-542379165
return np.asarray(memoryview(x))
it = (jax.tree_map(_prepare, xs) for xs in it)
if n_prefetch:
it = flax.jax_utils.prefetch_to_device(it, n_prefetch, devices=devices)
return it
|
|
import base64
import json
import os
import threading
import queue
import uuid
from multiprocessing.managers import AcquirerProxy, BaseManager, BaseProxy, DictProxy, public_methods
from .utils import isomorphic_encode
class StashManager(BaseManager):
shared_data = {}
lock = threading.Lock()
def _get_shared():
return StashManager.shared_data
def _get_lock():
return StashManager.lock
StashManager.register("get_dict",
callable=_get_shared,
proxytype=DictProxy)
StashManager.register('Lock',
callable=_get_lock,
proxytype=AcquirerProxy)
# We have to create an explicit class here because the built-in
# AutoProxy has a bug with nested managers, and the MakeProxy
# method doesn't work with spawn-based multiprocessing, since the
# generated class can't be pickled for use in child processes.
class QueueProxy(BaseProxy):
_exposed_ = public_methods(queue.Queue)
for method in QueueProxy._exposed_:
def impl_fn(method):
def _impl(self, *args, **kwargs):
return self._callmethod(method, args, kwargs)
_impl.__name__ = method
return _impl
setattr(QueueProxy, method, impl_fn(method))
StashManager.register("Queue",
callable=queue.Queue,
proxytype=QueueProxy)
class StashServer(object):
def __init__(self, address=None, authkey=None, mp_context=None):
self.address = address
self.authkey = authkey
self.manager = None
self.mp_context = mp_context
def __enter__(self):
self.manager, self.address, self.authkey = start_server(self.address,
self.authkey,
self.mp_context)
store_env_config(self.address, self.authkey)
def __exit__(self, *args, **kwargs):
if self.manager is not None:
self.manager.shutdown()
def load_env_config():
address, authkey = json.loads(os.environ["WPT_STASH_CONFIG"])
if isinstance(address, list):
address = tuple(address)
else:
address = str(address)
authkey = base64.b64decode(authkey)
return address, authkey
def store_env_config(address, authkey):
authkey = base64.b64encode(authkey)
os.environ["WPT_STASH_CONFIG"] = json.dumps((address, authkey.decode("ascii")))
def start_server(address=None, authkey=None, mp_context=None):
if isinstance(authkey, str):
authkey = authkey.encode("ascii")
kwargs = {}
if mp_context is not None:
kwargs["ctx"] = mp_context
manager = StashManager(address, authkey, **kwargs)
manager.start()
address = manager._address
if isinstance(address, bytes):
address = address.decode("ascii")
return (manager, address, manager._authkey)
class LockWrapper(object):
def __init__(self, lock):
self.lock = lock
def acquire(self):
self.lock.acquire()
def release(self):
self.lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *args, **kwargs):
self.release()
#TODO: Consider expiring values after some fixed time for long-running
#servers
class Stash(object):
"""Key-value store for persisting data across HTTP/S and WS/S requests.
This data store is specifically designed for persisting data across server
requests. The synchronization is achieved by using the BaseManager from
the multiprocessing module so different processes can access the same data.
Stash can be used interchangeably between HTTP, HTTPS, WS and WSS servers.
A thing to note about WS/S servers is that they require additional steps in
the handlers for accessing the same underlying shared data in the Stash.
This can usually be achieved by using load_env_config(). When using Stash
interchangeably between HTTP/S and WS/S requests, the path part of the key
should be explicitly specified if accessing the same key/value subset.
The store has several unusual properties. Keys are of the form (path,
uuid), where path is, by default, the path in the HTTP request and
uuid is a unique id. In addition, the store is write-once, read-once,
i.e. the value associated with a particular key cannot be changed once
written and the read operation (called "take") is destructive. Taken together,
these properties make it difficult for data to accidentally leak
between different resources or different requests for the same
resource.
"""
_proxy = None
lock = None
manager = None
_initializing = threading.Lock()
def __init__(self, default_path, address=None, authkey=None):
self.default_path = default_path
self._get_proxy(address, authkey)
self.data = Stash._proxy
def _get_proxy(self, address=None, authkey=None):
if address is None and authkey is None:
Stash._proxy = {}
Stash.lock = threading.Lock()
# Initializing the proxy involves connecting to the remote process and
# retrieving two proxied objects. This process is not inherently
# atomic, so a lock must be used to make it so. Atomicity ensures that
# only one thread attempts to initialize the connection and that any
# threads running in parallel correctly wait for initialization to be
# fully complete.
with Stash._initializing:
if Stash.lock:
return
Stash.manager = StashManager(address, authkey)
Stash.manager.connect()
Stash._proxy = self.manager.get_dict()
Stash.lock = LockWrapper(self.manager.Lock())
def get_queue(self):
return self.manager.Queue()
def _wrap_key(self, key, path):
if path is None:
path = self.default_path
# This key format is required to support using the path, since the data
# passed into the stash can be a DictProxy, which wouldn't detect
# changes when writing to a subdict.
if isinstance(key, bytes):
# UUIDs are within the ASCII charset.
key = key.decode('ascii')
return (isomorphic_encode(path), uuid.UUID(key).bytes)
def put(self, key, value, path=None):
"""Place a value in the shared stash.
:param key: A UUID to use as the data's key.
:param value: The data to store. This can be any python object.
:param path: The path that has access to read the data (by default
the current request path)"""
if value is None:
raise ValueError("SharedStash value may not be set to None")
internal_key = self._wrap_key(key, path)
if internal_key in self.data:
raise StashError("Tried to overwrite existing shared stash value "
"for key %s (old value was %s, new value is %s)" %
(internal_key, self.data[internal_key], value))
else:
self.data[internal_key] = value
def take(self, key, path=None):
"""Remove a value from the shared stash and return it.
:param key: A UUID to use as the data's key.
:param path: The path that has access to read the data (by default
the current request path)"""
internal_key = self._wrap_key(key, path)
value = self.data.get(internal_key, None)
if value is not None:
try:
self.data.pop(internal_key)
except KeyError:
# Silently continue when pop error occurs.
pass
return value
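# A minimal sketch of the write-once/read-once semantics described in the
# class docstring, using the in-process fallback (no address/authkey, so a
# plain dict backs the store); the path and UUID are arbitrary.
#
#     stash = Stash("/example/path")
#     key = str(uuid.uuid4())
#     stash.put(key, {"token": 42})
#     assert stash.take(key) == {"token": 42}
#     assert stash.take(key) is None  # value was consumed by the first take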
class StashError(Exception):
pass
|
|
"""
Component that will help set the ffmpeg component.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/ffmpeg/
"""
import asyncio
import logging
import os
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
ATTR_ENTITY_ID, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
from homeassistant.config import load_yaml_config_file
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
DOMAIN = 'ffmpeg'
REQUIREMENTS = ["ha-ffmpeg==1.2"]
_LOGGER = logging.getLogger(__name__)
SERVICE_START = 'start'
SERVICE_STOP = 'stop'
SERVICE_RESTART = 'restart'
DATA_FFMPEG = 'ffmpeg'
CONF_INITIAL_STATE = 'initial_state'
CONF_INPUT = 'input'
CONF_FFMPEG_BIN = 'ffmpeg_bin'
CONF_EXTRA_ARGUMENTS = 'extra_arguments'
CONF_OUTPUT = 'output'
CONF_RUN_TEST = 'run_test'
DEFAULT_BINARY = 'ffmpeg'
DEFAULT_RUN_TEST = True
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_FFMPEG_BIN, default=DEFAULT_BINARY): cv.string,
vol.Optional(CONF_RUN_TEST, default=DEFAULT_RUN_TEST): cv.boolean,
}),
}, extra=vol.ALLOW_EXTRA)
SERVICE_FFMPEG_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
def start(hass, entity_id=None):
"""Start a ffmpeg process on entity."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_START, data)
def stop(hass, entity_id=None):
"""Stop a ffmpeg process on entity."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_STOP, data)
def restart(hass, entity_id=None):
"""Restart a ffmpeg process on entity."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_RESTART, data)
@asyncio.coroutine
def async_setup(hass, config):
"""Setup the FFmpeg component."""
conf = config.get(DOMAIN, {})
manager = FFmpegManager(
hass,
conf.get(CONF_FFMPEG_BIN, DEFAULT_BINARY),
conf.get(CONF_RUN_TEST, DEFAULT_RUN_TEST)
)
descriptions = yield from hass.loop.run_in_executor(
None, load_yaml_config_file,
os.path.join(os.path.dirname(__file__), 'services.yaml'))
# register service
@asyncio.coroutine
def async_service_handle(service):
"""Handle service ffmpeg process."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
devices = [device for device in manager.entities
if device.entity_id in entity_ids]
else:
devices = manager.entities
tasks = []
for device in devices:
if service.service == SERVICE_START:
tasks.append(device.async_start_ffmpeg())
elif service.service == SERVICE_STOP:
tasks.append(device.async_stop_ffmpeg())
else:
tasks.append(device.async_restart_ffmpeg())
if tasks:
yield from asyncio.wait(tasks, loop=hass.loop)
hass.services.async_register(
DOMAIN, SERVICE_START, async_service_handle,
descriptions[DOMAIN].get(SERVICE_START), schema=SERVICE_FFMPEG_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_STOP, async_service_handle,
descriptions[DOMAIN].get(SERVICE_STOP), schema=SERVICE_FFMPEG_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_RESTART, async_service_handle,
descriptions[DOMAIN].get(SERVICE_RESTART),
schema=SERVICE_FFMPEG_SCHEMA)
hass.data[DATA_FFMPEG] = manager
return True
class FFmpegManager(object):
"""Helper for ha-ffmpeg."""
def __init__(self, hass, ffmpeg_bin, run_test):
"""Initialize helper."""
self.hass = hass
self._cache = {}
self._bin = ffmpeg_bin
self._run_test = run_test
self._entities = []
@property
def binary(self):
"""Return ffmpeg binary from config."""
return self._bin
@property
def entities(self):
"""Return ffmpeg entities for services."""
return self._entities
@callback
def async_register_device(self, device):
"""Register a ffmpeg process/device."""
self._entities.append(device)
@asyncio.coroutine
def async_shutdown(event):
"""Stop ffmpeg process."""
yield from device.async_stop_ffmpeg()
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, async_shutdown)
# start on startup
if device.initial_state:
@asyncio.coroutine
def async_start(event):
"""Start ffmpeg process."""
yield from device.async_start_ffmpeg()
yield from device.async_update_ha_state()
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, async_start)
@asyncio.coroutine
def async_run_test(self, input_source):
"""Run test on this input. TRUE is deactivate or run correct.
This method must be run in the event loop.
"""
from haffmpeg import Test
if self._run_test:
# if in cache
if input_source in self._cache:
return self._cache[input_source]
# run test
ffmpeg_test = Test(self.binary, loop=self.hass.loop)
success = yield from ffmpeg_test.run_test(input_source)
if not success:
_LOGGER.error("FFmpeg '%s' test fails!", input_source)
self._cache[input_source] = False
return False
self._cache[input_source] = True
return True
class FFmpegBase(Entity):
"""Interface object for ffmpeg."""
def __init__(self, initial_state=True):
"""Initialize ffmpeg base object."""
self.ffmpeg = None
self.initial_state = initial_state
@property
def available(self):
"""Return True if entity is available."""
return self.ffmpeg.is_running
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return False
def async_start_ffmpeg(self):
"""Start a ffmpeg process.
This method must be run in the event loop and returns a coroutine.
"""
raise NotImplementedError()
def async_stop_ffmpeg(self):
"""Stop a ffmpeg process.
This method must be run in the event loop and returns a coroutine.
"""
return self.ffmpeg.close()
@asyncio.coroutine
def async_restart_ffmpeg(self):
"""Stop a ffmpeg process."""
yield from self.async_stop_ffmpeg()
yield from self.async_start_ffmpeg()
|
|
import subprocess
import sys
import threading
from time import sleep
import os, signal
class Controller():
"""
Gets input from a controller recognized by xboxdrv.
Parameters
----------
return_values: list, optional
Values to return from the controller. Use get_input_names()
to get the names of these values.
return_as: list, optional
Names to return the input values as. It must be the same length
as return_values.
in_range: tuple, optional
Range in the format (min, max), where min is the lowest incoming value and
max the greatest.
out_range: tuple, optional
Range in the format (min, max), where min is the lowest desired outgoing
value and max the greatest.
Returns
-------
controller_outputs: dict
Dict with an entry for each button specified on the controller.
NOTE:
return_values may be given without return_as; however, in_range and out_range must either both be given or both omitted.
"""
class __parser__(threading.Thread):
def __init__(self, _xboxdrv_process):
"""
Parses the input from xboxdrv. It runs as a separate thread to prevent
stale data when get_values() is called.
"""
threading.Thread.__init__(self)
self.xboxdrv = _xboxdrv_process
self.control_inputs = {}
self.running = True
def run(self):
while self.running:
if(self.xboxdrv.poll() is not None):
#print "subprocess has died, raising SIGINT"
self.running = False
#os.kill(os.getpid(), signal.SIGINT)
line = self.xboxdrv.stdout.readline()
try:
#This is a somewhat hacky method but it should work for all controllers that xboxdrv can handle.
#
#xboxdrv prints off controller inputs as "X1:120 Y1:  10 select:1" etc...
#Just splitting by spaces does not work as it would separate "Y1:" and "10".
#This method removes all spaces after a ":" but does not affect the spaces after the numerical
#value of an input.
line = line.replace(":      ", ":")
line = line.replace(":     ", ":")
line = line.replace(":    ", ":")
line = line.replace(":   ", ":")
line = line.replace(":  ", ":")
line = line.replace(": ", ":")
# Sometimes there are two spaces between values; replace them with one
line = line.replace("  ", " ")
entries = line.split(" ")
self.control_inputs = {}
for entry in entries:
s = entry.split(":")
self.control_inputs[str(s[0])] = int(s[-1])
# Catches controller info that xboxdrv outputs at the beginning
except ValueError:
pass
print "parse thread exiting!"
def __init__(self, return_values=None, return_as=None, in_range=None, out_range=None):
if return_values and return_as:
if not len(return_values) == len(return_as):
sys.exit("return_values and return_as must be the same length!")
elif return_as and not return_values:
sys.exit("No values to return!")
if not in_range and not out_range:
pass
elif len(in_range) != 2 or len(out_range) != 2:
sys.exit("in_range and out_range must be in format: (min, max)")
self._in_range = in_range
self._out_range = out_range
self._return_values = return_values
self._return_as = return_as
self.controller = subprocess.Popen(["sudo", "xboxdrv", "-d"], stdout=subprocess.PIPE)
# This waits for password input
sleep(2)
self.line_parser = self.__parser__(self.controller)
self.line_parser.daemon = True
self.line_parser.start()
self.outputs = {}
def map_range(self, x, in_min, in_max, out_min, out_max):
"""
Maps an input with a specified input range to a specified output range
Parameters
----------
x: float, int
Input to be mapped
in_min: float, int,
Minimum of input range
in_max: float, int,
Maximum of input range
out_min: float, int,
Minimum of output range
out_max: float, int,
Maximum of output range
Returns
-------
out: float
Scaled input value
"""
x = float(x)
in_min = float(in_min)
in_max = float(in_max)
out_min = float(out_min)
out_max = float(out_max)
return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
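# Worked example of the linear mapping above: an input of 512 on a 0..1023
# axis mapped onto -100..100 gives (512 - 0) * 200 / 1023 - 100, i.e. roughly
# 0.1, just above the centre of the output range.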
def get_input_names(self):
"""
Gets a list of the names of all the values coming from xboxdrv.
Parameters
----------
None
Returns
-------
input_names: list
The names of each value as xboxdrv sees it
"""
sleep(3)
#TODO: this could fail if the parser catches a line from the info text that xboxdrv puts out
while not self.line_parser.control_inputs:
pass
names = []
for key in self.line_parser.control_inputs.keys():
names.append(key)
return names
def kill_controller(self):
#kill the subprocess, so we can exit cleanly
self.controller.terminate()
def get_values(self):
"""
Returns the values specified by the caller or all the values if no values specified
Parameters
----------
None
Returns
-------
controller_outputs: dict
Dict with an entry for each button specified on the controller.
"""
self.outputs = {}
# Changes return values names to specified names
if self._return_values and self._return_as:
try:
for key in range(len(self._return_values)):
self.outputs[str(self._return_as[key])] = self.line_parser.control_inputs[self._return_values[key]]
except KeyError:
pass
# Does not change names but does only return specified value
elif self._return_values and not self._return_as:
try:
for key in range(len(self._return_values)):
self.outputs[str(self._return_values[key])] = self.line_parser.control_inputs[self._return_values[key]]
except KeyError:
pass
else:
self.outputs = self.line_parser.control_inputs
# Maps values to a range
if self._in_range and self._out_range:
for key in self.outputs:
self.outputs[key] = self.map_range(self.outputs[key], self._in_range[0], self._in_range[1], self._out_range[0], self._out_range[1])
return self.outputs
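# A hedged usage sketch, assuming xboxdrv is installed and a controller is
# connected; the axis names and ranges below are illustrative and should be
# taken from get_input_names() on the actual device.
#
#     pad = Controller(return_values=["X1", "Y1"],
#                      return_as=["steer", "throttle"],
#                      in_range=(-32768, 32767),
#                      out_range=(-1.0, 1.0))
#     try:
#         while True:
#             print(pad.get_values())  # e.g. {'steer': 0.02, 'throttle': -0.5}
#     finally:
#         pad.kill_controller()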
|
|
# -*- coding: utf-8 -*-
"""
xkbgroup.core
~~~~~~~~~~~~~
This module implements the XKeyboard API.
Classes:
* XKeyboard: the main class.
Exceptions:
* X11Error: raised for errors on X server issues.
:copyright: (c) 2016 by Nguyen Duc My.
:license: MIT, see LICENSE for more details.
"""
import os
import re
import sys
from ctypes import *
from collections import UserList, namedtuple
from .xkb import *
# Error-related utilities
OPEN_DISPLAY_ERRORS = {
XkbOD_BadLibraryVersion: "{libname} uses XKB version {used_major}.{used_minor}\n"
"Xlib supports incompatible version {found_major}.{found_minor}",
XkbOD_ConnectionRefused: "Cannot open display \"{display_name}\"",
XkbOD_BadServerVersion: "{libname} uses XKB version {used_major}.{used_minor}\n"
"Server \"{display_name}\" uses incompatible version "
"{found_major}.{found_minor}",
XkbOD_NonXkbServer: "XKB extension not present on \"{display_name}\"",
}
GET_CONTROLS_ERRORS = {
BadAlloc: "Unable to allocate storage",
BadImplementation: "Invalid reply from server",
BadMatch: "A compatible version of Xkb was not available in the server or "
"an argument has correct type and range, but is otherwise invalid",
}
GET_NAMES_ERRORS = {
BadAlloc: "Unable to allocate storage",
BadImplementation: "Invalid reply from server",
BadLength: "The length of a request is shorter or longer than that "
"required to minimally contain the arguments",
BadMatch: "A compatible version of Xkb was not available in the server or "
"an argument has correct type and range, but is otherwise invalid",
}
class X11Error(Exception):
"""Exception class, raised for errors on X server issues."""
def _ensure_type(obj, type):
if not isinstance(obj, type):
raise ValueError("Wrong value type, must be {}.".format(str(type)))
# Both Python 3.2+ compatible and more neat than assigning to __doc__
class GroupData(namedtuple("GroupData", ["num", "name", "symbol", "variant"])):
"""Contains all data about the specific group."""
def __format__(self, format_spec):
"""If format_spec is not empty, use it as a format string in
format_spec.format(...) with keyword arguments named corresponding to
fields. Otherwise just return str(self).
:param format_spec: format specifier
:rtype: str
"""
if len(format_spec) > 0:
return format_spec.format(
num=self.num,
name=self.name,
symbol=self.symbol,
variant=self.variant)
return str(self)
class XKeyboard:
"""The main class.
Usage examples:
# Assume we have the following configuration
$ setxkbmap -layout us,ru,ua,fr
$ python
>>> from xkbgroup import XKeyboard
>>> xkb = XKeyboard()
>>> xkb.group_num
1
>>> xkb.group_num = 2
>>> xkb.group_num
2
>>> xkb.group_num -= 2
>>> xkb.group_num
0
>>> xkb.group_name
English (US)
>>> xkb.group_name = 'Ukrainian'
>>> xkb.group_name
Ukrainian
>>> xkb.group_num
2
>>> xkb.group_symbol
ua
>>> xkb.group_symbol = 'fr'
>>> xkb.group_symbol
fr
>>> xkb.group_variant
''
>>> xkb.group_num -= 3
>>> xkb.group_variant
''
>>> xkb.group_num
0
>>>
"""
# Fields with default values
non_symbols = {"capslock", "pc", "inet", "group", "terminate"}
# Main methods
def __init__(self, auto_open=True, non_symbols=None):
"""
:param auto_open: if True automatically call open_display().
:param non_symbols: either iterable of string non-symbol names or
None to use the default set of non-symbol names.
"""
if non_symbols:
self.non_symbols = non_symbols
if auto_open:
self.open_display()
def open_display(self):
"""Establishes connection with X server and prepares objects
necessary to retrieve and send data.
"""
self.close_display() # Properly finish previous open_display()
XkbIgnoreExtension(False)
display_name = None
major = c_int(XkbMajorVersion)
minor = c_int(XkbMinorVersion)
reason = c_int()
self._display = XkbOpenDisplay(
display_name,
None, None, byref(major), byref(minor), byref(reason))
if not self._display:
if reason.value in OPEN_DISPLAY_ERRORS:
# Assume POSIX conformance
display_name = os.getenv("DISPLAY") or "default"
raise X11Error(OPEN_DISPLAY_ERRORS[reason.value].format(
libname="xkbgroup",
used_major=XkbMajorVersion,
used_minor=XkbMinorVersion,
found_major=major.value,
found_minor=minor.value,
display_name=display_name)
+ ".")
else:
raise X11Error("Unknown error {} from XkbOpenDisplay.".format(reason.value))
self._keyboard_description = XkbGetMap(self._display, 0, XkbUseCoreKbd)
if not self._keyboard_description:
self.close_display()
raise X11Error("Failed to get keyboard description.")
# Controls mask doesn't affect the availability of xkb->ctrls->num_groups anyway
# Just use a valid value, and xkb->ctrls->num_groups will be definitely set
status = XkbGetControls(self._display, XkbAllControlsMask, self._keyboard_description)
if status != Success:
self.close_display()
raise X11Error(GET_CONTROLS_ERRORS[status] + ".")
names_mask = XkbSymbolsNameMask | XkbGroupNamesMask
status = XkbGetNames(self._display, names_mask, self._keyboard_description)
if status != Success:
self.close_display()
raise X11Error(GET_NAMES_ERRORS[status] + ".")
def close_display(self):
"""Closes connection with X server and cleans up objects
created on open_display().
"""
if hasattr(self, "_keyboard_description") and self._keyboard_description:
names_mask = XkbSymbolsNameMask | XkbGroupNamesMask
XkbFreeNames(self._keyboard_description, names_mask, True)
XkbFreeControls(self._keyboard_description, XkbAllControlsMask, True)
XkbFreeClientMap(self._keyboard_description, 0, True)
del self._keyboard_description
if hasattr(self, "_display") and self._display:
XCloseDisplay(self._display)
del self._display
def __del__(self):
self.close_display()
def __enter__(self):
self.open_display()
return self
def __exit__(self, type, value, traceback):
self.close_display()
# Properties for all layouts
@property
def groups_data(self):
"""All data about all groups (get-only).
:getter: Returns all data about all groups
:type: list of GroupData
"""
return _ListProxy(GroupData(num, name, symbol, variant)
for (num, name, symbol, variant)
in zip(range(self.groups_count),
self.groups_names,
self.groups_symbols,
self.groups_variants))
@property
def groups_count(self):
"""Number of all groups (get-only).
:getter: Returns number of all groups
:type: int
"""
if self._keyboard_description.contents.ctrls is not None:
return self._keyboard_description.contents.ctrls.contents.num_groups
else:
groups_source = self._groups_source
groups_count = 0
while (groups_count < XkbNumKbdGroups and
groups_source[groups_count] != None_):
groups_count += 1
return groups_count
@property
def groups_names(self):
"""Names of all groups (get-only).
:getter: Returns names of all groups
:type: list of str
"""
return _ListProxy(self._get_group_name_by_num(i) for i in range(self.groups_count))
@property
def groups_symbols(self):
"""Symbols of all groups (get-only).
:getter: Returns symbols of all groups
:type: list of str
"""
return _ListProxy(symdata.symbol for symdata in self._symboldata_list)
@property
def groups_variants(self):
"""Variants of all groups (get-only).
:getter: Returns variants of all groups
:type: list of str
"""
return _ListProxy(symdata.variant or "" for symdata in self._symboldata_list)
# Properties and methods for current layout
@property
def group_data(self):
"""All data about the current group (get-only).
:getter: Returns all data about the current group
:type: GroupData
"""
return GroupData(self.group_num,
self.group_name,
self.group_symbol,
self.group_variant)
@property
def group_num(self):
"""Current group number.
:getter: Returns current group number
:setter: Sets current group number
:type: int
"""
xkb_state = XkbStateRec()
XkbGetState(self._display, XkbUseCoreKbd, byref(xkb_state))
return xkb_state.group
@group_num.setter
def group_num(self, value):
_ensure_type(value, int)
if XkbLockGroup(self._display, XkbUseCoreKbd, value):
XFlush(self._display)
else:
self.close_display()
raise X11Error("Failed to set group number.")
@property
def group_name(self):
"""Current group full name.
:getter: Returns current group name
:setter: Sets current group name
:type: str
"""
return self._get_group_name_by_num(self.group_num)
@group_name.setter
def group_name(self, value):
_ensure_type(value, str)
groups_names = self.groups_names
n_mapping = {groups_names[i]: i for i in range(len(groups_names))}
try:
self.group_num = n_mapping[value]
except KeyError as exc:
raise ValueError("Wrong group name.") from exc
@property
def group_symbol(self):
"""Current group symbol.
:getter: Returns current group symbol
:setter: Sets current group symbol
:type: str
"""
s_mapping = {symdata.index: symdata.symbol for symdata in self._symboldata_list}
return s_mapping[self.group_num]
@group_symbol.setter
def group_symbol(self, value):
_ensure_type(value, str)
s_mapping = {symdata.symbol: symdata.index for symdata in self._symboldata_list}
try:
self.group_num = s_mapping[value]
except KeyError as exc:
raise ValueError("Wrong group symbol.") from exc
@property
def group_variant(self):
"""Current group variant (get-only).
:getter: Returns current group variant
:type: str
"""
v_mapping = {symdata.index: symdata.variant for symdata in self._symboldata_list}
return v_mapping[self.group_num] or ""
# Current group variant is a get-only value because variants are associated
# with symbols in /usr/share/X11/xkb/rules/evdev.lst and specified at
# setxkbmap call time
# Formatting method (for the great goodness!)
def format(self, format_str):
"""Returns a formatted version of format_str.
The only named replacement fields supported by this method and
their corresponding API calls are:
* {num} group_num
* {name} group_name
* {symbol} group_symbol
* {variant} group_variant
* {current_data} group_data
* {count} groups_count
* {names} groups_names
* {symbols} groups_symbols
* {variants} groups_variants
* {all_data} groups_data
Passing other replacement fields will result in raising exceptions.
:param format_str: a new style format string
:rtype: str
"""
return format_str.format(**{
"num": self.group_num,
"name": self.group_name,
"symbol": self.group_symbol,
"variant": self.group_variant,
"current_data": self.group_data,
"count": self.groups_count,
"names": self.groups_names,
"symbols": self.groups_symbols,
"variants": self.groups_variants,
"all_data": self.groups_data})
def __format__(self, format_spec):
"""Handle format(xkb, format_spec) as xkb.format(format_spec) if
format_spec is not empty. Otherwise just return str(self).
:param format_spec: format specifier
:rtype: str
"""
if len(format_spec) > 0:
return self.format(format_spec)
return str(self)
# Private properties and methods
@property
def _groups_source(self):
return self._keyboard_description.contents.names.contents.groups
@property
def _symbols_source(self):
return self._keyboard_description.contents.names.contents.symbols
@property
def _symboldata_list(self):
symbol_str_atom = self._symbols_source
if symbol_str_atom != None_:
b_symbol_str = XGetAtomName(self._display, symbol_str_atom)
return _parse_symbols(b_symbol_str.decode(), self.non_symbols)
else:
raise X11Error("Failed to get symbol names.")
def _get_group_name_by_num(self, group_num):
cur_group_atom = self._groups_source[group_num]
if cur_group_atom != None_:
b_group_name = XGetAtomName(self._display, cur_group_atom)
return b_group_name.decode() if b_group_name else ""
else:
raise X11Error("Failed to get group name.")
SymbolData = namedtuple("SymbolData", ["symbol", "variant", "index"])
SYMBOL_REGEX = re.compile(r"""
(?P<symbol>\w+)
(?: \( (?P<variant>\w+.*) \) )?
(?: : (?P<index>\d+) )?
""", re.VERBOSE)
class _Compat_SRE_Pattern:
def __init__(self, re_obj):
self.re_obj = re_obj
def __getattr__(self, name):
return getattr(self.re_obj, name)
# re_obj.fullmatch is a Python 3.4+ only feature
def fullmatch(self, string, pos=None, endpos=None):
pos = pos if pos else 0
endpos = endpos if endpos else len(string)
match = self.re_obj.match(string, pos, endpos)
if match and match.span() != (pos, endpos):
return None
return match
if sys.version_info < (3, 4):
SYMBOL_REGEX = _Compat_SRE_Pattern(SYMBOL_REGEX)
def _parse_symbols(symbols_str, non_symbols, default_index=0):
def get_symboldata(symstr):
match = SYMBOL_REGEX.fullmatch(symstr)
if match:
index = match.group('index')
return SymbolData(
match.group('symbol'),
match.group('variant'),
int(index) - 1 if index else default_index)
else:
raise X11Error("Malformed symbol string: {!r}".format(symstr))
symboldata_list = []
for symstr in symbols_str.split('+'):
symboldata = get_symboldata(symstr)
if symboldata.symbol not in non_symbols:
symboldata_list.append(symboldata)
indices = [symdata.index for symdata in symboldata_list]
assert len(indices) == len(set(indices)), ("Duplicate index in %r" %
symboldata_list)
return symboldata_list
_COLON_SEPARATOR_REGEX = re.compile(r"(?<!\\):")
class _ListProxy(UserList):
def __format__(self, format_spec):
if len(format_spec) > 0:
spec_parts = _COLON_SEPARATOR_REGEX.split(format_spec)
spec_parts = [s.replace("\\:", ":") for s in spec_parts]
assert len(spec_parts) > 0
elem_spec = spec_parts[0]
elems_formatted = [format(x, elem_spec) for x in self.data]
if len(spec_parts) == 1:
assert len(elem_spec) > 0
return str(elems_formatted)
elif len(spec_parts) == 2:
sep = spec_parts[1]
return sep.join(elems_formatted)
else:
raise ValueError(
"Too many specifiers: \"{}\"".format(format_spec))
return str(self.data)
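# Example of the list format specifier handled above: the text before the
# first unescaped colon formats each element, the text after it joins them.
#
#     >>> format(_ListProxy(["us", "ru", "ua"]), ":/")
#     'us/ru/ua'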
__all__ = ["XKeyboard", "GroupData", "X11Error"]
def print_xkeyboard(xkb):
print("xkb {")
contents = [
"%d groups {%s}," % (xkb.groups_count, ", ".join(xkb.groups_names)),
"symbols {%s}" % ", ".join(xkb.groups_symbols),
"variants {%s}" % ", ".join('"{}"'.format(variant) for variant in xkb.groups_variants),
"current group: %s (%d) - %s - \"%s\"" %
(xkb.group_symbol, xkb.group_num, xkb.group_name, xkb.group_variant)
]
print("\n".join("\t" + line for line in contents))
print("}")
def test():
with XKeyboard() as xkb:
print_xkeyboard(xkb)
xkb.group_num += 2
print_xkeyboard(xkb)
xkb.group_num -= 3
print_xkeyboard(xkb)
xkb.group_num -= 2
print_xkeyboard(xkb)
if __name__ == '__main__':
test()
|
|
import json
from django.forms import ModelForm
from django import forms
from django.forms.models import model_to_dict
from crispy_forms.layout import Layout, HTML
from crispy_forms.bootstrap import Field
from hs_core.forms import BaseFormHelper
from .models import Site, Variable, Method, ProcessingLevel, TimeSeriesResult, UTCOffSet
NO_SELECTION_DROPDOWN_OPTION = "-----"
class SiteFormHelper(BaseFormHelper):
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
show_site_code_selection=False, *args, **kwargs):
# the order in which the model fields are listed for the FieldSet is the order these
# fields will be displayed
file_type = kwargs.pop('file_type', False)
field_width = 'form-control input-sm'
common_layout = Layout(
Field('selected_series_id', css_class=field_width, type="hidden"),
Field('available_sites', css_class=field_width, type="hidden"),
Field('site_code', css_class=field_width,
id=_get_field_id('site_code', file_type=file_type),
title="A brief and unique code that identifies the site at "
"which the data were collected (e.g., 'USU-LBR-Mendon' "
"or '10109000')."),
Field('site_name', css_class=field_width,
id=_get_field_id('site_name', file_type=file_type),
title="A brief but descriptive name for the site (e.g., "
"'Little Bear River at Mendon Road near Mendon, Utah')."),
Field('organization', css_class=field_width,
id=_get_field_id('organization', file_type=file_type),),
Field('elevation_m', css_class=field_width,
id=_get_field_id('elevation_m', file_type=file_type),
title="The elevation of the site in meters (e.g., 1345)."),
Field('elevation_datum', css_class=field_width,
id=_get_field_id('elevation_datum', file_type=file_type),
title="The datum to which the site elevation is referenced "
"(e.g., 'NGVD29').\n"
"Select 'Other...' to specify a new elevation datum term."),
Field('site_type', css_class=field_width,
id=_get_field_id('site_type', file_type=file_type),
title="A controlled vocabulary term that describes the type of "
"data collection site (e.g., 'Stream').\n"
"Select 'Other...' to specify a new site type term."),
Field('latitude', css_class=field_width,
id=_get_field_id('latitude', file_type=file_type),
title="The latitude coordinate of the site location using the "
"WGS84 datum (e.g., 43.1111).",
data_map_item="latitude"),
Field('longitude', css_class=field_width,
id=_get_field_id('longitude', file_type=file_type),
title="The longitude coordinate of the site location using the "
"WGS84 datum (e.g., -111.2334).",
data_map_item="longitude"),
)
layout = _set_form_helper_layout(common_layout=common_layout, element_name="site",
is_show_element_code_selection=show_site_code_selection,
field_css=field_width)
super(SiteFormHelper, self).__init__(allow_edit, res_short_id, element_id, element_name,
layout, *args, **kwargs)
class SiteForm(ModelForm):
selected_series_id = forms.CharField(max_length=50, required=False)
site_code_choices = forms.ChoiceField(choices=(), required=False)
available_sites = forms.CharField(max_length=1000, required=False)
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
self.cv_site_types = list(kwargs.pop('cv_site_types'))
self.cv_elevation_datums = list(kwargs.pop('cv_elevation_datums'))
selected_series_id = kwargs.pop('selected_series_id', None)
available_sites = kwargs.pop('available_sites', [])
show_site_code_selection = kwargs.pop('show_site_code_selection', False)
file_type = kwargs.pop('file_type', False)
super(SiteForm, self).__init__(*args, **kwargs)
self.selected_series_id = selected_series_id
show_site_code_selection = len(available_sites) > 0 and show_site_code_selection
self.helper = SiteFormHelper(allow_edit, res_short_id, element_id, element_name='Site',
show_site_code_selection=show_site_code_selection,
file_type=file_type)
self.fields['selected_series_id'].initial = selected_series_id
_set_available_elements_form_field(form=self, elements=available_sites,
element_name="site")
code_selection_label = "Select any existing sites to use for this series"
_set_element_code_selection_form_field(form=self, form_field_name="site_code_choices",
form_field_label=code_selection_label,
element_id=element_id, elements=available_sites,
element_code_att_name="site_code",
element_name_att_name="site_name")
def set_dropdown_widgets(self, site_type=None, elevation_datum=None):
cv_site_type_choices = _get_cv_dropdown_widget_items(self.cv_site_types, site_type)
self.fields['site_type'].widget = forms.Select(choices=cv_site_type_choices)
cv_e_datum_choices = _get_cv_dropdown_widget_items(self.cv_elevation_datums,
elevation_datum)
self.fields['elevation_datum'].widget = forms.Select(choices=cv_e_datum_choices)
@property
def form_id(self):
form_id = 'id_site_%s' % self.number
return form_id
@property
def form_id_button(self):
return "'" + self.form_id + "'"
class Meta:
model = Site
fields = ['site_code', 'site_name', 'elevation_m', 'elevation_datum', 'site_type',
'site_code_choices', 'latitude', 'longitude']
exclude = ['content_object']
widgets = {'elevation_m': forms.TextInput(), 'latitude': forms.TextInput(),
'longitude': forms.TextInput()}
labels = {'latitude': 'WGS84 Latitude*',
'longitude': 'WGS84 Longitude*'
}
class SiteValidationForm(forms.Form):
site_code = forms.CharField(max_length=200)
site_name = forms.CharField(max_length=255, required=False)
elevation_m = forms.FloatField(required=False)
elevation_datum = forms.CharField(max_length=50, required=False)
site_type = forms.CharField(max_length=100, required=False)
selected_series_id = forms.CharField(max_length=50, required=False)
latitude = forms.FloatField()
longitude = forms.FloatField()
def clean_latitude(self):
lat = self.cleaned_data['latitude']
if lat < -90 or lat > 90:
raise forms.ValidationError("Value for latitude must be in the range of -90 to 90")
return lat
def clean_longitude(self):
lon = self.cleaned_data['longitude']
if lon < -180 or lon > 180:
raise forms.ValidationError("Value for longitude must be in the range of -180 to 180")
return lon
def clean_elevation_datum(self):
e_datum = self.cleaned_data['elevation_datum']
if e_datum == NO_SELECTION_DROPDOWN_OPTION:
e_datum = ''
return e_datum
def clean_site_type(self):
s_type = self.cleaned_data['site_type']
if s_type == NO_SELECTION_DROPDOWN_OPTION:
s_type = ''
return s_type
def clean(self):
cleaned_data = super(SiteValidationForm, self).clean()
elevation_m = cleaned_data.get('elevation_m', None)
elevation_datum = cleaned_data.get('elevation_datum', '')
if elevation_m is not None:
if len(elevation_datum.strip()) == 0:
self._errors['elevation_datum'] = ["A value for elevation datum is missing"]
return self.cleaned_data
class VariableFormHelper(BaseFormHelper):
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
show_variable_code_selection=False, *args, **kwargs):
file_type = kwargs.pop('file_type', False)
field_width = 'form-control input-sm'
common_layout = Layout(
Field('selected_series_id', css_class=field_width, type="hidden"),
Field('available_variables', css_class=field_width, type="hidden"),
Field('variable_code', css_class=field_width,
id=_get_field_id('variable_code', file_type=file_type),
title="A brief and unique code that identifies the measured "
"variable (e.g., 'Temp')."),
Field('variable_name', css_class=field_width,
id=_get_field_id('variable_name', file_type=file_type),
title="A brief but descriptive name of the variable that was measured "
"selected from a controlled vocabulary of variable names "
"(e.g., 'Temperature').\n"
"Select 'Other...' to specify a new variable name term."),
Field('variable_type', css_class=field_width,
id=_get_field_id('variable_type', file_type=file_type),
title="A term selected from a controlled vocabulary that describes the "
"type of variable that was measured (e.g., 'Water quality').\n"
"Select 'Other...' to specify a new variable type term."),
Field('no_data_value', css_class=field_width,
id=_get_field_id('no_data_value', file_type=file_type),
title="A numeric value that is used to represent 'NoData' values "
"in the time series (e.g., -9999)."),
Field('variable_definition', css_class=field_width,
id=_get_field_id('variable_definition', file_type=file_type),
title="An optional, longer text description of the variable "
"(e.g., 'Water temperature')."),
Field('speciation', css_class=field_width,
id=_get_field_id('speciation', file_type=file_type),
title="A term describing the chemical speciation of the resulting data "
"values. For most continuous time series from environmental "
"sensors, this will be 'Not Applicable'.\n"
"Select 'Other...' to specify a new speciation term."),
)
layout = _set_form_helper_layout(
common_layout=common_layout, element_name="variable",
is_show_element_code_selection=show_variable_code_selection,
field_css=field_width)
super(VariableFormHelper, self).__init__(allow_edit, res_short_id, element_id,
element_name, layout, *args, **kwargs)
class VariableForm(ModelForm):
selected_series_id = forms.CharField(max_length=50, required=False)
variable_code_choices = forms.ChoiceField(choices=(), required=False)
available_variables = forms.CharField(max_length=1000, required=False)
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
self.cv_variable_types = list(kwargs.pop('cv_variable_types'))
self.cv_variable_names = list(kwargs.pop('cv_variable_names'))
self.cv_speciations = list(kwargs.pop('cv_speciations'))
selected_series_id = kwargs.pop('selected_series_id', None)
available_variables = kwargs.pop('available_variables', [])
show_variable_code_selection = kwargs.pop('show_variable_code_selection', False)
file_type = kwargs.pop('file_type', False)
super(VariableForm, self).__init__(*args, **kwargs)
self.selected_series_id = selected_series_id
show_variable_code_selection = len(available_variables) > 0 and show_variable_code_selection
self.helper = VariableFormHelper(allow_edit, res_short_id, element_id,
element_name='Variable',
show_variable_code_selection=show_variable_code_selection,
file_type=file_type)
self.fields['selected_series_id'].initial = selected_series_id
_set_available_elements_form_field(form=self, elements=available_variables,
element_name="variable")
code_selection_label = "Select any existing variables to use for this series"
_set_element_code_selection_form_field(form=self, form_field_name="variable_code_choices",
form_field_label=code_selection_label,
element_id=element_id, elements=available_variables,
element_code_att_name="variable_code",
element_name_att_name="variable_name")
def set_dropdown_widgets(self, variable_type=None, variable_name=None, speciation=None):
cv_var_type_choices = _get_cv_dropdown_widget_items(self.cv_variable_types, variable_type)
self.fields['variable_type'].widget = forms.Select(choices=cv_var_type_choices)
cv_var_name_choices = _get_cv_dropdown_widget_items(self.cv_variable_names, variable_name)
self.fields['variable_name'].widget = forms.Select(choices=cv_var_name_choices)
cv_speciation_choices = _get_cv_dropdown_widget_items(self.cv_speciations, speciation)
self.fields['speciation'].widget = forms.Select(choices=cv_speciation_choices)
@property
def form_id(self):
form_id = 'id_variable_%s' % self.number
return form_id
@property
def form_id_button(self):
return "'" + self.form_id + "'"
class Meta:
model = Variable
fields = ['variable_code', 'variable_name', 'variable_type', 'no_data_value',
'variable_definition', 'speciation', 'variable_code_choices']
exclude = ['content_object']
widgets = {'no_data_value': forms.TextInput()}
class VariableValidationForm(forms.Form):
variable_code = forms.CharField(max_length=50)
variable_name = forms.CharField(max_length=100)
variable_type = forms.CharField(max_length=100)
no_data_value = forms.IntegerField()
variable_definition = forms.CharField(max_length=255, required=False)
speciation = forms.CharField(max_length=255, required=False)
selected_series_id = forms.CharField(max_length=50, required=False)
def clean_speciation(self):
spe = self.cleaned_data['speciation']
if spe == NO_SELECTION_DROPDOWN_OPTION:
spe = ''
return spe
def clean(self):
cleaned_data = super(VariableValidationForm, self).clean()
variable_name = cleaned_data.get('variable_name', None)
variable_type = cleaned_data.get('variable_type', None)
if variable_name is None or variable_name == NO_SELECTION_DROPDOWN_OPTION:
self._errors['variable_name'] = ["A value for variable name is missing"]
if variable_type is None or variable_type == NO_SELECTION_DROPDOWN_OPTION:
self._errors['variable_type'] = ["A value for variable type is missing"]
return self.cleaned_data
class MethodFormHelper(BaseFormHelper):
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
show_method_code_selection=False, *args, **kwargs):
# the order in which the model fields are listed for the FieldSet is the order these
# fields will be displayed
file_type = kwargs.pop('file_type', False)
field_width = 'form-control input-sm'
common_layout = Layout(
Field('selected_series_id', css_class=field_width, type="hidden"),
Field('available_methods', css_class=field_width, type="hidden"),
Field('method_code', css_class=field_width,
id=_get_field_id('method_code', file_type=file_type),
title="A brief and unique code that identifies the method used to "
"create the observations (e.g., 'Hydrolab')."),
Field('method_name', css_class=field_width,
id=_get_field_id('method_name', file_type=file_type),
title="A brief but descriptive name for the method used to create "
"the observations (e.g., 'Hydrolab MiniSonde 5')."),
Field('method_type', css_class=field_width,
id=_get_field_id('method_type', file_type=file_type),
title="A term selected from a controlled vocabulary to describe the "
"type of method used to create the observations. For "
"sensor measurements use 'Instrument deployment'.\n"
"Select 'Other...' to specify a new method type term."),
Field('method_description', css_class=field_width,
id=_get_field_id('method_description', file_type=file_type),
title="A longer text description of the method "
"(e.g., 'Water temperature measured using a "
"Hydrolab Multiparameter Sonde')."),
Field('method_link', css_class=field_width,
id=_get_field_id('method_link', file_type=file_type),
title="A URL link to a website that contains more information "
"about or a detailed description of the method "
"(e.g., 'http://www.hydrolab.com')."),
)
layout = _set_form_helper_layout(common_layout=common_layout, element_name="method",
is_show_element_code_selection=show_method_code_selection,
field_css=field_width)
super(MethodFormHelper, self).__init__(allow_edit, res_short_id, element_id, element_name,
layout, *args, **kwargs)
class MethodForm(ModelForm):
selected_series_id = forms.CharField(max_length=50, required=False)
method_code_choices = forms.ChoiceField(choices=(), required=False)
available_methods = forms.CharField(max_length=1000, required=False)
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
self.cv_method_types = list(kwargs.pop('cv_method_types'))
selected_series_id = kwargs.pop('selected_series_id', None)
available_methods = kwargs.pop('available_methods', [])
show_method_code_selection = kwargs.pop('show_method_code_selection', False)
file_type = kwargs.pop('file_type', False)
super(MethodForm, self).__init__(*args, **kwargs)
self.selected_series_id = selected_series_id
show_method_code_selection = len(available_methods) > 0 and show_method_code_selection
self.helper = MethodFormHelper(allow_edit, res_short_id, element_id, element_name='Method',
show_method_code_selection=show_method_code_selection,
file_type=file_type)
self.fields['selected_series_id'].initial = selected_series_id
_set_available_elements_form_field(self, available_methods, "method")
code_selection_label = "Select any existing methods to use for this series"
_set_element_code_selection_form_field(form=self, form_field_name="method_code_choices",
form_field_label=code_selection_label,
element_id=element_id, elements=available_methods,
element_code_att_name="method_code",
element_name_att_name="method_name")
def set_dropdown_widgets(self, current_method_type=None):
cv_method_type_choices = _get_cv_dropdown_widget_items(self.cv_method_types,
current_method_type)
self.fields['method_type'].widget = forms.Select(choices=cv_method_type_choices)
@property
def form_id(self):
form_id = 'id_method_%s' % self.number
return form_id
@property
def form_id_button(self):
return "'" + self.form_id + "'"
class Meta:
model = Method
fields = ['method_code', 'method_name', 'method_type', 'method_description',
'method_link', 'method_code_choices']
exclude = ['content_object']
widgets = {'method_code': forms.TextInput()}
class MethodValidationForm(forms.Form):
method_code = forms.CharField(max_length=50)
method_name = forms.CharField(max_length=200)
method_type = forms.CharField(max_length=200)
method_description = forms.CharField(required=False)
method_link = forms.URLField(required=False)
selected_series_id = forms.CharField(max_length=50, required=False)
def clean(self):
cleaned_data = super(MethodValidationForm, self).clean()
method_type = cleaned_data.get('method_type', None)
if method_type is None or method_type == NO_SELECTION_DROPDOWN_OPTION:
self._errors['method_type'] = ["A value for method type is missing"]
return self.cleaned_data
class ProcessingLevelFormHelper(BaseFormHelper):
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
show_processing_level_code_selection=False, *args, **kwargs):
# the order in which the model fields are listed for the FieldSet is the order these
# fields will be displayed
file_type = kwargs.pop('file_type', False)
field_width = 'form-control input-sm'
common_layout = Layout(
Field('selected_series_id', css_class=field_width, type="hidden"),
Field('available_processinglevels', css_class=field_width, type="hidden"),
Field('processing_level_code', css_class=field_width,
id=_get_field_id('processing_level_code', file_type=file_type),
title="A brief and unique code that identifies the processing level "
"of the observations (e.g., 'QC1')."),
Field('definition', css_class=field_width,
id=_get_field_id('definition', file_type=file_type),
title="A brief description of the processing level "
"(e.g., 'Quality Controlled Data')."),
Field('explanation', css_class=field_width,
id=_get_field_id('explanation', file_type=file_type),
title="A longer text description of the processing level that provides "
"more detail about how the processing was done "
"(e.g., 'Data that have passed quality control processing')."),
)
layout = _set_form_helper_layout(
common_layout=common_layout,
element_name="processinglevel",
is_show_element_code_selection=show_processing_level_code_selection,
field_css=field_width)
kwargs['element_name_label'] = 'Processing Level'
super(ProcessingLevelFormHelper, self).__init__(allow_edit, res_short_id, element_id,
element_name, layout, *args, **kwargs)
class ProcessingLevelForm(ModelForm):
selected_series_id = forms.CharField(max_length=50, required=False)
processinglevel_code_choices = forms.ChoiceField(choices=(), required=False)
available_processinglevels = forms.CharField(max_length=1000, required=False)
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
selected_series_id = kwargs.pop('selected_series_id', None)
available_processinglevels = kwargs.pop('available_processinglevels', [])
show_processing_level_code_selection = kwargs.pop('show_processing_level_code_selection',
False)
file_type = kwargs.pop('file_type', False)
super(ProcessingLevelForm, self).__init__(*args, **kwargs)
self.helper = ProcessingLevelFormHelper(
allow_edit, res_short_id, element_id,
element_name='ProcessingLevel',
show_processing_level_code_selection=show_processing_level_code_selection,
file_type=file_type)
self.fields['selected_series_id'].initial = selected_series_id
_set_available_elements_form_field(self, available_processinglevels, "processinglevel")
code_selection_label = "Select any existing processing level to use for this series"
_set_element_code_selection_form_field(form=self,
form_field_name="processinglevel_code_choices",
form_field_label=code_selection_label,
element_id=element_id,
elements=available_processinglevels,
element_code_att_name="processing_level_code",
element_name_att_name="definition")
@property
def form_id(self):
form_id = 'id_processinglevel_%s' % self.number
return form_id
@property
def form_id_button(self):
return "'" + self.form_id + "'"
class Meta:
model = ProcessingLevel
fields = ['processing_level_code', 'definition', 'explanation',
'processinglevel_code_choices']
exclude = ['content_object']
widgets = {'processing_level_code': forms.TextInput()}
class ProcessingLevelValidationForm(forms.Form):
processing_level_code = forms.CharField(max_length=50)
definition = forms.CharField(max_length=200, required=False)
explanation = forms.CharField(required=False)
selected_series_id = forms.CharField(max_length=50, required=False)
class TimeSeriesResultFormHelper(BaseFormHelper):
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
*args, **kwargs):
# the order in which the model fields are listed for the FieldSet is the order these
# fields will be displayed
file_type = kwargs.pop('file_type', False)
field_width = 'form-control input-sm'
layout = Layout(
Field('selected_series_id', css_class=field_width, type="hidden"),
Field('units_type', css_class=field_width,
id=_get_field_id('units_type', file_type=file_type),
title="A term selected from a controlled vocabulary that describes the "
"type of units used for the Time Series result values "
"(e.g., 'Temperature').\n"
"Select 'Other...' to specify a new units type term."),
Field('units_name', css_class=field_width,
id=_get_field_id('units_name', file_type=file_type),
title="A brief, but descriptive name of the units used for the "
"Time Series result values (e.g., 'Degrees Celsius')."),
Field('units_abbreviation', css_class=field_width,
id=_get_field_id('units_abbreviation', file_type=file_type),
title="A text abbreviation for the units (e.g., 'Deg. C')."),
Field('status', css_class=field_width,
id=_get_field_id('status', file_type=file_type),
title="A term selected from a controlled vocabulary to describe the "
"status of the time series. Completed datasets use 'Complete'. "
"Where data collection is ongoing, use 'Ongoing'.\n"
"Select 'Other...' to specify a new status term."),
Field('sample_medium', css_class=field_width,
id=_get_field_id('sample_medium', file_type=file_type),
title="A term selected from a controlled vocabulary to specify the "
"environmental medium in which the observation was made "
"(e.g., 'Liquid aqueous').\n"
"Select 'Other...' to specify a new sample medium term."),
Field('value_count', css_class=field_width,
id=_get_field_id('value_count', file_type=file_type),
title="The total number of data values in the Time Series "
"(e.g., 24205)."),
Field('aggregation_statistics', css_class=field_width,
id=_get_field_id('aggregation_statistics', file_type=file_type),
title="An indication of whether the values are 'Continuous' or "
"represent recorded values of some statistic aggregated over a "
"time interval (e.g., 'Average').\n"
"Select 'Other...' to specify a new aggregation statistics term."),
Field('series_label', css_class=field_width, type="hidden"),
)
kwargs['element_name_label'] = 'Time Series Result'
super(TimeSeriesResultFormHelper, self).__init__(allow_edit, res_short_id, element_id,
element_name, layout, *args, **kwargs)
class TimeSeriesResultForm(ModelForm):
selected_series_id = forms.CharField(max_length=50, required=False)
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
selected_series_id = kwargs.pop('selected_series_id', None)
self.cv_sample_mediums = list(kwargs.pop('cv_sample_mediums'))
self.cv_units_types = list(kwargs.pop('cv_units_types'))
self.cv_aggregation_statistics = list(kwargs.pop('cv_aggregation_statistics'))
self.cv_statuses = list(kwargs.pop('cv_statuses'))
file_type = kwargs.pop('file_type', False)
super(TimeSeriesResultForm, self).__init__(*args, **kwargs)
self.helper = TimeSeriesResultFormHelper(allow_edit, res_short_id, element_id,
element_name='TimeSeriesResult',
file_type=file_type)
self.fields['selected_series_id'].initial = selected_series_id
def set_dropdown_widgets(self, current_sample_medium=None, current_units_type=None,
current_agg_statistics=None, current_status=None):
cv_sample_medium_choices = _get_cv_dropdown_widget_items(self.cv_sample_mediums,
current_sample_medium)
self.fields['sample_medium'].widget = forms.Select(choices=cv_sample_medium_choices)
cv_units_type_choices = _get_cv_dropdown_widget_items(self.cv_units_types,
current_units_type)
self.fields['units_type'].widget = forms.Select(choices=cv_units_type_choices)
cv_status_choices = _get_cv_dropdown_widget_items(self.cv_statuses, current_status)
self.fields['status'].widget = forms.Select(choices=cv_status_choices)
cv_agg_statistics_choices = _get_cv_dropdown_widget_items(self.cv_aggregation_statistics,
current_agg_statistics)
self.fields['aggregation_statistics'].widget = forms.Select(
choices=cv_agg_statistics_choices)
def set_series_label(self, series_label):
self.fields['series_label'].initial = series_label
def set_value_count(self, value_count=None):
if value_count is not None:
self.fields['value_count'].initial = value_count
@property
def form_id(self):
form_id = 'id_timeseriesresult_%s' % self.number
return form_id
@property
def form_id_button(self):
return "'" + self.form_id + "'"
class Meta:
model = TimeSeriesResult
fields = ['units_type', 'units_name', 'units_abbreviation', 'status', 'sample_medium',
'value_count', 'aggregation_statistics', 'series_label']
widgets = {'value_count': forms.TextInput()}
labels = {'aggregation_statistics': 'Aggregation statistic'}
class TimeSeriesResultValidationForm(forms.Form):
units_type = forms.CharField(max_length=255)
units_name = forms.CharField(max_length=255)
units_abbreviation = forms.CharField(max_length=20)
status = forms.CharField(max_length=255, required=False)
sample_medium = forms.CharField(max_length=255)
value_count = forms.IntegerField()
aggregation_statistics = forms.CharField(max_length=255)
series_label = forms.CharField(max_length=255, required=False)
selected_series_id = forms.CharField(max_length=50, required=False)
def clean(self):
cleaned_data = super(TimeSeriesResultValidationForm, self).clean()
units_type = cleaned_data.get('units_type', None)
status = cleaned_data.get('status', None)
sample_medium = cleaned_data.get('sample_medium', None)
aggregation_statistics = cleaned_data.get('aggregation_statistics', None)
if units_type is None or units_type == NO_SELECTION_DROPDOWN_OPTION:
self._errors['units_type'] = ["A value for units type is missing"]
if status == NO_SELECTION_DROPDOWN_OPTION:
cleaned_data['status'] = ""
if sample_medium is None or sample_medium == NO_SELECTION_DROPDOWN_OPTION:
self._errors['sample_medium'] = ["A value for sample medium is missing"]
if aggregation_statistics is None or aggregation_statistics == NO_SELECTION_DROPDOWN_OPTION:
self._errors['aggregation_statistics'] = ["A value for aggregation statistic "
"is missing"]
class UTCOffSetFormHelper(BaseFormHelper):
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
*args, **kwargs):
field_width = 'form-control input-sm'
file_type = kwargs.pop('file_type', False)
layout = Layout(
Field('selected_series_id', css_class=field_width, type="hidden"),
Field('value', css_class=field_width,
id=_get_field_id('utcoffset', file_type=file_type),
title="The value of the UTCOffset for timestamp values accompanying your "
"time series data."),
)
kwargs['element_name_label'] = 'UTC Offset*'
super(UTCOffSetFormHelper, self).__init__(allow_edit, res_short_id, element_id,
element_name, layout, *args, **kwargs)
class UTCOffSetForm(ModelForm):
selected_series_id = forms.CharField(max_length=50, required=False)
def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
selected_series_id = kwargs.pop('selected_series_id', None)
file_type = kwargs.pop('file_type', False)
super(UTCOffSetForm, self).__init__(*args, **kwargs)
self.helper = UTCOffSetFormHelper(allow_edit, res_short_id, element_id,
element_name='UTCOffSet', file_type=file_type)
self.fields['selected_series_id'].initial = selected_series_id
if not element_id:
self.fields['value'].initial = ""
class Meta:
model = UTCOffSet
fields = ['value']
exclude = ['content_object']
widgets = {'value': forms.TextInput()}
labels = {'value': ""}
class UTCOffSetValidationForm(forms.Form):
value = forms.FloatField(required=True)
def _get_cv_dropdown_widget_items(dropdown_items, selected_item_name):
# filter out the item that needs to be shown as the currently selected item
# in the dropdown list
dropdown_items = [item for item in dropdown_items if item.name != selected_item_name]
# sort the cv items
cv_item_names = [item.name for item in dropdown_items]
cv_item_names.sort(key=str.lower)
# create a list of tuples from item names
cv_items = [(item_name, item_name) for item_name in cv_item_names]
if selected_item_name is None or len(selected_item_name) == 0:
selected_item_name = NO_SELECTION_DROPDOWN_OPTION
cv_items = [(selected_item_name, selected_item_name)] + cv_items
else:
cv_items = [(selected_item_name, selected_item_name)] + cv_items + \
[(NO_SELECTION_DROPDOWN_OPTION, NO_SELECTION_DROPDOWN_OPTION)]
cv_item_choices = tuple(cv_items)
return cv_item_choices
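# Illustrative result of _get_cv_dropdown_widget_items (the CV item names below are
# hypothetical): given items named ["Stream", "Borehole"] and selected_item_name="Stream",
# the returned choices are (("Stream", "Stream"), ("Borehole", "Borehole"), ("-----", "-----"));
# with no current selection the "-----" placeholder is listed first instead.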
def _set_available_elements_form_field(form, elements, element_name):
elements_data = []
for element in elements:
element_data = model_to_dict(element, exclude=["object_id", "series_ids", "content_type"])
elements_data.append(element_data)
element_data_str = ""
if len(elements_data) > 0:
element_data_str = json.dumps(elements_data)
form_field_name = "available_{}s".format(element_name)
form.fields[form_field_name].initial = element_data_str
def _set_element_code_selection_form_field(form, form_field_name, form_field_label, element_id,
elements, element_code_att_name, element_name_att_name):
element_display_str = "{code_att_name}:{name_att_name}"
if len(elements) > 0:
if len(form.initial) > 0:
element_code_choices = [(getattr(element, element_code_att_name),
element_display_str.format(
code_att_name=str(getattr(element, element_code_att_name)),
name_att_name=getattr(element, element_name_att_name))
) for element in elements if element.id != element_id]
element_code_choices = tuple([(form.initial[element_code_att_name],
element_display_str.format(
code_att_name=str(form.initial[element_code_att_name]),
name_att_name=form.initial[element_name_att_name]))] +
element_code_choices + [("----", "----")])
else:
element_code_choices = [(getattr(element, element_code_att_name),
element_display_str.format(
code_att_name=str(getattr(element, element_code_att_name)),
name_att_name=getattr(element, element_name_att_name)))
for element in elements]
element_code_choices = tuple([("----", "----")] + element_code_choices)
form.fields[form_field_name].widget = forms.Select(
choices=element_code_choices)
form.fields[form_field_name].label = form_field_label
def _get_field_id(field_name, file_type=False):
if file_type:
return "id_{}_filetype".format(field_name)
return "id_{}".format(field_name)
def _set_form_helper_layout(common_layout, element_name, is_show_element_code_selection, field_css):
form_field_name = "{}_code_choices".format(element_name)
if is_show_element_code_selection:
element_choice_help = "Select '----' for a new {} or any other option to use an " \
"existing {} for this series".format(element_name, element_name)
layout = Layout(
Field(form_field_name, css_class=field_css, title=element_choice_help),
common_layout,
)
else:
layout = Layout(
Field(form_field_name, css_class=field_css, type="hidden"),
common_layout,
)
return layout
UpdateSQLiteLayout = Layout(HTML("""
<div id="sql-file-update" class="row"
style="margin-bottom:10px;
{% if not cm.can_update_sqlite_file or not cm.metadata.is_dirty %}display:none;{% endif %}">
<div class="col-sm-12">
<div class="alert alert-warning alert-dismissible" role="alert">
<strong>SQLite file needs to be synced with metadata changes.</strong>
{% if cm.metadata.series_names %}
<div>
<strong><span style="color:red;">NOTE:</span> New resource specific metadata
elements can't be created after you update the SQLite file.</strong>
</div>
{% endif %}
<input id="can-update-sqlite-file" type="hidden"
value="{{ cm.can_update_sqlite_file }}">
<input id="metadata-dirty" type="hidden" value="{{ cm.metadata.is_dirty }}">
<form action="/timeseries/sqlite/update/{{ cm.short_id }}/" method="post"
enctype="multipart/form-data">
{% csrf_token %}
<input name="resource-mode" type="hidden" value="edit">
<button id="btn-update-sqlite-file" type="button" class="btn btn-primary">
Update SQLite File</button>
</form>
</div>
</div>
</div>
"""
)
)
SeriesSelectionLayout = Layout(HTML("""
<div id="div-series-selection">
<div class="col-sm-12">
<strong>Select a timeseries to see corresponding metadata (Number of
timeseries: {{ series_ids.items|length }}):</strong>
<form action="/resource/{{ cm.short_id }}/" method="get" enctype="multipart/form-data">
{% csrf_token %}
<input name="resource-mode" type="hidden" value="edit">
<select class="form-control" name="series_id" id="series_id">
{% for series_id, label in series_ids.items %}
{% if selected_series_id == series_id %}
<option value="{{ series_id }}" selected="selected"
title="{{ label }}">{{ label|slice:":120"|add:"..." }}</option>
{% else %}
<option value="{{ series_id }}" title="{{ label }}">
{{ label|slice:":120"|add:"..." }}</option>
{% endif %}
{% endfor %}
</select>
</form>
<hr>
</div>
</div>
"""
)
)
UTCOffSetLayout = HTML("""
<div class="form-group col-sm-6 col-xs-12 time-series-forms">
<div id="utc_offset">
{% load crispy_forms_tags %}
{% crispy utcoffset_form %}
<hr style="border:0">
</div>
</div>
""")
TimeSeriesMetaDataLayout = HTML("""
<div class="form-group col-sm-6 col-xs-12 time-series-forms">
<div id="site" class="hs-coordinates-picker" data-coordinates-type="point">
{% load crispy_forms_tags %}
{% crispy site_form %}
<hr style="border:0">
</div>
<div id="variable">
{% load crispy_forms_tags %}
{% crispy variable_form %}
<hr style="border:0">
</div>
<div id="method">
{% load crispy_forms_tags %}
{% crispy method_form %}
<hr style="border:0">
</div>
</div>
<div class="form-group col-sm-6 col-xs-12 time-series-forms">
<div id="processinglevel">
{% load crispy_forms_tags %}
{% crispy processing_level_form %}
<hr style="border:0">
</div>
<div id="timeseriesresult">
{% load crispy_forms_tags %}
{% crispy timeseries_result_form %}
</div>
</div>
"""
)
|
|
#!/usr/bin/env python
# Module : SysTrayIcon.py
# Synopsis : Windows System tray icon.
# Programmer : Simon Brunning - [email protected]
# Date : 11 April 2005
# Notes : Based on (i.e. ripped off from) Mark Hammond's
# win32gui_taskbar.py and win32gui_menu.py demos from PyWin32
import os
import sys
import itertools, glob
import webbrowser
import win32api
import win32con
import win32gui_struct
from folders import AppFolders
try:
import winxpgui as win32gui
except ImportError:
import win32gui
class WinGui(object):
'''Windows system tray icon wrapper around the ComicStreamer API server.'''
QUIT = 'QUIT'
SPECIAL_ACTIONS = [QUIT]
FIRST_ID = 1023
def bye(self, obj):
print 'Bye, then.'
def show(self, sender):
webbrowser.open("http://localhost:{0}".format(self.apiServer.port), new=0)
def __init__(self, apiServer):
self.apiServer = apiServer
self.icon = AppFolders.iconsPath("logo.ico")
self.hover_text = "ComicStreamer"
self.on_quit = self.bye
menu_options = (
('Show Interface', None, self.show),
)
menu_options = menu_options + (('Quit', None, self.QUIT),)
self._next_action_id = self.FIRST_ID
self.menu_actions_by_id = set()
self.menu_options = self._add_ids_to_menu_options(list(menu_options))
self.menu_actions_by_id = dict(self.menu_actions_by_id)
del self._next_action_id
self.default_menu_index = 1
self.window_class_name = "ComicStreamerTrayIcon"
message_map = {win32gui.RegisterWindowMessage("TaskbarCreated"): self.restart,
win32con.WM_DESTROY: self.destroy,
win32con.WM_COMMAND: self.command,
win32con.WM_USER+20 : self.notify,}
# Register the Window class.
window_class = win32gui.WNDCLASS()
hinst = window_class.hInstance = win32gui.GetModuleHandle(None)
window_class.lpszClassName = self.window_class_name
window_class.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
window_class.hCursor = win32gui.LoadCursor(0, win32con.IDC_ARROW)
window_class.hbrBackground = win32con.COLOR_WINDOW
window_class.lpfnWndProc = message_map # could also specify a wndproc.
classAtom = win32gui.RegisterClass(window_class)
# Create the Window.
style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
self.hwnd = win32gui.CreateWindow(classAtom,
self.window_class_name,
style,
0,
0,
win32con.CW_USEDEFAULT,
win32con.CW_USEDEFAULT,
0,
0,
hinst,
None)
win32gui.UpdateWindow(self.hwnd)
self.notify_id = None
self.refresh_icon()
def _add_ids_to_menu_options(self, menu_options):
result = []
for menu_option in menu_options:
option_text, option_icon, option_action = menu_option
if callable(option_action) or option_action in self.SPECIAL_ACTIONS:
self.menu_actions_by_id.add((self._next_action_id, option_action))
result.append(menu_option + (self._next_action_id,))
elif non_string_iterable(option_action):
result.append((option_text,
option_icon,
self._add_ids_to_menu_options(option_action),
self._next_action_id))
else:
print 'Unknown item', option_text, option_icon, option_action
self._next_action_id += 1
return result
def refresh_icon(self):
# Try and find a custom icon
hinst = win32gui.GetModuleHandle(None)
if os.path.isfile(self.icon):
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
hicon = win32gui.LoadImage(hinst,
self.icon,
win32con.IMAGE_ICON,
0,
0,
icon_flags)
else:
print "Can't find icon file - using default."
hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
if self.notify_id: message = win32gui.NIM_MODIFY
else: message = win32gui.NIM_ADD
self.notify_id = (self.hwnd,
0,
win32gui.NIF_ICON | win32gui.NIF_MESSAGE | win32gui.NIF_TIP,
win32con.WM_USER+20,
hicon,
self.hover_text)
win32gui.Shell_NotifyIcon(message, self.notify_id)
def restart(self, hwnd, msg, wparam, lparam):
self.refresh_icon()
def destroy(self, hwnd, msg, wparam, lparam):
if self.on_quit: self.on_quit(self)
nid = (self.hwnd, 0)
win32gui.Shell_NotifyIcon(win32gui.NIM_DELETE, nid)
win32gui.PostQuitMessage(0) # Terminate the app.
def notify(self, hwnd, msg, wparam, lparam):
if lparam==win32con.WM_LBUTTONDBLCLK:
self.execute_menu_option(self.default_menu_index + self.FIRST_ID)
elif lparam==win32con.WM_RBUTTONUP:
self.show_menu()
elif lparam==win32con.WM_LBUTTONUP:
self.show_menu()
return True
def show_menu(self):
menu = win32gui.CreatePopupMenu()
self.create_menu(menu, self.menu_options)
#win32gui.SetMenuDefaultItem(menu, 1000, 0)
pos = win32gui.GetCursorPos()
# See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp
win32gui.SetForegroundWindow(self.hwnd)
win32gui.TrackPopupMenu(menu,
win32con.TPM_LEFTALIGN,
pos[0],
pos[1],
0,
self.hwnd,
None)
win32gui.PostMessage(self.hwnd, win32con.WM_NULL, 0, 0)
def create_menu(self, menu, menu_options):
for option_text, option_icon, option_action, option_id in menu_options[::-1]:
if option_icon:
option_icon = self.prep_menu_icon(option_icon)
if option_id in self.menu_actions_by_id:
item, extras = win32gui_struct.PackMENUITEMINFO(text=option_text,
hbmpItem=option_icon,
wID=option_id)
win32gui.InsertMenuItem(menu, 0, 1, item)
else:
submenu = win32gui.CreatePopupMenu()
self.create_menu(submenu, option_action)
item, extras = win32gui_struct.PackMENUITEMINFO(text=option_text,
hbmpItem=option_icon,
hSubMenu=submenu)
win32gui.InsertMenuItem(menu, 0, 1, item)
def prep_menu_icon(self, icon):
# First load the icon.
ico_x = win32api.GetSystemMetrics(win32con.SM_CXSMICON)
ico_y = win32api.GetSystemMetrics(win32con.SM_CYSMICON)
hicon = win32gui.LoadImage(0, icon, win32con.IMAGE_ICON, ico_x, ico_y, win32con.LR_LOADFROMFILE)
hdcBitmap = win32gui.CreateCompatibleDC(0)
hdcScreen = win32gui.GetDC(0)
hbm = win32gui.CreateCompatibleBitmap(hdcScreen, ico_x, ico_y)
hbmOld = win32gui.SelectObject(hdcBitmap, hbm)
# Fill the background.
brush = win32gui.GetSysColorBrush(win32con.COLOR_MENU)
win32gui.FillRect(hdcBitmap, (0, 0, 16, 16), brush)
# unclear if the brush needs to be freed. Best clue I can find is:
# "GetSysColorBrush returns a cached brush instead of allocating a new
# one." - implies no DeleteObject is needed
# draw the icon
win32gui.DrawIconEx(hdcBitmap, 0, 0, hicon, ico_x, ico_y, 0, 0, win32con.DI_NORMAL)
win32gui.SelectObject(hdcBitmap, hbmOld)
win32gui.DeleteDC(hdcBitmap)
return hbm
def command(self, hwnd, msg, wparam, lparam):
id = win32gui.LOWORD(wparam)
self.execute_menu_option(id)
def execute_menu_option(self, id):
menu_action = self.menu_actions_by_id[id]
if menu_action == self.QUIT:
win32gui.DestroyWindow(self.hwnd)
else:
menu_action(self)
def run(self):
win32gui.PumpMessages()
def non_string_iterable(obj):
try:
iter(obj)
except TypeError:
return False
else:
return not isinstance(obj, basestring)
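# Illustrative (Python 2 semantics, since this module relies on basestring):
#   non_string_iterable([1, 2]) -> True
#   non_string_iterable("abc")  -> False
#   non_string_iterable(42)     -> False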
|
|
# Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
# Splash2 Run Script
#
import os
import optparse
import sys
import m5
from m5.objects import *
# --------------------
# Define Command Line Options
# ====================
parser = optparse.OptionParser()
parser.add_option("-d", "--detailed", action="store_true")
parser.add_option("-t", "--timing", action="store_true")
parser.add_option("-m", "--maxtick", type="int")
parser.add_option("-n", "--numcpus",
help="Number of cpus in total", type="int")
parser.add_option("-f", "--frequency",
default = "1GHz",
help="Frequency of each CPU")
parser.add_option("--l1size",
default = "32kB")
parser.add_option("--l1latency",
default = "1ns")
parser.add_option("--l2size",
default = "256kB")
parser.add_option("--l2latency",
default = "10ns")
parser.add_option("--rootdir",
help="Root directory of Splash2",
default="/dist/splash2/codes")
parser.add_option("-b", "--benchmark",
help="Splash 2 benchmark to run")
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
if not options.numcpus:
print "Specify the number of cpus with -n"
sys.exit(1)
# --------------------
# Define Splash2 Benchmarks
# ====================
class Cholesky(LiveProcess):
cwd = options.rootdir + '/kernels/cholesky'
executable = options.rootdir + '/kernels/cholesky/CHOLESKY'
cmd = ['CHOLESKY', '-p' + str(options.numcpus),
options.rootdir + '/kernels/cholesky/inputs/tk23.O']
class FFT(LiveProcess):
cwd = options.rootdir + '/kernels/fft'
executable = options.rootdir + '/kernels/fft/FFT'
cmd = ['FFT', '-p', str(options.numcpus), '-m18']
class LU_contig(LiveProcess):
executable = options.rootdir + '/kernels/lu/contiguous_blocks/LU'
cmd = ['LU', '-p', str(options.numcpus)]
cwd = options.rootdir + '/kernels/lu/contiguous_blocks'
class LU_noncontig(LiveProcess):
executable = options.rootdir + '/kernels/lu/non_contiguous_blocks/LU'
cmd = ['LU', '-p', str(options.numcpus)]
cwd = options.rootdir + '/kernels/lu/non_contiguous_blocks'
class Radix(LiveProcess):
executable = options.rootdir + '/kernels/radix/RADIX'
cmd = ['RADIX', '-n524288', '-p', str(options.numcpus)]
cwd = options.rootdir + '/kernels/radix'
class Barnes(LiveProcess):
executable = options.rootdir + '/apps/barnes/BARNES'
cmd = ['BARNES']
input = options.rootdir + '/apps/barnes/input.p' + str(options.numcpus)
cwd = options.rootdir + '/apps/barnes'
class FMM(LiveProcess):
executable = options.rootdir + '/apps/fmm/FMM'
cmd = ['FMM']
if str(options.numcpus) == '1':
input = options.rootdir + '/apps/fmm/inputs/input.2048'
else:
input = options.rootdir + '/apps/fmm/inputs/input.2048.p' + str(options.numcpus)
cwd = options.rootdir + '/apps/fmm'
class Ocean_contig(LiveProcess):
executable = options.rootdir + '/apps/ocean/contiguous_partitions/OCEAN'
cmd = ['OCEAN', '-p', str(options.numcpus)]
cwd = options.rootdir + '/apps/ocean/contiguous_partitions'
class Ocean_noncontig(LiveProcess):
executable = options.rootdir + '/apps/ocean/non_contiguous_partitions/OCEAN'
cmd = ['OCEAN', '-p', str(options.numcpus)]
cwd = options.rootdir + '/apps/ocean/non_contiguous_partitions'
class Raytrace(LiveProcess):
executable = options.rootdir + '/apps/raytrace/RAYTRACE'
cmd = ['RAYTRACE', '-p' + str(options.numcpus),
options.rootdir + '/apps/raytrace/inputs/teapot.env']
cwd = options.rootdir + '/apps/raytrace'
class Water_nsquared(LiveProcess):
executable = options.rootdir + '/apps/water-nsquared/WATER-NSQUARED'
cmd = ['WATER-NSQUARED']
if options.numcpus==1:
input = options.rootdir + '/apps/water-nsquared/input'
else:
input = options.rootdir + '/apps/water-nsquared/input.p' + str(options.numcpus)
cwd = options.rootdir + '/apps/water-nsquared'
class Water_spatial(LiveProcess):
executable = options.rootdir + '/apps/water-spatial/WATER-SPATIAL'
cmd = ['WATER-SPATIAL']
if options.numcpus==1:
input = options.rootdir + '/apps/water-spatial/input'
else:
input = options.rootdir + '/apps/water-spatial/input.p' + str(options.numcpus)
cwd = options.rootdir + '/apps/water-spatial'
# --------------------
# Base L1 Cache Definition
# ====================
class L1(Cache):
latency = options.l1latency
mshrs = 12
tgts_per_mshr = 8
# ----------------------
# Base L2 Cache Definition
# ----------------------
class L2(Cache):
latency = options.l2latency
mshrs = 92
tgts_per_mshr = 16
write_buffers = 8
# ----------------------
# Define the cpus
# ----------------------
busFrequency = Frequency(options.frequency)
if options.timing:
cpus = [TimingSimpleCPU(cpu_id = i,
clock=options.frequency)
for i in xrange(options.numcpus)]
elif options.detailed:
cpus = [DerivO3CPU(cpu_id = i,
clock=options.frequency)
for i in xrange(options.numcpus)]
else:
cpus = [AtomicSimpleCPU(cpu_id = i,
clock=options.frequency)
for i in xrange(options.numcpus)]
# ----------------------
# Create a system, and add system wide objects
# ----------------------
system = System(cpu = cpus, physmem = SimpleMemory(),
membus = SystemXBar(clock = busFrequency))
system.clock = '1GHz'
system.toL2bus = L2XBar(clock = busFrequency)
system.l2 = L2(size = options.l2size, assoc = 8)
# ----------------------
# Connect the L2 cache and memory together
# ----------------------
system.physmem.port = system.membus.master
system.l2.cpu_side = system.toL2bus.master
system.l2.mem_side = system.membus.slave
system.system_port = system.membus.slave
# ----------------------
# Connect the L2 cache and clusters together
# ----------------------
for cpu in cpus:
cpu.addPrivateSplitL1Caches(L1(size = options.l1size, assoc = 1),
L1(size = options.l1size, assoc = 4))
# connect cpu level-1 caches to shared level-2 cache
cpu.connectAllPorts(system.toL2bus, system.membus)
# ----------------------
# Define the root
# ----------------------
root = Root(full_system = False, system = system)
# --------------------
# Pick the correct Splash2 Benchmarks
# ====================
if options.benchmark == 'Cholesky':
root.workload = Cholesky()
elif options.benchmark == 'FFT':
root.workload = FFT()
elif options.benchmark == 'LUContig':
root.workload = LU_contig()
elif options.benchmark == 'LUNoncontig':
root.workload = LU_noncontig()
elif options.benchmark == 'Radix':
root.workload = Radix()
elif options.benchmark == 'Barnes':
root.workload = Barnes()
elif options.benchmark == 'FMM':
root.workload = FMM()
elif options.benchmark == 'OceanContig':
root.workload = Ocean_contig()
elif options.benchmark == 'OceanNoncontig':
root.workload = Ocean_noncontig()
elif options.benchmark == 'Raytrace':
root.workload = Raytrace()
elif options.benchmark == 'WaterNSquared':
root.workload = Water_nsquared()
elif options.benchmark == 'WaterSpatial':
root.workload = Water_spatial()
else:
print >> sys.stderr, """The --benchmark option was set to something improper.
Use Cholesky, FFT, LUContig, LUNoncontig, Radix, Barnes, FMM, OceanContig,
OceanNoncontig, Raytrace, WaterNSquared, or WaterSpatial"""
sys.exit(1)
# --------------------
# Assign the workload to the cpus
# ====================
for cpu in cpus:
cpu.workload = root.workload
# ----------------------
# Run the simulation
# ----------------------
if options.timing or options.detailed:
root.system.mem_mode = 'timing'
# instantiate configuration
m5.instantiate()
# simulate until program terminates
if options.maxtick:
exit_event = m5.simulate(options.maxtick)
else:
exit_event = m5.simulate(m5.MaxTick)
print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of RL agents."""
from __future__ import division
from __future__ import print_function
import collections
import heapq
import pprint
import random
import numpy as np
import tensorflow.compat.v1 as tf
from meta_reward_learning.semantic_parsing.nsm import data_utils
from meta_reward_learning.semantic_parsing.nsm import env_factory
from meta_reward_learning.semantic_parsing.nsm import graph_factory
# Suppress the divide-by-zero warning raised by np.log, because the
# behaviour np.log(0.0) = -inf is correct in this context.
np.warnings.filterwarnings('ignore')
def samples_with_features(batch_envs, samples, max_n_exp):
"""Add sim_features to samples."""
new_samples = []
batch_env_dict = {env.name: env for env in batch_envs}
for sample in samples:
traj = sample.traj
if sum(traj.rewards):
# Computing the features for a program with zero reward would be wasteful
env = batch_env_dict[traj.env_name]
program = traj_to_program(traj, env.de_vocab)
sim_features = env_factory.create_features(env, program, max_n_exp)
new_traj = traj._replace(sim_features=sim_features)
sample = sample._replace(traj=new_traj)
new_samples.append(sample)
return new_samples
def get_data_for_train(samples):
trajs = [s.traj for s in samples]
obs = [t.obs for t in trajs]
actions = [t.actions for t in trajs]
context = [t.context for t in trajs]
returns = [sum(t.rewards) for t in trajs]
weights = [s.prob * r for s, r in zip(samples, returns)]
return obs, actions, context, weights
def scale_probs(samples, scale):
"""Weight each samples with the weight. Reflected on probs."""
scaled_probs = [scale * s.prob for s in samples]
new_samples = []
for s, p in zip(samples, scaled_probs):
new_samples.append(Sample(traj=s.traj, prob=p))
return new_samples
def normalize_probs(samples, smoothing=1e-8, beta=1):
"""Normalize the probability of the samples (in each env) to sum to 1.0."""
sum_prob_dict = {}
probs = [np.power(s.prob, beta) + smoothing for s in samples]
for s, prob in zip(samples, probs):
name = s.traj.env_name
if name in sum_prob_dict:
sum_prob_dict[name] += prob
else:
sum_prob_dict[name] = prob
new_samples = []
for s, prob in zip(samples, probs):
new_prob = prob / sum_prob_dict[s.traj.env_name]
new_samples.append(Sample(traj=s.traj, prob=new_prob))
return new_samples
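# Illustrative: with beta=1 and negligible smoothing, two samples from the same
# env with probs [0.2, 0.6] are renormalized to [0.25, 0.75]; samples that belong
# to different envs are normalized independently of each other.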
def compute_baselines(returns, probs, env_names):
"""Compute baseline for samples."""
baseline_dict = {}
for ret, p, name in zip(returns, probs, env_names):
if name not in baseline_dict:
baseline_dict[name] = ret * p
else:
baseline_dict[name] += ret * p
return baseline_dict
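# Illustrative: returns=[1, 0, 1], probs=[0.3, 0.5, 0.2], env_names=['e1', 'e1', 'e2']
# gives {'e1': 0.3, 'e2': 0.2}, i.e. the probability-weighted expected return per env.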
class PGAgent(object):
"""Agent trained by policy gradient."""
def __init__(self,
model,
train_writer=None,
discount_factor=1.0,
extra_monitors=False,
score_model='tabular',
score_norm_fn='sigmoid',
use_baseline=False):
# Used for weights experiment
self.env_to_index = {}
# Set this parameter when doing meta learning
self.meta_learn = model.meta_learn
self.model = model
self.discount_factor = discount_factor
self._extra_monitors = extra_monitors
self.train_writer = train_writer
self.score_model = score_model
self.score_norm_fn = score_norm_fn
self.use_baseline = use_baseline
if self.use_baseline:
self.baseline_dict = dict()
self.monitor_graph = graph_factory.MonitorGraph()
for name in [
'avg_return', 'avg_len', 'avg_prob'
# 'val_loss', 'min_return', 'max_return', 'std_return',
# 'min_len', 'max_len', 'std_len', 'clip_frac'
]:
self.monitor_graph.add_scalar_monitor(name, dtype=tf.float32)
if extra_monitors:
self.extra_monitor_list = []
# 'buffer_prob', 'reweighted_buffer_prob', 'replay_buffer_size',
# 'mean_buffer_size', 'num_contexts']
for name in self.extra_monitor_list:
self.monitor_graph.add_scalar_monitor(name, dtype=tf.float32)
self.monitor_graph.launch()
def generate_samples(self,
envs,
n_samples=1,
greedy=False,
use_cache=False,
filter_error=True):
"""Returns Actions, rewards, obs, other info."""
samples = sampling(
self.model,
envs,
n_samples=n_samples,
greedy=greedy,
use_cache=use_cache,
filter_error=filter_error)
return samples
def beam_search(self,
envs=None,
beam_size=1,
use_cache=False,
greedy=False,
renorm=True):
"""Returns Actions, rewards, obs and probs."""
samples = beam_search(
self.model,
envs,
beam_size=beam_size,
use_cache=use_cache,
greedy=greedy,
renorm=renorm)
return samples
def update_replay_prob(self, samples, min_replay_weight=0.0):
"""Update the probability of the replay samples and recompute the weights (prob)."""
prob_sum_dict = {}
trajs_to_update = [sample.traj for sample in samples if sample.prob is None]
new_probs = self.compute_probs(trajs_to_update)
for traj, prob in zip(trajs_to_update, new_probs):
name = traj.env_name
if name in prob_sum_dict:
prob_sum_dict[name] += prob
else:
prob_sum_dict[name] = prob
i = 0
new_samples = []
for sample in samples:
name = sample.traj.env_name
if name in prob_sum_dict:
w = max(prob_sum_dict[name], min_replay_weight)
else:
w = 0.0
if sample.prob is None:
new_samples.append(
sample._replace(prob=new_probs[i] * w / prob_sum_dict[name]))
i += 1
else:
prob = sample.prob * (1.0 - w)
new_samples.append(sample._replace(prob=prob))
assert i == len(trajs_to_update)
return new_samples
# pylint: disable=missing-docstring
def train(self,
samples,
debug=False,
parameters=None,
min_prob=0.0,
scale=1.0,
de_vocab=None,
val_samples=None):
trajs = [s.traj for s in samples]
probs = [s.prob for s in samples]
env_names = [t.env_name for t in trajs]
rewards = [sum(t.rewards) for t in trajs]
obs = [t.obs for t in trajs]
actions = [t.actions for t in trajs]
context = [t.context for t in trajs]
sim_features, env_indices = self.create_score_features(trajs)
# Compute the product of pi(a) * R(a) where R(a) is the binary reward
# given by the executor
weights = [p * r * scale for p, r in zip(probs, rewards)]
if self.use_baseline:
baselines = self.get_baselines(samples)
else:
baselines = None
if debug:
print('+' * 50)
model_probs = self.compute_probs(trajs)
print('scale: {}, min_prob: {}'.format(scale, min_prob))
for i, (name, r, p, mp, w, traj) in enumerate(
zip(env_names, rewards, probs, model_probs, weights, trajs)):
print(('sample {}, name: {}, return: {}, '
'prob: {}, model prob: {}, weight: {}').format(
i, name, r, p, mp, w))
if de_vocab is not None:
print(' '.join(traj_to_program(traj, de_vocab)))
print('+' * 50)
if self.meta_learn:
val_obs, val_actions, val_context, val_weights = get_data_for_train(
val_samples)
val_feed_dict = dict(
val_inputs=val_obs,
val_targets=val_actions,
val_context=val_context,
val_weights=val_weights)
else:
val_feed_dict = None
self.model.train(
obs,
actions,
weights=weights,
context=context,
parameters=parameters,
val_feed_dict=val_feed_dict,
writer=self.train_writer,
baselines=baselines,
sim_features=sim_features,
env_indices=env_indices)
# pylint: enable=missing-docstring
def compute_probs(self, trajs):
obs = [s.obs for s in trajs]
actions = [s.actions for s in trajs]
context = [s.context for s in trajs]
probs = self.model.compute_probs(obs, actions, context=context)
return probs
def create_score_features(self, trajs):
env_indices, sim_features = None, None
if self.score_model == 'tabular' or self.score_norm_fn == 'softmax':
sim_features = [[self.env_to_index[t.env_name], t.idx] for t in trajs]
elif self.score_model in ['linear', 'local_linear', 'local_attn']:
sim_features = [t.sim_features for t in trajs]
if self.score_model == 'local_linear':
env_indices = [self.env_to_index[t.env_name] for t in trajs]
return sim_features, env_indices
def compute_scores(self, trajs):
"""Compute the scores assigned to the trajs."""
if self.score_model not in ['attn', 'complex']:
sim_features, env_indices = self.create_score_features(trajs)
scores = self.model.compute_simple_scores(
sim_features=sim_features, env_indices=env_indices)
else:
obs = [s.obs for s in trajs]
actions = [s.actions for s in trajs]
context = [s.context for s in trajs]
scores = self.model.compute_scores(obs, actions, context=context)
return scores
def get_baselines(self, samples):
return [self.baseline_dict[s.traj.env_name] for s in samples]
def compute_step_logprobs(self, trajs):
obs = [s.obs for s in trajs]
actions = [s.actions for s in trajs]
context = [s.context for s in trajs]
logprobs = self.model.compute_step_logprobs(obs, actions, context=context)
return logprobs
def evaluate(
self,
samples,
writer=None,
true_n=None,
# clip_frac=0.0,
extra_monitors=None):
"""Evaluate the agent on the envs."""
trajs = [s.traj for s in samples]
actions = [t.actions for t in trajs]
probs = [s.prob for s in samples]
returns = [
compute_returns(t.rewards, self.discount_factor)[0] for t in trajs
]
# pylint: disable=unused-variable
avg_return, _, max_return, min_return, n_w = compute_weighted_stats(
returns, probs)
# pylint: enable=unused-variable
if true_n is not None:
# Account for the fact that some environments don't generate any
# valid samples, but we still need to count them when computing
# the average return.
new_avg_return = avg_return * n_w / true_n
tf.logging.info(
'avg return adjusted from {} to {} based on true n'.format(
avg_return, new_avg_return))
avg_return = new_avg_return
lens = [len(acs) for acs in actions]
# pylint: disable=unused-variable
avg_len, std_len, max_len, min_len, _ = compute_weighted_stats(lens, probs)
# pylint: enable=unused-variable
if writer is not None:
if extra_monitors is None:
if self._extra_monitors:
extra_monitors = {monitor: 0.0 for monitor in self.extra_monitor_list}
else:
extra_monitors = {}
feed_dict = dict(
avg_return=avg_return,
avg_len=avg_len,
**extra_monitors)
self.write_to_monitor(feed_dict, writer)
return avg_return, avg_len
def write_to_monitor(self, feed_dict, writer):
summary = self.monitor_graph.generate_summary(feed_dict)
writer.add_summary(summary, self.model.get_global_step())
writer.flush()
def compute_weighted_stats(array, weight):
"""Compute the stats (e.g. mean, std) of array weighted by `weight`."""
n = len(array)
if n < 1:
return (0.0, 0.0, 0.0, 0.0, 0.0)
sum_ = 0.0
min_ = array[0]
max_ = array[0]
n_w = sum(weight)
for a, w in zip(array, weight):
min_ = min([min_, a])
max_ = max([max_, a])
sum_ += a * w
mean = sum_ / n_w
sum_square_std = 0.0
for a, w in zip(array, weight):
sum_square_std += (a - mean)**2 * w
std = np.sqrt(sum_square_std / n_w)
return (mean, std, max_, min_, n_w)
def compute_returns(rewards, discount_factor=1.0):
"""Compute returns of a trace (sum of discounted rewards).
Args:
rewards: list of float rewards.
discount_factor: Discount factor to be used for return calculation.
Returns:
list[float32]: A list of discounted returns of same size as rewards.
"""
returns = []
t = len(rewards)
returns = [0.0] * t
sum_return_so_far = 0.0
for i in xrange(t):
returns[-i - 1] = sum_return_so_far * discount_factor + rewards[-i - 1]
sum_return_so_far = returns[-1 - i]
return returns
def compute_td_errors(values, rewards, discount_factor=1.0, td_n=0):
"""Compute TD errors."""
td_errors = []
td_n += 1
backup_values = compute_backup_values(values, rewards, discount_factor, td_n)
for vs, bvs in zip(values, backup_values):
td_errors.append((np.array(bvs) - np.array(vs)).tolist())
return td_errors
def compute_backup_values(values, rewards, discount_factor=1.0, n_steps=1):
"""Compute backup value."""
backup_values = []
for vs, rs in zip(values, rewards):
bvs = []
# pylint: disable=invalid-name
T = len(vs)
# pylint: enable=invalid-name
for i in xrange(T):
end = min(i + n_steps, T)
if end == T:
bv = 0.0
else:
bv = vs[end] * (discount_factor**(end - i))
for t in xrange(i, end):
bv += rs[t] * (discount_factor**(t - i))
bvs.append(bv)
backup_values.append(bvs)
return backup_values
class ReplayBuffer(object):
def save(self, samples):
pass
def replay(self, envs):
pass
class AllGoodReplayBuffer(ReplayBuffer):
"""Class for the replay buffer containing successful programs."""
def __init__(self, agent=None, de_vocab=None, discount_factor=1.0):
# Mapping env names to good trajectories in that env.
self._buffer = dict()
self.discount_factor = discount_factor
self.agent = agent
self.de_vocab = de_vocab
# Persistent mapping from env names to good program strs
self.program_prob_dict = dict()
self.prob_sum_dict = dict()
def has_found_solution(self, env_name):
return env_name in self._buffer and self._buffer[env_name]
@property
def traj_buffer(self):
return self._buffer
def contain(self, traj):
"""Checks whether a given action sequence is present in the buffer or not."""
name = traj.env_name
if name not in self.program_prob_dict:
return False
program = traj_to_program(traj, self.de_vocab)
program_str = u' '.join(program)
if program_str in self.program_prob_dict[name]:
return True
else:
return False
def get_all_progs(self):
return {k: v.keys() for k, v in self.program_prob_dict.iteritems()}
@property
def size(self):
n = 0
for v in self._buffer.values():
n += len(v)
return n
def save(self, samples):
trajs = [s.traj for s in samples]
self.save_trajs(trajs)
def check_not_in_buffer(self, sample):
traj = sample.traj
name = traj.env_name
program = traj_to_program(traj, self.de_vocab)
program_str = ' '.join(program)
return program and (program[-1] == self.de_vocab.end_tk) and \
(not (name in self.program_prob_dict and
(program_str in self.program_prob_dict[name])))
def save_trajs(self, trajs):
# pylint: disable=g-doc-args
"""Saves only good trajectories not currently present in the buffer.
A good trajectory has length > 0, achieves a return greater than 0.5,
and contains the end token as its last item.
"""
# pylint: enable=g-doc-args
total_returns = [
compute_returns(t.rewards, self.discount_factor)[0] for t in trajs
]
for t, return_ in zip(trajs, total_returns):
name = t.env_name
program = traj_to_program(t, self.de_vocab)
program_str = ' '.join(program)
# pylint: disable=g-explicit-length-test
if (return_ > 0.5 and len(program) > 0 and
(program[-1] == self.de_vocab.end_tk) and
(not (name in self.program_prob_dict and
(program_str in self.program_prob_dict[name])))):
if name in self.program_prob_dict:
self.program_prob_dict[name][program_str] = True
else:
self.program_prob_dict[name] = {program_str: True}
if name in self._buffer:
self._buffer[name].append(t)
else:
self._buffer[name] = [t]
# pylint: enable=g-explicit-length-test
def all_samples(self, envs, agent=None, sampling_strategy=None, is_val=False):
"""All samples with correct probability values for sampling strategy."""
select_env_names = set([e.name for e in envs])
trajs = []
# Collect all the trajs for the selected envs.
for name in select_env_names:
if name in self._buffer:
trajs += self._buffer[name]
if agent is None or sampling_strategy is None:
# All trajs have the same probability; since the probabilities are
# normalized later, we just assign 1.0 to each.
probs = [1.0] * len(trajs)
else:
if (not is_val) and sampling_strategy != 'probs':
probs = agent.compute_scores(trajs)
if agent.score_norm_fn == 'identity' and agent.score_model == 'linear':
probs = np.exp(probs)
if sampling_strategy == 'probs_and_reward':
probs *= agent.compute_probs(trajs)
else:
probs = agent.compute_probs(trajs)
if (not is_val) and agent.use_baseline:
rewards = agent.compute_scores(trajs)
env_names = [traj.env_name for traj in trajs]
agent.baseline_dict.update(
compute_baselines(rewards, probs, env_names))
samples = [Sample(traj=t, prob=p) for t, p in zip(trajs, probs)]
return samples
def replay(self,
envs,
n_samples=1,
use_top_k=False,
agent=None,
truncate_at_n=0,
is_val=False,
sampling_strategy=None,
beta=1):
select_env_names = set([e.name for e in envs])
samples = self.all_samples(
envs, agent=agent, sampling_strategy=sampling_strategy, is_val=is_val)
# Put the samples into a dictionary keyed by env names.
env_sample_dict = dict(
[(name, []) for name in select_env_names if name in self._buffer])
for s in samples:
name = s.traj.env_name
env_sample_dict[name].append(s)
replay_samples = []
for name, samples in env_sample_dict.iteritems():
n = len(samples)
# Truncate the number of samples kept, both in the selected
# samples and in the buffer.
if truncate_at_n > 0 and n > truncate_at_n:
# Shuffle the samples before truncation so that, when no prob
# information is provided, the trajs are truncated randomly.
random.shuffle(samples)
samples = heapq.nlargest(truncate_at_n, samples, key=lambda s: s.prob)
self._buffer[name] = [sample.traj for sample in samples]
# Compute the sum of prob of replays in the buffer.
self.prob_sum_dict[name] = sum([sample.prob for sample in samples])
# Used for hard EM
if use_top_k:
# Select the top k samples weighted by their probs.
selected_samples = heapq.nlargest(
n_samples, samples, key=lambda s: s.prob)
replay_samples += normalize_probs(selected_samples)
else:
# Randomly sample according to their probs.
samples = normalize_probs(samples, beta=beta)
idxs = np.random.choice(
len(samples), n_samples, p=[sample.prob for sample in samples])
selected_samples = [samples[i] for i in idxs]
replay_samples += [
Sample(traj=s.traj, prob=1.0 / n_samples) for s in selected_samples
]
return replay_samples
def traj_to_program(traj, de_vocab):
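"""Convert a trajectory into a list of program tokens by mapping each chosen
action back to its token through the decoder vocabulary."""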
program = []
for a, ob in zip(traj.actions, traj.obs):
ob = ob[0]
token = de_vocab.lookup(ob.valid_indices[a], reverse=True)
program.append(token)
return program
# pylint: disable=invalid-name
Traj = data_utils.namedtuple_with_defaults(
'Traj', 'obs actions rewards context env_name answer idx sim_features')
Sample = collections.namedtuple('Sample', 'traj prob')
# pylint: enable=invalid-name
def sampling(model,
envs,
temperature=1.0,
use_encode=True,
greedy=False,
n_samples=1,
debug=False,
use_cache=False,
filter_error=True):
"""Code for sampling programs using the model."""
if not envs:
raise ValueError('No environment provided!')
if use_cache:
# If everything has already been explored, don't explore this
# environment anymore.
envs = [env for env in envs if not env.cache.is_full()]
duplicated_envs = []
for env in envs:
for i in range(n_samples):
duplicated_envs.append(env.clone())
envs = duplicated_envs
for env in envs:
env.use_cache = use_cache
if use_encode:
env_context = [env.get_context() for env in envs]
encoded_context, initial_state = model.encode(env_context)
else:
# env_context = [None for env in envs]
encoded_context, initial_state = None, None
obs = [[env.start_ob] for env in envs]
state = initial_state
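# Roll out all the (duplicated) environments in lockstep, one decoding step at
# a time, dropping environments from the batch as soon as they are done.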
while True:
outputs, state = model.step(obs, state, context=encoded_context)
if greedy:
actions = model.predict(cell_outputs=outputs)
else:
actions = model.sampling(cell_outputs=outputs, temperature=temperature)
if debug:
print('*' * 50)
print('actions: ')
pprint.pprint(actions)
print('action_prob: ')
action_prob = model.predict_prob(cell_outputs=outputs)
pprint.pprint(action_prob)
print('*' * 50)
# Get rid of the time dimension so that actions is just one list.
actions = [a[0] for a in actions]
action_probs = model.predict_prob(cell_outputs=outputs)
action_probs = [ap[0] for ap in action_probs]
obs = []
for env, action, p in zip(envs, actions, action_probs):
try:
# pylint: disable=unused-variable
ob, _, _, info = env.step(action)
# pylint: enable=unused-variable
obs.append([ob])
except IndexError:
print(p)
raise IndexError
step_pairs = [
x for x in zip(obs, state, encoded_context, envs) if not x[-1].done
]
if step_pairs:
obs, state, encoded_context, envs = zip(*step_pairs)
obs = list(obs)
state = list(state)
envs = list(envs)
encoded_context = list(encoded_context)
assert len(obs) == len(state)
assert len(obs) == len(encoded_context)
assert len(obs) == len(envs)
else:
break
# pylint: disable=unused-variable
obs, actions, rewards = zip(*[(env.obs, env.actions, env.rewards)
for env in duplicated_envs])
# pylint: enable=unused-variable
env_names = [env.name for env in duplicated_envs]
answers = [env.interpreter.result for env in duplicated_envs]
samples = []
for i, env in enumerate(duplicated_envs):
if not (filter_error and env.error):
samples.append(
Sample(
traj=Traj(
obs=env.obs,
actions=env.actions,
rewards=env.rewards,
context=env_context[i],
env_name=env_names[i],
answer=answers[i]),
prob=1.0 / n_samples))
return samples
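# Beam search bookkeeping: a Hyph is a (partial or finished) hypothesis holding
# the decoder state, its environment and its accumulated log-prob score; a
# Candidate is a hypothesis extended by one action that has not yet been
# stepped in the environment.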
Hyph = collections.namedtuple('Hyph', ['state', 'env', 'score'])
Candidate = collections.namedtuple('Candidate',
['state', 'env', 'score', 'action'])
def beam_search(model,
envs,
use_encode=True,
beam_size=1,
debug=False,
renorm=True,
use_cache=False,
filter_error=True,
greedy=False):
"""Beam search using the model."""
if use_cache:
# If everything has already been explored, don't explore this
# environment anymore.
envs = [env for env in envs if not env.cache.is_full()]
if use_encode:
env_context = [env.get_context() for env in envs]
encoded_context, initial_state = model.encode(env_context)
env_context_dict = dict([(env.name, env.get_context()) for env in envs])
context_dict = dict(
[(env.name, c) for env, c in zip(envs, encoded_context)])
beam = [Hyph(s, env.clone(), 0.0) for env, s in zip(envs, initial_state)]
state = initial_state
context = encoded_context
else:
beam = [Hyph(None, env.clone(), 0.0) for env in envs]
state = None
context = None
env_context_dict = dict([(env.name, None) for env in envs])
for hyp in beam:
hyp.env.use_cache = use_cache
finished_dict = dict([(env.name, []) for env in envs])
obs = [[h.env.start_ob] for h in beam]
while beam:
if debug:
print('@' * 50)
print('beam is')
for h in beam:
print('env {}'.format(h.env.name))
print(h.env.show())
print(h.score)
print()
# Run the model for one step to get probabilities for new actions.
outputs, state = model.step(obs, state, context=context)
probs = model.predict_prob(outputs)
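# Accumulate log-probabilities: a candidate's score is its parent hypothesis'
# score plus log p(action).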
scores = (np.log(np.array(probs)) + np.array([[[h.score]] for h in beam]))
# Collect candidates.
candidate_dict = {}
for hyph, st, score in zip(beam, state, scores):
env_name = hyph.env.name
if env_name not in candidate_dict:
candidate_dict[env_name] = []
for action, s in enumerate(score[0]):
# pylint: disable=g-comparison-negation
if not s == -np.inf:
candidate_dict[env_name].append(Candidate(st, hyph.env, s, action))
# pylint: enable=g-comparison-negation
if debug:
print('*' * 20)
print('candidates are')
for k, v in candidate_dict.iteritems():
print('env {}'.format(k))
for x in v:
print(x.env.show())
print(x.action)
print(x.score)
print(type(x))
print(isinstance(x, Candidate))
print()
# Collect the new beam.
new_beam = []
obs = []
for env_name, candidates in candidate_dict.iteritems():
# Find the top k from the union of candidates and
# finished hypotheses.
all_candidates = candidates + finished_dict[env_name]
topk = heapq.nlargest(beam_size, all_candidates, key=lambda x: x.score)
# Step the environment and collect the hypotheses into
# new beam (unfinished hypotheses) or finished_dict
finished_dict[env_name] = []
for c in topk:
if isinstance(c, Hyph):
finished_dict[env_name].append(c)
else:
env = c.env.clone()
# pylint: disable=unused-variable
ob, _, done, info = env.step(c.action)
# pylint: enable=unused-variable
new_hyph = Hyph(c.state, env, c.score)
if not done:
obs.append([ob])
new_beam.append(new_hyph)
else:
if not (filter_error and new_hyph.env.error):
finished_dict[env_name].append(new_hyph)
if debug:
print('#' * 20)
print('finished programs are')
for k, v in finished_dict.iteritems():
print('env {}'.format(k))
for x in v:
print(x.env.show())
print(x.score)
print(type(x))
print(isinstance(x, Hyph))
print()
beam = new_beam
if use_encode:
state = [h.state for h in beam]
context = [context_dict[h.env.name] for h in beam]
else:
state = None
context = None
final = []
env_names = [e.name for e in envs]
for name in env_names:
sorted_final = sorted(
finished_dict[name], key=lambda h: h.score, reverse=True)
if greedy:
# Handle the case where sorted_final is empty (no error-free
# programs were found).
if sorted_final:
final += [sorted_final[0]]
else:
final += sorted_final
if not final:
return []
# Collect the training examples.
obs, actions, rewards, env_names, scores = zip(*[(h.env.obs, h.env.actions,
h.env.rewards, h.env.name,
h.score) for h in final])
answers = [h.env.interpreter.result for h in final]
samples = []
for i, name in enumerate(env_names):
samples.append(
Sample(
traj=Traj(
obs=obs[i],
actions=actions[i],
rewards=rewards[i],
context=env_context_dict[name],
env_name=name,
answer=answers[i]),
prob=np.exp(scores[i])))
if renorm:
samples = normalize_probs(samples)
return samples
class RandomAgent(object):
"""A random agent."""
def __init__(self, discount_factor=1.0):
self.discount_factor = discount_factor
# pylint: disable=missing-docstring
def generate_samples(self, envs, n_samples=1, use_cache=False):
if use_cache:
# If everything has already been explored, don't explore this
# environment anymore.
envs = [env for env in envs if not env.cache.is_full()]
for env in envs:
env.use_cache = use_cache
duplicated_envs = []
for env in envs:
for i in range(n_samples):
duplicated_envs.append(env.clone())
envs = duplicated_envs
for env in envs:
ob = env.start_ob
while not env.done:
valid_actions = ob[0].valid_indices
action = np.random.randint(0, len(valid_actions))
ob, _, _, _ = env.step(action)
env_context = [env.get_context() for env in envs]
env_names = [env.name for env in envs]
samples = []
for i, env in enumerate(envs):
samples.append(
Sample(
traj=Traj(
obs=env.obs,
actions=env.actions,
rewards=env.rewards,
context=env_context[i],
env_name=env_names[i]),
prob=1.0 / n_samples))
return samples
def evaluate(self, samples):
trajs = [s.traj for s in samples]
actions = [t.actions for t in trajs]
probs = [s.prob for s in samples]
returns = [
compute_returns(t.rewards, self.discount_factor)[0] for t in trajs
]
# pylint: disable=unused-variable
avg_return, std_return, max_return, min_return, _ = compute_weighted_stats(
returns, probs)
lens = [len(acs) for acs in actions]
avg_len, std_len, max_len, min_len, _ = compute_weighted_stats(lens, probs)
# pylint: enable=unused-variable
return avg_return, avg_len
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.estimators.state_saving_rnn_estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
from tensorflow.contrib import lookup
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.layers.python.layers import target_column as target_column_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.estimators import rnn_common
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import state_saving_rnn_estimator as ssre
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class PrepareInputsForRnnTest(test.TestCase):
def _test_prepare_inputs_for_rnn(self, sequence_features, context_features,
sequence_feature_columns, num_unroll,
expected):
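"""Checks that _prepare_inputs_for_rnn unstacks the batch into `num_unroll`
per-time-step tensors, each concatenating the sequence features with the
(tiled) context features, and compares them against `expected`."""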
features_by_time = ssre._prepare_inputs_for_rnn(sequence_features,
context_features,
sequence_feature_columns,
num_unroll)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
features_val = sess.run(features_by_time)
self.assertAllEqual(expected, features_val)
def testPrepareInputsForRnnBatchSize1(self):
num_unroll = 3
expected = [
np.array([[11., 31., 5., 7.]]), np.array([[12., 32., 5., 7.]]),
np.array([[13., 33., 5., 7.]])
]
sequence_features = {
'seq_feature0': constant_op.constant([[11., 12., 13.]]),
'seq_feature1': constant_op.constant([[31., 32., 33.]])
}
sequence_feature_columns = [
feature_column.real_valued_column(
'seq_feature0', dimension=1),
feature_column.real_valued_column(
'seq_feature1', dimension=1),
]
context_features = {
'ctx_feature0': constant_op.constant([[5.]]),
'ctx_feature1': constant_op.constant([[7.]])
}
self._test_prepare_inputs_for_rnn(sequence_features, context_features,
sequence_feature_columns, num_unroll,
expected)
def testPrepareInputsForRnnBatchSize2(self):
num_unroll = 3
expected = [
np.array([[11., 31., 5., 7.], [21., 41., 6., 8.]]),
np.array([[12., 32., 5., 7.], [22., 42., 6., 8.]]),
np.array([[13., 33., 5., 7.], [23., 43., 6., 8.]])
]
sequence_features = {
'seq_feature0':
constant_op.constant([[11., 12., 13.], [21., 22., 23.]]),
'seq_feature1':
constant_op.constant([[31., 32., 33.], [41., 42., 43.]])
}
sequence_feature_columns = [
feature_column.real_valued_column(
'seq_feature0', dimension=1),
feature_column.real_valued_column(
'seq_feature1', dimension=1),
]
context_features = {
'ctx_feature0': constant_op.constant([[5.], [6.]]),
'ctx_feature1': constant_op.constant([[7.], [8.]])
}
self._test_prepare_inputs_for_rnn(sequence_features, context_features,
sequence_feature_columns, num_unroll,
expected)
def testPrepareInputsForRnnNoContext(self):
num_unroll = 3
expected = [
np.array([[11., 31.], [21., 41.]]), np.array([[12., 32.], [22., 42.]]),
np.array([[13., 33.], [23., 43.]])
]
sequence_features = {
'seq_feature0':
constant_op.constant([[11., 12., 13.], [21., 22., 23.]]),
'seq_feature1':
constant_op.constant([[31., 32., 33.], [41., 42., 43.]])
}
sequence_feature_columns = [
feature_column.real_valued_column(
'seq_feature0', dimension=1),
feature_column.real_valued_column(
'seq_feature1', dimension=1),
]
context_features = None
self._test_prepare_inputs_for_rnn(sequence_features, context_features,
sequence_feature_columns, num_unroll,
expected)
def testPrepareInputsForRnnSparse(self):
num_unroll = 2
embedding_dimension = 8
expected = [
np.array([[1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1.]]),
np.array([[1., 1., 1., 1., 1., 1., 1., 1.],
[2., 2., 2., 2., 2., 2., 2., 2.],
[1., 1., 1., 1., 1., 1., 1., 1.]])
]
sequence_features = {
'wire_cast':
sparse_tensor.SparseTensor(
indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1],
[2, 0, 0], [2, 1, 1]],
values=[
b'marlo', b'stringer', b'omar', b'stringer', b'marlo',
b'marlo', b'omar'
],
dense_shape=[3, 2, 2])
}
wire_cast = feature_column.sparse_column_with_keys(
'wire_cast', ['marlo', 'omar', 'stringer'])
sequence_feature_columns = [
feature_column.embedding_column(
wire_cast,
dimension=embedding_dimension,
combiner='sum',
initializer=init_ops.ones_initializer())
]
context_features = None
self._test_prepare_inputs_for_rnn(sequence_features, context_features,
sequence_feature_columns, num_unroll,
expected)
def testPrepareInputsForRnnSparseAndDense(self):
num_unroll = 2
embedding_dimension = 8
dense_dimension = 2
expected = [
np.array([[1., 1., 1., 1., 1., 1., 1., 1., 111., 112.],
[1., 1., 1., 1., 1., 1., 1., 1., 211., 212.],
[1., 1., 1., 1., 1., 1., 1., 1., 311., 312.]]),
np.array([[1., 1., 1., 1., 1., 1., 1., 1., 121., 122.],
[2., 2., 2., 2., 2., 2., 2., 2., 221., 222.],
[1., 1., 1., 1., 1., 1., 1., 1., 321., 322.]])
]
sequence_features = {
'wire_cast':
sparse_tensor.SparseTensor(
indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1],
[2, 0, 0], [2, 1, 1]],
values=[
b'marlo', b'stringer', b'omar', b'stringer', b'marlo',
b'marlo', b'omar'
],
dense_shape=[3, 2, 2]),
'seq_feature0':
constant_op.constant([[[111., 112.], [121., 122.]],
[[211., 212.], [221., 222.]],
[[311., 312.], [321., 322.]]])
}
wire_cast = feature_column.sparse_column_with_keys(
'wire_cast', ['marlo', 'omar', 'stringer'])
wire_cast_embedded = feature_column.embedding_column(
wire_cast,
dimension=embedding_dimension,
combiner='sum',
initializer=init_ops.ones_initializer())
seq_feature0_column = feature_column.real_valued_column(
'seq_feature0', dimension=dense_dimension)
sequence_feature_columns = [seq_feature0_column, wire_cast_embedded]
context_features = None
self._test_prepare_inputs_for_rnn(sequence_features, context_features,
sequence_feature_columns, num_unroll,
expected)
class StateSavingRnnEstimatorTest(test.TestCase):
def testPrepareFeaturesForSQSS(self):
mode = model_fn_lib.ModeKeys.TRAIN
seq_feature_name = 'seq_feature'
sparse_seq_feature_name = 'wire_cast'
ctx_feature_name = 'ctx_feature'
sequence_length = 4
embedding_dimension = 8
features = {
sparse_seq_feature_name:
sparse_tensor.SparseTensor(
indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1],
[2, 0, 0], [2, 1, 1]],
values=[
b'marlo', b'stringer', b'omar', b'stringer', b'marlo',
b'marlo', b'omar'
],
dense_shape=[3, 2, 2]),
seq_feature_name:
constant_op.constant(
1.0, shape=[sequence_length]),
ctx_feature_name:
constant_op.constant(2.0)
}
labels = constant_op.constant(5.0, shape=[sequence_length])
wire_cast = feature_column.sparse_column_with_keys(
'wire_cast', ['marlo', 'omar', 'stringer'])
sequence_feature_columns = [
feature_column.real_valued_column(
seq_feature_name, dimension=1), feature_column.embedding_column(
wire_cast,
dimension=embedding_dimension,
initializer=init_ops.ones_initializer())
]
context_feature_columns = [
feature_column.real_valued_column(
ctx_feature_name, dimension=1)
]
expected_sequence = {
rnn_common.RNNKeys.LABELS_KEY:
np.array([5., 5., 5., 5.]),
seq_feature_name:
np.array([1., 1., 1., 1.]),
sparse_seq_feature_name:
sparse_tensor.SparseTensor(
indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1],
[2, 0, 0], [2, 1, 1]],
values=[
b'marlo', b'stringer', b'omar', b'stringer', b'marlo',
b'marlo', b'omar'
],
dense_shape=[3, 2, 2]),
}
expected_context = {ctx_feature_name: 2.}
sequence, context = ssre._prepare_features_for_sqss(
features, labels, mode, sequence_feature_columns,
context_feature_columns)
def assert_equal(expected, got):
self.assertEqual(sorted(expected), sorted(got))
for k, v in expected.items():
if isinstance(v, sparse_tensor.SparseTensor):
self.assertAllEqual(v.values.eval(), got[k].values)
self.assertAllEqual(v.indices.eval(), got[k].indices)
self.assertAllEqual(v.dense_shape.eval(), got[k].dense_shape)
else:
self.assertAllEqual(v, got[k])
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
actual_sequence, actual_context = sess.run(
[sequence, context])
assert_equal(expected_sequence, actual_sequence)
assert_equal(expected_context, actual_context)
def _getModelFnOpsForMode(self, mode):
"""Helper for testGetRnnModelFn{Train,Eval,Infer}()."""
num_units = [4]
seq_columns = [
feature_column.real_valued_column(
'inputs', dimension=1)
]
features = {
'inputs': constant_op.constant([1., 2., 3.]),
}
labels = constant_op.constant([1., 0., 1.])
model_fn = ssre._get_rnn_model_fn(
cell_type='basic_rnn',
target_column=target_column_lib.multi_class_target(n_classes=2),
optimizer='SGD',
num_unroll=2,
num_units=num_units,
num_threads=1,
queue_capacity=10,
batch_size=1,
# Only CLASSIFICATION yields eval metrics to test for.
problem_type=constants.ProblemType.CLASSIFICATION,
sequence_feature_columns=seq_columns,
context_feature_columns=None,
learning_rate=0.1)
model_fn_ops = model_fn(features=features, labels=labels, mode=mode)
return model_fn_ops
# The testGetRnnModelFn{Train,Eval,Infer}() tests below check which fields
# of ModelFnOps are set depending on the mode.
def testGetRnnModelFnTrain(self):
model_fn_ops = self._getModelFnOpsForMode(model_fn_lib.ModeKeys.TRAIN)
self.assertIsNotNone(model_fn_ops.predictions)
self.assertIsNotNone(model_fn_ops.loss)
self.assertIsNotNone(model_fn_ops.train_op)
# None may get normalized to {}; we accept neither.
self.assertNotEqual(len(model_fn_ops.eval_metric_ops), 0)
def testGetRnnModelFnEval(self):
model_fn_ops = self._getModelFnOpsForMode(model_fn_lib.ModeKeys.EVAL)
self.assertIsNotNone(model_fn_ops.predictions)
self.assertIsNotNone(model_fn_ops.loss)
self.assertIsNone(model_fn_ops.train_op)
# None may get normalized to {}; we accept neither.
self.assertNotEqual(len(model_fn_ops.eval_metric_ops), 0)
def testGetRnnModelFnInfer(self):
model_fn_ops = self._getModelFnOpsForMode(model_fn_lib.ModeKeys.INFER)
self.assertIsNotNone(model_fn_ops.predictions)
self.assertIsNone(model_fn_ops.loss)
self.assertIsNone(model_fn_ops.train_op)
# None may get normalized to {}; we accept both.
self.assertFalse(model_fn_ops.eval_metric_ops)
def testExport(self):
input_feature_key = 'magic_input_feature_key'
batch_size = 8
num_units = [4]
sequence_length = 10
num_unroll = 2
num_classes = 2
seq_columns = [
feature_column.real_valued_column(
'inputs', dimension=4)
]
def get_input_fn(mode, seed):
def input_fn():
features = {}
random_sequence = random_ops.random_uniform(
[sequence_length + 1], 0, 2, dtype=dtypes.int32, seed=seed)
labels = array_ops.slice(random_sequence, [0], [sequence_length])
inputs = math_ops.to_float(
array_ops.slice(random_sequence, [1], [sequence_length]))
features = {'inputs': inputs}
if mode == model_fn_lib.ModeKeys.INFER:
input_examples = array_ops.placeholder(dtypes.string)
features[input_feature_key] = input_examples
labels = None
return features, labels
return input_fn
model_dir = tempfile.mkdtemp()
def estimator_fn():
return ssre.StateSavingRnnEstimator(
constants.ProblemType.CLASSIFICATION,
num_units=num_units,
num_unroll=num_unroll,
batch_size=batch_size,
sequence_feature_columns=seq_columns,
num_classes=num_classes,
predict_probabilities=True,
model_dir=model_dir,
queue_capacity=2 + batch_size,
seed=1234)
# Train a bit to create an exportable checkpoint.
estimator_fn().fit(input_fn=get_input_fn(
model_fn_lib.ModeKeys.TRAIN, seed=1234),
steps=100)
# Now export, but from a fresh estimator instance, like you would
# in an export binary. That means .export() has to work without
# .fit() being called on the same object.
export_dir = tempfile.mkdtemp()
print('Exporting to', export_dir)
estimator_fn().export(
export_dir,
input_fn=get_input_fn(
model_fn_lib.ModeKeys.INFER, seed=4321),
use_deprecated_input_fn=False,
input_feature_key=input_feature_key)
# Smoke tests to ensure deprecated constructor functions still work.
class LegacyConstructorTest(test.TestCase):
def _get_input_fn(self,
sequence_length,
seed=None):
def input_fn():
random_sequence = random_ops.random_uniform(
[sequence_length + 1], 0, 2, dtype=dtypes.int32, seed=seed)
labels = array_ops.slice(random_sequence, [0], [sequence_length])
inputs = math_ops.to_float(
array_ops.slice(random_sequence, [1], [sequence_length]))
return {'inputs': inputs}, labels
return input_fn
# TODO(jtbates): move all tests below to a benchmark test.
class StateSavingRNNEstimatorLearningTest(test.TestCase):
"""Learning tests for state saving RNN Estimators."""
def testLearnSineFunction(self):
"""Tests learning a sine function."""
batch_size = 8
num_unroll = 5
sequence_length = 64
train_steps = 250
eval_steps = 20
num_rnn_layers = 1
num_units = [4] * num_rnn_layers
learning_rate = 0.3
loss_threshold = 0.035
def get_sin_input_fn(sequence_length, increment, seed=None):
def input_fn():
start = random_ops.random_uniform(
(), minval=0, maxval=(np.pi * 2.0), dtype=dtypes.float32, seed=seed)
sin_curves = math_ops.sin(
math_ops.linspace(start, (sequence_length - 1) * increment,
sequence_length + 1))
inputs = array_ops.slice(sin_curves, [0], [sequence_length])
labels = array_ops.slice(sin_curves, [1], [sequence_length])
return {'inputs': inputs}, labels
return input_fn
seq_columns = [
feature_column.real_valued_column(
'inputs', dimension=1)
]
config = run_config.RunConfig(tf_random_seed=1234)
dropout_keep_probabilities = [0.9] * (num_rnn_layers + 1)
sequence_estimator = ssre.StateSavingRnnEstimator(
constants.ProblemType.LINEAR_REGRESSION,
num_units=num_units,
cell_type='lstm',
num_unroll=num_unroll,
batch_size=batch_size,
sequence_feature_columns=seq_columns,
learning_rate=learning_rate,
dropout_keep_probabilities=dropout_keep_probabilities,
config=config,
queue_capacity=2 * batch_size,
seed=1234)
train_input_fn = get_sin_input_fn(sequence_length, np.pi / 32, seed=1234)
eval_input_fn = get_sin_input_fn(sequence_length, np.pi / 32, seed=4321)
sequence_estimator.fit(input_fn=train_input_fn, steps=train_steps)
loss = sequence_estimator.evaluate(
input_fn=eval_input_fn, steps=eval_steps)['loss']
self.assertLess(loss, loss_threshold,
'Loss should be less than {}; got {}'.format(loss_threshold,
loss))
def testLearnShiftByOne(self):
"""Tests that learning a 'shift-by-one' example.
Each label sequence consists of the input sequence 'shifted' by one place.
The RNN must learn to 'remember' the previous input.
"""
batch_size = 16
num_classes = 2
num_unroll = 32
sequence_length = 32
train_steps = 300
eval_steps = 20
num_units = [4]
learning_rate = 0.5
accuracy_threshold = 0.9
def get_shift_input_fn(sequence_length, seed=None):
def input_fn():
random_sequence = random_ops.random_uniform(
[sequence_length + 1], 0, 2, dtype=dtypes.int32, seed=seed)
labels = array_ops.slice(random_sequence, [0], [sequence_length])
inputs = math_ops.to_float(
array_ops.slice(random_sequence, [1], [sequence_length]))
return {'inputs': inputs}, labels
return input_fn
seq_columns = [
feature_column.real_valued_column(
'inputs', dimension=1)
]
config = run_config.RunConfig(tf_random_seed=21212)
sequence_estimator = ssre.StateSavingRnnEstimator(
constants.ProblemType.CLASSIFICATION,
num_units=num_units,
cell_type='lstm',
num_unroll=num_unroll,
batch_size=batch_size,
sequence_feature_columns=seq_columns,
num_classes=num_classes,
learning_rate=learning_rate,
config=config,
predict_probabilities=True,
queue_capacity=2 + batch_size,
seed=1234)
train_input_fn = get_shift_input_fn(sequence_length, seed=12321)
eval_input_fn = get_shift_input_fn(sequence_length, seed=32123)
sequence_estimator.fit(input_fn=train_input_fn, steps=train_steps)
evaluation = sequence_estimator.evaluate(
input_fn=eval_input_fn, steps=eval_steps)
accuracy = evaluation['accuracy']
self.assertGreater(accuracy, accuracy_threshold,
'Accuracy should be higher than {}; got {}'.format(
accuracy_threshold, accuracy))
# Testing `predict` when `predict_probabilities=True`.
prediction_dict = sequence_estimator.predict(
input_fn=eval_input_fn, as_iterable=False)
self.assertListEqual(
sorted(list(prediction_dict.keys())),
sorted([
prediction_key.PredictionKey.CLASSES,
prediction_key.PredictionKey.PROBABILITIES, ssre._get_state_name(0)
]))
predictions = prediction_dict[prediction_key.PredictionKey.CLASSES]
probabilities = prediction_dict[prediction_key.PredictionKey.PROBABILITIES]
self.assertListEqual(list(predictions.shape), [batch_size, sequence_length])
self.assertListEqual(
list(probabilities.shape), [batch_size, sequence_length, 2])
def testLearnLyrics(self):
lyrics = 'if I go there will be trouble and if I stay it will be double'
lyrics_list = lyrics.split()
sequence_length = len(lyrics_list)
vocab = set(lyrics_list)
batch_size = 16
num_classes = len(vocab)
num_unroll = 7 # not a divisor of sequence_length
train_steps = 350
eval_steps = 30
num_units = [4]
learning_rate = 0.4
accuracy_threshold = 0.65
def get_lyrics_input_fn(seed):
def input_fn():
start = random_ops.random_uniform(
(), minval=0, maxval=sequence_length, dtype=dtypes.int32, seed=seed)
# Concatenate lyrics_list so inputs and labels wrap when start > 0.
lyrics_list_concat = lyrics_list + lyrics_list
inputs_dense = array_ops.slice(lyrics_list_concat, [start],
[sequence_length])
indices = array_ops.constant(
[[i, 0] for i in range(sequence_length)], dtype=dtypes.int64)
dense_shape = [sequence_length, 1]
inputs = sparse_tensor.SparseTensor(
indices=indices, values=inputs_dense, dense_shape=dense_shape)
table = lookup.string_to_index_table_from_tensor(
mapping=list(vocab), default_value=-1, name='lookup')
labels = table.lookup(
array_ops.slice(lyrics_list_concat, [start + 1], [sequence_length]))
return {'lyrics': inputs}, labels
return input_fn
sequence_feature_columns = [
feature_column.embedding_column(
feature_column.sparse_column_with_keys('lyrics', vocab),
dimension=8)
]
config = run_config.RunConfig(tf_random_seed=21212)
sequence_estimator = ssre.StateSavingRnnEstimator(
constants.ProblemType.CLASSIFICATION,
num_units=num_units,
cell_type='basic_rnn',
num_unroll=num_unroll,
batch_size=batch_size,
sequence_feature_columns=sequence_feature_columns,
num_classes=num_classes,
learning_rate=learning_rate,
config=config,
predict_probabilities=True,
queue_capacity=2 + batch_size,
seed=1234)
train_input_fn = get_lyrics_input_fn(seed=12321)
eval_input_fn = get_lyrics_input_fn(seed=32123)
sequence_estimator.fit(input_fn=train_input_fn, steps=train_steps)
evaluation = sequence_estimator.evaluate(
input_fn=eval_input_fn, steps=eval_steps)
accuracy = evaluation['accuracy']
self.assertGreater(accuracy, accuracy_threshold,
'Accuracy should be higher than {}; got {}'.format(
accuracy_threshold, accuracy))
if __name__ == '__main__':
test.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask, request, make_response, render_template, send_file, jsonify, abort
from werkzeug import secure_filename
from werkzeug.exceptions import RequestEntityTooLarge
from flask_wtf.csrf import CsrfProtect
import datetime
import hashlib
import os
from time import sleep
from db.schema import Exp, ExpSmith, ExpSmithStats, ExpVNA, ExpACCoilProp, ExpDCCoilProp, ExpPatchInfo, ExpMaterialProp
from db.schema import exp, exp_smith, exp_smith_stats, exp_vna, exp_ac_coil_prop, exp_dc_coil_prop, exp_patch_prop, exp_material_prop
from db.schema import session
import cStringIO
from sqlalchemy import func, desc
from sqlalchemy.sql import or_
from core import json
from form import ExpInfoForm
import chart
import numpy as np
from instruments.gpib import prologix
from instruments.vna import HP8751A, FrequencySweepModes
from core.listobject import *
na = None
app = Flask(__name__, static_url_path='/static', static_folder='static', template_folder='templates')
def MainLoop(debug, port=r'\\.\COM7'):
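"""Connect to the HP8751A network analyzer through a Prologix GPIB adapter on
the given serial port, then start the Flask web server."""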
global na
plx = prologix(port = port)
na = HP8751A(plx, 17, delay = 0.05, auto = False)
# Set debug before starting the server; app.run() blocks, so assignments
# made after it never take effect.
app.debug = debug
app.run(host='0.0.0.0')
UPLOAD_FOLDER = os.path.dirname(os.path.realpath(__file__))
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
CsrfProtect(app)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
print(request.files)
if request.method == 'POST':
file = request.files['Filedata']
filename = secure_filename(file.filename)
print(filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
#f.save('/var/www/uploads/uploaded_file.txt')
return "1"
@app.route('/exp/list')
def exp_list():
return render_template('list.html')
@app.route('/exp/view/vna')
def exp_view_vna():
return render_template('vnaview.html')
@app.route('/exp/view')
def exp_view():
exp_ids = request.args.getlist('exp_ids', None)
return render_template('view.html', exp_ids = exp_ids)
@app.route('/exp/preview')
def exp_preview():
exp_id = request.args.get('exp_id', None)
return render_template('preview.html', exp_ids = [exp_id])
@app.route('/api/exp/delete')
def delete_exp():
exp_ids = request.args.getlist('exp_id', None)
if not exp_ids:
abort(404)
exp_ids = map(int, exp_ids)
deleteExps(exp_ids)
# A Flask view must return a response; report the deleted ids back to the client.
resp = jsonify({'deleted': exp_ids})
resp.status_code = 200
return resp
FLOAT_SEARCHABLES = ['patch_width', 'patch_length', 'dc_current', 'dc_field', 'imp_re']
@app.route('/api/exp/list')
def get_exp_list():
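"""Server-side handler for the DataTables experiment list: reads the
DataTables request parameters (iDisplayStart/iDisplayLength for paging,
sSearch and per-column filters for searching, iSortCol_*/sSortDir_* for
sorting), builds the corresponding SQLAlchemy query and returns one page of
experiment rows as JSON."""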
d = {}
start = int(request.args.get('iDisplayStart', '0'))
length = int(request.args.get('iDisplayLength', '12'))
all_search_keyword = request.args["sSearch"]
sort_cols = []
search_cols = []
if "iSortCol_0" in request.args:
for i in range(int(request.args['iSortingCols'])):
sort_col = int(request.args["iSortCol_%d" % (i)])
sort_name = request.args["mDataProp_%d" % (sort_col)]
sort_order = request.args["sSortDir_%d" % (i)]
is_sortable = bool(request.args['bSortable_%d' % (sort_col)] == 'true')
if is_sortable:
col = getColumnByName([Exp, ExpVNA, ExpDCCoilProp, ExpACCoilProp, ExpPatchInfo, ExpSmith], sort_name)
if col:
sort_cols.append([col, sort_order])
for i in range(int(request.args['iColumns'])):
is_searchable = bool(request.args['bSearchable_%d' % (i)].lower() == 'true')
searchable_name = request.args["mDataProp_%d" % (i)]
if not all_search_keyword:
search_keyword = request.args["sSearch_%d" % (i)]
else:
search_keyword = all_search_keyword
if is_searchable and search_keyword:
col = getColumnByName([Exp, ExpVNA, ExpDCCoilProp, ExpACCoilProp, ExpPatchInfo, ExpSmith], searchable_name)
if col:
if searchable_name in FLOAT_SEARCHABLES:
try:
search_keyword = float(search_keyword)
except ValueError:
continue
search_cols.append([col, search_keyword])
else:
search_cols.append([col, search_keyword])
total_count = session.query(func.count('*')).select_from(Exp).scalar()
q = getAllExpsQuery()
if all_search_keyword:
all_search_fields = []
for col in search_cols:
all_search_fields.append(col[0] == col[1])
q = q.filter(or_(*all_search_fields))
#print q
else:
for col in search_cols:
if str(col[0].type) == 'DATE':
try:
col[1] = datetime.datetime.strptime(col[1], "%Y-%m-%d").date()
except ValueError:
continue
q = q.filter(col[0] == col[1])
for col in sort_cols:
if col[1] == "desc":
q = q.order_by(desc(col[0]))
else:
q = q.order_by(col[0])
q = q.offset(start).limit(length).all()
d['sEcho'] = int(request.args.get('sEcho', '1'))
d['iTotalRecords'] = total_count
d['iTotalDisplayRecords'] = total_count
d['aaData'] = []
for exp, exp_vna, exp_dc_coil, exp_ac_coil, patch, exp_smith in q:
d['aaData'].append(
{
'detail': '<img src="/static/img/details_open.png">',
'ac_coil_id': exp_ac_coil.id,
'dc_coil_id': exp_dc_coil.id,
'exp_date': exp.exp_date.strftime('%Y-%m-%d'),
'patch_material': patch.name,
'patch_width': patch.width,
'patch_height': patch.height,
'dc_current': exp.dc_current,
'dc_field': round(exp.dc_field, 3),
'comment': exp.comment,
'if_bandwidth': exp_vna.if_bandwidth,
'number_of_points': exp_vna.number_of_points,
'sweep_type': exp_vna.sweep_type,
'channel': exp_vna.channel,
'source_power': exp_vna.source_power,
'measure_type': exp_vna.measure_type,
'sweep_time': exp_vna.sweep_time,
'imp_re': round(exp_smith.imp_re, 3),
'freq': round(float(exp_smith.freq) / 1000, 3) ,
'id': exp.id
}
)
response = make_response(jsonify(d))
response.headers['Content-Type'] = 'application/json'
return response
@app.route('/exp/plot/ar_resonance')
def exp_ar_vs_resonance():
exp_ids = request.args.getlist('exp_id', None)
if not exp_ids:
abort(404)
buf = chart.draw_ar_vs_f(exp_ids)
return send_file(buf,
mimetype="image/png",
attachment_filename="ar_vs_resonance.png",
as_attachment=True)
@app.route('/exp/plot/ar_imp_re')
def exp_ar_vs_imp_re():
exp_ids = request.args.getlist('exp_id', None)
if not exp_ids:
abort(404)
buf = chart.draw_ar_vs_imp_re(exp_ids)
return send_file(buf,
mimetype="image/png",
attachment_filename="ar_imp_re.png",
as_attachment=True)
@app.route('/exp/plot/field_resonance')
def exp_field_vs_resonance():
exp_ids = request.args.getlist('exp_id', None)
if not exp_ids:
abort(404)
buf = chart.draw_dc_field_vs_f(exp_ids)
return send_file(buf,
mimetype="image/png",
attachment_filename="testing.png",
as_attachment=True)
@app.route('/exp/plot/freq_re')
def exp_f_vs_re():
exp_ids = request.args.getlist('exp_id', None)
if not exp_ids:
abort(404)
datas = getImpedanceData(exp_ids, 're')
buf = chart.draw_f_vs_imp_re(datas)
return send_file(buf,
mimetype="image/png",
attachment_filename="testing.png",
as_attachment=True)
@app.route('/exp/plot/freq_re/vna')
def exp_f_vs_re_vna():
srcpwr = request.args.get('srcpwr', '0')
center_freq = request.args.get('center_freq', 100000)
freq_span = request.args.get('freq_span', 20000)
datas = []
na.source_power = int(srcpwr)
#na.find_center_freq()
na.sweep_mode = FrequencySweepModes.SINGLE
#na.set_num_of_points(201)
na.set_frequency_span(center = float(center_freq), span = float(freq_span))
freq, im, re, imp_re, imp_im = na.read_impedance()
freq = np.array(freq)
imp_re = np.array(imp_re)
imp_re = imp_re * 50
data = MplXYPlotData()
data.x = freq
data.y = imp_re
data.set_legend('Source Power = %s' % (srcpwr))
buf = chart.draw_f_vs_imp_re([data])
filename = datetime.datetime.now().strftime("%Y%m%d_%H-%M-%S")
na.sweep_mode = FrequencySweepModes.CONTINUOUS
return send_file(buf,
mimetype="image/png",
attachment_filename=filename + ".png", cache_timeout = 0,
as_attachment=True)
@app.route('/api/exp/data/freq_re/vna')
def exp_data_f_vs_re_vna():
srcpwr = request.args.get('srcpwr', '0')
datas = []
na.source_power = int(srcpwr)
#na.find_center_freq()
na.sweep_mode = FrequencySweepModes.SINGLE
#na.set_num_of_points(201)
freq, im, re, imp_re, imp_im = na.read_impedance()
# Convert to an array first; multiplying a plain list by 50 would repeat it.
imp_re = np.array(imp_re) * 50
for data in zip(freq, imp_re):
datas.append([data[0], data[1]])
na.sweep_mode = FrequencySweepModes.CONTINUOUS
resp = make_response(jsonify(json.make_json_data(datas)))
resp.status_code = 200
return resp
@app.route('/exp/plot/smithchart/vna')
def smithchart_vna():
srcpwr = request.args.get('srcpwr', '0')
datas = []
data = []
na.set_source_power(int(srcpwr))
#na.find_center_freq()
#na.set_num_of_points(201)
freq, im, re, imp_re, imp_im = na.read_impedance()
for item in zip(imp_re, imp_im):
data.append(item[0] + item[1] * 1j)
data = np.array(data)
datas.append(data)
buf = chart.draw_smithchart(datas)
filename = datetime.datetime.now().strftime("%Y%m%d_%H-%M-%S")
return send_file(buf,
mimetype="image/png",
attachment_filename=filename+".png", cache_timeout = 0,
as_attachment=True)
@app.route('/exp/plot/smithchart')
def smithchart():
exp_ids = request.args.getlist('exp_id', None)
if not exp_ids:
abort(404)
datas = getExpSmithByExpIds(exp_ids)
buf = chart.draw_smithchart(datas)
return send_file(buf,
mimetype="image/png",
attachment_filename="testing.png",
as_attachment=True)
@app.route('/api/exp/vna', methods=['GET'])
def vna():
method = request.args.get('method', 'init')
resp = jsonify({})
resp.status_code = 200
return resp
@app.route('/exp/insert', methods=['GET', 'POST'])
def insert_data():
exp_id = request.args.get('exp_id', None)
if exp_id:
exp = session.query(Exp).filter(Exp.id == exp_id).first()
form = ExpInfoForm(request.form, exp)
else:
form = ExpInfoForm(request.form)
na.source_power = 7
center_freq = int(na.find_center_freq())
form.center_freq.data = center_freq
form.freq_span.data = int(20000)
if request.method == "POST" and form.validate():
if form.id.data:
exp = session.query(Exp).filter(Exp.id == int(form.id.data)).first()
exp.ac_coil_id = form.ac_coil_id.data
exp.dc_coil_id = form.dc_coil_id.data
exp.patch_id = form.patch_id.data
exp.exp_date = form.exp_date_submit.data
exp.dc_current = form.dc_current.data
exp.dc_field = form.dc_field.data
exp.comment = form.comment.data
else:
exp = Exp(
ac_coil_id = form.ac_coil_id.data.id,
dc_coil_id = form.dc_coil_id.data.id,
patch_id = form.patch_id.data.patch_id,
exp_date = form.exp_date_submit.data,
dc_current = form.dc_current.data,
dc_field = form.dc_field.data,
comment = form.comment.data
)
session.add(exp)
session.flush()
vna_data = []
na.source_power = form.source_power.data
#na.find_center_freq()
na.set_frequency_span(center = form.center_freq.data, span = form.freq_span.data)
na.autoscale()
na.num_of_points = 801
na.sweep_mode = FrequencySweepModes.SINGLE
sleep(0.5)
freq, re, im, imp_re, imp_im = na.read_impedance()
for freq, re, im in zip(freq, re, im):
smith = ExpSmith(exp.id, freq, re, im)
vna_data.append(smith)
vna_properties = ExpVNA(
exp.id,
float(0),
na.num_of_points,
'SMITH CHART',
na.sweep_type,
int(1),
float(form.source_power.data),
'A/R',
float(na.sweep_time) * 100
)
session.add(vna_properties)
session.add_all(vna_data)
session.commit()
na.sweep_mode = FrequencySweepModes.CONTINUOUS
resp = make_response(
jsonify(
json.make_json_data(
{'id': exp.id}
)
)
)
resp.status_code = 200
return resp
na.sweep_mode = FrequencySweepModes.CONTINUOUS
return render_template('insert.html', form=form)
'''
@app.route('/update')
def update():
now = datetime.datetime.now()
ret = now.strftime("%Y%m%dT%H%M%S")
ret_hash = hashlib.md5(ret.encode()).hexdigest()
response = make_response(ret)
response.headers.add("ETag", ret_hash)
if request.headers.get('If-None-Match') == ret_hash:
xheader = '{"timeout":"5000"}'
response.headers.add("X-Smartupdater", xheader)
else:
xheader = '{"timeout":"1000"}'
response.headers.add("X-Smartupdater", xheader)
return response
if __name__ == "__main__":
app.run()
'''
|
|
# -*- coding: utf-8 -*-
# ----- for creating dataset -----
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
# ----- general import -----
import pandas as pd
import numpy as np
# ----- stacking library -----
from stacking.base import FOLDER_NAME, PATH, INPUT_PATH, TEMP_PATH,\
FEATURES_PATH, OUTPUT_PATH, SUBMIT_FORMAT
# ----- utils -----
from stacking.base import load_data, save_pred_as_submit_format, create_cv_id, \
eval_pred
# ----- classifiers -----
from stacking.base import BaseModel, XGBClassifier, KerasClassifier
# ----- keras -----
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.layers.advanced_activations import LeakyReLU, PReLU
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l1, l2
# ----- scikit-learn -----
from sklearn.linear_model import LogisticRegression as LR
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier
# ----- Set problem type!! -----
problem_type = 'classification'
classification_type = 'multi-class'
eval_type = 'logloss'
BaseModel.set_prob_type(problem_type, classification_type, eval_type)
# ----- create dataset -----
# load the digits data (multi-class classification)
digits = load_digits()
# split data for train and test
data_train, data_test, label_train, label_test = train_test_split(digits.data, digits.target)
# concat data as pandas' dataframe format
data_train = pd.DataFrame(data_train)
label_train = pd.DataFrame(label_train, columns=['target'])
data_test = pd.DataFrame(data_test)
label_test = pd.DataFrame(label_test, columns=['target'])
# save data under /data/input.
data_train.to_csv(INPUT_PATH + 'train.csv', index=False)
label_train.to_csv(INPUT_PATH + 'target.csv', index=False)
data_test.to_csv(INPUT_PATH + 'test.csv', index=False)
label_test.to_csv(INPUT_PATH + 'label_test.csv', index=False)
# ----- END create dataset -----
# -----create features -----
train_log = data_train.iloc[:, :64].applymap(lambda x: np.log(x+1))
test_log = data_test.iloc[:, :64].applymap(lambda x: np.log(x+1))
train_log.columns = map(str, train_log.columns)
test_log.columns = map(str, test_log.columns)
train_log.columns += '_log'
test_log.columns += '_log'
# save data under /data/output/features/.
train_log.to_csv(FEATURES_PATH + 'train_log.csv', index=False)
test_log.to_csv(FEATURES_PATH + 'test_log.csv', index=False)
# ----- END create features -----
# ----- First stage stacking model-----
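# Stage-1 models are trained with cross-validation; the predictions they write
# under TEMP_PATH (*_all_fold.csv for the training folds, *_test.csv for the
# test set) are reused as extra input features by the stage-2 model below.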
# FEATURE LISTS in Stage 1.
FEATURE_LIST_stage1 = {
'train':(
INPUT_PATH + 'train.csv',
FEATURES_PATH + 'train_log.csv',
),
'target':(
INPUT_PATH + 'target.csv',
),
'test':(
INPUT_PATH + 'test.csv',
FEATURES_PATH + 'test_log.csv',
),
}
# need to get input shape for NN now
X,y,test = load_data(flist=FEATURE_LIST_stage1, drop_duplicates=False)
assert (X.columns == test.columns).all()
nn_input_dim_NN = X.shape[1:]
output_dim = len(set(y))
del X, y, test
# Models in Stage 1
PARAMS_V1 = {
'colsample_bytree':0.80,
'learning_rate':0.1,
"eval_metric":"mlogloss",
'max_depth':5,
'min_child_weight':1,
'nthread':4,
'seed':407,
'silent':1,
'subsample':0.60,
'objective':'multi:softprob',
'num_class':output_dim,
}
class ModelV1(BaseModel):
def build_model(self):
return XGBClassifier(params=self.params, num_round=10)
PARAMS_V2 = {
'batch_size':32,
'nb_epoch':15,
'verbose':1,
'callbacks':[],
'validation_split':0.,
'validation_data':None,
'shuffle':True,
#'show_accuracy':True,
'class_weight':None,
'sample_weight':None,
'normalize':True,
'categorize_y':True
}
class ModelV2(BaseModel):
def build_model(self):
model = Sequential()
model.add(Dense(64, input_shape=nn_input_dim_NN, init='he_normal'))
model.add(LeakyReLU(alpha=.00001))
model.add(Dropout(0.5))
model.add(Dense(output_dim, init='he_normal'))
model.add(Activation('softmax'))
sgd = SGD(lr=0.1, decay=1e-5, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=["accuracy"])
return KerasClassifier(nn=model,**self.params)
PARAMS_V3 = {
'n_estimators':500, 'criterion':'gini', 'n_jobs':8, 'verbose':0,
'random_state':407, 'oob_score':True,
}
class ModelV3(BaseModel):
def build_model(self):
return RandomForestClassifier(**self.params)
PARAMS_V4 = {
'n_estimators':550, 'criterion':'gini', 'n_jobs':8, 'verbose':0,
'random_state':407,
}
class ModelV4(BaseModel):
def build_model(self):
return ExtraTreesClassifier(**self.params)
PARAMS_V5 = {
'n_estimators':300, 'learning_rate':0.05,'subsample':0.8,
'max_depth':5, 'verbose':1, 'max_features':0.9,
'random_state':407,
}
class ModelV5(BaseModel):
def build_model(self):
return GradientBoostingClassifier(**self.params)
PARAMS_V6 = {
'n_estimators':650, 'learning_rate':0.01,'subsample':0.8,
'max_depth':5, 'verbose':1, 'max_features':0.82,
'random_state':407,
}
class ModelV6(BaseModel):
def build_model(self):
return GradientBoostingClassifier(**self.params)
# ----- END first stage stacking model -----
# ----- Second stage stacking model -----
'''
PARAMS_V1_stage2 = {
'colsample_bytree':0.8,
'learning_rate':0.05,
"eval_metric":"mlogloss",
'max_depth':4,
'seed':1234,
'nthread':8,
'reg_lambda':0.01,
'reg_alpha':0.01,
'silent':1,
'subsample':0.80,
'objective':'multi:softprob',
'num_class':output_dim,
}
class ModelV1_stage2(BaseModel):
def build_model(self):
return XGBClassifier(params=self.params, num_round=40)
'''
PARAMS_V1_stage2 = {
'penalty':'l2',
'tol':0.0001,
'C':1.0,
'random_state':None,
'verbose':0,
'n_jobs':8
}
class ModelV1_stage2(BaseModel):
def build_model(self):
return LR(**self.params)
# ----- END second stage stacking model -----
if __name__ == "__main__":
# Create cv-fold index
target = pd.read_csv(INPUT_PATH + 'target.csv')
create_cv_id(target, n_folds_ = 5, cv_id_name='cv_id', seed=407)
######## stage1 Models #########
print 'Start stage 1 training'
m = ModelV1(name="v1_stage1",
flist=FEATURE_LIST_stage1,
params = PARAMS_V1,
kind = 'st'
)
m.run()
m = ModelV2(name="v2_stage1",
flist=FEATURE_LIST_stage1,
params = PARAMS_V2,
kind = 'st'
)
m.run()
m = ModelV3(name="v3_stage1",
flist=FEATURE_LIST_stage1,
params = PARAMS_V3,
kind = 'st'
)
m.run()
m = ModelV4(name="v4_stage1",
flist=FEATURE_LIST_stage1,
params = PARAMS_V4,
kind = 'st'
)
m.run()
m = ModelV5(name="v5_stage1",
flist=FEATURE_LIST_stage1,
params = PARAMS_V5,
kind = 'st'
)
m.run()
m = ModelV6(name="v6_stage1",
flist=FEATURE_LIST_stage1,
params = PARAMS_V6,
kind = 'st'
)
m.run()
print 'Done stage 1'
print
######## stage2 Models #########
print 'Start stage 2 training'
# FEATURE LISTS in Stage 2.
    # Defined here rather than at the top because the stage-1 output files it lists don't exist until stage 1 has run.
FEATURE_LIST_stage2 = {
'train':(INPUT_PATH + 'train.csv',
FEATURES_PATH + 'train_log.csv',
TEMP_PATH + 'v1_stage1_all_fold.csv',
TEMP_PATH + 'v2_stage1_all_fold.csv',
TEMP_PATH + 'v3_stage1_all_fold.csv',
TEMP_PATH + 'v4_stage1_all_fold.csv',
TEMP_PATH + 'v5_stage1_all_fold.csv',
TEMP_PATH + 'v6_stage1_all_fold.csv',
),
'target':(
INPUT_PATH + 'target.csv',
),
'test':(INPUT_PATH + 'test.csv',
FEATURES_PATH + 'test_log.csv',
TEMP_PATH + 'v1_stage1_test.csv',
TEMP_PATH + 'v2_stage1_test.csv',
TEMP_PATH + 'v3_stage1_test.csv',
TEMP_PATH + 'v4_stage1_test.csv',
TEMP_PATH + 'v5_stage1_test.csv',
TEMP_PATH + 'v6_stage1_test.csv',
),
}
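    # Note (an assumption based on the stacking setup above, not stated in the
    # original script): each 'vN_stage1_all_fold.csv' is expected to contain model
    # N's out-of-fold predictions on the training set and 'vN_stage1_test.csv' its
    # test-set predictions, so the stage-2 model trains on the original features
    # augmented with these stage-1 meta-features.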
X,y,test = load_data(flist=FEATURE_LIST_stage2, drop_duplicates=False)
    assert list(X.columns) == list(test.columns)  # train and test must have identical feature columns
nn_input_dim_NN2 = X.shape[1]
del X, y, test
# Models
m = ModelV1_stage2(name="v1_stage2",
flist=FEATURE_LIST_stage2,
params = PARAMS_V1_stage2,
kind = 'st',
)
m.run()
print 'Done stage 2'
print
# averaging
print 'Saving as submission format'
#sample_sub = pd.read_csv('data/input/sample_submission.csv')
label = pd.read_csv(INPUT_PATH + 'label_test.csv')
testID = range(len(label))
testID = pd.DataFrame(testID, columns=['ID'])
pred = pd.read_csv(TEMP_PATH + 'v1_stage2_TestInAllTrainingData.csv')
print 'Test evaluation'
mll = eval_pred(label.target, pred.values, eval_type=eval_type)
print 'saving final results'
pred.columns = ['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9']
pred = pd.concat([testID, pred], axis=1)
pred.to_csv(TEMP_PATH + 'final_submission.csv', index=False)
|
|
from django.shortcuts import render, redirect
from django.template import loader, RequestContext
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login as auth_login
from django.contrib.auth import logout as auth_logout
from django.http import HttpResponse, JsonResponse
from django.core.mail import send_mail
from django.utils import translation
from django.utils.translation import ugettext as _
from django.forms.forms import NON_FIELD_ERRORS
from django.core.mail import EmailMultiAlternatives
from django.core.mail import EmailMessage
from django.template.loader import render_to_string, get_template
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.message import EmailMessage
from email.headerregistry import Address
from email.utils import make_msgid
import smtplib
import datetime
import pytz
from .utils import isEmailAddressValid
from .models import Activation, Reset
from .forms import LoginForm, RegistrationForm, PasswordChangeForm, LostPasswordForm, PasswordResetForm
# Create your views here.
def activate(request, uuid):
message = _("Your account has been activated, please log in.")
try:
activation = Activation.objects.get(url=uuid)
if (datetime.datetime.utcnow().replace(tzinfo=pytz.utc) - activation.time).days > 0:
# invalid since you waited too long.
message = _("Your activation url has expired. Please reregister.")
user = User.objects.get(username=activation.username)
user.delete()
activation.delete()
else:
user = User.objects.get(username=activation.username)
user.is_active = True
user.save()
activation.delete()
except:
message = _("Invalid activation url. Please try again.")
form = LoginForm()
form.cleaned_data = {}
form.add_error(NON_FIELD_ERRORS, message)
context = {
'title': _("Log in"),
'form': form,
}
return render(request, 'registration/login.html', context)
def lost_password(request):
if request.method == "POST":
form = LostPasswordForm(request.POST)
if form.is_valid():
user = User.objects.filter(email=form.cleaned_data['email'])
reset = Reset(username=user[0].username, time=datetime.datetime.now())
reset.save()
msg = EmailMessage()
msg['From'] = '[email protected]'
msg['To'] = form.cleaned_data['email']
msg['Subject'] = _('DataCube Password Reset')
msg.set_content(_('Reset your password here: ') + settings.BASE_HOST + "/accounts/" + str(reset.url) + "/reset")
# Sending the email:
with smtplib.SMTP('localhost') as s:
s.send_message(msg)
form = LoginForm()
form.cleaned_data = {}
form.add_error(NON_FIELD_ERRORS, _("We have sent an email containing the url required to reset your password. Please use that link then log back in."))
context = {
'title': _("Log in"),
'form': form,
}
return render(request, 'registration/login.html', context)
else:
context = {'title': _("Password Reset"), 'form': form}
return render(request, 'registration/lost_password.html', context)
else:
context = {'title': _("Password Reset"), 'form': LostPasswordForm()}
return render(request, 'registration/lost_password.html', context)
def reset(request, uuid):
try:
reset = Reset.objects.get(url=uuid)
if request.method == "POST":
form = PasswordResetForm(request.POST)
if form.is_valid():
user = User.objects.get(username=reset.username)
user.set_password(form.cleaned_data['new_password'])
user.save()
reset.delete()
form = LoginForm()
form.cleaned_data = {}
form.add_error(NON_FIELD_ERRORS, _("Your password has been changed. Please log in."))
context = {
'title': _("Log in"),
'form': form,
}
return render(request, 'registration/login.html', context)
else:
context = {'title': _("Password Reset"), 'user_hash': uuid, 'form': form}
return render(request, 'registration/reset.html', context)
else:
if (datetime.datetime.utcnow().replace(tzinfo=pytz.utc) - reset.time).days > 0:
# invalid since you waited too long.
form = LostPasswordForm()
form.cleaned_data = {}
form.add_error(NON_FIELD_ERRORS, _("Your password reset url has expired. Please reset it again."))
context = {'title': _("Password Reset"), 'form': form}
return render(request, 'registration/lost_password.html', context)
else:
form = PasswordResetForm()
form.cleaned_data = {}
form.add_error(NON_FIELD_ERRORS, _("Please enter your new password."))
context = {'title': _("Password Reset"), 'user_hash': uuid, 'form': form}
return render(request, 'registration/reset.html', context)
except:
form = LoginForm()
form.cleaned_data = {}
form.add_error(NON_FIELD_ERRORS, _("Invalid password reset url."))
context = {
'title': _("Log in"),
'form': form,
}
return render(request, 'registration/login.html', context)
@login_required
def password_change(request):
"""
Navigates to the password change page. POST will validate current password and change it,
GET will display the form.
**Context**
``message``
An error message in the event that something is incorrect.
``next``
    The redirect page upon successful login.
**Template**
:template:`registration/password_change.html`
"""
if request.method == 'POST':
form = PasswordChangeForm(request.POST)
if form.is_valid():
user = authenticate(username=request.user.username, password=form.cleaned_data['password'])
if user is not None:
user.set_password(form.cleaned_data['new_password'])
user.save()
auth_logout(request)
form = LoginForm()
form.cleaned_data = {}
form.add_error(NON_FIELD_ERRORS, _("Your password has been changed. Please log in."))
context = {
'title': _("Log in"),
'form': form,
}
return render(request, 'registration/login.html', context)
else:
form.add_error('password', _("Your current password is incorrect, please try again."))
# Return an 'invalid login' error message.
context = {
'title': _("Password Change"),
'form': form
}
return render(request, 'registration/password_change.html', context)
else:
context = {'title': _("Password Change"), 'form': PasswordChangeForm()}
return render(request, 'registration/password_change.html', context)
def registration(request):
"""
Navigates to the registration page. POST will create a user and log in,
GET will display the form.
**Context**
``message``
An error message in the event that something is incorrect.
``next``
    The redirect page upon successful login.
**Template**
:template:`registration/registration.html`
"""
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
user = User.objects.create_user(form.cleaned_data['username'], form.cleaned_data['email'],
form.cleaned_data['password'])
#user.is_active = False
user.save()
user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
next = request.POST.get('next', "/")
if user is not None:
if user.is_active:
auth_login(request, user)
# Redirect to a success page.
return redirect(next)
"""activation = Activation(username=user.username, time=datetime.datetime.now())
activation.save()
if user is not None:
subject, from_email, to_email = "CEOS Datacube Account Activation", "[email protected]", [user.email]
msg = EmailMessage()
msg['From'] = from_email
msg['To'] = to_email
msg['Subject'] = subject
msg.set_content('')
# It is possible to use msg.add_alternative() to add HTML content too
html_content = ""
activation_url = settings.BASE_HOST + "/accounts/" + str(activation.url) + "/activate"
with open('/home/' + settings.LOCAL_USER +
'/Datacube/data_cube_ui/static/assets/media/email_template.html') as f:
for line in f:
if (line == "\t\t\tAVAILABLE_TOOLS\n"):
for app in Application.objects.all():
html_content += "<li>" + app.application_name + "</li>"
elif (line == "\t\t\tAVAILABLE_AREAS\n"):
for area in Area.objects.all():
html_content += "<li>" + area.area_name + "</li>"
elif ("HOME_URL" in line):
html_content += line.replace("HOME_URL", settings.BASE_HOST)
else:
html_content += line
if 'str' in line:
break
html_content = html_content.replace("ACTIVATION_URL", activation_url)
msg.add_alternative(html_content, subtype='html')
# Attaching content:
fp = open('/home/' + settings.LOCAL_USER + '/Datacube/data_cube_ui/static/assets/media/banner.png',
'rb')
att = MIMEImage(fp.read()) # Or use MIMEImage, etc
fp.close()
# The following line is to control the filename of the attached file
att.add_header('Content-Disposition', 'attachment', filename='banner.png')
msg.make_mixed() # This converts the message to multipart/mixed
msg.attach(att) # Don't forget to convert the message to multipart first!
# Sending the email:
with smtplib.SMTP('localhost') as s:
s.send_message(msg)
form = LoginForm()
form.cleaned_data = {}
form.add_error(NON_FIELD_ERRORS, _("Activate your account using the url that has been emailed to you and log in."))
context = {
'title': _("Log in"),
'form': form,
}
return render(request, 'registration/login.html', context)"""
context = {
'title': _("Registration"),
'form': form,
}
return render(request, 'registration/registration.html', context)
else:
context = {'title': _("Registration"), 'form': RegistrationForm(),}
if request.GET:
            next = request.GET.get('next', "/")
if request.user.is_authenticated():
return redirect(next)
context['next'] = next
return render(request, 'registration/registration.html', context)
def login(request):
"""
    Navigates to the login page of the application. Note this view handles both the POST that
    submits a login request and the initial GET for the landing page.
**Context**
``message``
An error message in the event username and/or password is incorrect.
``next``
    The redirect page upon successful login.
**Template**
:template:`registration/login.html`
"""
if request.method == 'POST':
form = LoginForm(request.POST)
#the form will never be invalid in this case.
if form.is_valid():
user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
next = request.POST.get('next', "/")
if user is not None:
if user.is_active:
auth_login(request, user)
# Redirect to a success page.
return redirect(next)
else:
form.add_error(NON_FIELD_ERRORS, _("Please activate your account using the link found in the registration email."))
# Return a 'disabled account' error message
context = {
'title': _("Log in"),
'form': form,
'message': _("Please activate your account using the link found in the registration email.")
}
return render(request, 'registration/login.html', context)
form.add_error(NON_FIELD_ERRORS, _("Please enter a correct username and password combination."))
form.add_error('username', _(""))
form.add_error('password', _(""))
# Return an 'invalid login' error message.
context = {
'title': _("Log in"),
'form': form,
'message': _("Please enter a correct username and password combination.")
}
return render(request, 'registration/login.html', context)
else:
context = {'title': _("Log in"), 'form': LoginForm() }
if request.GET:
next = request.GET.get('next', "/")
if request.user.is_authenticated():
return redirect(next)
context['next'] = next
return render(request, 'registration/login.html', context)
def logout(request):
"""
Logout view that redirects the user to the home page.
**Context**
**Template**
"""
auth_logout(request)
return redirect('home')
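# Illustrative only (an assumption, not taken from this project): views like the
# ones above are typically wired into an accounts-level urls.py along the lines
# of the sketch below, matching the "/accounts/<uuid>/activate" and
# "/accounts/<uuid>/reset" URLs built in lost_password() and registration().
# Pattern names and paths are placeholders.
#
#   from django.conf.urls import url
#   from . import views
#
#   urlpatterns = [
#       url(r'^login/$', views.login, name='login'),
#       url(r'^logout/$', views.logout, name='logout'),
#       url(r'^registration/$', views.registration, name='registration'),
#       url(r'^password_change/$', views.password_change, name='password_change'),
#       url(r'^lost_password/$', views.lost_password, name='lost_password'),
#       url(r'^(?P<uuid>[0-9a-f-]+)/activate$', views.activate, name='activate'),
#       url(r'^(?P<uuid>[0-9a-f-]+)/reset$', views.reset, name='reset'),
#   ]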
|
|
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
from ctypes import WinDLL, get_last_error, FormatError, WinError, addressof
from ctypes import c_buffer, c_ubyte, Structure, Union, byref
from ctypes.wintypes import BOOL, BOOLEAN, LPCWSTR, DWORD, HANDLE
from ctypes.wintypes import WCHAR, USHORT, LPVOID, ULONG, LPDWORD
kernel32 = WinDLL('kernel32', use_last_error=True)
UCHAR = c_ubyte
# Win32 error codes
ERROR_SUCCESS = 0
ERROR_NOT_SUPPORTED = 50
ERROR_PRIVILEGE_NOT_HELD = 1314
# Win32 API entry points
CreateSymbolicLinkW = kernel32.CreateSymbolicLinkW
CreateSymbolicLinkW.restype = BOOLEAN
CreateSymbolicLinkW.argtypes = (LPCWSTR, # lpSymlinkFileName In
LPCWSTR, # lpTargetFileName In
DWORD) # dwFlags In
# Symbolic link creation flags
SYMBOLIC_LINK_FLAG_FILE = 0x00
SYMBOLIC_LINK_FLAG_DIRECTORY = 0x01
# Unprivileged symlink creation support in CreateSymbolicLink(), available starting with Windows 10 (1703, v10.0.14972)
SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE = 0x02
GetFileAttributesW = kernel32.GetFileAttributesW
GetFileAttributesW.restype = DWORD
GetFileAttributesW.argtypes = (LPCWSTR,) # lpFileName In
INVALID_FILE_ATTRIBUTES = 0xFFFFFFFF
FILE_ATTRIBUTE_REPARSE_POINT = 0x00400
CreateFileW = kernel32.CreateFileW
CreateFileW.restype = HANDLE
CreateFileW.argtypes = (LPCWSTR, # lpFileName In
DWORD, # dwDesiredAccess In
DWORD, # dwShareMode In
LPVOID, # lpSecurityAttributes In_opt
DWORD, # dwCreationDisposition In
DWORD, # dwFlagsAndAttributes In
HANDLE) # hTemplateFile In_opt
CloseHandle = kernel32.CloseHandle
CloseHandle.restype = BOOL
CloseHandle.argtypes = (HANDLE,) # hObject In
INVALID_HANDLE_VALUE = HANDLE(-1).value
OPEN_EXISTING = 3
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000
DeviceIoControl = kernel32.DeviceIoControl
DeviceIoControl.restype = BOOL
DeviceIoControl.argtypes = (HANDLE, # hDevice In
DWORD, # dwIoControlCode In
LPVOID, # lpInBuffer In_opt
DWORD, # nInBufferSize In
LPVOID, # lpOutBuffer Out_opt
DWORD, # nOutBufferSize In
LPDWORD, # lpBytesReturned Out_opt
LPVOID) # lpOverlapped Inout_opt
# Device I/O control flags and options
FSCTL_GET_REPARSE_POINT = 0x000900A8
IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003
IO_REPARSE_TAG_SYMLINK = 0xA000000C
MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 0x4000
class GENERIC_REPARSE_BUFFER(Structure):
_fields_ = (('DataBuffer', UCHAR * 1),)
class SYMBOLIC_LINK_REPARSE_BUFFER(Structure):
_fields_ = (('SubstituteNameOffset', USHORT),
('SubstituteNameLength', USHORT),
('PrintNameOffset', USHORT),
('PrintNameLength', USHORT),
('Flags', ULONG),
('PathBuffer', WCHAR * 1))
@property
def PrintName(self):
arrayt = WCHAR * (self.PrintNameLength // 2)
offset = type(self).PathBuffer.offset + self.PrintNameOffset
return arrayt.from_address(addressof(self) + offset).value
class MOUNT_POINT_REPARSE_BUFFER(Structure):
_fields_ = (('SubstituteNameOffset', USHORT),
('SubstituteNameLength', USHORT),
('PrintNameOffset', USHORT),
('PrintNameLength', USHORT),
('PathBuffer', WCHAR * 1))
@property
def PrintName(self):
arrayt = WCHAR * (self.PrintNameLength // 2)
offset = type(self).PathBuffer.offset + self.PrintNameOffset
return arrayt.from_address(addressof(self) + offset).value
class REPARSE_DATA_BUFFER(Structure):
class REPARSE_BUFFER(Union):
_fields_ = (('SymbolicLinkReparseBuffer', SYMBOLIC_LINK_REPARSE_BUFFER),
('MountPointReparseBuffer', MOUNT_POINT_REPARSE_BUFFER),
('GenericReparseBuffer', GENERIC_REPARSE_BUFFER))
_fields_ = (('ReparseTag', ULONG),
('ReparseDataLength', USHORT),
('Reserved', USHORT),
('ReparseBuffer', REPARSE_BUFFER))
_anonymous_ = ('ReparseBuffer',)
def create_filesymlink(source, link_name):
"""Creates a Windows file symbolic link source pointing to link_name."""
_create_symlink(source, link_name, SYMBOLIC_LINK_FLAG_FILE)
def create_dirsymlink(source, link_name):
"""Creates a Windows directory symbolic link source pointing to link_name.
"""
_create_symlink(source, link_name, SYMBOLIC_LINK_FLAG_DIRECTORY)
def _create_symlink(source, link_name, dwFlags):
if not CreateSymbolicLinkW(link_name, source,
dwFlags | SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE):
# See https://github.com/golang/go/pull/24307/files#diff-b87bc12e4da2497308f9ef746086e4f0
# "the unprivileged create flag is unsupported below Windows 10 (1703, v10.0.14972).
# retry without it."
if not CreateSymbolicLinkW(link_name, source, dwFlags):
code = get_last_error()
error_desc = FormatError(code).strip()
if code == ERROR_PRIVILEGE_NOT_HELD:
raise OSError(errno.EPERM, error_desc, link_name)
_raise_winerror(
code,
                'Error creating symbolic link "{}"'.format(link_name))
def islink(path):
result = GetFileAttributesW(path)
if result == INVALID_FILE_ATTRIBUTES:
return False
return bool(result & FILE_ATTRIBUTE_REPARSE_POINT)
def readlink(path):
reparse_point_handle = CreateFileW(path,
0,
0,
None,
OPEN_EXISTING,
FILE_FLAG_OPEN_REPARSE_POINT |
FILE_FLAG_BACKUP_SEMANTICS,
None)
if reparse_point_handle == INVALID_HANDLE_VALUE:
_raise_winerror(
get_last_error(),
            'Error opening symbolic link "{}"'.format(path))
target_buffer = c_buffer(MAXIMUM_REPARSE_DATA_BUFFER_SIZE)
n_bytes_returned = DWORD()
io_result = DeviceIoControl(reparse_point_handle,
FSCTL_GET_REPARSE_POINT,
None,
0,
target_buffer,
len(target_buffer),
byref(n_bytes_returned),
None)
CloseHandle(reparse_point_handle)
if not io_result:
_raise_winerror(
get_last_error(),
            'Error reading symbolic link "{}"'.format(path))
rdb = REPARSE_DATA_BUFFER.from_buffer(target_buffer)
if rdb.ReparseTag == IO_REPARSE_TAG_SYMLINK:
return rdb.SymbolicLinkReparseBuffer.PrintName
elif rdb.ReparseTag == IO_REPARSE_TAG_MOUNT_POINT:
return rdb.MountPointReparseBuffer.PrintName
# Unsupported reparse point type
_raise_winerror(
ERROR_NOT_SUPPORTED,
        'Error reading symbolic link "{}"'.format(path))
def _raise_winerror(code, error_desc):
win_error_desc = FormatError(code).strip()
error_desc = "%s: %s".format(error_desc, win_error_desc)
raise WinError(code, error_desc)
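# A minimal usage sketch (illustrative, not part of the original module). The
# paths are hypothetical; creating symbolic links on Windows requires either an
# elevated process or Developer Mode for the unprivileged-create flag to work.
if __name__ == '__main__':
    demo_target = r'C:\Temp\demo_target_dir'   # assumed to already exist
    demo_link = r'C:\Temp\demo_link_dir'       # created by the call below
    create_dirsymlink(demo_target, demo_link)
    if islink(demo_link):
        # readlink() returns the PrintName recorded in the reparse point
        print(readlink(demo_link))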
|
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: ec2_vpc
short_description: configure AWS virtual private clouds
description:
    - Creates or terminates AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "1.4"
deprecated: >-
Deprecated in 2.3. Use M(ec2_vpc_net) along with supporting modules including
M(ec2_vpc_igw), M(ec2_vpc_route_table), M(ec2_vpc_subnet), M(ec2_vpc_dhcp_options),
M(ec2_vpc_nat_gateway), M(ec2_vpc_nacl).
options:
cidr_block:
description:
- "The cidr block representing the VPC, e.g. C(10.0.0.0/16), required when I(state=present)."
required: false
instance_tenancy:
description:
- "The supported tenancy options for instances launched into the VPC."
required: false
default: "default"
choices: [ "default", "dedicated" ]
dns_support:
description:
- Toggles the "Enable DNS resolution" flag.
required: false
default: "yes"
choices: [ "yes", "no" ]
dns_hostnames:
description:
- Toggles the "Enable DNS hostname support for instances" flag.
required: false
default: "yes"
choices: [ "yes", "no" ]
subnets:
description:
- 'A dictionary array of subnets to add of the form C({ cidr: ..., az: ... , resource_tags: ... }).'
- Where C(az) is the desired availability zone of the subnet, optional.
- 'Tags C(resource_tags) use dictionary form C({ "Environment":"Dev", "Tier":"Web", ...}), optional.'
      - For C(resource_tags), see the resource_tags option for the VPC below. The main difference is that subnet tags not specified here will be deleted.
- All VPC subnets not in this list will be removed as well.
      - As of 1.8, if the subnets parameter is not specified, no existing subnets will be modified.
required: false
default: null
vpc_id:
description:
- A VPC id to terminate when I(state=absent).
required: false
default: null
resource_tags:
description:
- 'A dictionary array of resource tags of the form C({ tag1: value1, tag2: value2 }).
- Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore,
if CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7,
specifying a resource tag was optional.'
required: true
version_added: "1.6"
internet_gateway:
description:
- Toggle whether there should be an Internet gateway attached to the VPC.
required: false
default: "no"
choices: [ "yes", "no" ]
route_tables:
description:
- >
A dictionary array of route tables to add of the form:
C({ subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }). Where the subnets list is
        those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword
        igw for gw specifies that the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids,
        interface-ids, and vpc-peering-connection-ids in addition to igw. resource_tags is optional and uses dictionary form: C({ "Name": "public", ... }).
This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated
subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes
will be modified.
required: false
default: null
wait:
description:
- Wait for the VPC to be in state 'available' before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
state:
description:
- Create or terminate the VPC.
required: true
choices: [ "present", "absent" ]
author: "Carson Gee (@carsongee)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic creation example:
- ec2_vpc:
state: present
cidr_block: 172.23.0.0/16
resource_tags: { "Environment":"Development" }
region: us-west-2
# Full creation example with subnets and optional availability zones.
# The absence or presence of subnets deletes or creates them respectively.
- ec2_vpc:
state: present
cidr_block: 172.22.0.0/16
resource_tags: { "Environment":"Development" }
subnets:
- cidr: 172.22.1.0/24
az: us-west-2c
resource_tags: { "Environment":"Dev", "Tier" : "Web" }
- cidr: 172.22.2.0/24
az: us-west-2b
resource_tags: { "Environment":"Dev", "Tier" : "App" }
- cidr: 172.22.3.0/24
az: us-west-2a
resource_tags: { "Environment":"Dev", "Tier" : "DB" }
internet_gateway: True
route_tables:
- subnets:
- 172.22.2.0/24
- 172.22.3.0/24
routes:
- dest: 0.0.0.0/0
gw: igw
- subnets:
- 172.22.1.0/24
routes:
- dest: 0.0.0.0/0
gw: igw
region: us-west-2
register: vpc
# Removal of a VPC by id
- ec2_vpc:
state: absent
vpc_id: vpc-aaaaaaa
region: us-west-2
# If you have added elements not managed by this module, e.g. instances, NATs, etc then
# the delete will fail until those dependencies are removed.
'''
import time
try:
import boto
import boto.ec2
import boto.vpc
from boto.exception import EC2ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
def get_vpc_info(vpc):
"""
    Retrieves information from a VPC object
    and returns it as a dictionary
"""
return({
'id': vpc.id,
'cidr_block': vpc.cidr_block,
'dhcp_options_id': vpc.dhcp_options_id,
'region': vpc.region.name,
'state': vpc.state,
})
def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
"""
Finds a VPC that matches a specific id or cidr + tags
module : AnsibleModule object
vpc_conn: authenticated VPCConnection connection object
Returns:
A VPC object that matches either an ID or CIDR and one or more tag values
"""
if vpc_id is None and cidr is None:
module.fail_json(
msg='You must specify either a vpc_id or a cidr block + list of unique tags, aborting'
)
found_vpcs = []
resource_tags = module.params.get('resource_tags')
# Check for existing VPC by cidr_block or id
if vpc_id is not None:
found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available',})
else:
previous_vpcs = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'})
for vpc in previous_vpcs:
# Get all tags for each of the found VPCs
vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
            # If the supplied resource tags are a subset of this VPC's tags, we found our VPC
if resource_tags and set(resource_tags.items()).issubset(set(vpc_tags.items())):
found_vpcs.append(vpc)
found_vpc = None
if len(found_vpcs) == 1:
found_vpc = found_vpcs[0]
if len(found_vpcs) > 1:
module.fail_json(msg='Found more than one vpc based on the supplied criteria, aborting')
return (found_vpc)
def routes_match(rt_list=None, rt=None, igw=None):
"""
Check if the route table has all routes as in given list
    rt_list : A list of routes provided to the module
rt : The Remote route table object
igw : The internet gateway object for this vpc
Returns:
        True when the provided routes and remote routes are the same.
False when provided routes and remote routes are different.
"""
local_routes = []
remote_routes = []
for route in rt_list:
route_kwargs = {
'gateway_id': None,
'instance_id': None,
'interface_id': None,
'vpc_peering_connection_id': None,
'state': 'active'
}
if route['gw'] == 'igw':
route_kwargs['gateway_id'] = igw.id
elif route['gw'].startswith('i-'):
route_kwargs['instance_id'] = route['gw']
elif route['gw'].startswith('eni-'):
route_kwargs['interface_id'] = route['gw']
elif route['gw'].startswith('pcx-'):
route_kwargs['vpc_peering_connection_id'] = route['gw']
else:
route_kwargs['gateway_id'] = route['gw']
route_kwargs['destination_cidr_block'] = route['dest']
local_routes.append(route_kwargs)
for j in rt.routes:
remote_routes.append(j.__dict__)
match = []
for i in local_routes:
change = "false"
for j in remote_routes:
if set(i.items()).issubset(set(j.items())):
change = "true"
match.append(change)
if 'false' in match:
return False
else:
return True
def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
"""
Checks if the remote routes match the local routes.
route_tables : Route_tables parameter in the module
vpc_conn : The VPC connection object
module : The module object
vpc : The vpc object for this route table
igw : The internet gateway object for this vpc
Returns:
        True when there is a difference between the provided routes and remote routes, or when the subnet associations differ.
False when both routes and subnet associations matched.
"""
    # We add one for the main table
rtb_len = len(route_tables) + 1
remote_rtb_len = len(vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id}))
if remote_rtb_len != rtb_len:
return True
for rt in route_tables:
rt_id = None
for sn in rt['subnets']:
rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
if len(rsn) != 1:
module.fail_json(
msg='The subnet {0} to associate with route_table {1} ' \
'does not exist, aborting'.format(sn, rt)
)
nrt = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id, 'association.subnet-id': rsn[0].id})
if not nrt:
return True
            else:
                nrt = nrt[0]
                if not rt_id:
                    rt_id = nrt.id
                    if not routes_match(rt['routes'], nrt, igw):
                        return True
                    continue
                else:
                    if rt_id == nrt.id:
                        continue
                    else:
                        return True
    return False
def create_vpc(module, vpc_conn):
"""
Creates a new or modifies an existing VPC.
module : AnsibleModule object
vpc_conn: authenticated VPCConnection connection object
Returns:
A dictionary with information
about the VPC and subnets that were launched
"""
id = module.params.get('vpc_id')
cidr_block = module.params.get('cidr_block')
instance_tenancy = module.params.get('instance_tenancy')
dns_support = module.params.get('dns_support')
dns_hostnames = module.params.get('dns_hostnames')
subnets = module.params.get('subnets')
internet_gateway = module.params.get('internet_gateway')
route_tables = module.params.get('route_tables')
vpc_spec_tags = module.params.get('resource_tags')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
# Check for existing VPC by cidr_block + tags or id
previous_vpc = find_vpc(module, vpc_conn, id, cidr_block)
if previous_vpc is not None:
changed = False
vpc = previous_vpc
else:
changed = True
try:
vpc = vpc_conn.create_vpc(cidr_block, instance_tenancy)
# wait here until the vpc is available
pending = True
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time() and pending:
try:
pvpc = vpc_conn.get_all_vpcs(vpc.id)
if hasattr(pvpc, 'state'):
if pvpc.state == "available":
pending = False
elif hasattr(pvpc[0], 'state'):
if pvpc[0].state == "available":
pending = False
# sometimes vpc_conn.create_vpc() will return a vpc that can't be found yet by vpc_conn.get_all_vpcs()
# when that happens, just wait a bit longer and try again
except boto.exception.BotoServerError as e:
if e.error_code != 'InvalidVpcID.NotFound':
raise
if pending:
time.sleep(5)
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "wait for vpc availability timeout on %s" % time.asctime())
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
# Done with base VPC, now change to attributes and features.
# Add resource tags
vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
if not set(vpc_spec_tags.items()).issubset(set(vpc_tags.items())):
new_tags = {}
for (key, value) in set(vpc_spec_tags.items()):
if (key, value) not in set(vpc_tags.items()):
new_tags[key] = value
if new_tags:
vpc_conn.create_tags(vpc.id, new_tags)
# boto doesn't appear to have a way to determine the existing
# value of the dns attributes, so we just set them.
# It also must be done one at a time.
vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_support=dns_support)
vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_hostnames=dns_hostnames)
# Process all subnet properties
if subnets is not None:
if not isinstance(subnets, list):
module.fail_json(msg='subnets needs to be a list of cidr blocks')
current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
# First add all new subnets
for subnet in subnets:
add_subnet = True
subnet_tags_current = True
new_subnet_tags = subnet.get('resource_tags', {})
subnet_tags_delete = []
for csn in current_subnets:
if subnet['cidr'] == csn.cidr_block:
add_subnet = False
# Check if AWS subnet tags are in playbook subnet tags
existing_tags_subset_of_new_tags = (set(csn.tags.items()).issubset(set(new_subnet_tags.items())))
# Check if subnet tags in playbook are in AWS subnet tags
new_tags_subset_of_existing_tags = (set(new_subnet_tags.items()).issubset(set(csn.tags.items())))
if existing_tags_subset_of_new_tags is False:
try:
for item in csn.tags.items():
if item not in new_subnet_tags.items():
subnet_tags_delete.append(item)
subnet_tags_delete = [key[0] for key in subnet_tags_delete]
delete_subnet_tag = vpc_conn.delete_tags(csn.id, subnet_tags_delete)
changed = True
except EC2ResponseError as e:
module.fail_json(msg='Unable to delete resource tag, error {0}'.format(e))
# Add new subnet tags if not current
if new_tags_subset_of_existing_tags is False:
try:
changed = True
create_subnet_tag = vpc_conn.create_tags(csn.id, new_subnet_tags)
except EC2ResponseError as e:
module.fail_json(msg='Unable to create resource tag, error: {0}'.format(e))
if add_subnet:
try:
new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None))
new_subnet_tags = subnet.get('resource_tags', {})
if new_subnet_tags:
                        # Sometimes AWS takes its time to create a subnet, so using the new subnet's id
                        # to create tags straight away can raise an exception.
                        # boto doesn't seem to refresh the 'state' of the newly created subnet (it stays 'pending'),
                        # so we resort to polling vpc_conn.get_all_subnets with the id of the newly added subnet
while len(vpc_conn.get_all_subnets(filters={ 'subnet-id': new_subnet.id })) == 0:
time.sleep(0.1)
vpc_conn.create_tags(new_subnet.id, new_subnet_tags)
changed = True
except EC2ResponseError as e:
module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e))
# Now delete all absent subnets
for csubnet in current_subnets:
delete_subnet = True
for subnet in subnets:
if csubnet.cidr_block == subnet['cidr']:
delete_subnet = False
if delete_subnet:
try:
vpc_conn.delete_subnet(csubnet.id)
changed = True
except EC2ResponseError as e:
module.fail_json(msg='Unable to delete subnet {0}, error: {1}'.format(csubnet.cidr_block, e))
# Handle Internet gateway (create/delete igw)
igw = None
igw_id = None
igws = vpc_conn.get_all_internet_gateways(filters={'attachment.vpc-id': vpc.id})
if len(igws) > 1:
module.fail_json(msg='EC2 returned more than one Internet Gateway for id %s, aborting' % vpc.id)
if internet_gateway:
if len(igws) != 1:
try:
igw = vpc_conn.create_internet_gateway()
vpc_conn.attach_internet_gateway(igw.id, vpc.id)
changed = True
except EC2ResponseError as e:
module.fail_json(msg='Unable to create Internet Gateway, error: {0}'.format(e))
else:
# Set igw variable to the current igw instance for use in route tables.
igw = igws[0]
else:
if len(igws) > 0:
try:
vpc_conn.detach_internet_gateway(igws[0].id, vpc.id)
vpc_conn.delete_internet_gateway(igws[0].id)
changed = True
except EC2ResponseError as e:
module.fail_json(msg='Unable to delete Internet Gateway, error: {0}'.format(e))
if igw is not None:
igw_id = igw.id
# Handle route tables - this may be worth splitting into a
# different module but should work fine here. The strategy to stay
# idempotent is to basically build all the route tables as
# defined, track the route table ids, and then run through the
# remote list of route tables and delete any that we didn't
# create. This shouldn't interrupt traffic in theory, but is the
# only way to really work with route tables over time that I can
# think of without using painful aws ids. Hopefully boto will add
# the replace-route-table API to make this smoother and
# allow control of the 'main' routing table.
if route_tables is not None:
rtb_needs_change = rtb_changed(route_tables, vpc_conn, module, vpc, igw)
if route_tables is not None and rtb_needs_change:
if not isinstance(route_tables, list):
module.fail_json(msg='route tables need to be a list of dictionaries')
# Work through each route table and update/create to match dictionary array
all_route_tables = []
for rt in route_tables:
try:
new_rt = vpc_conn.create_route_table(vpc.id)
new_rt_tags = rt.get('resource_tags', None)
if new_rt_tags:
vpc_conn.create_tags(new_rt.id, new_rt_tags)
for route in rt['routes']:
route_kwargs = {}
if route['gw'] == 'igw':
if not internet_gateway:
module.fail_json(
msg='You asked for an Internet Gateway ' \
'(igw) route, but you have no Internet Gateway'
)
route_kwargs['gateway_id'] = igw.id
elif route['gw'].startswith('i-'):
route_kwargs['instance_id'] = route['gw']
elif route['gw'].startswith('eni-'):
route_kwargs['interface_id'] = route['gw']
elif route['gw'].startswith('pcx-'):
route_kwargs['vpc_peering_connection_id'] = route['gw']
else:
route_kwargs['gateway_id'] = route['gw']
vpc_conn.create_route(new_rt.id, route['dest'], **route_kwargs)
# Associate with subnets
for sn in rt['subnets']:
rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
if len(rsn) != 1:
module.fail_json(
msg='The subnet {0} to associate with route_table {1} ' \
'does not exist, aborting'.format(sn, rt)
)
rsn = rsn[0]
# Disassociate then associate since we don't have replace
old_rt = vpc_conn.get_all_route_tables(
filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id}
)
old_rt = [ x for x in old_rt if x.id is not None ]
if len(old_rt) == 1:
old_rt = old_rt[0]
association_id = None
for a in old_rt.associations:
if a.subnet_id == rsn.id:
association_id = a.id
vpc_conn.disassociate_route_table(association_id)
vpc_conn.associate_route_table(new_rt.id, rsn.id)
all_route_tables.append(new_rt)
changed = True
except EC2ResponseError as e:
module.fail_json(
msg='Unable to create and associate route table {0}, error: ' \
'{1}'.format(rt, e)
)
# Now that we are good to go on our new route tables, delete the
# old ones except the 'main' route table as boto can't set the main
# table yet.
all_rts = vpc_conn.get_all_route_tables(filters={'vpc-id': vpc.id})
for rt in all_rts:
if rt.id is None:
continue
delete_rt = True
for newrt in all_route_tables:
if newrt.id == rt.id:
delete_rt = False
break
if delete_rt:
rta = rt.associations
is_main = False
for a in rta:
if a.main:
is_main = True
break
try:
if not is_main:
vpc_conn.delete_route_table(rt.id)
changed = True
except EC2ResponseError as e:
module.fail_json(msg='Unable to delete old route table {0}, error: {1}'.format(rt.id, e))
vpc_dict = get_vpc_info(vpc)
created_vpc_id = vpc.id
returned_subnets = []
current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
for sn in current_subnets:
returned_subnets.append({
'resource_tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})),
'cidr': sn.cidr_block,
'az': sn.availability_zone,
'id': sn.id,
})
if subnets is not None:
# Sort subnets by the order they were listed in the play
order = {}
for idx, val in enumerate(subnets):
order[val['cidr']] = idx
# Number of subnets in the play
subnets_in_play = len(subnets)
returned_subnets.sort(key=lambda x: order.get(x['cidr'], subnets_in_play))
return (vpc_dict, created_vpc_id, returned_subnets, igw_id, changed)
def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
"""
Terminates a VPC
module: Ansible module object
vpc_conn: authenticated VPCConnection connection object
vpc_id: a vpc id to terminate
cidr: The cidr block of the VPC - can be used in lieu of an ID
Returns a dictionary of VPC information
about the VPC terminated.
If the VPC to be terminated is available
"changed" will be set to True.
"""
vpc_dict = {}
terminated_vpc_id = ''
changed = False
vpc = find_vpc(module, vpc_conn, vpc_id, cidr)
if vpc is not None:
if vpc.state == 'available':
terminated_vpc_id=vpc.id
vpc_dict=get_vpc_info(vpc)
try:
subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})
for sn in subnets:
vpc_conn.delete_subnet(sn.id)
igws = vpc_conn.get_all_internet_gateways(
filters={'attachment.vpc-id': vpc.id}
)
for igw in igws:
vpc_conn.detach_internet_gateway(igw.id, vpc.id)
vpc_conn.delete_internet_gateway(igw.id)
rts = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id})
for rt in rts:
rta = rt.associations
is_main = False
for a in rta:
if a.main:
is_main = True
if not is_main:
vpc_conn.delete_route_table(rt.id)
vpc_conn.delete_vpc(vpc.id)
except EC2ResponseError as e:
module.fail_json(
msg='Unable to delete VPC {0}, error: {1}'.format(vpc.id, e)
)
changed = True
vpc_dict['state'] = "terminated"
return (changed, vpc_dict, terminated_vpc_id)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
cidr_block = dict(),
instance_tenancy = dict(choices=['default', 'dedicated'], default='default'),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
dns_support = dict(type='bool', default=True),
dns_hostnames = dict(type='bool', default=True),
subnets = dict(type='list'),
vpc_id = dict(),
internet_gateway = dict(type='bool', default=False),
resource_tags = dict(type='dict', required=True),
route_tables = dict(type='list'),
state = dict(choices=['present', 'absent'], default='present'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
vpc_conn = connect_to_aws(boto.vpc, region, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
else:
module.fail_json(msg="region must be specified")
igw_id = None
if module.params.get('state') == 'absent':
vpc_id = module.params.get('vpc_id')
cidr = module.params.get('cidr_block')
(changed, vpc_dict, new_vpc_id) = terminate_vpc(module, vpc_conn, vpc_id, cidr)
subnets_changed = None
elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning a new VPC
(vpc_dict, new_vpc_id, subnets_changed, igw_id, changed) = create_vpc(module, vpc_conn)
module.exit_json(changed=changed, vpc_id=new_vpc_id, vpc=vpc_dict, igw_id=igw_id, subnets=subnets_changed)
if __name__ == '__main__':
main()
|
|
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
from collections import OrderedDict
from copy import copy
from devlib import (LinuxTarget, AndroidTarget, LocalLinuxTarget,
ChromeOsTarget, Platform, Juno, TC2, Gem5SimulationPlatform,
AdbConnection, SshConnection, LocalConnection,
Gem5Connection)
from devlib.target import DEFAULT_SHELL_PROMPT
from wa.framework import pluginloader
from wa.framework.configuration.core import get_config_point_map
from wa.framework.exception import PluginLoaderError
from wa.framework.plugin import Plugin, Parameter
from wa.framework.target.assistant import LinuxAssistant, AndroidAssistant, ChromeOsAssistant
from wa.utils.types import list_of_strings, list_of_ints, regex, identifier, caseless_string
from wa.utils.misc import isiterable
def list_target_descriptions(loader=pluginloader):
targets = {}
for cls in loader.list_target_descriptors():
descriptor = cls()
for desc in descriptor.get_descriptions():
if desc.name in targets:
msg = 'Duplicate target "{}" returned by {} and {}'
prev_dtor = targets[desc.name].source
raise PluginLoaderError(msg.format(desc.name, prev_dtor.name,
descriptor.name))
targets[desc.name] = desc
return list(targets.values())
def get_target_description(name, loader=pluginloader):
for tdesc in list_target_descriptions(loader):
if tdesc.name == name:
return tdesc
raise ValueError('Could not find target descriptor "{}"'.format(name))
def instantiate_target(tdesc, params, connect=None, extra_platform_params=None):
# pylint: disable=too-many-locals,too-many-branches
target_params = get_config_point_map(tdesc.target_params)
platform_params = get_config_point_map(tdesc.platform_params)
conn_params = get_config_point_map(tdesc.conn_params)
assistant_params = get_config_point_map(tdesc.assistant_params)
tp, pp, cp = {}, {}, {}
for supported_params, new_params in (target_params, tp), (platform_params, pp), (conn_params, cp):
for name, value in supported_params.items():
if value.default and name == value.name:
new_params[name] = value.default
for name, value in params.items():
if name in target_params:
tp[name] = value
elif name in platform_params:
pp[name] = value
elif name in conn_params:
cp[name] = value
elif name in assistant_params:
pass
else:
msg = 'Unexpected parameter for {}: {}'
raise ValueError(msg.format(tdesc.name, name))
for pname, pval in (extra_platform_params or {}).items():
if pname in pp:
raise RuntimeError('Platform parameter clash: {}'.format(pname))
pp[pname] = pval
tp['platform'] = (tdesc.platform or Platform)(**pp)
if cp:
tp['connection_settings'] = cp
if tdesc.connection:
tp['conn_cls'] = tdesc.connection
if connect is not None:
tp['connect'] = connect
return tdesc.target(**tp)
def instantiate_assistant(tdesc, params, target):
assistant_params = {}
for param in tdesc.assistant_params:
if param.name in params:
assistant_params[param.name] = params[param.name]
elif param.default:
assistant_params[param.name] = param.default
return tdesc.assistant(target, **assistant_params)
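# Illustrative sketch (an assumption, not part of the original module): a target
# description is looked up by its "<platform>_<target>" name and then used to
# instantiate the devlib Target and its WA assistant. 'emulator-5554' is a
# placeholder ADB device name.
#
#   tdesc = get_target_description('generic_android')
#   target = instantiate_target(tdesc, {'device': 'emulator-5554'}, connect=True)
#   assistant = instantiate_assistant(tdesc, {'device': 'emulator-5554'}, target)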
class TargetDescription(object):
def __init__(self, name, source, description=None, target=None, platform=None,
conn=None, assistant=None, target_params=None, platform_params=None,
conn_params=None, assistant_params=None):
self.name = name
self.source = source
self.description = description
self.target = target
self.platform = platform
self.connection = conn
self.assistant = assistant
self._set('target_params', target_params)
self._set('platform_params', platform_params)
self._set('conn_params', conn_params)
self._set('assistant_params', assistant_params)
def get_default_config(self):
param_attrs = ['target_params', 'platform_params',
'conn_params', 'assistant_params']
config = {}
for pattr in param_attrs:
for p in getattr(self, pattr):
config[p.name] = p.default
return config
def _set(self, attr, vals):
if vals is None:
vals = []
elif isiterable(vals):
if hasattr(vals, 'values'):
vals = list(vals.values())
else:
msg = '{} must be iterable; got "{}"'
raise ValueError(msg.format(attr, vals))
setattr(self, attr, vals)
class TargetDescriptor(Plugin):
kind = 'target_descriptor'
def get_descriptions(self): # pylint: disable=no-self-use
return []
COMMON_TARGET_PARAMS = [
Parameter('working_directory', kind=str,
description='''
On-target working directory that will be used by WA. This
directory must be writable by the user WA logs in as without
the need for privilege elevation.
'''),
Parameter('executables_directory', kind=str,
description='''
On-target directory where WA will install its executable
binaries. This location must allow execution. This location does
*not* need to be writable by unprivileged users or rooted devices
(WA will install with elevated privileges as necessary).
'''),
Parameter('modules', kind=list,
description='''
A list of additional modules to be installed for the target.
``devlib`` implements functionality for particular subsystems as
modules. A number of "default" modules (e.g. for cpufreq
subsystem) are loaded automatically, unless explicitly disabled.
If additional modules need to be loaded, they may be specified
using this parameter.
Please see ``devlib`` documentation for information on the available
modules.
'''),
Parameter('load_default_modules', kind=bool, default=True,
description='''
A number of modules (e.g. for working with the cpufreq subsystem) are
loaded by default when a Target is instantiated. Setting this to
              ``False`` would suppress that, ensuring that only the base Target
interface is initialized.
You may want to set this to ``False`` if there is a problem with one
or more default modules on your platform (e.g. your device is
unrooted and cpufreq is not accessible to unprivileged users), or
if ``Target`` initialization is taking too long for your platform.
'''),
Parameter('shell_prompt', kind=regex, default=DEFAULT_SHELL_PROMPT,
description='''
A regex that matches the shell prompt on the target.
'''),
]
COMMON_PLATFORM_PARAMS = [
Parameter('core_names', kind=list_of_strings,
description='''
List of names of CPU cores in the order that they appear to the
kernel. If not specified, it will be inferred from the platform.
'''),
Parameter('core_clusters', kind=list_of_ints,
description='''
Cluster mapping corresponding to the cores in ``core_names``.
Cluster indexing starts at ``0``. If not specified, this will be
inferred from ``core_names`` -- consecutive cores with the same
name will be assumed to share a cluster.
'''),
Parameter('big_core', kind=str,
description='''
The name of the big cores in a big.LITTLE system. If not
specified, this will be inferred, either from the name (if one of
the names in ``core_names`` matches known big cores), or by
assuming that the last cluster is big.
'''),
Parameter('model', kind=str,
description='''
Hardware model of the platform. If not specified, an attempt will
be made to read it from target.
'''),
Parameter('modules', kind=list,
description='''
An additional list of modules to be loaded into the target.
'''),
]
VEXPRESS_PLATFORM_PARAMS = [
Parameter('serial_port', kind=str,
description='''
The serial device/port on the host for the initial connection to
the target (used for early boot, flashing, etc).
'''),
Parameter('baudrate', kind=int,
description='''
Baud rate for the serial connection.
'''),
Parameter('vemsd_mount', kind=str,
description='''
VExpress MicroSD card mount location. This is a MicroSD card in
the VExpress device that is mounted on the host via USB. The card
contains configuration files for the platform and firmware and
kernel images to be flashed.
'''),
Parameter('bootloader', kind=str,
allowed_values=['uefi', 'uefi-shell', 'u-boot', 'bootmon'],
description='''
Selects the bootloader mechanism used by the board. Depending on
              firmware version, a number of possible boot mechanisms may be used.
Please see ``devlib`` documentation for descriptions.
'''),
Parameter('hard_reset_method', kind=str,
allowed_values=['dtr', 'reboottxt'],
description='''
There are a couple of ways to reset VersatileExpress board if the
software running on the board becomes unresponsive. Both require
configuration to be enabled (please see ``devlib`` documentation).
``dtr``: toggle the DTR line on the serial connection
``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount.
'''),
]
GEM5_PLATFORM_PARAMS = [
Parameter('gem5_bin', kind=str, mandatory=True,
description='''
Path to the gem5 binary
'''),
Parameter('gem5_args', kind=str, mandatory=True,
description='''
Arguments to be passed to the gem5 binary
'''),
Parameter('gem5_virtio', kind=str, mandatory=True,
description='''
VirtIO device setup arguments to be passed to gem5. VirtIO is used
to transfer files between the simulation and the host.
'''),
Parameter('name', kind=str, default='gem5',
description='''
The name for the gem5 "device".
'''),
]
CONNECTION_PARAMS = {
AdbConnection: [
Parameter(
'device', kind=str,
aliases=['adb_name'],
description="""
ADB device name
"""),
Parameter(
'adb_server', kind=str,
description="""
ADB server to connect to.
"""),
],
SshConnection: [
Parameter(
'host', kind=str, mandatory=True,
description="""
Host name or IP address of the target.
"""),
Parameter(
'username', kind=str, mandatory=True,
description="""
User name to connect with
"""),
Parameter(
'password', kind=str,
description="""
Password to use.
"""),
Parameter(
'keyfile', kind=str,
description="""
Key file to use
"""),
Parameter(
'port', kind=int,
description="""
The port SSH server is listening on on the target.
"""),
Parameter(
'telnet', kind=bool, default=False,
description="""
If set to ``True``, a Telnet connection, rather than
SSH will be used.
"""),
Parameter(
'password_prompt', kind=str,
description="""
Password prompt to expect
"""),
Parameter(
'original_prompt', kind=str,
description="""
Original shell prompt to expect.
"""),
Parameter(
'sudo_cmd', kind=str,
default="sudo -- sh -c '{}'",
description="""
Sudo command to use. Must have ``"{}"`` specified
            somewhere in the string to indicate where the command
to be run via sudo is to go.
"""),
],
Gem5Connection: [
Parameter(
'host', kind=str, mandatory=False,
description="""
Host name or IP address of the target.
"""),
Parameter(
'username', kind=str, default='root',
description="""
User name to connect to gem5 simulation.
"""),
Parameter(
'password', kind=str,
description="""
Password to use.
"""),
Parameter(
'port', kind=int,
description="""
The port SSH server is listening on on the target.
"""),
Parameter(
'password_prompt', kind=str,
description="""
Password prompt to expect
"""),
Parameter(
'original_prompt', kind=str,
description="""
Original shell prompt to expect.
"""),
],
LocalConnection: [
Parameter(
'password', kind=str,
description="""
                Password to use for sudo. If not specified, the user will
                be prompted during initialization.
"""),
Parameter(
'keep_password', kind=bool, default=True,
description="""
If ``True`` (the default), the password will be cached in
memory after it is first obtained from the user, so that the
user would not be prompted for it again.
"""),
Parameter(
'unrooted', kind=bool, default=False,
description="""
Indicate that the target should be considered unrooted; do not
attempt sudo or ask the user for their password.
"""),
],
}
CONNECTION_PARAMS['ChromeOsConnection'] = \
CONNECTION_PARAMS[AdbConnection] + CONNECTION_PARAMS[SshConnection]
# name --> ((target_class, conn_class), params_list, defaults)
TARGETS = {
'linux': ((LinuxTarget, SshConnection), COMMON_TARGET_PARAMS, None),
'android': ((AndroidTarget, AdbConnection), COMMON_TARGET_PARAMS +
[Parameter('package_data_directory', kind=str, default='/data/data',
description='''
Directory containing Android data
'''),
], None),
'chromeos': ((ChromeOsTarget, 'ChromeOsConnection'), COMMON_TARGET_PARAMS +
[Parameter('package_data_directory', kind=str, default='/data/data',
description='''
Directory containing Android data
'''),
Parameter('android_working_directory', kind=str,
description='''
On-target working directory that will be used by WA for the
android container. This directory must be writable by the user
WA logs in as without the need for privilege elevation.
'''),
Parameter('android_executables_directory', kind=str,
description='''
On-target directory where WA will install its executable
binaries for the android container. This location must allow execution.
This location does *not* need to be writable by unprivileged users or
rooted devices (WA will install with elevated privileges as necessary).
'''),
], None),
'local': ((LocalLinuxTarget, LocalConnection), COMMON_TARGET_PARAMS, None),
}
# name --> assistant
ASSISTANTS = {
'linux': LinuxAssistant,
'android': AndroidAssistant,
'local': LinuxAssistant,
'chromeos': ChromeOsAssistant
}
# name --> ((platform_class, conn_class), params_list, defaults, target_defaults)
# Note: normally, connection is defined by the Target name, but
# platforms may choose to override it
# Note: the target_defaults allows you to override common target_params for a
# particular platform. Parameters you can override are in COMMON_TARGET_PARAMS
# Example of overriding one of the target parameters: Replace last None with:
# {'shell_prompt': CUSTOM__SHELL_PROMPT}
PLATFORMS = {
'generic': ((Platform, None), COMMON_PLATFORM_PARAMS, None, None),
'juno': ((Juno, None), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
{
'vemsd_mount': '/media/JUNO',
'baudrate': 115200,
'bootloader': 'u-boot',
'hard_reset_method': 'dtr',
},
None),
'tc2': ((TC2, None), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
{
'vemsd_mount': '/media/VEMSD',
'baudrate': 38400,
'bootloader': 'bootmon',
'hard_reset_method': 'reboottxt',
}, None),
'gem5': ((Gem5SimulationPlatform, Gem5Connection), GEM5_PLATFORM_PARAMS, None, None),
}
class DefaultTargetDescriptor(TargetDescriptor):
name = 'devlib_targets'
description = """
The default target descriptor that provides descriptions in the form
<platform>_<target>.
These map directly onto ``Target``\ s and ``Platform``\ s supplied by ``devlib``.
"""
def get_descriptions(self):
# pylint: disable=attribute-defined-outside-init,too-many-locals
result = []
for target_name, target_tuple in TARGETS.items():
(target, conn), target_params = self._get_item(target_tuple)
assistant = ASSISTANTS[target_name]
conn_params = CONNECTION_PARAMS[conn]
for platform_name, platform_tuple in PLATFORMS.items():
platform_target_defaults = platform_tuple[-1]
platform_tuple = platform_tuple[0:-1]
(platform, plat_conn), platform_params = self._get_item(platform_tuple)
# Add target defaults specified in the Platform tuple
target_params = self._apply_param_defaults(target_params,
platform_target_defaults)
name = '{}_{}'.format(platform_name, target_name)
td = TargetDescription(name, self)
td.target = target
td.platform = platform
td.assistant = assistant
td.target_params = target_params
td.platform_params = platform_params
td.assistant_params = assistant.parameters
if plat_conn:
td.conn = plat_conn
td.conn_params = CONNECTION_PARAMS[plat_conn]
else:
td.conn = conn
td.conn_params = conn_params
result.append(td)
return result
def _apply_param_defaults(self, params, defaults): # pylint: disable=no-self-use
        '''Apply the values in ``defaults`` as new defaults for the matching
        parameters in ``params``; return the updated parameters as a new list
        (the input list is not modified, so the function is idempotent).'''
if not defaults:
return params
param_map = OrderedDict((p.name, copy(p)) for p in params)
for name, value in defaults.items():
if name not in param_map:
raise ValueError('Unexpected default "{}"'.format(name))
param_map[name].default = value
# Convert the OrderedDict to a list to return the same type
return list(param_map.values())
def _get_item(self, item_tuple):
cls, params, defaults = item_tuple
updated_params = self._apply_param_defaults(params, defaults)
return cls, updated_params
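# Illustrative sketch (not part of the original code) of the default-override
# mechanics above; the parameter names mirror the 'juno' PLATFORMS entry and
# the real Parameter objects live in the *_PLATFORM_PARAMS lists defined
# earlier in this module:
#
#   params = [Parameter('vemsd_mount', kind=str), Parameter('baudrate', kind=int)]
#   juno_defaults = {'vemsd_mount': '/media/JUNO', 'baudrate': 115200}
#   updated = self._apply_param_defaults(params, juno_defaults)
#   # updated[0].default == '/media/JUNO'; the objects in ``params`` are untouched.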
_adhoc_target_descriptions = []
def create_target_description(name, *args, **kwargs):
name = identifier(name)
for td in _adhoc_target_descriptions:
if caseless_string(name) == td.name:
msg = 'Target with name "{}" already exists (from source: {})'
raise ValueError(msg.format(name, td.source))
stack = inspect.stack()
# inspect.stack() returns a list of call frame records for the current thread
# in reverse call order. So the first entry is for the current frame and next one
# for the immediate caller. Each entry is a tuple in the format
# (frame_object, module_path, line_no, function_name, source_lines, source_lines_index)
#
# Here we assign the path of the calling module as the "source" for this description.
    # Because this might be invoked via the add_description_for_target wrapper, we need to
# check for that, and make sure that we get the info for *its* caller in that case.
if stack[1][3] == 'add_description_for_target':
source = stack[2][1]
else:
source = stack[1][1]
_adhoc_target_descriptions.append(TargetDescription(name, source, *args, **kwargs))
def _get_target_defaults(target):
specificity = 0
res = ('linux', TARGETS['linux']) # fallback to a generic linux target
for name, ttup in TARGETS.items():
if issubclass(target, ttup[0][0]):
new_spec = len(inspect.getmro(ttup[0][0]))
if new_spec > specificity:
res = (name, ttup)
specificity = new_spec
return res
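# For example (illustrative only):
#
#   name, ttup = _get_target_defaults(AndroidTarget)
#   # name == 'android' -- AndroidTarget has a longer MRO than LinuxTarget, so
#   # the more specific entry wins over the 'linux' fallback.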
def add_description_for_target(target, description=None, **kwargs):
(base_name, ((_, base_conn), base_params, _)) = _get_target_defaults(target)
if 'target_params' not in kwargs:
kwargs['target_params'] = base_params
if 'platform' not in kwargs:
kwargs['platform'] = Platform
if 'platform_params' not in kwargs:
for (plat, conn), params, _, _ in PLATFORMS.values():
if plat == kwargs['platform']:
kwargs['platform_params'] = params
                if conn is not None and kwargs.get('conn') is None:
kwargs['conn'] = conn
break
if 'conn' not in kwargs:
kwargs['conn'] = base_conn
if 'conn_params' not in kwargs:
kwargs['conn_params'] = CONNECTION_PARAMS.get(kwargs['conn'])
if 'assistant' not in kwargs:
kwargs['assistant'] = ASSISTANTS.get(base_name)
create_target_description(target.name, target=target, description=description, **kwargs)
class SimpleTargetDescriptor(TargetDescriptor):
name = 'adhoc_targets'
description = """
Returns target descriptions added with ``create_target_description``.
"""
def get_descriptions(self):
return _adhoc_target_descriptions
|
|
# coding: utf-8
from .base import ObjectifyModel
from ..base import ObjectifyObject
class ObjectifyList(ObjectifyModel):
"""
The instantiated ObjectifyObject class we want to use in our list
"""
__slots__ = ('__list__','__my_list_object__')
__list_object__ = None
def __init__(self,list_object=None,**kwargs):
self.__list__ = []
self.__fetch_attr__ = None
if list_object is not None:
self.__my_list_object__ = list_object
else:
self.__my_list_object__ = self.__list_object__
if self.__my_list_object__ is None:
raise RuntimeError("Cannot have an ObjectifyList without a __list_object__")
super(ObjectifyList, self).__init__(list_object=list_object, **kwargs)
def __morph_item__(self,item):
"""
Morph an item to insert it into the list
"""
from ..dynamic import DynamicProperty
if isinstance(self.__my_list_object__, DynamicProperty):
return self.__morph_dynamic_item__(item)
if not isinstance(item,self.__my_list_object__.__class__):
_item = self.__my_list_object__.copy_inited()
_item.from_collection(item)
else:
_item = item
return _item
def __morph_dynamic_item__(self,item):
"""
This function handles morphing items when the __list_object__ is an instance of objectify.prop.Dynamic
"""
if isinstance(item,ObjectifyModel):
"""
A dynamic attribute simply wants to adapt any value to the closest ObjectifyObject representation
In this case we already have a best fit, which is the raw value
"""
return item
"""
This function handles setting attributes which are or subclass
our Dynamic property
"""
if isinstance(item,dict):
"""
In this case we need to create a DynamicDict object to properly fit our data
"""
from ..dynamic import DynamicDict
_item = DynamicDict()
_item.from_collection(item)
return _item
if isinstance(item,list):
"""
In this case we need to create a DynamicList object to properly fit our data
"""
from ..dynamic import DynamicList
_item = DynamicList()
_item.from_collection(item)
return _item
_item = self.__my_list_object__.copy_inited()
_item.from_collection(item)
return _item
def __setitem__(self, key, item):
_item = self.__morph_item__(item)
#super(ObjectifyList, self).__setitem__(key,_item)
self.__list__.__setitem__(key,_item)
def __getitem__(self, key, raw=False):
existing = self.__list__.__getitem__(key)
#existing = super(ObjectifyList, self).__getitem__(key)
if not raw:
if isinstance(existing,ObjectifyObject) and not isinstance(existing,ObjectifyModel):
return existing.to_collection()
return existing
def __iter__(self,raw=False):
i = 0
while i < len(self):
yield self.__getitem__(i,raw=raw)
i += 1
def __raw_iter__(self):
#We also want to bypass our overload
#return super(ObjectifyList, self).__iter__()
return self.__list__.__iter__()
def append(self,item):
_item = self.__morph_item__(item)
return self.__list__.append(_item)
#super(ObjectifyList, self).append(_item)
    def extend(self,items):
        _items = [self.__morph_item__(item) for item in items]
        return self.__list__.extend(_items)
        #super(ObjectifyList, self).extend(_items)
def insert(self,key,item):
_item = self.__morph_item__(item)
return self.__list__.insert(key,_item)
#super(ObjectifyList, self).insert(key,_item)
def get_raw_item(self,key):
return self.__getitem__(key,raw=True)
def empty(self):
return not bool(self)
def verify_exclude(self,exclude):
#Raise an exception if we get an empty value
for ex in exclude:
if not ex:
raise RuntimeError("Empty area in exclude path %s in %s" % (exclude,self.__repr__()))
def split_exclude(self,exclude):
passdown = set()
for ex in exclude:
ex_l = ex.split(".")
self.verify_exclude(ex_l)
if len(ex_l) < 2:
raise RuntimeError("Unable to handle ending exclude path in ObjectifyList %s!" % self.__repr__())
if ex_l[0] != "[0]":
raise RuntimeError("Exclude path touches ObjectifyList without [0]!")
passdown.add(".".join(ex_l[1:]))
return passdown
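    # Illustrative example (not part of the original class): an exclude path of
    # {"[0].password"} means "drop the 'password' field from every element", so
    # split_exclude() returns {"password"}, which to_collection() below passes
    # down to each element's own to_collection() call.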
def to_collection(self,exclude=None):
to_return = []
passdown_exclude = None
if exclude:
passdown_exclude = self.split_exclude(exclude)
for obj in self.__raw_iter__():
if passdown_exclude:
to_return.append(obj.to_collection(
exclude=passdown_exclude
))
else:
to_return.append(obj.to_collection())
return to_return
def from_collection(self,lst,clear=True):
if clear:
del self.__list__[:]
if not isinstance(lst,list):
lst = [lst]
for obj in lst:
self.append(self.__morph_item__(obj))
def copy_inited(self,keep_name=True):
kwargs_dict = self._kwargs_to_dict()
if keep_name:
kwargs_dict['name'] = self.__key_name__
cl = self.__class__(
*self.__init_args__,
**kwargs_dict
)
cl.__my_list_object__ = cl.__my_list_object__.copy_inited()
return cl
def example_value(self):
return [self.__my_list_object__.example_value()]
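# A minimal usage sketch (illustrative only; ``SomeField`` stands in for any
# instantiated ObjectifyObject from the surrounding package):
#
#   class TagList(ObjectifyList):
#       __list_object__ = SomeField()
#
#   tags = TagList()
#   tags.append("raw value")      # morphed via __morph_item__
#   tags.to_collection()          # -> ["raw value"]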
|
|
# -*- coding: utf-8 -*-
'''
Subversion SCM
'''
# Import python libs
import re
import shlex
import subprocess
# Import salt libs
from salt import utils, exceptions
from salt.modules import state_std
_INI_RE = re.compile(r"^([^:]+):\s+(\S.*)$", re.M)
def __virtual__():
'''
Only load if svn is installed
'''
if utils.which('svn'):
return 'svn'
return False
def _check_svn():
'''
Check for svn on this node.
'''
utils.check_or_die('svn')
def _run_svn(cmd, cwd, user, username, password, opts, **kwargs):
'''
Execute svn
return the output of the command
cmd
The command to run.
cwd
The path to the Subversion repository
user
Run svn as a user other than what the minion runs as
username
Connect to the Subversion server as another user
password
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
opts
Any additional options to add to the command line
kwargs
Additional options to pass to the run-cmd
'''
cmd = 'svn --non-interactive {0} '.format(cmd)
if username:
opts += ('--username', username)
if password:
opts += ('--password', password)
if opts:
cmd += subprocess.list2cmdline(opts)
result = __salt__['cmd.run_stdall'](cmd, cwd=cwd, runas=user, **kwargs)
retcode = result['retcode']
if retcode == 0:
return result['stdout']
raise exceptions.CommandExecutionError(result['stdout'] + '\n\n' + cmd)
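# For example (illustrative only), _run_svn('info', '/srv/repo', None, 'alice',
# 's3cret', ['--xml']) builds and runs:
#
#   svn --non-interactive info --xml --username alice --password s3cret
#
# returning its stdout, or raising CommandExecutionError on a non-zero retcode.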
def _run_svn_stdall(cmd, cwd, user, username, password, opts, **kwargs):
'''
Execute svn
return the output of the command
cmd
The command to run.
cwd
The path to the Subversion repository
user
Run svn as a user other than what the minion runs as
username
Connect to the Subversion server as another user
password
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
opts
Any additional options to add to the command line
kwargs
Additional options to pass to the run-cmd
'''
cmd = 'svn --non-interactive {0} '.format(cmd)
if username:
opts += ('--username', username)
if password:
opts += ('--password', password)
if opts:
cmd += subprocess.list2cmdline(opts)
result = __salt__['cmd.run_stdall'](cmd, cwd=cwd, runas=user, **kwargs)
state_std(kwargs, result)
retcode = result['retcode']
if retcode == 0:
return result['stdout']
raise exceptions.CommandExecutionError(result['stdout'] + '\n\n' + cmd)
def info(cwd,
targets=None,
user=None,
username=None,
password=None,
fmt='str'):
'''
Display the Subversion information from the checkout.
cwd
The path to the Subversion repository
targets : None
files, directories, and URLs to pass to the command as arguments
svn uses '.' by default
user : None
Run svn as a user other than what the minion runs as
username : None
Connect to the Subversion server as another user
password : None
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
fmt : str
        How to format the output from info.
(str, xml, list, dict)
CLI Example:
.. code-block:: bash
salt '*' svn.info /path/to/svn/repo
'''
opts = list()
if fmt == 'xml':
opts.append('--xml')
if targets:
opts += shlex.split(str(targets))
infos = _run_svn('info', cwd, user, username, password, opts)
if fmt in ('str', 'xml'):
return infos
info_list = []
for infosplit in infos.split('\n\n'):
info_list.append(_INI_RE.findall(infosplit))
if fmt == 'list':
return info_list
if fmt == 'dict':
return [dict(tmp) for tmp in info_list]
def checkout(cwd,
remote,
target=None,
user=None,
username=None,
password=None,
*opts, **kwargs):
'''
Download a working copy of the remote Subversion repository
directory or file
cwd
The path to the Subversion repository
remote : None
URL to checkout
target : None
The name to give the file or directory working copy
Default: svn uses the remote basename
user : None
Run svn as a user other than what the minion runs as
username : None
Connect to the Subversion server as another user
password : None
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
CLI Example:
.. code-block:: bash
salt '*' svn.checkout /path/to/repo svn://remote/repo
'''
opts += (remote,)
if target:
opts += (target,)
return _run_svn('checkout', cwd, user, username, password, opts, **kwargs)
def switch(cwd, remote, target=None, user=None, username=None,
password=None, *opts, **kwargs):
'''
.. versionadded:: Hydrogen
Switch a working copy of a remote Subversion repository
directory
cwd
The path to the Subversion repository
remote : None
URL to switch
target : None
The name to give the file or directory working copy
Default: svn uses the remote basename
user : None
Run svn as a user other than what the minion runs as
username : None
Connect to the Subversion server as another user
password : None
Connect to the Subversion server with this password
CLI Example::
salt '*' svn.switch /path/to/repo svn://remote/repo
'''
opts += (remote,)
if target:
opts += (target,)
return _run_svn_stdall('switch', cwd, user, username, password, opts, **kwargs)
def update(cwd, targets=None, user=None, username=None, password=None, *opts, **kwargs):
'''
Update the current directory, files, or directories from
the remote Subversion repository
cwd
The path to the Subversion repository
targets : None
files and directories to pass to the command as arguments
Default: svn uses '.'
user : None
Run svn as a user other than what the minion runs as
password : None
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
username : None
Connect to the Subversion server as another user
CLI Example:
.. code-block:: bash
salt '*' svn.update /path/to/repo
'''
if targets:
opts += tuple(shlex.split(targets))
return _run_svn_stdall('update', cwd, user, username, password, opts, **kwargs)
def diff(cwd, targets=None, user=None, username=None, password=None, *opts, **kwargs):
'''
Return the diff of the current directory, files, or directories from
the remote Subversion repository
cwd
The path to the Subversion repository
targets : None
files and directories to pass to the command as arguments
Default: svn uses '.'
user : None
Run svn as a user other than what the minion runs as
username : None
Connect to the Subversion server as another user
password : None
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
CLI Example:
.. code-block:: bash
salt '*' svn.diff /path/to/repo
'''
if targets:
opts += tuple(shlex.split(targets))
return _run_svn('diff', cwd, user, username, password, opts, **kwargs)
def commit(cwd,
targets=None,
msg=None,
user=None,
username=None,
password=None,
*opts, **kwargs):
'''
Commit the current directory, files, or directories to
the remote Subversion repository
cwd
The path to the Subversion repository
targets : None
files and directories to pass to the command as arguments
Default: svn uses '.'
msg : None
Message to attach to the commit log
user : None
Run svn as a user other than what the minion runs as
username : None
Connect to the Subversion server as another user
password : None
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
CLI Example:
.. code-block:: bash
salt '*' svn.commit /path/to/repo
'''
if msg:
opts += ('-m', msg)
if targets:
opts += tuple(shlex.split(targets))
return _run_svn_stdall('commit', cwd, user, username, password, opts, **kwargs)
def add(cwd, targets, user=None, username=None, password=None, *opts, **kwargs):
'''
Add files to be tracked by the Subversion working-copy checkout
cwd
The path to the Subversion repository
targets : None
files and directories to pass to the command as arguments
user : None
Run svn as a user other than what the minion runs as
username : None
Connect to the Subversion server as another user
password : None
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
CLI Example:
.. code-block:: bash
salt '*' svn.add /path/to/repo /path/to/new/file
'''
if targets:
opts += tuple(shlex.split(targets))
return _run_svn_stdall('add', cwd, user, username, password, opts, **kwargs)
def remove(cwd,
targets,
msg=None,
user=None,
username=None,
password=None,
*opts, **kwargs):
'''
Remove files and directories from the Subversion repository
cwd
The path to the Subversion repository
targets : None
files, directories, and URLs to pass to the command as arguments
msg : None
Message to attach to the commit log
user : None
Run svn as a user other than what the minion runs as
username : None
Connect to the Subversion server as another user
password : None
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
CLI Example:
.. code-block:: bash
salt '*' svn.remove /path/to/repo /path/to/repo/remove
'''
if msg:
opts += ('-m', msg)
if targets:
opts += tuple(shlex.split(targets))
return _run_svn_stdall('remove', cwd, user, username, password, opts, **kwargs)
def status(cwd, targets=None, user=None, username=None, password=None, *opts, **kwargs):
'''
Display the status of the current directory, files, or
directories in the Subversion repository
cwd
The path to the Subversion repository
targets : None
files, directories, and URLs to pass to the command as arguments
Default: svn uses '.'
user : None
Run svn as a user other than what the minion runs as
username : None
Connect to the Subversion server as another user
password : None
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
CLI Example:
.. code-block:: bash
salt '*' svn.status /path/to/repo
'''
if targets:
opts += tuple(shlex.split(targets))
return _run_svn('status', cwd, user, username, password, opts, **kwargs)
def export(cwd,
remote,
target=None,
user=None,
username=None,
password=None,
*opts, **kwargs):
'''
Create an unversioned copy of a tree.
cwd
The path to the Subversion repository
remote : None
URL and path to file or directory checkout
target : None
The name to give the file or directory working copy
Default: svn uses the remote basename
user : None
Run svn as a user other than what the minion runs as
username : None
Connect to the Subversion server as another user
password : None
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
CLI Example:
.. code-block:: bash
salt '*' svn.export /path/to/repo svn://remote/repo
'''
opts += (remote,)
if target:
opts += (target,)
return _run_svn_stdall('export', cwd, user, username, password, opts, **kwargs)
|
|
# -*- coding: utf-8 -*-
"""
The following image extraction implementation was taken from an old
copy of Reddit's source code.
"""
__title__ = 'newspaper'
__author__ = 'Lucas Ou-Yang'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014, Lucas Ou-Yang'
import logging
import urllib
import StringIO
import math
from PIL import Image, ImageFile
from urllib2 import Request, HTTPError, URLError, build_opener
from httplib import InvalidURL
log = logging.getLogger(__name__)
chunk_size = 1024
thumbnail_size = 90, 90
def image_to_str(image):
s = StringIO.StringIO()
image.save(s, image.format)
s.seek(0)
return s.read()
def str_to_image(s):
s = StringIO.StringIO(s)
s.seek(0)
image = Image.open(s)
return image
def prepare_image(image):
image = square_image(image)
image.thumbnail(thumbnail_size, Image.ANTIALIAS) # inplace
return image
def image_entropy(img):
"""
Calculate the entropy of an image.
"""
hist = img.histogram()
hist_size = sum(hist)
hist = [float(h) / hist_size for h in hist]
return -sum([p * math.log(p, 2) for p in hist if p != 0])
def square_image(img):
"""
    If the image is taller than it is wide, square it off. Determine
    which pieces to cut off based on their entropy.
"""
x,y = img.size
while y > x:
# slice 10px at a time until square
slice_height = min(y - x, 10)
bottom = img.crop((0, y - slice_height, x, y))
top = img.crop((0, 0, x, slice_height))
# remove the slice with the least entropy
if image_entropy(bottom) < image_entropy(top):
img = img.crop((0, 0, x, y - slice_height))
else:
img = img.crop((0, slice_height, x, y))
x,y = img.size
return img
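# Illustrative behaviour of the two helpers above (not part of the original
# module): a 100x160 portrait image loses 10px slices from whichever edge
# (top or bottom) has the lower entropy until it is 100x100; prepare_image()
# then thumbnails the squared image down to 90x90 in place.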
def clean_url(url):
"""
    Percent-encode non-ASCII characters so the url can be safely requested.
"""
url = url.encode('utf8')
url = ''.join([urllib.quote(c) if ord(c) >= 127 else c for c in url])
return url
def fetch_url(url, useragent, referer=None, retries=1, dimension=False):
"""
"""
cur_try = 0
nothing = None if dimension else (None, None)
url = clean_url(url)
if not url.startswith(('http://', 'https://')):
return nothing
while True:
try:
req = Request(url)
req.add_header('User-Agent', useragent)
if referer:
req.add_header('Referer', referer)
opener = build_opener()
open_req = opener.open(req, timeout=5)
# if we only need the dimension of the image, we may not
# need to download the entire thing
if dimension:
content = open_req.read(chunk_size)
else:
content = open_req.read()
content_type = open_req.headers.get('content-type')
if not content_type:
return nothing
if 'image' in content_type:
p = ImageFile.Parser()
new_data = content
while not p.image and new_data:
try:
p.feed(new_data)
except IOError, e:
# pil failed to install, jpeg codec broken
# **should work if you install via pillow
                            print('***jpeg misconfiguration! check pillow or PIL '
                                  'installation on this machine: %s' % str(e))
p = None
break
except ValueError, ve:
log.debug('cant read image format: %s' % url)
p = None
break
new_data = open_req.read(chunk_size)
content += new_data
if p is None:
return nothing
# return the size, or return the data
if dimension and p.image:
return p.image.size
                    elif dimension:
                        # expected an image, but didn't get one
                        return nothing
return content_type, content
except (URLError, HTTPError, InvalidURL), e:
cur_try += 1
if cur_try >= retries:
log.debug('error while fetching: %s refer: %s' % (url, referer))
return nothing
finally:
if 'open_req' in locals():
open_req.close()
def fetch_size(url, useragent, referer=None, retries=1):
return fetch_url(url, useragent, referer, retries, dimension=True)
class Scraper:
def __init__(self, article):
self.url = article.url # if not url else url
self.imgs = article.imgs # if not imgs else imgs
self.top_img = article.top_img # if not top_img else top_img
self.config = article.config
self.useragent = self.config.browser_user_agent
def largest_image_url(self):
if not self.imgs and not self.top_img:
return None
if self.top_img:
return self.top_img
max_area = 0
max_url = None
for img_url in self.imgs:
size = fetch_size(img_url, self.useragent, referer=self.url)
if not size:
continue
area = size[0] * size[1]
# ignore little images
if area < 5000:
log.debug('ignore little %s' % img_url)
continue
# PIL won't scale up, so we set a min width and
# maintain the aspect ratio
if size[0] < thumbnail_size[0]:
continue
# ignore excessively long/wide images
if max(size) / min(size) > self.config.image_dimension_ration:
log.debug('ignore dims %s' % img_url)
continue
# penalize images with "sprite" in their name
if 'sprite' in img_url.lower():
log.debug('penalizing sprite %s' % img_url)
area /= 10
if area > max_area:
max_area = area
max_url = img_url
log.debug('using max img ' + max_url)
return max_url
def thumbnail(self):
"""
Identifies top image, trims out a thumbnail and also has a url.
"""
image_url = self.largest_image_url()
if image_url:
            content_type, image_str = fetch_url(image_url, self.useragent,
                                                referer=self.url)
if image_str:
image = str_to_image(image_str)
try:
image = prepare_image(image)
except IOError, e:
if 'interlaced' in e.message:
return None
# raise
return image, image_url
return None, None
|
|
import functools
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
import canopy
from treeano.sandbox.nodes import batch_normalization as bn
fX = theano.config.floatX
@treeano.register_node("strided_downsample")
class StridedDownsampleNode(treeano.NodeImpl):
hyperparameter_names = ("strides",)
def compute_output(self, network, in_vw):
strides = network.find_hyperparameter(["strides"])
out_slices = []
out_shape = list(in_vw.shape)
for idx, stride in enumerate(strides):
out_slices.append(slice(None, None, stride))
size = out_shape[idx]
if size is not None:
out_shape[idx] = (size + stride - 1) // stride
network.create_vw(
"default",
variable=in_vw.variable[tuple(out_slices)],
shape=tuple(out_shape),
tags={"output"},
)
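# Shape arithmetic used above (illustrative): with strides=(1, 1, 2, 2) an
# input of shape (None, 16, 33, 32) downsamples to (None, 16, 17, 16), since
# each known size maps to (size + stride - 1) // stride.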
@treeano.register_node("resnet_init_conv_2d")
class ResnetInitConv2DNode(treeano.NodeImpl):
"""
NOTE: originally copy-pasted from Conv2DNode
"""
hyperparameter_names = ("inits",
"num_filters",
"filter_size",
"conv_stride",
"stride",
"conv_pad",
"pad")
def compute_output(self, network, in_vw):
# gather hyperparameters
num_filters = network.find_hyperparameter(["num_filters"])
filter_size = network.find_hyperparameter(["filter_size"])
stride = network.find_hyperparameter(["conv_stride", "stride"], (1, 1))
pad = network.find_hyperparameter(["conv_pad", "pad"], "valid")
pad = tn.conv.conv_parse_pad(filter_size, pad)
assert len(filter_size) == 2
# create weight
num_channels = in_vw.shape[1]
filter_shape = (num_filters, num_channels) + tuple(filter_size)
W = network.create_vw(
name="weight",
is_shared=True,
shape=filter_shape,
tags={"parameter", "weight"},
default_inits=[],
).variable
# calculate identity for resnet init
# ---
# read hyperparams
identity_ratio = network.find_hyperparameter(["identity_ratio"], 0.5)
ratio_on_input = network.find_hyperparameter(["ratio_on_input"], True)
# find center spatial location
dim0_idx = filter_shape[2] // 2
dim1_idx = filter_shape[3] // 2
# create identity kernel
ratio_idx = 1 if ratio_on_input else 0
init = np.zeros(filter_shape, dtype=theano.config.floatX)
for i in range(min(filter_shape[0],
filter_shape[1],
int(identity_ratio * filter_shape[ratio_idx]))):
init[i, i, dim0_idx, dim1_idx] += 1
out_var = T.nnet.conv2d(input=in_vw.variable,
filters=W + init,
input_shape=in_vw.shape,
filter_shape=filter_shape,
border_mode=pad,
subsample=stride)
out_shape = tn.conv.conv_output_shape(input_shape=in_vw.shape,
num_filters=num_filters,
axes=(2, 3),
conv_shape=filter_size,
strides=stride,
pads=pad)
network.create_vw(
"default",
variable=out_var,
shape=out_shape,
tags={"output"},
)
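# Sketch of the "resnet init" idea above (illustrative): with a 3x3 kernel and
# identity_ratio=0.5, ``init`` places a 1 at the spatial centre of channel i of
# filter i for roughly the first half of the channels, so at initialisation the
# convolution with W + init behaves like a (partial) identity mapping plus the
# learned weights W.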
@treeano.register_node("resnet_init_conv_2d_with_bias")
class ResnetInitConv2DWithBiasNode(treeano.Wrapper0NodeImpl):
hyperparameter_names = ResnetInitConv2DNode.hyperparameter_names
def architecture_children(self):
return [
tn.SequentialNode(
self._name + "_sequential",
[ResnetInitConv2DNode(self._name + "_conv"),
tn.AddBiasNode(self._name + "_bias",
broadcastable_axes=(0, 2, 3))])]
@treeano.register_node("zero_last_axis_partition")
class _ZeroLastAxisPartitionNode(treeano.NodeImpl):
"""
zeros out a fraction of a tensor
"""
hyperparameter_names = ("zero_ratio",
"axis")
def compute_output(self, network, in_vw):
zero_ratio = network.find_hyperparameter(["zero_ratio"])
axis = network.find_hyperparameter(["axis"], 1)
in_var = in_vw.variable
size = treeano.utils.as_fX(in_var.shape[axis])
num_zeros = T.round(zero_ratio * size).astype("int32")
        # index that slices only the trailing ``num_zeros`` entries along ``axis``
        idxs = (slice(None),) * axis + (slice(-num_zeros, None),)
out_var = T.set_subtensor(in_var[idxs], 0)
network.create_vw(
"default",
variable=out_var,
shape=in_vw.shape,
tags={"output"},
)
def residual_block_conv_2d(name,
num_filters,
num_layers,
increase_dim=None,
conv_node=tn.Conv2DNode,
bn_node=bn.BatchNormalizationNode,
activation_node=tn.ReLUNode,
input_num_filters=None,
projection_filter_size=(1, 1),
increase_dim_stride=(2, 2),
no_identity=False):
if increase_dim is not None:
assert increase_dim in {"projection", "pad"}
first_stride = increase_dim_stride
if increase_dim == "projection":
identity_node = tn.SequentialNode(
name + "_projection",
[tn.Conv2DNode(name + "_projectionconv",
num_filters=num_filters,
filter_size=projection_filter_size,
stride=first_stride,
pad="same"),
bn_node(name + "_projectionbn")])
elif increase_dim == "pad":
assert input_num_filters is not None
identity_node = tn.SequentialNode(
name + "_pad",
[StridedDownsampleNode(
name + "_stride",
strides=(1, 1) + first_stride),
tn.PadNode(
name + "_addpad",
padding=(0, (num_filters - input_num_filters) // 2, 0, 0))])
else:
first_stride = (1, 1)
identity_node = tn.IdentityNode(name + "_identity")
nodes = []
# first node
for i in range(num_layers):
if i == 0:
# first conv
# ---
# same as middle convs, but with stride
nodes += [
conv_node(name + "_conv%d" % i,
num_filters=num_filters,
stride=first_stride,
pad="same"),
bn_node(name + "_bn%d" % i),
activation_node(name + "_activation%d" % i),
]
else:
nodes += [
conv_node(name + "_conv%d" % i,
num_filters=num_filters,
stride=(1, 1),
pad="same"),
bn_node(name + "_bn%d" % i),
activation_node(name + "_activation%d" % i),
]
# for last conv, remove activation
nodes.pop()
residual_node = tn.SequentialNode(name + "_seq", nodes)
if no_identity:
# ability to disable resnet connections
return residual_node
else:
return tn.ElementwiseSumNode(name,
[identity_node,
residual_node])
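# Illustrative usage (not from the original module; node names, shapes and the
# hyperparameter plumbing are made up -- filter_size etc. would normally be
# supplied through an enclosing tn.HyperparameterNode):
#
#   net = tn.SequentialNode("net", [
#       tn.InputNode("x", shape=(None, 16, 32, 32)),
#       residual_block_conv_2d("block1", num_filters=16, num_layers=2),
#       residual_block_conv_2d("block2", num_filters=32, num_layers=2,
#                              increase_dim="projection"),
#   ])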
def resnet_init_block_conv_2d(*args, **kwargs):
return residual_block_conv_2d(*args,
conv_node=ResnetInitConv2DNode,
**kwargs)
def resnet_init_projection_conv_2d(name,
num_filters,
num_layers,
bn_node=bn.BatchNormalizationNode,
activation_node=tn.ReLUNode,
stride=(1, 1)):
nodes = []
# first node
for i in range(num_layers):
if i == 0:
# first conv
# ---
# same as middle convs, but with stride
nodes += [
tn.Conv2DNode(name + "_conv%d" % i,
num_filters=num_filters,
stride=stride,
pad="same"),
bn_node(name + "_bn%d" % i),
activation_node(name + "_activation%d" % i),
]
else:
nodes += [
ResnetInitConv2DNode(name + "_conv%d" % i,
num_filters=num_filters,
stride=(1, 1),
pad="same"),
bn_node(name + "_bn%d" % i),
activation_node(name + "_activation%d" % i),
]
# for last conv, remove activation
nodes.pop()
return tn.SequentialNode(name + "_seq", nodes)
def preactivation_residual_block_conv_2d(name,
num_filters,
num_layers,
increase_dim=None,
initial_block=False,
conv_node=tn.Conv2DNode,
bn_node=bn.BatchNormalizationNode,
activation_node=tn.ReLUNode,
input_num_filters=None,
projection_filter_size=(1, 1),
increase_dim_stride=(2, 2),
no_identity=False):
"""
from http://arxiv.org/abs/1603.05027
"""
if increase_dim is not None:
assert increase_dim in {"projection", "pad"}
first_stride = increase_dim_stride
if increase_dim == "projection":
# TODO remove pre-activation when initial block
assert not initial_block
identity_node = tn.SequentialNode(
name + "_projection",
[
bn_node(name + "_projectionbn"),
activation_node(name + "_projectionactivation"),
tn.Conv2DNode(name + "_projectionconv",
num_filters=num_filters,
filter_size=projection_filter_size,
stride=first_stride,
pad="same"),
])
elif increase_dim == "pad":
assert input_num_filters is not None
identity_node = tn.SequentialNode(
name + "_pad",
[StridedDownsampleNode(
name + "_stride",
strides=(1, 1) + first_stride),
tn.PadNode(
name + "_addpad",
padding=(0, (num_filters - input_num_filters) // 2, 0, 0))])
else:
first_stride = (1, 1)
identity_node = tn.IdentityNode(name + "_identity")
nodes = []
# first node
for i in range(num_layers):
if i == 0:
# first conv
# ---
# maybe remove initial activation
if not initial_block:
nodes += [
bn_node(name + "_bn%d" % i),
activation_node(name + "_activation%d" % i),
]
# same as middle convs, but with stride
nodes += [
conv_node(name + "_conv%d" % i,
num_filters=num_filters,
stride=first_stride,
pad="same"),
]
else:
nodes += [
bn_node(name + "_bn%d" % i),
activation_node(name + "_activation%d" % i),
conv_node(name + "_conv%d" % i,
num_filters=num_filters,
stride=(1, 1),
pad="same"),
]
residual_node = tn.SequentialNode(name + "_seq", nodes)
if no_identity:
# ability to disable resnet connections
return residual_node
else:
return tn.ElementwiseSumNode(name,
[identity_node,
residual_node])
def generalized_residual(name, nodes, identity_ratio=0.5):
return tn.ElementwiseSumNode(
name,
[_ZeroLastAxisPartitionNode(name + "_zero",
zero_ratio=(1 - identity_ratio)),
tn.SequentialNode(
name + "_seq",
nodes)])
def generalized_residual_conv_2d(name,
num_filters,
include_preactivation=True,
bn_node=bn.BatchNormalizationNode,
activation_node=tn.ReLUNode,
conv_node=tn.Conv2DNode,
identity_ratio=0.5):
"""
generalized resnet block based on pre-activation resnet
"""
nodes = []
if include_preactivation:
# add pre-activation
nodes += [
bn_node(name + "_bn"),
activation_node(name + "_activation"),
]
nodes += [conv_node(name + "_conv", num_filters=num_filters)]
return generalized_residual(name, nodes, identity_ratio)
def generalized_residual_block_conv_2d(name,
num_filters,
num_layers,
increase_dim=None,
initial_block=False,
bn_node=bn.BatchNormalizationNode,
activation_node=tn.ReLUNode,
conv_node=tn.Conv2DNode,
identity_ratio=0.5,
input_num_filters=None,
projection_filter_size=(1, 1),
increase_dim_stride=(2, 2),
no_identity=False):
if no_identity: # HACK for compatibility
identity_ratio = 0
nodes = []
if increase_dim is not None:
if increase_dim == "projection":
# TODO remove pre-activation when initial block
assert not initial_block
# TODO maybe reduce layers by 1 to have same depth
# num_layers -= 1
nodes += [tn.SequentialNode(
name + "_projection",
[bn_node(name + "_projectionbn"),
activation_node(name + "_projectionactivation"),
tn.Conv2DNode(name + "_projectionconv",
num_filters=num_filters,
filter_size=projection_filter_size,
stride=increase_dim_stride,
pad="same")])]
elif increase_dim == "pad":
assert input_num_filters is not None
nodes += [tn.SequentialNode(
name + "_pad",
[StridedDownsampleNode(
name + "_stride",
strides=(1, 1) + increase_dim_stride),
tn.PadNode(
name + "_addpad",
padding=(0, (num_filters - input_num_filters) // 2, 0, 0))])]
else:
raise ValueError(increase_dim)
for i in range(num_layers):
include_preactivation = (not initial_block) or (i != 0)
nodes += [generalized_residual_conv_2d(
"%s_%d" % (name, i),
include_preactivation=include_preactivation,
num_filters=num_filters,
activation_node=activation_node,
identity_ratio=identity_ratio)]
return tn.SequentialNode(name, nodes)
def pool_with_projection_2d(name,
projection_filters,
stride=(2, 2),
filter_size=(3, 3),
bn_node=bn.BatchNormalizationNode):
pool_node = tn.MaxPool2DNode(name + "_pool",
pool_size=stride,
stride=stride)
projection_node = tn.SequentialNode(
name + "_projection",
[tn.Conv2DNode(name + "_projectionconv",
num_filters=projection_filters,
filter_size=filter_size,
stride=stride,
pad="same"),
bn_node(name + "_projectionbn")])
return tn.ConcatenateNode(name, [pool_node, projection_node])
def forget_gate_conv_2d_node(name,
num_filters,
filter_size=(3, 3),
initial_bias=0):
return tn.ElementwiseProductNode(
name,
[tn.IdentityNode(name + "_identity"),
tn.SequentialNode(
name + "_forget",
[tn.Conv2DWithBiasNode(name + "_conv",
num_filters=num_filters,
filter_size=filter_size,
stride=(1, 1),
pad="same"),
tn.AddConstantNode(name + "_initial_bias", value=initial_bias),
tn.SigmoidNode(name + "_sigmoid")])])
|
|
from __future__ import division, print_function, absolute_import
from scipy import stats
import numpy as np
from numpy.testing import assert_almost_equal, assert_, assert_raises, \
assert_array_almost_equal, assert_array_almost_equal_nulp
def test_kde_1d():
#some basic tests comparing to normal distribution
np.random.seed(8765678)
n_basesample = 500
xn = np.random.randn(n_basesample)
xnmean = xn.mean()
xnstd = xn.std(ddof=1)
# get kde for original sample
gkde = stats.gaussian_kde(xn)
# evaluate the density function for the kde for some points
xs = np.linspace(-7,7,501)
kdepdf = gkde.evaluate(xs)
normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
intervall = xs[1] - xs[0]
assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
prob1 = gkde.integrate_box_1d(xnmean, np.inf)
prob2 = gkde.integrate_box_1d(-np.inf, xnmean)
assert_almost_equal(prob1, 0.5, decimal=1)
assert_almost_equal(prob2, 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13)
assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13)
assert_almost_equal(gkde.integrate_kde(gkde),
(kdepdf**2).sum()*intervall, decimal=2)
assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
(kdepdf*normpdf).sum()*intervall, decimal=2)
def test_kde_2d():
#some basic tests comparing to normal distribution
np.random.seed(8765678)
n_basesample = 500
mean = np.array([1.0, 3.0])
covariance = np.array([[1.0, 2.0], [2.0, 6.0]])
# Need transpose (shape (2, 500)) for kde
xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T
# get kde for original sample
gkde = stats.gaussian_kde(xn)
# evaluate the density function for the kde for some points
x, y = np.mgrid[-7:7:500j, -7:7:500j]
grid_coords = np.vstack([x.ravel(), y.ravel()])
kdepdf = gkde.evaluate(grid_coords)
kdepdf = kdepdf.reshape(500, 500)
normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]), mean=mean, cov=covariance)
intervall = y.ravel()[1] - y.ravel()[0]
assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01)
small = -1e100
large = 1e100
prob1 = gkde.integrate_box([small, mean[1]], [large, large])
prob2 = gkde.integrate_box([small, small], [large, mean[1]])
assert_almost_equal(prob1, 0.5, decimal=1)
assert_almost_equal(prob2, 0.5, decimal=1)
assert_almost_equal(gkde.integrate_kde(gkde),
(kdepdf**2).sum()*(intervall**2), decimal=2)
assert_almost_equal(gkde.integrate_gaussian(mean, covariance),
(kdepdf*normpdf).sum()*(intervall**2), decimal=2)
def test_kde_bandwidth_method():
def scotts_factor(kde_obj):
"""Same as default, just check that it works."""
return np.power(kde_obj.n, -1./(kde_obj.d+4))
np.random.seed(8765678)
n_basesample = 50
xn = np.random.randn(n_basesample)
# Default
gkde = stats.gaussian_kde(xn)
# Supply a callable
gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor)
# Supply a scalar
gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor)
xs = np.linspace(-7,7,51)
kdepdf = gkde.evaluate(xs)
kdepdf2 = gkde2.evaluate(xs)
assert_almost_equal(kdepdf, kdepdf2)
kdepdf3 = gkde3.evaluate(xs)
assert_almost_equal(kdepdf, kdepdf3)
assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring')
# Subclasses that should stay working (extracted from various sources).
# Unfortunately the earlier design of gaussian_kde made it necessary for users
# to create these kinds of subclasses, or call _compute_covariance() directly.
class _kde_subclass1(stats.gaussian_kde):
def __init__(self, dataset):
self.dataset = np.atleast_2d(dataset)
self.d, self.n = self.dataset.shape
self.covariance_factor = self.scotts_factor
self._compute_covariance()
class _kde_subclass2(stats.gaussian_kde):
def __init__(self, dataset):
self.covariance_factor = self.scotts_factor
super(_kde_subclass2, self).__init__(dataset)
class _kde_subclass3(stats.gaussian_kde):
def __init__(self, dataset, covariance):
self.covariance = covariance
stats.gaussian_kde.__init__(self, dataset)
def _compute_covariance(self):
self.inv_cov = np.linalg.inv(self.covariance)
self._norm_factor = np.sqrt(np.linalg.det(2*np.pi * self.covariance)) \
* self.n
class _kde_subclass4(stats.gaussian_kde):
def covariance_factor(self):
return 0.5 * self.silverman_factor()
def test_gaussian_kde_subclassing():
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=50)
# gaussian_kde itself
kde = stats.gaussian_kde(x1)
ys = kde(xs)
# subclass 1
kde1 = _kde_subclass1(x1)
y1 = kde1(xs)
assert_array_almost_equal_nulp(ys, y1, nulp=10)
# subclass 2
kde2 = _kde_subclass2(x1)
y2 = kde2(xs)
assert_array_almost_equal_nulp(ys, y2, nulp=10)
# subclass 3
kde3 = _kde_subclass3(x1, kde.covariance)
y3 = kde3(xs)
assert_array_almost_equal_nulp(ys, y3, nulp=10)
# subclass 4
kde4 = _kde_subclass4(x1)
y4 = kde4(x1)
y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017]
assert_array_almost_equal(y_expected, y4, decimal=6)
# Not a subclass, but check for use of _compute_covariance()
kde5 = kde
kde5.covariance_factor = lambda: kde.factor
kde5._compute_covariance()
y5 = kde5(xs)
assert_array_almost_equal_nulp(ys, y5, nulp=10)
def test_gaussian_kde_covariance_caching():
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=5)
# These expected values are from scipy 0.10, before some changes to
# gaussian_kde. They were not compared with any external reference.
y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754, 0.01664475]
# Set the bandwidth, then reset it to the default.
kde = stats.gaussian_kde(x1)
kde.set_bandwidth(bw_method=0.5)
kde.set_bandwidth(bw_method='scott')
y2 = kde(xs)
assert_array_almost_equal(y_expected, y2, decimal=7)
def test_gaussian_kde_monkeypatch():
"""Ugly, but people may rely on this. See scipy pull request 123,
specifically the linked ML thread "Width of the Gaussian in stats.kde".
If it is necessary to break this later on, that is to be discussed on ML.
"""
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=50)
# The old monkeypatched version to get at Silverman's Rule.
kde = stats.gaussian_kde(x1)
kde.covariance_factor = kde.silverman_factor
kde._compute_covariance()
y1 = kde(xs)
# The new saner version.
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
y2 = kde2(xs)
assert_array_almost_equal_nulp(y1, y2, nulp=10)
def test_kde_integer_input():
"""Regression test for #1181."""
x1 = np.arange(5)
kde = stats.gaussian_kde(x1)
y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721]
assert_array_almost_equal(kde(x1), y_expected, decimal=6)
def test_pdf_logpdf():
np.random.seed(1)
n_basesample = 50
xn = np.random.randn(n_basesample)
# Default
gkde = stats.gaussian_kde(xn)
xs = np.linspace(-15, 12, 25)
pdf = gkde.evaluate(xs)
pdf2 = gkde.pdf(xs)
assert_almost_equal(pdf, pdf2, decimal=12)
logpdf = np.log(pdf)
logpdf2 = gkde.logpdf(xs)
assert_almost_equal(logpdf, logpdf2, decimal=12)
# There are more points than data
gkde = stats.gaussian_kde(xs)
pdf = np.log(gkde.evaluate(xn))
pdf2 = gkde.logpdf(xn)
assert_almost_equal(pdf, pdf2, decimal=12)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Operators that integrates with Google Cloud Build service."""
import json
import re
import warnings
from copy import deepcopy
from typing import Any, Dict, Optional, Sequence, Tuple, Union
from urllib.parse import unquote, urlparse
import yaml
from google.api_core.retry import Retry
from google.cloud.devtools.cloudbuild_v1.types import Build, BuildTrigger, RepoSource
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.cloud_build import CloudBuildHook
REGEX_REPO_PATH = re.compile(r"^/(?P<project_id>[^/]+)/(?P<repo_name>[^/]+)[\+/]*(?P<branch_name>[^:]+)?")
class CloudBuildCancelBuildOperator(BaseOperator):
"""
Cancels a build in progress.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildCancelBuildOperator`
:param id_: The ID of the build.
:type id_: str
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: Optional[str]
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:type retry: Optional[Retry]
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Optional, additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: Optional[str]
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:rtype: dict
"""
template_fields = ("project_id", "id_", "gcp_conn_id")
def __init__(
self,
*,
id_: str,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.id_ = id_
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.cancel_build(
id_=self.id_,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return Build.to_dict(result)
class CloudBuildCreateBuildOperator(BaseOperator):
"""
Starts a build with the specified configuration.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildCreateBuildOperator`
:param build: Optional, the build resource to create. If a dict is provided, it must be of
the same form as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.Build`.
Only either build or body should be passed.
:type build: Optional[Union[dict, `google.cloud.devtools.cloudbuild_v1.types.Build`]]
:param body: (Deprecated) The build resource to create.
This parameter has been deprecated. You should pass the build parameter instead.
:type body: Optional[dict]
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: Optional[str]
:param wait: Optional, wait for operation to finish.
:type wait: Optional[bool]
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:type retry: Optional[Retry]
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Optional, additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: Optional[str]
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:rtype: dict
"""
template_fields = ("project_id", "build", "body", "gcp_conn_id", "impersonation_chain")
def __init__(
self,
*,
build: Optional[Union[Dict, Build, str]] = None,
body: Optional[Dict] = None,
project_id: Optional[str] = None,
wait: bool = True,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.build = build
# Not template fields to keep original value
self.build_raw = build
self.body = body
self.project_id = project_id
self.wait = wait
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
if self.body and self.build:
raise AirflowException("Either build or body should be passed.")
if self.body:
warnings.warn(
"The body parameter has been deprecated. You should pass body using the build parameter.",
DeprecationWarning,
stacklevel=4,
)
self.build = self.build_raw = self.body
def prepare_template(self) -> None:
# if no file is specified, skip
if not isinstance(self.build_raw, str):
return
with open(self.build_raw) as file:
if any(self.build_raw.endswith(ext) for ext in ['.yaml', '.yml']):
self.build = yaml.load(file.read(), Loader=yaml.FullLoader)
if self.build_raw.endswith('.json'):
self.build = json.loads(file.read())
def execute(self, context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
build = BuildProcessor(build=self.build).process_body()
result = hook.create_build(
build=build,
project_id=self.project_id,
wait=self.wait,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return Build.to_dict(result)
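# Illustrative DAG usage of the operator above (not part of this module; the
# project, bucket and image names are placeholders):
#
#   create_build = CloudBuildCreateBuildOperator(
#       task_id="create_build",
#       project_id="example-project",
#       build={
#           "source": {"storage_source": {"bucket": "example-bucket",
#                                         "object_": "source.tar.gz"}},
#           "steps": [{"name": "gcr.io/cloud-builders/docker",
#                      "args": ["build", "-t", "gcr.io/example-project/app", "."]}],
#           "images": ["gcr.io/example-project/app"],
#       },
#   )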
class CloudBuildCreateBuildTriggerOperator(BaseOperator):
"""
Creates a new BuildTrigger.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildCreateBuildTriggerOperator`
:param trigger: The BuildTrigger to create. If a dict is provided, it must be of the same form
as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`
:type trigger: Union[dict, `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`]
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: Optional[str]
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:type retry: Optional[Retry]
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Optional, additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: Optional[str]
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:rtype: dict
"""
template_fields = ("project_id", "trigger", "gcp_conn_id")
def __init__(
self,
*,
trigger: Union[dict, BuildTrigger],
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger = trigger
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.create_build_trigger(
trigger=self.trigger,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return BuildTrigger.to_dict(result)
class CloudBuildDeleteBuildTriggerOperator(BaseOperator):
"""
Deletes a BuildTrigger by its project ID and trigger ID.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildDeleteBuildTriggerOperator`
:param trigger_id: The ID of the BuildTrigger to delete.
:type trigger_id: str
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: Optional[str]
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:type retry: Optional[Retry]
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Optional, additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: Optional[str]
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = ("project_id", "trigger_id", "gcp_conn_id")
def __init__(
self,
*,
trigger_id: str,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger_id = trigger_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
hook.delete_build_trigger(
trigger_id=self.trigger_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudBuildGetBuildOperator(BaseOperator):
"""
Returns information about a previously requested build.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildGetBuildOperator`
:param id_: The ID of the build.
:type id_: str
    :param project_id: Optional, Google Cloud Project project_id where the build belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: Optional[str]
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:type retry: Optional[Retry]
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Optional, additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: Optional[str]
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:rtype: dict
"""
template_fields = ("project_id", "id_", "gcp_conn_id")
def __init__(
self,
*,
id_: str,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.id_ = id_
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.get_build(
id_=self.id_,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return Build.to_dict(result)
class CloudBuildGetBuildTriggerOperator(BaseOperator):
"""
Returns information about a BuildTrigger.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildGetBuildTriggerOperator`
:param trigger_id: The ID of the BuildTrigger to get.
:type trigger_id: str
    :param project_id: Optional, Google Cloud Project project_id where the trigger belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: Optional[str]
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:type retry: Optional[Retry]
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Optional, additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: Optional[str]
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:rtype: dict
"""
template_fields = ("project_id", "trigger_id", "gcp_conn_id")
def __init__(
self,
*,
trigger_id: str,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger_id = trigger_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.get_build_trigger(
trigger_id=self.trigger_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return BuildTrigger.to_dict(result)
class CloudBuildListBuildTriggersOperator(BaseOperator):
"""
Lists existing BuildTriggers.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildListBuildTriggersOperator`
:param location: The location of the project.
    :type location: str
    :param project_id: Optional, Google Cloud Project project_id where the triggers belong.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: Optional[str]
:param page_size: Optional, number of results to return in the list.
:type page_size: Optional[int]
:param page_token: Optional, token to provide to skip to a particular spot in the list.
:type page_token: Optional[str]
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:type retry: Optional[Retry]
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Optional, additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: Optional[str]
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:rtype: List[dict]
"""
template_fields = ("location", "project_id", "gcp_conn_id")
def __init__(
self,
*,
location: str,
project_id: Optional[str] = None,
page_size: Optional[int] = None,
page_token: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.page_size = page_size
self.page_token = page_token
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
results = hook.list_build_triggers(
project_id=self.project_id,
location=self.location,
page_size=self.page_size,
page_token=self.page_token,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return [BuildTrigger.to_dict(result) for result in results]
class CloudBuildListBuildsOperator(BaseOperator):
"""
Lists previously requested builds.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildListBuildsOperator`
:param location: The location of the project.
    :type location: str
    :param project_id: Optional, Google Cloud Project project_id where the builds belong.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param page_size: Optional, number of results to return in the list.
:type page_size: Optional[int]
:param filter_: Optional, the raw filter text to constrain the results.
:type filter_: Optional[str]
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:type retry: Optional[Retry]
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Optional, additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: Optional[str]
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:rtype: List[dict]
"""
template_fields = ("location", "project_id", "gcp_conn_id")
def __init__(
self,
*,
location: str,
project_id: Optional[str] = None,
page_size: Optional[int] = None,
filter_: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.page_size = page_size
self.filter_ = filter_
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
results = hook.list_builds(
project_id=self.project_id,
location=self.location,
page_size=self.page_size,
filter_=self.filter_,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return [Build.to_dict(result) for result in results]
class CloudBuildRetryBuildOperator(BaseOperator):
"""
Creates a new build based on the specified build. This method creates a new build
using the original build request, which may or may not result in an identical build.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildRetryBuildOperator`
:param id_: Build ID of the original build.
:type id_: str
    :param project_id: Optional, Google Cloud Project project_id where the build belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param wait: Optional, wait for operation to finish.
:type wait: Optional[bool]
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:type retry: Optional[Retry]
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Optional, additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: Optional[str]
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:rtype: dict
"""
template_fields = ("project_id", "id_", "gcp_conn_id")
def __init__(
self,
*,
id_: str,
project_id: Optional[str] = None,
wait: bool = True,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.id_ = id_
self.project_id = project_id
self.wait = wait
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.retry_build(
id_=self.id_,
project_id=self.project_id,
wait=self.wait,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return Build.to_dict(result)
class CloudBuildRunBuildTriggerOperator(BaseOperator):
"""
Runs a BuildTrigger at a particular source revision.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildRunBuildTriggerOperator`
:param trigger_id: The ID of the trigger.
:type trigger_id: str
:param source: Source to build against this trigger. If a dict is provided, it must be of the same form
as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.RepoSource`
:type source: Union[dict, `google.cloud.devtools.cloudbuild_v1.types.RepoSource`]
    :param project_id: Optional, Google Cloud Project project_id where the trigger belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param wait: Optional, wait for operation to finish.
:type wait: Optional[bool]
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:type retry: Optional[Retry]
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Optional, additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: Optional[str]
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:rtype: dict
"""
template_fields = ("project_id", "trigger_id", "source", "gcp_conn_id")
def __init__(
self,
*,
trigger_id: str,
source: Union[dict, RepoSource],
project_id: Optional[str] = None,
wait: bool = True,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger_id = trigger_id
self.source = source
self.project_id = project_id
self.wait = wait
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.run_build_trigger(
trigger_id=self.trigger_id,
source=self.source,
project_id=self.project_id,
wait=self.wait,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return Build.to_dict(result)
class CloudBuildUpdateBuildTriggerOperator(BaseOperator):
"""
Updates a BuildTrigger by its project ID and trigger ID.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildUpdateBuildTriggerOperator`
:param trigger_id: The ID of the trigger.
:type trigger_id: str
:param trigger: The BuildTrigger to create. If a dict is provided, it must be of the same form
as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`
:type trigger: Union[dict, `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`]
    :param project_id: Optional, Google Cloud Project project_id where the trigger belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: Optional[str]
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:type retry: Optional[Retry]
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Optional, additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: Optional[str]
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:rtype: dict
"""
template_fields = ("project_id", "trigger_id", "trigger", "gcp_conn_id")
def __init__(
self,
*,
trigger_id: str,
trigger: Union[dict, BuildTrigger],
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger_id = trigger_id
self.trigger = trigger
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.update_build_trigger(
trigger_id=self.trigger_id,
trigger=self.trigger,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return BuildTrigger.to_dict(result)
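# Illustrative wiring of the trigger operators inside a DAG (a rough sketch, not
# part of this module; ``dag`` and the pulled trigger id are assumptions, and the
# XCom value mirrors what CloudBuildCreateBuildTriggerOperator returns via
# BuildTrigger.to_dict):
#
#     run_trigger = CloudBuildRunBuildTriggerOperator(
#         task_id="run_build_trigger",
#         trigger_id="{{ task_instance.xcom_pull('create_build_trigger')['id'] }}",
#         source={"project_id": "my-project", "repo_name": "my-repo", "branch_name": "master"},
#         dag=dag,
#     )
#     delete_trigger = CloudBuildDeleteBuildTriggerOperator(
#         task_id="delete_build_trigger",
#         trigger_id="{{ task_instance.xcom_pull('create_build_trigger')['id'] }}",
#         dag=dag,
#     )
#     run_trigger >> delete_trigger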
class BuildProcessor:
"""
Processes build configurations to add additional functionality to support the use of operators.
The following improvements are made:
    * The source must be provided, and exactly one source type (storage_source or repo_source) may be given,
    * The source may be provided as a URL string instead of a dict.
:param build: The request body of the build.
See: https://cloud.google.com/cloud-build/docs/api/reference/rest/Shared.Types/Build
:type build: Union[Dict, Build]
"""
def __init__(self, build: Union[Dict, Build]) -> None:
        if isinstance(build, Build):
            self.build = Build(build)
        else:
            self.build = deepcopy(build)
def _verify_source(self) -> None:
if not (("storage_source" in self.build["source"]) ^ ("repo_source" in self.build["source"])):
raise AirflowException(
"The source could not be determined. Please choose one data source from: "
"storage_source and repo_source."
)
def _reformat_source(self) -> None:
self._reformat_repo_source()
self._reformat_storage_source()
def _reformat_repo_source(self) -> None:
if "repo_source" not in self.build["source"]:
return
repo_source = self.build["source"]["repo_source"]
if not isinstance(repo_source, str):
return
self.build["source"]["repo_source"] = self._convert_repo_url_to_dict(repo_source)
def _reformat_storage_source(self) -> None:
if "storage_source" not in self.build["source"]:
return
storage_source = self.build["source"]["storage_source"]
if not isinstance(storage_source, str):
return
self.build["source"]["storage_source"] = self._convert_storage_url_to_dict(storage_source)
def process_body(self) -> Build:
"""
Processes the body passed in the constructor
:return: the body.
:rtype: `google.cloud.devtools.cloudbuild_v1.types.Build`
"""
if 'source' in self.build:
self._verify_source()
self._reformat_source()
return Build(self.build)
@staticmethod
def _convert_repo_url_to_dict(source: str) -> Dict[str, Any]:
"""
        Convert the URL of a repository in Google Cloud Source Repositories into the dict format supported by the API
Example valid input:
.. code-block:: none
https://source.cloud.google.com/airflow-project/airflow-repo/+/branch-name:
"""
url_parts = urlparse(source)
match = REGEX_REPO_PATH.search(url_parts.path)
if url_parts.scheme != "https" or url_parts.hostname != "source.cloud.google.com" or not match:
raise AirflowException(
"Invalid URL. You must pass the URL in the format: "
"https://source.cloud.google.com/airflow-project/airflow-repo/+/branch-name:"
)
project_id = unquote(match.group("project_id"))
repo_name = unquote(match.group("repo_name"))
branch_name = unquote(match.group("branch_name")) if match.group("branch_name") else "master"
source_dict = {
"project_id": project_id,
"repo_name": repo_name,
"branch_name": branch_name,
}
return source_dict
@staticmethod
def _convert_storage_url_to_dict(storage_url: str) -> Dict[str, Any]:
"""
        Convert the URL of an object in Google Cloud Storage into the dict format supported by the API
Example valid input:
.. code-block:: none
gs://bucket-name/object-name.tar.gz
"""
url_parts = urlparse(storage_url)
if url_parts.scheme != "gs" or not url_parts.hostname or not url_parts.path or url_parts.path == "/":
raise AirflowException(
"Invalid URL. You must pass the URL in the format: "
"gs://bucket-name/object-name.tar.gz#24565443"
)
source_dict = {
"bucket": url_parts.hostname,
"object_": url_parts.path[1:],
}
if url_parts.fragment:
source_dict["generation"] = int(url_parts.fragment)
return source_dict
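# A quick sketch of the conversions BuildProcessor performs (bucket, object and
# repository names below are made up; the repo conversion also relies on the
# module-level REGEX_REPO_PATH used above):
#
#     BuildProcessor._convert_storage_url_to_dict("gs://my-bucket/src.tar.gz#1600000000")
#     # -> {"bucket": "my-bucket", "object_": "src.tar.gz", "generation": 1600000000}
#
#     BuildProcessor._convert_repo_url_to_dict(
#         "https://source.cloud.google.com/my-project/my-repo/+/dev:")
#     # -> {"project_id": "my-project", "repo_name": "my-repo", "branch_name": "dev"}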
|
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testable usage examples for Google Cloud Pubsub API wrapper
Each example function takes a ``client`` argument (which must be an instance
of :class:`google.cloud.pubsub.client.Client`) and uses it to perform a task
with the API.
To facilitate running the examples as system tests, each example is also passed
a ``to_delete`` list; the function adds to the list any objects created which
need to be deleted during teardown.
"""
import time
from google.cloud.pubsub.client import Client
def snippet(func):
"""Mark ``func`` as a snippet example function."""
func._snippet = True
return func
def _millis():
return time.time() * 1000
@snippet
def client_list_topics(client, to_delete): # pylint: disable=unused-argument
"""List topics for a project."""
def do_something_with(sub): # pylint: disable=unused-argument
pass
# [START client_list_topics]
for topic in client.list_topics(): # API request(s)
do_something_with(topic)
# [END client_list_topics]
@snippet
def client_list_subscriptions(client,
to_delete): # pylint: disable=unused-argument
"""List all subscriptions for a project."""
def do_something_with(sub): # pylint: disable=unused-argument
pass
# [START client_list_subscriptions]
for subscription in client.list_subscriptions(): # API request(s)
do_something_with(subscription)
# [END client_list_subscriptions]
@snippet
def client_topic(client, to_delete): # pylint: disable=unused-argument
"""Topic factory."""
TOPIC_NAME = 'topic_factory-%d' % (_millis(),)
# [START client_topic]
topic = client.topic(TOPIC_NAME)
# [END client_topic]
@snippet
def client_subscription(client, to_delete): # pylint: disable=unused-argument
"""Subscription factory."""
SUBSCRIPTION_NAME = 'subscription_factory-%d' % (_millis(),)
# [START client_subscription]
subscription = client.subscription(
SUBSCRIPTION_NAME, ack_deadline=60,
retain_acked_messages=True)
# [END client_subscription]
@snippet
def topic_create(client, to_delete):
"""Create a topic."""
TOPIC_NAME = 'topic_create-%d' % (_millis(),)
# [START topic_create]
topic = client.topic(TOPIC_NAME)
topic.create() # API request
# [END topic_create]
to_delete.append(topic)
@snippet
def topic_exists(client, to_delete):
"""Test existence of a topic."""
TOPIC_NAME = 'topic_exists-%d' % (_millis(),)
topic = client.topic(TOPIC_NAME)
to_delete.append(topic)
# [START topic_exists]
assert not topic.exists() # API request
topic.create() # API request
assert topic.exists() # API request
# [END topic_exists]
@snippet
def topic_delete(client, to_delete): # pylint: disable=unused-argument
"""Delete a topic."""
TOPIC_NAME = 'topic_delete-%d' % (_millis(),)
topic = client.topic(TOPIC_NAME)
topic.create() # API request
# [START topic_delete]
assert topic.exists() # API request
topic.delete()
assert not topic.exists() # API request
# [END topic_delete]
@snippet
def topic_iam_policy(client, to_delete):
"""Fetch / set a topic's IAM policy."""
TOPIC_NAME = 'topic_iam_policy-%d' % (_millis(),)
topic = client.topic(TOPIC_NAME)
topic.create()
to_delete.append(topic)
# [START topic_get_iam_policy]
policy = topic.get_iam_policy() # API request
# [END topic_get_iam_policy]
assert len(policy.viewers) == 0
assert len(policy.editors) == 0
assert len(policy.owners) == 0
# [START topic_set_iam_policy]
ALL_USERS = policy.all_users()
policy.viewers = [ALL_USERS]
LOGS_GROUP = policy.group('[email protected]')
policy.editors = [LOGS_GROUP]
new_policy = topic.set_iam_policy(policy) # API request
# [END topic_set_iam_policy]
assert ALL_USERS in new_policy.viewers
assert LOGS_GROUP in new_policy.editors
# @snippet # Disabled due to #1687
def topic_check_iam_permissions(client, to_delete):
"""Check topic IAM permissions."""
TOPIC_NAME = 'topic_check_iam_permissions-%d' % (_millis(),)
topic = client.topic(TOPIC_NAME)
topic.create()
to_delete.append(topic)
# [START topic_check_iam_permissions]
from google.cloud.pubsub.iam import OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE
TO_CHECK = [OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE]
ALLOWED = topic.check_iam_permissions(TO_CHECK)
assert set(ALLOWED) == set(TO_CHECK)
# [END topic_check_iam_permissions]
@snippet
def topic_publish_messages(client, to_delete):
"""Publish messages to a topic."""
TOPIC_NAME = 'topic_publish_messages-%d' % (_millis(),)
topic = client.topic(TOPIC_NAME)
topic.create()
to_delete.append(topic)
# [START topic_publish_simple_message]
topic.publish(b'This is the message payload') # API request
# [END topic_publish_simple_message]
# [START topic_publish_message_with_attrs]
topic.publish(b'Another message payload', extra='EXTRA') # API request
# [END topic_publish_message_with_attrs]
@snippet
def topic_subscription(client, to_delete):
"""Create subscriptions to a topic."""
TOPIC_NAME = 'topic_subscription-%d' % (_millis(),)
SUB_DEFAULTS = 'topic_subscription-defaults-%d' % (_millis(),)
SUB_ACK90 = 'topic_subscription-ack90-%d' % (_millis(),)
topic = client.topic(TOPIC_NAME)
topic.create()
to_delete.append(topic)
# [START topic_subscription_defaults]
sub_defaults = topic.subscription(SUB_DEFAULTS)
# [END topic_subscription_defaults]
sub_defaults.create() # API request
to_delete.append(sub_defaults)
expected_names = set()
expected_names.add(sub_defaults.full_name)
# [START topic_subscription_ack90]
sub_ack90 = topic.subscription(SUB_ACK90, ack_deadline=90)
# [END topic_subscription_ack90]
sub_ack90.create() # API request
to_delete.append(sub_ack90)
expected_names.add(sub_ack90.full_name)
sub_names = set()
def do_something_with(sub):
sub_names.add(sub.full_name)
# [START topic_list_subscriptions]
for subscription in topic.list_subscriptions(): # API request(s)
do_something_with(subscription)
# [END topic_list_subscriptions]
assert sub_names.issuperset(expected_names)
# @snippet: disabled, because push-mode requires a validated endpoint URL
def topic_subscription_push(client, to_delete):
"""Create subscriptions to a topic."""
TOPIC_NAME = 'topic_subscription_push-%d' % (_millis(),)
SUB_PUSH = 'topic_subscription_push-sub-%d' % (_millis(),)
PUSH_URL = 'https://api.example.com/push-endpoint'
topic = client.topic(TOPIC_NAME)
topic.create()
to_delete.append(topic)
# [START topic_subscription_push]
subscription = topic.subscription(SUB_PUSH, push_endpoint=PUSH_URL)
subscription.create() # API request
# [END topic_subscription_push]
# [START subscription_push_pull]
subscription.modify_push_configuration(push_endpoint=None) # API request
# [END subscription_push_pull]
# [START subscription_pull_push]
subscription.modify_push_configuration(
push_endpoint=PUSH_URL) # API request
# [END subscription_pull_push]
@snippet
def subscription_lifecycle(client, to_delete):
"""Test lifecycle of a subscription."""
TOPIC_NAME = 'subscription_lifecycle-%d' % (_millis(),)
SUB_NAME = 'subscription_lifecycle-defaults-%d' % (_millis(),)
topic = client.topic(TOPIC_NAME)
topic.create()
to_delete.append(topic)
# [START subscription_create]
subscription = topic.subscription(SUB_NAME)
subscription.create() # API request
# [END subscription_create]
# [START subscription_exists]
assert subscription.exists() # API request
# [END subscription_exists]
# [START subscription_reload]
subscription.reload() # API request
# [END subscription_reload]
# [START subscription_delete]
subscription.delete() # API request
# [END subscription_delete]
@snippet
def subscription_pull(client, to_delete):
"""Pull messges from a subscribed topic."""
TOPIC_NAME = 'subscription_pull-%d' % (_millis(),)
SUB_NAME = 'subscription_pull-defaults-%d' % (_millis(),)
PAYLOAD1 = b'PAYLOAD1'
PAYLOAD2 = b'PAYLOAD2'
EXTRA = 'EXTRA'
topic = client.topic(TOPIC_NAME)
topic.create()
to_delete.append(topic)
subscription = topic.subscription(SUB_NAME)
subscription.create()
to_delete.append(subscription)
# [START subscription_pull_return_immediately]
pulled = subscription.pull(return_immediately=True)
# [END subscription_pull_return_immediately]
assert len(pulled) == 0, "unexpected message"
topic.publish(PAYLOAD1)
topic.publish(PAYLOAD2, extra=EXTRA)
time.sleep(1) # eventually-consistent
# [START subscription_pull]
pulled = subscription.pull(max_messages=2)
# [END subscription_pull]
assert len(pulled) == 2, "eventual consistency"
# [START subscription_modify_ack_deadline]
for ack_id, _ in pulled:
subscription.modify_ack_deadline(ack_id, 90) # API request
# [END subscription_modify_ack_deadline]
payloads = []
extras = []
def do_something_with(message): # pylint: disable=unused-argument
payloads.append(message.data)
if message.attributes:
extras.append(message.attributes)
class ApplicationException(Exception):
pass
def log_exception(_):
pass
# [START subscription_acknowledge]
for ack_id, message in pulled:
try:
do_something_with(message)
except ApplicationException as e:
log_exception(e)
else:
subscription.acknowledge([ack_id])
# [END subscription_acknowledge]
assert set(payloads) == set([PAYLOAD1, PAYLOAD2]), 'payloads: %s' % (
(payloads,))
assert extras == [{'extra': EXTRA}], 'extras: %s' % (
(extras,))
@snippet
def subscription_pull_w_autoack(client, to_delete):
"""Pull messges from a topic, auto-acknowldging them"""
TOPIC_NAME = 'subscription_pull_autoack-%d' % (_millis(),)
SUB_NAME = 'subscription_pull_autoack-defaults-%d' % (_millis(),)
PAYLOAD1 = b'PAYLOAD1'
PAYLOAD2 = b'PAYLOAD2'
EXTRA = 'EXTRA'
topic = client.topic(TOPIC_NAME)
topic.create()
to_delete.append(topic)
subscription = topic.subscription(SUB_NAME)
subscription.create()
to_delete.append(subscription)
# [START topic_batch]
with topic.batch() as batch:
batch.publish(PAYLOAD1)
batch.publish(PAYLOAD2, extra=EXTRA)
# [END topic_batch]
time.sleep(1) # eventually-consistent
payloads = []
extras = []
def do_something_with(message): # pylint: disable=unused-argument
payloads.append(message.data)
if message.attributes:
extras.append(message.attributes)
# [START subscription_pull_autoack]
from google.cloud.pubsub.subscription import AutoAck
with AutoAck(subscription, max_messages=10) as ack:
for ack_id, message in list(ack.items()):
try:
do_something_with(message)
except Exception: # pylint: disable=broad-except
del ack[ack_id]
# [END subscription_pull_autoack]
    assert set(payloads) == set([PAYLOAD1, PAYLOAD2]), "eventual consistency"
assert extras == [{'extra': EXTRA}], "eventual consistency"
@snippet
def subscription_iam_policy(client, to_delete):
"""Fetch / set a subscription's IAM policy."""
TOPIC_NAME = 'subscription_iam_policy-%d' % (_millis(),)
SUB_NAME = 'subscription_iam_policy-defaults-%d' % (_millis(),)
topic = client.topic(TOPIC_NAME)
topic.create()
to_delete.append(topic)
subscription = topic.subscription(SUB_NAME)
subscription.create()
to_delete.append(subscription)
# [START subscription_get_iam_policy]
policy = subscription.get_iam_policy() # API request
# [END subscription_get_iam_policy]
assert len(policy.viewers) == 0
assert len(policy.editors) == 0
assert len(policy.owners) == 0
# [START subscription_set_iam_policy]
ALL_USERS = policy.all_users()
policy.viewers = [ALL_USERS]
LOGS_GROUP = policy.group('[email protected]')
policy.editors = [LOGS_GROUP]
new_policy = subscription.set_iam_policy(policy) # API request
# [END subscription_set_iam_policy]
assert ALL_USERS in new_policy.viewers
assert LOGS_GROUP in new_policy.editors
# @snippet # Disabled due to #1687
def subscription_check_iam_permissions(client, to_delete):
"""Check subscription IAM permissions."""
TOPIC_NAME = 'subscription_check_iam_permissions-%d' % (_millis(),)
SUB_NAME = 'subscription_check_iam_permissions-defaults-%d' % (_millis(),)
topic = client.topic(TOPIC_NAME)
topic.create()
to_delete.append(topic)
subscription = topic.subscription(SUB_NAME)
subscription.create()
to_delete.append(subscription)
# [START subscription_check_iam_permissions]
from google.cloud.pubsub.iam import OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE
TO_CHECK = [OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE]
ALLOWED = subscription.check_iam_permissions(TO_CHECK)
assert set(ALLOWED) == set(TO_CHECK)
# [END subscription_check_iam_permissions]
def _line_no(func):
code = getattr(func, '__code__', None) or getattr(func, 'func_code')
return code.co_firstlineno
def _find_examples():
funcs = [obj for obj in globals().values()
if getattr(obj, '_snippet', False)]
for func in sorted(funcs, key=_line_no):
yield func
def _name_and_doc(func):
return func.__name__, func.__doc__
def main():
client = Client()
for example in _find_examples():
to_delete = []
print('%-25s: %s' % _name_and_doc(example))
try:
example(client, to_delete)
except AssertionError as e:
print(' FAIL: %s' % (e,))
except Exception as e: # pylint: disable=broad-except
print(' ERROR: %r' % (e,))
for item in to_delete:
item.delete()
if __name__ == '__main__':
main()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# IMPORTS ----------------------------------------------------------------------
import os
import syslog
# ------------------------------------------------------------------------------
# MODULE INFORMATIONS ----------------------------------------------------------
DOCUMENTATION = '''
---
module: kernel_config
short_description: Configure the Linux kernel with provided options
author:
- "Alessandro Molari"
'''
EXAMPLES = '''
TODO
'''
# ------------------------------------------------------------------------------
# COMMONS (copy&paste) ---------------------------------------------------------
class BaseObject(object):
    '''Base class for all classes that use AnsibleModule.
    Dependencies:
    - `chrooted` function.
    '''
    import syslog, os
def __init__(self, module, params=None):
syslog.openlog('ansible-{module}-{name}'.format(
module=os.path.basename(__file__), name=self.__class__.__name__))
self.work_dir = None
self.chroot = None
self._module = module
self._command_prefix = None
if params:
self._parse_params(params)
@property
def command_prefix(self):
return self._command_prefix
@command_prefix.setter
def command_prefix(self, value):
self._command_prefix = value
def run_command(self, command=None, **kwargs):
        if 'check_rc' not in kwargs:
kwargs['check_rc'] = True
if command is None and self.command_prefix is None:
self.fail('Invalid command')
if self.command_prefix:
command = '{prefix} {command}'.format(
prefix=self.command_prefix, command=command or '')
if self.work_dir and not self.chroot:
command = 'cd {work_dir}; {command}'.format(
work_dir=self.work_dir, command=command)
if self.chroot:
command = chrooted(command, self.chroot, work_dir=self.work_dir)
self.log('Performing command `{}`'.format(command))
rc, out, err = self._module.run_command(command, **kwargs)
if rc != 0:
self.log('Command `{}` returned invalid status code: `{}`'.format(
command, rc), level=syslog.LOG_WARNING)
return {'rc': rc,
'out': out,
'out_lines': [line for line in out.split('\n') if line],
'err': err,
                'err_lines': [line for line in err.split('\n') if line]}
def log(self, msg, level=syslog.LOG_DEBUG):
'''Log to the system logging facility of the target system.'''
if os.name == 'posix': # syslog is unsupported on Windows.
syslog.syslog(level, str(msg))
def fail(self, msg):
self._module.fail_json(msg=msg)
def exit(self, changed=True, msg='', result=None):
self._module.exit_json(changed=changed, msg=msg, result=result)
def _parse_params(self, params):
for param in params:
if param in self._module.params:
value = self._module.params[param]
if value in ['None', 'none']:
value = None
if value in ['True', 'true']:
value = True
if value in ['False', 'false']:
value = False
setattr(self, param, value)
else:
setattr(self, param, None)
def chrooted(command, path, profile='/etc/profile', work_dir=None):
prefix = "chroot {path} bash -c 'source {profile}; ".format(
path=path, profile=profile)
if work_dir:
prefix += 'cd {work_dir}; '.format(work_dir=work_dir)
prefix += command
prefix += "'"
return prefix
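# For illustration (paths are hypothetical), chrooted('make olddefconfig',
# '/mnt/gentoo', work_dir='/usr/src/linux') returns the single command string:
#   chroot /mnt/gentoo bash -c 'source /etc/profile; cd /usr/src/linux; make olddefconfig'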
# ------------------------------------------------------------------------------
# CONFIGURATOR -----------------------------------------------------------------
class KernelOptionConfigurator(BaseObject):
'''Manipulate options in a kernel config file.
'''
def __init__(self, module):
super(KernelOptionConfigurator, self).__init__(module,
params=['as_module', 'value', 'kind', 'after'])
self.kernel_option = KernelOption(module)
def run(self):
if self.value == True:
self.kernel_option.enable()
elif self.value == False:
self.kernel_option.disable()
elif self.value in ['undef', 'undefined']:
self.kernel_option.undefine()
else:
self.kernel_option.set_value()
if self.as_module:
self.kernel_option.as_module()
class KernelOption(BaseObject):
'''Represent a kernel option and the related operations.
'''
def __init__(self, module):
super(KernelOption, self).__init__(module,
params=['kernel_dir', 'option', 'value', 'kind', 'after'])
        self.command_prefix = '{cmd} --file {config_file}'.format(
            cmd=os.path.join(self.kernel_dir, 'scripts', 'config'),
            config_file=os.path.join(self.kernel_dir, '.config'))
def enable(self):
'''Enable a kernel option.
'''
self.run_command('--enable {option}'.format(option=self.option))
if self.after:
self.run_command('--enable-after {after} {option}'.format(
after=self.after, option=self.option))
def disable(self):
'''Disable a kernel option.
'''
self.run_command('--disable {option}'.format(option=self.option))
if self.after:
self.run_command('--disable-after {after} {option}'.format(
after=self.after, option=self.option))
def as_module(self):
        '''Turn an option into a module.
'''
self.run_command('--module {option}'.format(option=self.option))
if self.after:
self.run_command('--module-after {after} {option}'.format(
after=self.after, option=self.option))
def undefine(self):
self.run_command('--undefine {option}'.format(option=self.option))
def set_value(self):
        '''Set the option to the provided value.
        `kind` selects how the value is written:
        - `value`: `value` is written as a plain value (`--set-val`).
        - `str` / `string`: `value` is written as a string (`--set-str`).
        Any other `kind` is rejected.
'''
if self.value is None:
self.fail('Invalid `value`: it cannot be `None`')
if self.kind in ['str', 'string']:
self.run_command('--set-str {option} {value}'.format(
option=self.option, value=self.value))
elif self.kind == 'value':
self.run_command('--set-val {option} {value}'.format(
option=self.option, value=self.value))
else:
            self.fail('Invalid `kind`: it must be `value`, `str` or `string`')
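# Example of the command this class ends up running (paths and option name are
# illustrative): with kernel_dir='/usr/src/linux' and option='SQUASHFS',
# enable() invokes roughly:
#   /usr/src/linux/scripts/config --file /usr/src/linux/.config --enable SQUASHFS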
# ------------------------------------------------------------------------------
# MAIN FUNCTION ----------------------------------------------------------------
def main():
module = AnsibleModule(argument_spec=dict(
kernel_dir=dict(type='str', default='/usr/src/linux'),
option=dict(type='str', required=True),
value=dict(default=True),
as_module=dict(type='bool', default=False),
kind=dict(type='str', default=None),
after=dict(type='str', default=None)))
configurator = KernelOptionConfigurator(module)
configurator.run()
module.exit_json(changed=True,
msg='Kernel option {name} successfully configured'.format(
name=module.params['option']))
# ------------------------------------------------------------------------------
# ENTRY POINT ------------------------------------------------------------------
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
# ------------------------------------------------------------------------------
# vim: set filetype=python :
|
|
""" Test functions for fftpack.basic module
"""
import sys
from numpy.testing import *
from scipy.fftpack import ifft, fft, fftn, irfft, rfft
from numpy import arange, asarray, zeros, dot, exp, pi, double, cdouble
import numpy.fft
from numpy.random import rand
def random(size):
return rand(*size)
def direct_dft(x):
x = asarray(x)
n = len(x)
y = zeros(n,dtype=cdouble)
w = -arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w),x)
return y
def direct_idft(x):
x = asarray(x)
n = len(x)
y = zeros(n,dtype=cdouble)
w = arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w),x)/n
return y
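# Small sanity example of the direct transforms used below for small sizes:
# direct_dft([1.0, 1.0]) gives [2.+0.j, 0.+0.j] because y[k] = sum_n x[n]*exp(-2j*pi*k*n/N),
# and direct_idft recovers [1.+0.j, 1.+0.j] from that result (up to rounding).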
class TestFft(TestCase):
def bench_random(self):
from numpy.fft import fft as numpy_fft
print
print ' Fast Fourier Transform'
print '================================================='
print ' | real input | complex input '
print '-------------------------------------------------'
print ' size | scipy | numpy | scipy | numpy '
print '-------------------------------------------------'
for size,repeat in [(100,7000),(1000,2000),
(256,10000),
(512,10000),
(1024,1000),
(2048,1000),
(2048*2,500),
(2048*4,500),
]:
print '%5s' % size,
sys.stdout.flush()
for x in [random([size]).astype(double),
random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j
]:
if size > 500: y = fft(x)
else: y = direct_dft(x)
assert_array_almost_equal(fft(x),y)
print '|%8.2f' % measure('fft(x)',repeat),
sys.stdout.flush()
assert_array_almost_equal(numpy_fft(x),y)
print '|%8.2f' % measure('numpy_fft(x)',repeat),
sys.stdout.flush()
print ' (secs for %s calls)' % (repeat)
sys.stdout.flush()
class TestIfft(TestCase):
def bench_random(self):
from numpy.fft import ifft as numpy_ifft
print
print ' Inverse Fast Fourier Transform'
print '==============================================='
print ' | real input | complex input '
print '-----------------------------------------------'
print ' size | scipy | numpy | scipy | numpy '
print '-----------------------------------------------'
for size,repeat in [(100,7000),(1000,2000),
(256,10000),
(512,10000),
(1024,1000),
(2048,1000),
(2048*2,500),
(2048*4,500),
]:
print '%5s' % size,
sys.stdout.flush()
for x in [random([size]).astype(double),
random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j
]:
if size > 500: y = ifft(x)
else: y = direct_idft(x)
assert_array_almost_equal(ifft(x),y)
print '|%8.2f' % measure('ifft(x)',repeat),
sys.stdout.flush()
assert_array_almost_equal(numpy_ifft(x),y)
print '|%8.2f' % measure('numpy_ifft(x)',repeat),
sys.stdout.flush()
print ' (secs for %s calls)' % (repeat)
sys.stdout.flush()
class TestRfft(TestCase):
def bench_random(self):
from numpy.fft import rfft as numpy_rfft
print
print 'Fast Fourier Transform (real data)'
print '=================================='
print ' size | scipy | numpy '
print '----------------------------------'
for size,repeat in [(100,7000),(1000,2000),
(256,10000),
(512,10000),
(1024,1000),
(2048,1000),
(2048*2,500),
(2048*4,500),
]:
print '%5s' % size,
sys.stdout.flush()
x = random([size]).astype(double)
print '|%8.2f' % measure('rfft(x)',repeat),
sys.stdout.flush()
print '|%8.2f' % measure('numpy_rfft(x)',repeat),
sys.stdout.flush()
print ' (secs for %s calls)' % (repeat)
sys.stdout.flush()
class TestIrfft(TestCase):
def bench_random(self):
from numpy.fft import irfft as numpy_irfft
print
print 'Inverse Fast Fourier Transform (real data)'
        print '=========================================='
print ' size | scipy | numpy '
print '----------------------------------'
for size,repeat in [(100,7000),(1000,2000),
(256,10000),
(512,10000),
(1024,1000),
(2048,1000),
(2048*2,500),
(2048*4,500),
]:
print '%5s' % size,
sys.stdout.flush()
x = random([size]).astype(double)
x1 = zeros(size/2+1,dtype=cdouble)
x1[0] = x[0]
for i in range(1,size/2):
x1[i] = x[2*i-1] + 1j * x[2*i]
if not size%2:
x1[-1] = x[-1]
y = irfft(x)
print '|%8.2f' % measure('irfft(x)',repeat),
sys.stdout.flush()
assert_array_almost_equal(numpy_irfft(x1,size),y)
print '|%8.2f' % measure('numpy_irfft(x1,size)',repeat),
sys.stdout.flush()
print ' (secs for %s calls)' % (repeat)
sys.stdout.flush()
class TestFftn(TestCase):
def bench_random(self):
from numpy.fft import fftn as numpy_fftn
print
print ' Multi-dimensional Fast Fourier Transform'
print '==================================================='
print ' | real input | complex input '
print '---------------------------------------------------'
print ' size | scipy | numpy | scipy | numpy '
print '---------------------------------------------------'
for size,repeat in [((100,100),100),((1000,100),7),
((256,256),10),
((512,512),3),
]:
print '%9s' % ('%sx%s'%size),
sys.stdout.flush()
for x in [random(size).astype(double),
random(size).astype(cdouble)+random(size).astype(cdouble)*1j
]:
y = fftn(x)
#if size > 500: y = fftn(x)
#else: y = direct_dft(x)
assert_array_almost_equal(fftn(x),y)
print '|%8.2f' % measure('fftn(x)',repeat),
sys.stdout.flush()
assert_array_almost_equal(numpy_fftn(x),y)
print '|%8.2f' % measure('numpy_fftn(x)',repeat),
sys.stdout.flush()
print ' (secs for %s calls)' % (repeat)
sys.stdout.flush()
if __name__ == "__main__":
run_module_suite()
|
|
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_customer_gateway
short_description: Manage an AWS customer gateway
description:
- Manage an AWS customer gateway
version_added: "2.2"
author: Michael Baydoun (@MichaelBaydoun)
requirements: [ botocore, boto3 ]
notes:
- You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the
first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent
requests do not create new customer gateway resources.
- Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use
customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.
options:
bgp_asn:
description:
- Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when state=present.
required: false
default: null
ip_address:
description:
      - Internet-routable IP address for the customer gateway; must be a static address.
required: true
name:
description:
- Name of the customer gateway.
required: true
routing:
description:
- The type of routing.
choices: ['static', 'dynamic']
default: dynamic
version_added: '2.4'
state:
description:
- Create or terminate the Customer Gateway.
required: false
default: present
choices: [ 'present', 'absent' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create Customer Gateway
- ec2_customer_gateway:
bgp_asn: 12345
ip_address: 1.2.3.4
name: IndianapolisOffice
region: us-east-1
register: cgw
# Delete Customer Gateway
- ec2_customer_gateway:
ip_address: 1.2.3.4
name: IndianapolisOffice
state: absent
region: us-east-1
register: cgw
'''
RETURN = '''
gateway.customer_gateways:
description: details about the gateway that was created.
returned: success
type: complex
contains:
bgp_asn:
description: The Border Gateway Autonomous System Number.
returned: when exists and gateway is available.
sample: 65123
type: string
customer_gateway_id:
description: gateway id assigned by amazon.
returned: when exists and gateway is available.
sample: cgw-cb6386a2
type: string
ip_address:
description: ip address of your gateway device.
returned: when exists and gateway is available.
sample: 1.2.3.4
type: string
state:
description: state of gateway.
returned: when gateway exists and is available.
            sample: available
type: string
tags:
description: any tags on the gateway.
            returned: when gateway exists and is available, and when tags exist.
type: string
type:
description: encryption type.
returned: when gateway exists and is available.
sample: ipsec.1
type: string
'''
try:
from botocore.exceptions import ClientError
HAS_BOTOCORE = True
except ImportError:
HAS_BOTOCORE = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
class Ec2CustomerGatewayManager:
def __init__(self, module):
self.module = module
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
self.ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except ClientError as e:
module.fail_json(msg=e.message)
def ensure_cgw_absent(self, gw_id):
response = self.ec2.delete_customer_gateway(
DryRun=False,
CustomerGatewayId=gw_id
)
return response
def ensure_cgw_present(self, bgp_asn, ip_address):
if not bgp_asn:
bgp_asn = 65000
response = self.ec2.create_customer_gateway(
DryRun=False,
Type='ipsec.1',
PublicIp=ip_address,
BgpAsn=bgp_asn,
)
return response
def tag_cgw_name(self, gw_id, name):
response = self.ec2.create_tags(
DryRun=False,
Resources=[
gw_id,
],
Tags=[
{
'Key': 'Name',
'Value': name
},
]
)
return response
def describe_gateways(self, ip_address):
response = self.ec2.describe_customer_gateways(
DryRun=False,
Filters=[
{
'Name': 'state',
'Values': [
'available',
]
},
{
'Name': 'ip-address',
'Values': [
ip_address,
]
}
]
)
return response
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
bgp_asn=dict(required=False, type='int'),
ip_address=dict(required=True),
name=dict(required=True),
routing=dict(default='dynamic', choices=['dynamic', 'static']),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
('routing', 'dynamic', ['bgp_asn'])
]
)
if not HAS_BOTOCORE:
module.fail_json(msg='botocore is required.')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
gw_mgr = Ec2CustomerGatewayManager(module)
name = module.params.get('name')
existing = gw_mgr.describe_gateways(module.params['ip_address'])
results = dict(changed=False)
if module.params['state'] == 'present':
if existing['CustomerGateways']:
existing['CustomerGateway'] = existing['CustomerGateways'][0]
results['gateway'] = existing
if existing['CustomerGateway']['Tags']:
tag_array = existing['CustomerGateway']['Tags']
for key, value in enumerate(tag_array):
if value['Key'] == 'Name':
current_name = value['Value']
if current_name != name:
results['name'] = gw_mgr.tag_cgw_name(
results['gateway']['CustomerGateway']['CustomerGatewayId'],
module.params['name'],
)
results['changed'] = True
else:
if not module.check_mode:
results['gateway'] = gw_mgr.ensure_cgw_present(
module.params['bgp_asn'],
module.params['ip_address'],
)
results['name'] = gw_mgr.tag_cgw_name(
results['gateway']['CustomerGateway']['CustomerGatewayId'],
module.params['name'],
)
results['changed'] = True
elif module.params['state'] == 'absent':
if existing['CustomerGateways']:
existing['CustomerGateway'] = existing['CustomerGateways'][0]
results['gateway'] = existing
if not module.check_mode:
results['gateway'] = gw_mgr.ensure_cgw_absent(
existing['CustomerGateway']['CustomerGatewayId']
)
results['changed'] = True
pretty_results = camel_dict_to_snake_dict(results)
module.exit_json(**pretty_results)
if __name__ == '__main__':
main()
|
|
# server.py -- Implementation of the server side git protocols
# Copyright (C) 2008 John Carr <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# or (at your option) any later version of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Git smart network protocol server implementation.
For more detailed implementation on the network protocol, see the
Documentation/technical directory in the cgit distribution, and in particular:
* Documentation/technical/protocol-capabilities.txt
* Documentation/technical/pack-protocol.txt
Currently supported capabilities:
* include-tag
* thin-pack
* multi_ack_detailed
* multi_ack
* side-band-64k
* ofs-delta
* no-progress
* report-status
* delete-refs
Known capabilities that are not supported:
* shallow (http://pad.lv/909524)
"""
import collections
import os
import socket
import SocketServer
import sys
import zlib
from dulwich.errors import (
ApplyDeltaError,
ChecksumMismatch,
GitProtocolError,
NotGitRepository,
UnexpectedCommandError,
ObjectFormatException,
)
from dulwich import log_utils
from dulwich.objects import (
hex_to_sha,
)
from dulwich.pack import (
write_pack_objects,
)
from dulwich.protocol import (
BufferedPktLineWriter,
MULTI_ACK,
MULTI_ACK_DETAILED,
Protocol,
ProtocolFile,
ReceivableProtocol,
SINGLE_ACK,
TCP_GIT_PORT,
ZERO_SHA,
ack_type,
extract_capabilities,
extract_want_line_capabilities,
)
from dulwich.repo import (
Repo,
)
logger = log_utils.getLogger(__name__)
class Backend(object):
"""A backend for the Git smart server implementation."""
def open_repository(self, path):
"""Open the repository at a path.
:param path: Path to the repository
:raise NotGitRepository: no git repository was found at path
:return: Instance of BackendRepo
"""
raise NotImplementedError(self.open_repository)
class BackendRepo(object):
"""Repository abstraction used by the Git server.
Please note that the methods required here are a
subset of those provided by dulwich.repo.Repo.
"""
object_store = None
refs = None
def get_refs(self):
"""
Get all the refs in the repository
:return: dict of name -> sha
"""
raise NotImplementedError
def get_peeled(self, name):
"""Return the cached peeled value of a ref, if available.
:param name: Name of the ref to peel
        :return: The peeled value of the ref. If the ref is known not to point to
a tag, this will be the SHA the ref refers to. If no cached
information about a tag is available, this method may return None,
but it should attempt to peel the tag if possible.
"""
return None
def fetch_objects(self, determine_wants, graph_walker, progress,
get_tagged=None):
"""
Yield the objects required for a list of commits.
:param progress: is a callback to send progress messages to the client
:param get_tagged: Function that returns a dict of pointed-to sha -> tag
sha for including tags.
"""
raise NotImplementedError
class DictBackend(Backend):
"""Trivial backend that looks up Git repositories in a dictionary."""
def __init__(self, repos):
self.repos = repos
def open_repository(self, path):
logger.debug('Opening repository at %s', path)
try:
return self.repos[path]
except KeyError:
raise NotGitRepository(
"No git repository was found at %(path)s" % dict(path=path)
)
class FileSystemBackend(Backend):
"""Simple backend that looks up Git repositories in the local file system."""
def open_repository(self, path):
logger.debug('opening repository at %s', path)
return Repo(path)
class Handler(object):
"""Smart protocol command handler base class."""
def __init__(self, backend, proto, http_req=None):
self.backend = backend
self.proto = proto
self.http_req = http_req
self._client_capabilities = None
@classmethod
def capability_line(cls):
return " ".join(cls.capabilities())
@classmethod
def capabilities(cls):
raise NotImplementedError(cls.capabilities)
@classmethod
def innocuous_capabilities(cls):
return ("include-tag", "thin-pack", "no-progress", "ofs-delta")
@classmethod
def required_capabilities(cls):
"""Return a list of capabilities that we require the client to have."""
return []
def set_client_capabilities(self, caps):
allowable_caps = set(self.innocuous_capabilities())
allowable_caps.update(self.capabilities())
for cap in caps:
if cap not in allowable_caps:
raise GitProtocolError('Client asked for capability %s that '
'was not advertised.' % cap)
for cap in self.required_capabilities():
if cap not in caps:
raise GitProtocolError('Client does not support required '
'capability %s.' % cap)
self._client_capabilities = set(caps)
logger.info('Client capabilities: %s', caps)
def has_capability(self, cap):
if self._client_capabilities is None:
raise GitProtocolError('Server attempted to access capability %s '
'before asking client' % cap)
return cap in self._client_capabilities
class UploadPackHandler(Handler):
"""Protocol handler for uploading a pack to the server."""
def __init__(self, backend, args, proto, http_req=None,
advertise_refs=False):
Handler.__init__(self, backend, proto, http_req=http_req)
self.repo = backend.open_repository(args[0])
self._graph_walker = None
self.advertise_refs = advertise_refs
@classmethod
def capabilities(cls):
return ("multi_ack_detailed", "multi_ack", "side-band-64k", "thin-pack",
"ofs-delta", "no-progress", "include-tag")
@classmethod
def required_capabilities(cls):
return ("side-band-64k", "thin-pack", "ofs-delta")
def progress(self, message):
if self.has_capability("no-progress"):
return
self.proto.write_sideband(2, message)
def get_tagged(self, refs=None, repo=None):
"""Get a dict of peeled values of tags to their original tag shas.
:param refs: dict of refname -> sha of possible tags; defaults to all of
the backend's refs.
:param repo: optional Repo instance for getting peeled refs; defaults to
the backend's repo, if available
:return: dict of peeled_sha -> tag_sha, where tag_sha is the sha of a
tag whose peeled value is peeled_sha.
"""
if not self.has_capability("include-tag"):
return {}
if refs is None:
refs = self.repo.get_refs()
if repo is None:
repo = getattr(self.repo, "repo", None)
if repo is None:
# Bail if we don't have a Repo available; this is ok since
# clients must be able to handle if the server doesn't include
# all relevant tags.
# TODO: fix behavior when missing
return {}
tagged = {}
for name, sha in refs.iteritems():
peeled_sha = repo.get_peeled(name)
if peeled_sha != sha:
tagged[peeled_sha] = sha
return tagged
def handle(self):
write = lambda x: self.proto.write_sideband(1, x)
graph_walker = ProtocolGraphWalker(self, self.repo.object_store,
self.repo.get_peeled)
objects_iter = self.repo.fetch_objects(
graph_walker.determine_wants, graph_walker, self.progress,
get_tagged=self.get_tagged)
# Did the process short-circuit (e.g. in a stateless RPC call)? Note
# that the client still expects a 0-object pack in most cases.
if objects_iter is None:
return
self.progress("dul-daemon says what\n")
self.progress("counting objects: %d, done.\n" % len(objects_iter))
write_pack_objects(ProtocolFile(None, write), objects_iter)
self.progress("how was that, then?\n")
# we are done
self.proto.write("0000")
def _split_proto_line(line, allowed):
"""Split a line read from the wire.
:param line: The line read from the wire.
:param allowed: An iterable of command names that should be allowed.
Command names not listed below as possible return values will be
ignored. If None, any commands from the possible return values are
allowed.
:return: a tuple having one of the following forms:
('want', obj_id)
('have', obj_id)
('done', None)
(None, None) (for a flush-pkt)
:raise UnexpectedCommandError: if the line cannot be parsed into one of the
allowed return values.
"""
if not line:
fields = [None]
else:
fields = line.rstrip('\n').split(' ', 1)
command = fields[0]
if allowed is not None and command not in allowed:
raise UnexpectedCommandError(command)
try:
if len(fields) == 1 and command in ('done', None):
return (command, None)
elif len(fields) == 2 and command in ('want', 'have'):
hex_to_sha(fields[1])
return tuple(fields)
except (TypeError, AssertionError), e:
raise GitProtocolError(e)
raise GitProtocolError('Received invalid line from client: %s' % line)
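# Illustrative self-check (not part of upstream dulwich): the shapes
# _split_proto_line returns for the common cases. The SHA used here is a dummy
# 40-character hex string, not a real object id.
def _demo_split_proto_line():
    dummy_sha = 'a' * 40
    assert _split_proto_line('want %s\n' % dummy_sha, ('want', None)) == ('want', dummy_sha)
    assert _split_proto_line('done', ('done', None)) == ('done', None)
    assert _split_proto_line('', ('want', 'done', None)) == (None, None)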
class ProtocolGraphWalker(object):
"""A graph walker that knows the git protocol.
As a graph walker, this class implements ack(), next(), and reset(). It
also contains some base methods for interacting with the wire and walking
the commit tree.
The work of determining which acks to send is passed on to the
implementation instance stored in _impl. The reason for this is that we do
not know at object creation time what ack level the protocol requires. A
call to set_ack_level() is required to set up the implementation, before any
calls to next() or ack() are made.
"""
def __init__(self, handler, object_store, get_peeled):
self.handler = handler
self.store = object_store
self.get_peeled = get_peeled
self.proto = handler.proto
self.http_req = handler.http_req
self.advertise_refs = handler.advertise_refs
self._wants = []
self._cached = False
self._cache = []
self._cache_index = 0
self._impl = None
def determine_wants(self, heads):
"""Determine the wants for a set of heads.
The given heads are advertised to the client, who then specifies which
refs he wants using 'want' lines. This portion of the protocol is the
same regardless of ack type, and in fact is used to set the ack type of
the ProtocolGraphWalker.
:param heads: a dict of refname->SHA1 to advertise
:return: a list of SHA1s requested by the client
"""
if not heads:
# The repo is empty, so short-circuit the whole process.
self.proto.write_pkt_line(None)
return None
values = set(heads.itervalues())
if self.advertise_refs or not self.http_req:
for i, (ref, sha) in enumerate(sorted(heads.iteritems())):
line = "%s %s" % (sha, ref)
if not i:
line = "%s\x00%s" % (line, self.handler.capability_line())
self.proto.write_pkt_line("%s\n" % line)
peeled_sha = self.get_peeled(ref)
if peeled_sha != sha:
self.proto.write_pkt_line('%s %s^{}\n' %
(peeled_sha, ref))
# i'm done..
self.proto.write_pkt_line(None)
if self.advertise_refs:
return None
        # The client will now send us a series of 'want' lines
want = self.proto.read_pkt_line()
if not want:
return []
line, caps = extract_want_line_capabilities(want)
self.handler.set_client_capabilities(caps)
self.set_ack_type(ack_type(caps))
allowed = ('want', None)
command, sha = _split_proto_line(line, allowed)
want_revs = []
        while command is not None:
if sha not in values:
raise GitProtocolError(
'Client wants invalid object %s' % sha)
want_revs.append(sha)
command, sha = self.read_proto_line(allowed)
self.set_wants(want_revs)
if self.http_req and self.proto.eof():
# The client may close the socket at this point, expecting a
# flush-pkt from the server. We might be ready to send a packfile at
# this point, so we need to explicitly short-circuit in this case.
return None
return want_revs
def ack(self, have_ref):
return self._impl.ack(have_ref)
def reset(self):
self._cached = True
self._cache_index = 0
def next(self):
if not self._cached:
if not self._impl and self.http_req:
return None
return self._impl.next()
self._cache_index += 1
if self._cache_index > len(self._cache):
return None
return self._cache[self._cache_index]
def read_proto_line(self, allowed):
"""Read a line from the wire.
:param allowed: An iterable of command names that should be allowed.
:return: A tuple of (command, value); see _split_proto_line.
:raise GitProtocolError: If an error occurred reading the line.
"""
return _split_proto_line(self.proto.read_pkt_line(), allowed)
def send_ack(self, sha, ack_type=''):
if ack_type:
ack_type = ' %s' % ack_type
self.proto.write_pkt_line('ACK %s%s\n' % (sha, ack_type))
def send_nak(self):
self.proto.write_pkt_line('NAK\n')
def set_wants(self, wants):
self._wants = wants
def _is_satisfied(self, haves, want, earliest):
"""Check whether a want is satisfied by a set of haves.
A want, typically a branch tip, is "satisfied" only if there exists a
path back from that want to one of the haves.
:param haves: A set of commits we know the client has.
:param want: The want to check satisfaction for.
:param earliest: A timestamp beyond which the search for haves will be
terminated, presumably because we're searching too far down the
wrong branch.
"""
o = self.store[want]
pending = collections.deque([o])
while pending:
commit = pending.popleft()
if commit.id in haves:
return True
if commit.type_name != "commit":
# non-commit wants are assumed to be satisfied
continue
for parent in commit.parents:
parent_obj = self.store[parent]
# TODO: handle parents with later commit times than children
if parent_obj.commit_time >= earliest:
pending.append(parent_obj)
return False
def all_wants_satisfied(self, haves):
"""Check whether all the current wants are satisfied by a set of haves.
:param haves: A set of commits we know the client has.
:note: Wants are specified with set_wants rather than passed in since
in the current interface they are determined outside this class.
"""
haves = set(haves)
earliest = min([self.store[h].commit_time for h in haves])
for want in self._wants:
if not self._is_satisfied(haves, want, earliest):
return False
return True
def set_ack_type(self, ack_type):
impl_classes = {
MULTI_ACK: MultiAckGraphWalkerImpl,
MULTI_ACK_DETAILED: MultiAckDetailedGraphWalkerImpl,
SINGLE_ACK: SingleAckGraphWalkerImpl,
}
self._impl = impl_classes[ack_type](self)
_GRAPH_WALKER_COMMANDS = ('have', 'done', None)
class SingleAckGraphWalkerImpl(object):
"""Graph walker implementation that speaks the single-ack protocol."""
def __init__(self, walker):
self.walker = walker
self._sent_ack = False
def ack(self, have_ref):
if not self._sent_ack:
self.walker.send_ack(have_ref)
self._sent_ack = True
def next(self):
command, sha = self.walker.read_proto_line(_GRAPH_WALKER_COMMANDS)
if command in (None, 'done'):
if not self._sent_ack:
self.walker.send_nak()
return None
elif command == 'have':
return sha
class MultiAckGraphWalkerImpl(object):
"""Graph walker implementation that speaks the multi-ack protocol."""
def __init__(self, walker):
self.walker = walker
self._found_base = False
self._common = []
def ack(self, have_ref):
self._common.append(have_ref)
if not self._found_base:
self.walker.send_ack(have_ref, 'continue')
if self.walker.all_wants_satisfied(self._common):
self._found_base = True
# else we blind ack within next
def next(self):
while True:
command, sha = self.walker.read_proto_line(_GRAPH_WALKER_COMMANDS)
if command is None:
self.walker.send_nak()
# in multi-ack mode, a flush-pkt indicates the client wants to
# flush but more have lines are still coming
continue
elif command == 'done':
# don't nak unless no common commits were found, even if not
# everything is satisfied
if self._common:
self.walker.send_ack(self._common[-1])
else:
self.walker.send_nak()
return None
elif command == 'have':
if self._found_base:
# blind ack
self.walker.send_ack(sha, 'continue')
return sha
class MultiAckDetailedGraphWalkerImpl(object):
"""Graph walker implementation speaking the multi-ack-detailed protocol."""
def __init__(self, walker):
self.walker = walker
self._found_base = False
self._common = []
def ack(self, have_ref):
self._common.append(have_ref)
if not self._found_base:
self.walker.send_ack(have_ref, 'common')
if self.walker.all_wants_satisfied(self._common):
self._found_base = True
self.walker.send_ack(have_ref, 'ready')
# else we blind ack within next
def next(self):
while True:
command, sha = self.walker.read_proto_line(_GRAPH_WALKER_COMMANDS)
if command is None:
self.walker.send_nak()
if self.walker.http_req:
return None
continue
elif command == 'done':
# don't nak unless no common commits were found, even if not
# everything is satisfied
if self._common:
self.walker.send_ack(self._common[-1])
else:
self.walker.send_nak()
return None
elif command == 'have':
if self._found_base:
# blind ack; can happen if the client has more requests
# inflight
self.walker.send_ack(sha, 'ready')
return sha
class ReceivePackHandler(Handler):
"""Protocol handler for downloading a pack from the client."""
def __init__(self, backend, args, proto, http_req=None,
advertise_refs=False):
Handler.__init__(self, backend, proto, http_req=http_req)
self.repo = backend.open_repository(args[0])
self.advertise_refs = advertise_refs
@classmethod
def capabilities(cls):
return ("report-status", "delete-refs", "side-band-64k")
def _apply_pack(self, refs):
all_exceptions = (IOError, OSError, ChecksumMismatch, ApplyDeltaError,
AssertionError, socket.error, zlib.error,
ObjectFormatException)
status = []
# TODO: more informative error messages than just the exception string
try:
p = self.repo.object_store.add_thin_pack(self.proto.read,
self.proto.recv)
status.append(('unpack', 'ok'))
except all_exceptions, e:
status.append(('unpack', str(e).replace('\n', '')))
# The pack may still have been moved in, but it may contain broken
# objects. We trust a later GC to clean it up.
for oldsha, sha, ref in refs:
ref_status = 'ok'
try:
if sha == ZERO_SHA:
                    if 'delete-refs' not in self.capabilities():
raise GitProtocolError(
'Attempted to delete refs without delete-refs '
'capability.')
try:
del self.repo.refs[ref]
except all_exceptions:
ref_status = 'failed to delete'
else:
try:
self.repo.refs[ref] = sha
except all_exceptions:
ref_status = 'failed to write'
except KeyError, e:
ref_status = 'bad ref'
status.append((ref, ref_status))
return status
def _report_status(self, status):
if self.has_capability('side-band-64k'):
writer = BufferedPktLineWriter(
lambda d: self.proto.write_sideband(1, d))
write = writer.write
def flush():
writer.flush()
self.proto.write_pkt_line(None)
else:
write = self.proto.write_pkt_line
flush = lambda: None
for name, msg in status:
if name == 'unpack':
write('unpack %s\n' % msg)
elif msg == 'ok':
write('ok %s\n' % name)
else:
write('ng %s %s\n' % (name, msg))
write(None)
flush()
def handle(self):
refs = sorted(self.repo.get_refs().iteritems())
if self.advertise_refs or not self.http_req:
if refs:
self.proto.write_pkt_line(
"%s %s\x00%s\n" % (refs[0][1], refs[0][0],
self.capability_line()))
for i in range(1, len(refs)):
ref = refs[i]
self.proto.write_pkt_line("%s %s\n" % (ref[1], ref[0]))
else:
self.proto.write_pkt_line("%s capabilities^{}\0%s" % (
ZERO_SHA, self.capability_line()))
self.proto.write("0000")
if self.advertise_refs:
return
client_refs = []
ref = self.proto.read_pkt_line()
        # if ref is None then the client doesn't want to send us anything
if ref is None:
return
ref, caps = extract_capabilities(ref)
self.set_client_capabilities(caps)
# client will now send us a list of (oldsha, newsha, ref)
while ref:
client_refs.append(ref.split())
ref = self.proto.read_pkt_line()
        # backend can now deal with these refs and read a pack using self.read
status = self._apply_pack(client_refs)
# when we have read all the pack from the client, send a status report
# if the client asked for it
if self.has_capability('report-status'):
self._report_status(status)
# Default handler classes for git services.
DEFAULT_HANDLERS = {
'git-upload-pack': UploadPackHandler,
'git-receive-pack': ReceivePackHandler,
}
class TCPGitRequestHandler(SocketServer.StreamRequestHandler):
def __init__(self, handlers, *args, **kwargs):
self.handlers = handlers
SocketServer.StreamRequestHandler.__init__(self, *args, **kwargs)
def handle(self):
proto = ReceivableProtocol(self.connection.recv, self.wfile.write)
command, args = proto.read_cmd()
logger.info('Handling %s request, args=%s', command, args)
cls = self.handlers.get(command, None)
if not callable(cls):
raise GitProtocolError('Invalid service %s' % command)
h = cls(self.server.backend, args, proto)
h.handle()
class TCPGitServer(SocketServer.TCPServer):
allow_reuse_address = True
serve = SocketServer.TCPServer.serve_forever
def _make_handler(self, *args, **kwargs):
return TCPGitRequestHandler(self.handlers, *args, **kwargs)
def __init__(self, backend, listen_addr, port=TCP_GIT_PORT, handlers=None):
self.handlers = dict(DEFAULT_HANDLERS)
if handlers is not None:
self.handlers.update(handlers)
self.backend = backend
logger.info('Listening for TCP connections on %s:%d', listen_addr, port)
SocketServer.TCPServer.__init__(self, (listen_addr, port),
self._make_handler)
def verify_request(self, request, client_address):
logger.info('Handling request from %s', client_address)
return True
def handle_error(self, request, client_address):
logger.exception('Exception happened during processing of request '
'from %s', client_address)
def main(argv=sys.argv):
"""Entry point for starting a TCP git server."""
if len(argv) > 1:
gitdir = argv[1]
else:
gitdir = '.'
log_utils.default_logging_config()
backend = DictBackend({'/': Repo(gitdir)})
server = TCPGitServer(backend, 'localhost')
server.serve_forever()
def serve_command(handler_cls, argv=sys.argv, backend=None, inf=sys.stdin,
outf=sys.stdout):
"""Serve a single command.
This is mostly useful for the implementation of commands used by e.g. git+ssh.
:param handler_cls: `Handler` class to use for the request
:param argv: execv-style command-line arguments. Defaults to sys.argv.
:param backend: `Backend` to use
:param inf: File-like object to read from, defaults to standard input.
:param outf: File-like object to write to, defaults to standard output.
:return: Exit code for use with sys.exit. 0 on success, 1 on failure.
"""
if backend is None:
backend = FileSystemBackend()
def send_fn(data):
outf.write(data)
outf.flush()
proto = Protocol(inf.read, send_fn)
handler = handler_cls(backend, argv[1:], proto)
# FIXME: Catch exceptions and write a single-line summary to outf.
handler.handle()
return 0
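# Illustrative sketch (assumes the current directory is a git repository): how
# serve_command wires a handler to stdin/stdout, e.g. when acting as a
# git-receive-pack substitute invoked over ssh.
def _example_serve_receive_pack():
    return serve_command(ReceivePackHandler, argv=['git-receive-pack', '.'])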
def generate_info_refs(repo):
"""Generate an info refs file."""
refs = repo.get_refs()
for name in sorted(refs.iterkeys()):
# get_refs() includes HEAD as a special case, but we don't want to
# advertise it
if name == 'HEAD':
continue
sha = refs[name]
o = repo.object_store[sha]
if not o:
continue
yield '%s\t%s\n' % (sha, name)
peeled_sha = repo.get_peeled(name)
if peeled_sha != sha:
yield '%s\t%s^{}\n' % (peeled_sha, name)
def generate_objects_info_packs(repo):
"""Generate an index for for packs."""
for pack in repo.object_store.packs:
yield 'P pack-%s.pack\n' % pack.name()
def update_server_info(repo):
"""Generate server info for dumb file access.
This generates info/refs and objects/info/packs,
similar to "git update-server-info".
"""
repo._put_named_file(os.path.join('info', 'refs'),
"".join(generate_info_refs(repo)))
repo._put_named_file(os.path.join('objects', 'info', 'packs'),
"".join(generate_objects_info_packs(repo)))
|
|
#!/usr/bin/env python3
# ----------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ----------------------------------------------------------------------
"""Generate pipeline (default: gpdb_master-generated.yml) from template (default:
templates/gpdb-tpl.yml).
Python module requirements:
- jinja2 (install through pip or easy_install)
"""
from __future__ import print_function
import argparse
import datetime
import os
import re
import subprocess
import yaml
from jinja2 import Environment, FileSystemLoader
PIPELINES_DIR = os.path.dirname(os.path.abspath(__file__))
TEMPLATE_ENVIRONMENT = Environment(
autoescape=False,
loader=FileSystemLoader(os.path.join(PIPELINES_DIR, 'templates')),
trim_blocks=True,
lstrip_blocks=True,
    variable_start_string='[[',  # the default '{{' delimiter conflicts with Concourse's own syntax
variable_end_string=']]',
extensions=['jinja2.ext.loopcontrols']
)
BASE_BRANCH = "master" # when branching gpdb update to 7X_STABLE, 6X_STABLE, etc.
SECRETS_PATH = os.path.expanduser('~/workspace/gp-continuous-integration/secrets')
# Variables that govern pipeline validation
RELEASE_VALIDATOR_JOB = ['Release_Candidate', 'Build_Release_Candidate_RPMs']
JOBS_THAT_ARE_GATES = [
'gate_icw_start',
'gate_icw_end',
'gate_replication_start',
'gate_resource_groups_start',
'gate_cli_start',
'gate_ud_start',
'gate_advanced_analytics_start',
'gate_release_candidate_start'
]
JOBS_THAT_SHOULD_NOT_BLOCK_RELEASE = (
[
'combine_cli_coverage',
'compile_gpdb_binary_swap_centos7',
'compile_gpdb_clients_windows',
'walrep_2',
'madlib_build_gppkg',
'MADlib_Test_planner_centos7',
'MADlib_Test_orca_centos7',
'Publish Server Builds',
] + RELEASE_VALIDATOR_JOB + JOBS_THAT_ARE_GATES
)
def suggested_git_remote():
"""Try to guess the current git remote"""
default_remote = "<https://github.com/<github-user>/gpdb>"
remote = subprocess.check_output(["git", "ls-remote", "--get-url"]).decode('utf-8').rstrip()
if "greenplum-db/gpdb" in remote:
return default_remote
if "git@" in remote:
git_uri = remote.split('@')[1]
hostname, path = git_uri.split(':')
return 'https://%s/%s' % (hostname, path)
return remote
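# Illustrative helper (hypothetical, not used by this script): the same
# ssh-to-https rewrite that suggested_git_remote() performs, factored onto a
# plain string so the transformation can be exercised without a git checkout.
def _rewrite_ssh_remote(remote):
    """e.g. 'git@github.com:example-user/gpdb' -> 'https://github.com/example-user/gpdb'"""
    if "git@" in remote:
        git_uri = remote.split('@')[1]
        hostname, path = git_uri.split(':')
        return 'https://%s/%s' % (hostname, path)
    return remote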
def suggested_git_branch():
"""Try to guess the current git branch"""
branch = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]).decode('utf-8').rstrip()
if branch == "master" or is_a_base_branch(branch):
return "<branch-name>"
return branch
def is_a_base_branch(branch):
# best effort in matching a base branch (5X_STABLE, 6X_STABLE, etc.)
    matched = re.match(r"\d+X_STABLE", branch)
return matched is not None
def render_template(template_filename, context):
"""Render pipeline template yaml"""
return TEMPLATE_ENVIRONMENT.get_template(template_filename).render(context)
def validate_pipeline_release_jobs(raw_pipeline_yml):
"""Make sure all jobs in specified pipeline that don't block release are accounted
for (they should belong to JOBS_THAT_SHOULD_NOT_BLOCK_RELEASE, defined above)"""
print("======================================================================")
print("Validate Pipeline Release Jobs")
print("----------------------------------------------------------------------")
# ignore concourse v2.x variable interpolation
pipeline_yml_cleaned = re.sub('{{', '', re.sub('}}', '', raw_pipeline_yml))
pipeline = yaml.safe_load(pipeline_yml_cleaned)
jobs_raw = pipeline['jobs']
all_job_names = [job['name'] for job in jobs_raw]
rc_name = 'gate_release_candidate_start'
release_candidate_job = [j for j in jobs_raw if j['name'] == rc_name][0]
release_blocking_jobs = release_candidate_job['plan'][0]['in_parallel']['steps'][0]['passed']
non_release_blocking_jobs = [j for j in all_job_names if j not in release_blocking_jobs]
unaccounted_for_jobs = \
[j for j in non_release_blocking_jobs if j not in JOBS_THAT_SHOULD_NOT_BLOCK_RELEASE]
if unaccounted_for_jobs:
print("Please add the following jobs as a Release_Candidate dependency or ignore them")
print("by adding them to JOBS_THAT_SHOULD_NOT_BLOCK_RELEASE in " + __file__)
print(unaccounted_for_jobs)
return False
print("Pipeline validated: all jobs accounted for")
return True
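# For reference (illustrative, abridged): the nesting validate_pipeline_release_jobs()
# expects in the rendered pipeline YAML when it reads
# plan[0]['in_parallel']['steps'][0]['passed']:
#
#   jobs:
#   - name: gate_release_candidate_start
#     plan:
#     - in_parallel:
#         steps:
#         - get: gpdb_src           # resource name is an assumption
#           passed: [job_a, job_b]  # <- the release-blocking jobs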
def validate_target(target):
expected_secrets_file = "%s/ccp_ci_secrets_%s.yml" % (SECRETS_PATH, target)
if not os.path.exists(expected_secrets_file):
raise Exception('Invalid target "%s"; no secrets file found. Please ensure your secrets files in %s are up to date.' % (target, SECRETS_PATH))
def create_pipeline(args):
"""Generate OS specific pipeline sections"""
if args.test_trigger_false:
test_trigger = "true"
else:
test_trigger = "false"
context = {
'template_filename': args.template_filename,
'generator_filename': os.path.basename(__file__),
'timestamp': datetime.datetime.now(),
'os_types': args.os_types,
'test_sections': args.test_sections,
'pipeline_configuration': args.pipeline_configuration,
'test_trigger': test_trigger,
'use_ICW_workers': args.use_ICW_workers
}
pipeline_yml = render_template(args.template_filename, context)
if args.pipeline_target == 'prod':
validated = validate_pipeline_release_jobs(pipeline_yml)
if not validated:
print("Refusing to update the pipeline file")
return False
with open(args.output_filepath, 'w') as output:
header = render_template('pipeline_header.yml', context)
output.write(header)
output.write(pipeline_yml)
return True
def gen_pipeline(args, pipeline_name, secret_files,
git_remote=None,
git_branch=None):
if git_remote is None:
git_remote = suggested_git_remote()
if git_branch is None:
git_branch = suggested_git_branch()
secrets = ""
for secret in secret_files:
secrets += "-l %s/%s " % (SECRETS_PATH, secret)
format_args = {
'target': args.pipeline_target,
'name': pipeline_name,
'output_path': args.output_filepath,
'secrets_path': SECRETS_PATH,
'secrets': secrets,
'remote': git_remote,
'branch': git_branch,
}
return '''fly --target {target} \
set-pipeline \
--check-creds \
--pipeline {name} \
--config {output_path} \
--load-vars-from {secrets_path}/gpdb_common-ci-secrets.yml \
{secrets} \
--var gpdb-git-remote={remote} \
--var gpdb-git-branch={branch} \
--var pipeline-name={name} \
'''.format(**format_args)
def header(args):
return '''
======================================================================
Pipeline target: ......... : %s
Pipeline file ............ : %s
Template file ............ : %s
OS Types ................. : %s
Test sections ............ : %s
test_trigger ............. : %s
use_ICW_workers .......... : %s
======================================================================
''' % (args.pipeline_target,
args.output_filepath,
args.template_filename,
args.os_types,
args.test_sections,
args.test_trigger_false,
args.use_ICW_workers
)
def print_fly_commands(args):
pipeline_name = os.path.basename(args.output_filepath).rsplit('.', 1)[0]
print(header(args))
if args.pipeline_target == 'prod':
print('NOTE: You can set the production pipelines with the following:\n')
pipeline_name = "gpdb_%s" % BASE_BRANCH if BASE_BRANCH == "master" else BASE_BRANCH
print(gen_pipeline(args, pipeline_name, ["gpdb_%s-ci-secrets.prod.yml" % BASE_BRANCH],
"https://github.com/greenplum-db/gpdb.git", BASE_BRANCH))
print(gen_pipeline(args, "%s_without_asserts" % pipeline_name, ["gpdb_%s_without_asserts-ci-secrets.prod.yml" % BASE_BRANCH],
"https://github.com/greenplum-db/gpdb.git", BASE_BRANCH))
return
print('NOTE: You can set the developer pipeline with the following:\n')
print(gen_pipeline(args, pipeline_name, ["gpdb_%s-ci-secrets.dev.yml" % BASE_BRANCH,
"ccp_ci_secrets_%s.yml" % args.pipeline_target]))
def main():
"""main: parse args and create pipeline"""
parser = argparse.ArgumentParser(
description='Generate Concourse Pipeline utility',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'-T',
'--template',
action='store',
dest='template_filename',
default="gpdb-tpl.yml",
help='Name of template to use, in templates/'
)
default_output_filename = "gpdb_%s-generated.yml" % BASE_BRANCH
parser.add_argument(
'-o',
'--output',
action='store',
dest='output_filepath',
default=os.path.join(PIPELINES_DIR, default_output_filename),
help='Output filepath to use for pipeline file, and from which to derive the pipeline name.'
)
parser.add_argument(
'-O',
'--os_types',
action='store',
dest='os_types',
default=['centos7'],
choices=['centos7', 'ubuntu18.04', 'win'],
nargs='+',
help='List of OS values to support'
)
parser.add_argument(
'-t',
'--pipeline_target',
action='store',
dest='pipeline_target',
default='dev',
        help='Concourse target to use: either prod, dev, or <team abbreviation>, '
'where abbreviation is found from the team\'s ccp secrets file name ending.'
)
parser.add_argument(
'-c',
'--configuration',
action='store',
dest='pipeline_configuration',
default='default',
        help='Set of platforms and test sections to use; only works with dev and team targets, ignored with the prod target. '
        'Valid options are prod (same as the prod pipeline), full (everything except release jobs), and default '
        '(follow the -a and -O flags).'
)
parser.add_argument(
'-a',
'--test_sections',
action='store',
dest='test_sections',
choices=[
'ICW',
'Replication',
'ResourceGroups',
'Interconnect',
'CLI',
'UD',
'AA',
'Extensions'
],
default=['ICW'],
nargs='+',
help='Select tests sections to run'
)
parser.add_argument(
'-n',
'--test_trigger_false',
action='store_false',
default=True,
help='Set test triggers to "false". This only applies to dev pipelines.'
)
parser.add_argument(
'-u',
'--user',
action='store',
dest='user',
default=os.getlogin(),
help='Developer userid to use for pipeline name and filename.'
)
parser.add_argument(
'-U',
'--use_ICW_workers',
action='store_true',
default=False,
help='Set use_ICW_workers to "true".'
)
args = parser.parse_args()
validate_target(args.pipeline_target)
output_path_is_set = os.path.basename(args.output_filepath) != default_output_filename
if (args.user != os.getlogin() and output_path_is_set):
print("You can only use one of --output or --user.")
exit(1)
if args.pipeline_target == 'prod':
args.pipeline_configuration = 'prod'
# use_ICW_workers adds tags to the specified concourse definitions which
# correspond to dedicated concourse workers to increase performance.
if args.pipeline_target in ['prod', 'dev', 'cm']:
args.use_ICW_workers = True
if args.pipeline_configuration == 'prod' or args.pipeline_configuration == 'full':
args.os_types = ['centos6', 'centos7', 'ubuntu18.04', 'win']
args.test_sections = [
'ICW',
'Replication',
'ResourceGroups',
'Interconnect',
'CLI',
'UD',
'Extensions'
]
# if generating a dev pipeline but didn't specify an output,
# don't overwrite the master pipeline
if args.pipeline_target != 'prod' and not output_path_is_set:
pipeline_file_suffix = suggested_git_branch()
if args.user != os.getlogin():
pipeline_file_suffix = args.user
default_dev_output_filename = 'gpdb-' + args.pipeline_target + '-' + pipeline_file_suffix + '.yml'
args.output_filepath = os.path.join(PIPELINES_DIR, default_dev_output_filename)
pipeline_created = create_pipeline(args)
if not pipeline_created:
exit(1)
print_fly_commands(args)
if __name__ == "__main__":
main()
|
|
import time
import random
import os
import re
import numpy as np
import pandas as pd
from sklearn import preprocessing
from keras.preprocessing import sequence
import keras.callbacks
#====================================================== DATA GENERATOR =================================================================================
# Data generator that will provide training and testing with data. Works with mini batches of audio feat files.
# The data generator is called using the next_train() and next_val() methods.
# Class constructor to initialize the datagenerator object.
class DataGenerator(keras.callbacks.Callback):
"""
"""
def __init__(self,
minibatch_size,
numfeats,
maxlen,
nb_classes,
dataset,
val_split=0.2,
absolute_max_sequence_len=150):
"""
"""
self.minibatch_size = minibatch_size
self.maxlen = maxlen
self.numfeats = numfeats
self.val_split = val_split
self.absolute_max_sequence_len = absolute_max_sequence_len
self.train_index = 0
self.val_index = 0
self.nb_classes = nb_classes
self.blank_label = np.array([self.nb_classes - 1])
self.dataset = dataset
if self.dataset == 'train':
self.in_dir = '../data/train_audio'
elif self.dataset == 'val':
self.in_dir = '../data/val_audio'
self.build_dataset()
# Loads and preprocesses the dataset and splits it into training and validation set.
# The loaded data should be in a csv file with 41 columns. Columns 0-38 are the MFCC features.
# Column 39 is the audio file number and column 40 is the label column.
def build_dataset(self):
"""
"""
if self.dataset == 'train':
train_lab_file = '../data/training_oov.csv'
elif self.dataset == 'val':
train_lab_file = '../data/validation.csv'
labs = pd.read_csv(train_lab_file)
self.labs = labs
file_list = os.listdir(self.in_dir)
file_list = sorted([int(re.findall('audio_(\d+).csv',file_name)[0]) for file_name in file_list])
if self.dataset == 'train':
random.seed(10)
random.shuffle(file_list)
split_point = int(len(file_list) * (1 - self.val_split))
self.train_list, self.val_list = file_list[:split_point], file_list[split_point:]
self.train_size = len(self.train_list)
self.val_size = len(self.val_list)
            # Trim the train and validation lists to a multiple of the mini-batch size so no mini-batch is left short.
train_mod_by_batch_size = self.train_size % self.minibatch_size
if train_mod_by_batch_size != 0:
del self.train_list[-train_mod_by_batch_size:]
self.train_size -= train_mod_by_batch_size
val_mod_by_batch_size = self.val_size % self.minibatch_size
if val_mod_by_batch_size != 0:
del self.val_list[-val_mod_by_batch_size:]
self.val_size -= val_mod_by_batch_size
else:
self.val_list = file_list
self.val_size = len(self.val_list)
return
# Return sizes.
def get_size(self,train):
"""
"""
if train:
return self.train_size
else:
return self.val_size
def get_file_list(self,train):
if train:
return self.train_list
else:
return self.val_list
# This method converts label sequences to word level label sequences.
# Input: lab_seq: nd array of class label sequence.
# Returns: lab_seq: nd array of word level label sequence.
def sent_2_words(self,lab_seq):
"""
This dicts will not be used
class_dict = {0:"oov", 1:"VA", 2:"VQ", 3:"PF", 4:"FU", 5:"CP", 6:"CV", 7:"DC", 8:"SP",
9:"CN", 10:"FN", 11:"OK", 12:"CF", 13:"BS", 14:"PR",
15:"NU", 16:"FM", 17:"TT", 18:"BN", 19:"MC", 20:"ST", 21:"sil"}
word_dict = {0:"oov", 1:"vattene", 2:"vieni", 3:"qui", 4:"perfetto", 5:"e'", 6:"un", 7:"furbo",
8:"che", 9:"due", 10:"palle", 11:"vuoi", 12:"vanno", 13:"d'accordo", 14:"sei", 15:"pazzo",
16:"cos'hai", 17:"combinato", 18:"non", 19:"me", 20:"ne", 21:"frega",
22:"niente", 23:"ok", 24:"cosa", 25:"ti", 26:"farei", 27:"basta", 28:"le", 29:"prendere",
30:"ce", 31:"n'e", 32:"piu", 33:"ho", 34:"fame", 35:"tanto", 36:"tempo", 37:"fa",
38:"buonissimo", 39:"si", 40:"sono", 41:"messi", 42:"stufo" , 43:"sil"}
"""
class_2_words = {0:[0.0], 1:[1.0], 2:[2.0,3.0], 3:[4.0], 4:[5.0,6.0,7.0], 5:[8.0,9.0,10.0], 6:[8.0,11.0], 7:[12.0,13.0], 8:[14.0,15.0],
9:[16.0,17.0], 10:[18.0,19.0,20.0,21.0,22.0], 11:[23.0], 12:[24.0,25.0,26.0], 13:[27.0], 14:[28.0,11.0,29.0],
15:[18.0,30.0,31.0,32.0], 16:[33.0,34.0], 17:[35.0,36.0,37.0], 18:[38.0], 19:[39.0,40.0,41.0,13.0], 20:[40.0,42.0], 21:[43.0]}
new_seq = []
for lab in lab_seq:
new_seq = new_seq + class_2_words[lab]
lab_seq = np.asarray(new_seq)
return lab_seq
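    # Example of the mapping above (illustrative): class label 2 expands to the
    # word labels [2.0, 3.0] and class 11 to [23.0], so
    #   sent_2_words(np.array([2, 11])) -> array([ 2.,  3., 23.])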
# each time a batch (list of file ids) is requested from train/val/test
def get_batch(self, train):
"""
"""
if train:
file_list = self.train_list
index = self.train_index
else:
file_list = self.val_list
index = self.val_index
try:
batch = file_list[index:(index + self.minibatch_size)]
except:
batch = file_list[index:]
size = len(batch)
X_data = np.ones([size, self.maxlen, self.numfeats])
labels = np.ones([size, self.absolute_max_sequence_len])
input_length = np.zeros([size, 1])
label_length = np.zeros([size, 1])
for i in range(len(batch)):
file = batch[i]
file_name = 'audio_' + str(file) + '.csv'
file_path = os.path.join(self.in_dir,file_name)
vf = pd.read_csv(file_path).drop(['file_number'],axis=1)
if set(['39', '40']).issubset(vf.columns):
vf = vf.drop(['39','40'],axis=1)
            # Downsample the audio features by a factor of 5.
vf = vf.iloc[::5, :].reset_index(drop=True)
gest_seq = vf.as_matrix().astype(float)
gest_seq = sequence.pad_sequences([gest_seq],
maxlen=self.maxlen,
padding='post',
truncating='post',
dtype='float32')
lab_seq = self.labs[self.labs['Id'] == file]
lab_seq = np.array([int(lab) for lab in lab_seq['Sequence'].values[0].split()]).astype('float32')
lab_seq = self.sent_2_words(lab_seq)
# If a sequence is not found insert a blank example and pad.
if lab_seq.shape[0] == 0:
lab_seq = sequence.pad_sequences([self.blank_label],
maxlen=(self.absolute_max_sequence_len),
padding='post',
value=-1)
labels[i, :] = lab_seq
label_length[i] = 1
else:
X_data[i, :, :] = gest_seq
label_length[i] = lab_seq.shape[0]
lab_seq = sequence.pad_sequences([lab_seq],
maxlen=(self.absolute_max_sequence_len),
padding='post',
value=-1)
labels[i, :] = lab_seq
input_length[i] = (X_data[i].shape[0] - 2)
# Returned values: a dictionary with 4 values
# the_input: data sequence
# the labels: label sequence
# input_length: length of data sequence
# label_length: length of label sequence
# an array of zeros
# outputs: dummy vector of zeros required for keras training
inputs = {'the_input': X_data,
'the_labels': labels,
'input_length': input_length,
'label_length': label_length,
}
outputs = {'ctc': np.zeros([size])} # dummy data for dummy loss function
return (inputs, outputs)
# Get the next training batch and update index. Called by the generator.
def next_train(self):
"""
"""
while 1:
ret = self.get_batch(train=True)
self.train_index += self.minibatch_size
if self.train_index >= self.train_size:
self.train_index = 0
yield ret
# Get the next validation batch and update index. Called by the generator.
def next_val(self):
"""
"""
while 1:
ret = self.get_batch(train=False)
self.val_index += self.minibatch_size
if self.val_index >= self.val_size:
self.val_index = 0
yield ret
# Save model and weights on epochs end.
def on_epoch_end(self, epoch, logs={}):
"""
"""
self.train_index = 0
self.val_index = 0
random.shuffle(self.train_list)
random.shuffle(self.val_list)
model_json = self.model.to_json()
with open("sp_ctc_lstm_model.json", "w") as json_file:
json_file.write(model_json)
self.model.save_weights("sp_ctc_lstm_weights.h5")
print "Saved model to disk"
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import json
from unittest import TestCase
from mock import Mock
from mock import patch
from pybutton.request import request
from pybutton.request import request_url
from pybutton.request import query_dict
from pybutton import ButtonClientError
class RequestTestCasePy2(TestCase):
@patch('pybutton.request.urlopen')
@patch('pybutton.request.Request')
def test_get_request(self, MockRequest, mock_url_open):
if sys.version_info[0] == 2:
url = 'http://usebutton.com/api'
method = 'GET'
headers = {'a': 1, 'b': 2}
instance = MockRequest.return_value
mock_response = Mock()
mock_response.read.return_value = '{ "a": 1 }'
mock_url_open.return_value = mock_response
response = request(url, method, headers)
MockRequest.assert_called_with(url)
self.assertEqual(instance.get_method(), method)
instance.add_header.assert_called_with('b', 2)
self.assertEqual(response, {'a': 1})
@patch('pybutton.request.urlopen')
@patch('pybutton.request.Request')
def test_post_request(self, MockRequest, mock_url_open):
if sys.version_info[0] == 2:
url = 'http://usebutton.com/api'
method = 'POST'
headers = {}
instance = MockRequest.return_value
mock_response = Mock()
mock_response.read.return_value = '{ "a": 1 }'
mock_url_open.return_value = mock_response
response = request(url, method, headers)
MockRequest.assert_called_with(url)
self.assertEqual(instance.get_method(), method)
self.assertEqual(response, {'a': 1})
@patch('pybutton.request.urlopen')
@patch('pybutton.request.Request')
def test_post_request_with_data(self, MockRequest, mock_url_open):
if sys.version_info[0] == 2:
url = 'http://usebutton.com/api'
method = 'POST'
headers = {}
data = {'a': {'b': 'c'}}
instance = MockRequest.return_value
mock_response = Mock()
mock_response.read.return_value = '{ "a": 1 }'
mock_url_open.return_value = mock_response
response = request(url, method, headers, data=data)
MockRequest.assert_called_with(url)
self.assertEqual(instance.get_method(), method)
instance.add_data.assert_called_with('{"a": {"b": "c"}}')
instance.add_header.assert_called_with(
'Content-Type',
'application/json'
)
self.assertEqual(response, {'a': 1})
@patch('pybutton.request.urlopen')
@patch('pybutton.request.Request')
def test_raises_with_invalid_response_data(self, MockRequest,
mock_url_open):
if sys.version_info[0] == 2:
url = 'http://usebutton.com/api'
method = 'GET'
headers = {}
mock_response = Mock()
mock_response.read.return_value = 'wat'
mock_url_open.return_value = mock_response
try:
request(url, method, headers)
self.assertTrue(False)
except ButtonClientError as e:
# We expect the generic ButtonClientError, and not a subclass,
# in this condition.
assert type(e) is ButtonClientError
class RequestTestCasePy3(TestCase):
@patch('pybutton.request.urlopen')
@patch('pybutton.request.Request')
def test_get_request(self, MockRequest, mock_url_open):
if sys.version_info[0] == 3:
url = 'http://usebutton.com/api'
method = 'GET'
headers = {'a': 1, 'b': 2}
instance = MockRequest.return_value
mock_response = Mock()
mock_response.read.return_value = '{ "a": 1 }'.encode()
mock_url_open.return_value = mock_response
response = request(url, method, headers)
MockRequest.assert_called_with(url, data=None, headers=headers)
self.assertEqual(instance.get_method(), method)
self.assertEqual(response, {'a': 1})
@patch('pybutton.request.urlopen')
@patch('pybutton.request.Request')
def test_post_request(self, MockRequest, mock_url_open):
if sys.version_info[0] == 3:
url = 'http://usebutton.com/api'
method = 'POST'
headers = {}
instance = MockRequest.return_value
mock_response = Mock()
mock_response.read.return_value = '{ "a": 1 }'.encode()
mock_url_open.return_value = mock_response
response = request(url, method, headers)
MockRequest.assert_called_with(url, data=None, headers=headers)
self.assertEqual(instance.get_method(), method)
self.assertEqual(response, {'a': 1})
@patch('pybutton.request.urlopen')
@patch('pybutton.request.Request')
def test_post_request_with_data(self, MockRequest, mock_url_open):
if sys.version_info[0] == 3:
url = 'http://usebutton.com/api'
method = 'POST'
headers = {}
data = {'a': {'b': 'c'}}
instance = MockRequest.return_value
mock_response = Mock()
mock_response.read.return_value = '{ "a": 1 }'.encode()
mock_url_open.return_value = mock_response
response = request(url, method, headers, data=data)
MockRequest.assert_called_with(
url,
data=json.dumps(data).encode(),
headers=headers
)
self.assertEqual(instance.get_method(), method)
instance.add_header.assert_called_with(
'Content-Type',
'application/json'
)
self.assertEqual(response, {'a': 1})
@patch('pybutton.request.urlopen')
@patch('pybutton.request.Request')
def test_raises_with_invalid_response_data(self, MockRequest,
mock_url_open):
if sys.version_info[0] == 3:
url = 'http://usebutton.com/api'
method = 'GET'
headers = {}
mock_response = Mock()
mock_response.read.return_value = 'wat'.encode()
mock_url_open.return_value = mock_response
try:
request(url, method, headers)
self.assertTrue(False)
except ButtonClientError:
pass
def test_request_url(self):
path = request_url(
True,
'api.usebutton.com',
443,
'/v1/api/btnorder-XXX'
)
self.assertEqual(
path,
'https://api.usebutton.com:443/v1/api/btnorder-XXX'
)
path = request_url(False, 'localhost', 80, '/v1/api/btnorder-XXX')
self.assertEqual(path, 'http://localhost:80/v1/api/btnorder-XXX')
def test_query_dict(self):
url = 'https://api.usebutton.com:/test/url?cursor=test_cursor'
result = query_dict(url)
self.assertEqual(result.get('cursor'), ['test_cursor'])
self.assertEqual(result.get('random_key'), None)
no_query_url = 'https://api.usebutton.com:/test/url'
result = query_dict(no_query_url)
self.assertEqual(result.get('cursor'), None)
|
|
import sys
import traceback
import code
import renderer
class PPU():
def __init__(self, cpu):
self.cpu = cpu
self.renderer = renderer.Renderer(self)
self.memory = [0x00] * 0x4000
self.sprMemory = [0x00] * 0x100
self.nt = [0x00] * (256*240*2)
self.xlines = 256
self.ylines = 240
self.screen = [0x40] * (self.xlines * self.ylines)
self.tube_x = 0
self.tube_y = 0
self.hblank = 0
self.vblank = 0
self.lastBGWrite = 0
self.cyclesPerHBlank = 144
self.bgIndex = 0x40
# PPU memory addresses
self.patternTable0 = 0x0000
self.patternTable1 = 0x1000
self.nameTable0 = 0x2000
self.nameTable1 = 0x2400
self.nameTable2 = 0x2800
self.nameTable3 = 0x2C00
self.nameTableEnd = 0x3000
self.paletteIndex = 0x3F00
self.paletteBG = 0x3F10
self.nameTableSize = 0x0400
self.nmi = 0
self.dirtyVram = 0
self.vramWrites = [0x00] * (32*30*4)
try:
self.mirrorType = self.cpu.rom.mirroring # 0 horiz - 1 vert
self.fourScreen = self.cpu.rom.fourScreen
except:
self.mirrorType = 0
self.fourScreen = 0
# shared memory registers, comprising 0x2000 - 0x2007
# and mirrored to 0x4000 on the main cpu memory
self.registers = [0x00] * 8
self.flipflop0 = 0
self.flipflop1 = 0
self.ctrl1 = 0
self.ctrl2 = 1
self.status = 2
self.sprAddr = 3
self.sprData = 4
self.vramScroll = 5
self.vramLatch = 6
self.vramData = 7
self.scanline = 0
self.scroll_x = 0
self.scroll_y = 0
self.latch_lo = 0
self.latch_hi = 0
def GetPC(self):
return self.cpu.get_register('PC')
def GetRegister(self, addr):
#print "Get Register " + hex(addr) + ": " + hex(self.registers[addr])
return self.registers[addr]
def SetRegister(self, addr, value):
#if addr == self.sprAddr:
#self.flipflop1 ^= 1
if addr == self.sprData:
addr = self.registers[self.sprAddr]
self.sprMemory[addr] = value
self.registers[self.sprAddr] = addr+1
if addr == self.vramScroll:
if self.flipflop0 == 0:
self.scroll_x = value
else:
self.scroll_y = value
self.flipflop0 ^= 1
if addr == self.vramLatch:
if self.flipflop1 == 0:
#print hex(self.GetPC()) + ": latch_lo = " + hex(value)
self.latch_lo = value
else:
#print hex(self.GetPC()) + ": latch_hi = " + hex(value)
self.latch_hi = value
self.flipflop1 ^= 1
if addr == self.vramData:
storAddr = self.latch_lo + (self.latch_hi << 8)
self.SetMemory(storAddr, value)
if self.GetVWrite() == 1:
storAddr += 32
else:
storAddr += 1
self.latch_lo = storAddr & 0xFF
self.latch_hi = (storAddr >> 8) & 0xFF
#print "Set Register " + hex(addr) + ": " + hex(value)
self.registers[self.status] = (self.registers[self.status] & 0xe0) | (value & 0x1F)
self.registers[addr] = value & 0xFF
return value & 0xFF
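    # Register write behaviour implemented above (for reference): two writes to
    # the VRAM latch register set latch_lo then latch_hi; each write to the VRAM
    # data register then stores at (latch_hi << 8) | latch_lo and advances the
    # latch by 32 or 1 depending on the ctrl1 increment bit (GetVWrite).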
def SpriteDMA(self, value):
for i in range(0, 0x100):
self.sprMemory[i] = self.cpu.read_memory((value << 8)+i)
def GetVWrite(self):
return (self.registers[self.ctrl1] >> 2) & 1
def GetVBlank(self):
return (self.registers[self.status] >> 7) & 1
def SetVBlank(self):
#status = self.registers[self.status]
self.registers[self.status] |= 0x80
def ClearVBlank(self):
#status = self.registers[self.status]
self.registers[self.status] &= 0x7F
def GetNMIMask(self):
return (self.registers[self.ctrl1] >> 7) & 1
def GetHit(self):
return (self.registers[self.status] >> 6) &1
def GetImgMask(self):
return (self.registers[self.ctrl2] >> 1) &1
def GetSprMask(self):
return (self.registers[self.ctrl2] >> 2) &1
def GetScreenEnable(self):
return (self.registers[self.ctrl2] >> 3) &1
def GetSprEnable(self):
return (self.registers[self.ctrl2] >> 4) &1
def GetSprTable(self):
return (self.registers[self.ctrl1] >> 3) &1
def GetBGTable(self):
return (self.registers[self.ctrl1] >> 4) &1
def AddressTranslation(self, addr):
if addr < self.nameTable0: # mapped by mapper in cart
return self.cpu.map_vmem(addr)
# adjust for name table mirror
if addr >= 0x3000 and addr < 0x3F00:
addr -= 0x1000
# now if in nametable 2 or 3 mirror 0 or 1 based on the cart
# dont do mirroring at all if the fourScreen bit is set
if self.fourScreen == 0:
if self.mirrorType == 0: # horizontal
if addr >= self.nameTable1 and addr < self.nameTable2:
addr -= self.nameTableSize
if addr >= self.nameTable3 and addr < self.nameTableEnd:
addr -= self.nameTableSize
else: # vertical
if addr >= self.nameTable2 and addr < self.nameTable3:
addr -= (self.nameTableSize * 2)
if addr >= self.nameTable3 and addr < self.nameTableEnd:
addr -= (self.nameTableSize * 2)
return (self.memory, addr)
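    # Mirroring example (illustrative): with horizontal mirroring
    # (mirrorType == 0) an access to 0x2400 (name table 1) folds back onto
    # 0x2000, and any address in 0x3000-0x3EFF is first reduced by 0x1000 into
    # the name table range before mirroring is applied.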
def MarkDirty(self, address):
self.dirtyVram = 1
base = (((address % 0x800)/0x400)*(32*30))
if (address % 0x400) < 0x3c0:
            self.vramWrites[base + (address % 0x400)] = 1  # index within the 0x400-byte name table
else:
i = (address % 0x400) - 0x3c0
x = (i % 8) * 4
y = (i / 8) * 4
for ys in range(y, y+4):
for xs in range(x, x+4):
self.vramWrites[base + xs + (ys * 32)] = 1
def ReadMemory(self, address):
(mem, addr) = self.AddressTranslation(address)
return mem[addr]
def SetMemory(self, address, value):
(mem, addr) = self.AddressTranslation(address)
if addr >= 0x3F10 and addr < 0x3F20:
self.lastBGWrite = value
if addr >= self.nameTable0 and addr < self.nameTableEnd:
self.MarkDirty(address)
mem[addr] = value & 0xFF
return value & 0xFF
    def DrawBackground(self):
        # NOTE: background rendering is currently disabled; the early return
        # below makes the remainder of this method unreachable.
        return
        q = 0
for y in range(0, self.ylines):
ntable = self.ReadMemory(self.nameTable0 + y)
ry = self.scroll_y + y
name_y = ry % self.ylines
rx = self.scroll_x
nnot = (rx/256)&1
for x in range(0, self.xlines):
name_x = rx % self.xlines
c = self.nt[name_x + (256 * name_y) + ((256 * 240) * (ntable^nnot))]
if c&3 != 0:
self.screen[q] = c
else:
self.screen[q] = self.bgIndex
q += 1
rx += 1
def DrawSprites(self):
if self.GetSprTable() != 0:
tableIndex = 0x1000
else:
tableIndex = 0x0000
for i in range(0, 64):
index = i * 4
sy = self.sprMemory[index]
sx = self.sprMemory[index+3]
fy = (self.sprMemory[index+2]>>7)&1
fx = (self.sprMemory[index+2]>>6)&1
pr = (self.sprMemory[index+2]>>5)&1
hi = self.sprMemory[index+2]&3
p = self.sprMemory[index+1]
for y in range(sy, sy+8):
for x in range(sx, sx+8):
if x > 0 and x < self.xlines and y > 0 and y < self.ylines:
if fx == 0:
ho = 7 - (x - sx)
else:
ho = x-sx
if fy == 0:
vo = y - sy
else:
vo = 7 - (y - sy)
addr = tableIndex + (p*0x10) + vo
c = ((self.ReadMemory(addr)>>ho)&1)
c |= ((self.ReadMemory(addr+8)>>ho)&1)<<1
c |= hi << 2
if c & 3 == 0:
dat = 0
else:
dat = self.ReadMemory(self.paletteIndex + c)
if dat != 0:
if pr == 0:
                                self.screen[x+(y*self.xlines)] = dat
                            else:
                                if self.screen[x+(y*self.xlines)] == 0x40:
                                    self.screen[x+(y*self.xlines)] = dat
    def DrawNameTable(self, nameTable, xTile, yTile):
        if self.GetBGTable() != 0:
tableIndex = 0x1000
else:
tableIndex = 0
for y in range(yTile*8, (yTile*8)+4):
for x in range(xTile*8, (xTile*8)+4):
name_x = x/8
                name_y = y/8
sx = x%8
sy = y%8
addr = (((name_x%32) + (name_y%32) * 32))
addr += nameTable * 0x400
p = self.ReadMemory(self.nameTable0 + addr)
cIndex = tableIndex + (p*16) + sy
c = (self.ReadMemory(cIndex) >> (7 - sx)) & 1
c |= (self.ReadMemory(cIndex+8) >> (7 - sx)) << 1
name_x = x/32
name_y = y/32
                addr = ((name_x % 8) + ((name_y % 8) * 8)) + 0x3c0
addr += nameTable * 0x400
p = self.ReadMemory(self.nameTable0 + addr)
name_x = (x / 16) % 2
name_y = (y / 16) % 2
c |= ((p >> 2 * (name_x + (name_y << 1))) & 3) << 2
self.nt[x + (y * self.xlines) + (nameTable * (self.xlines * self.ylines))] = c
def BlankScreen(self):
self.screen = [0x00] * (self.xlines * self.ylines)
def UpdateFrame(self):
if self.GetScreenEnable() != 0:
self.BlankScreen()
else:
self.DrawBackground()
if self.GetSprEnable() != 0:
self.DrawSprites()
if self.dirtyVram != 0 and self.GetScreenEnable() != 0:
self.dirtyVram = 0
for i in range(0, (32*30*2)):
if self.vramWrites[i] != 0:
self.DrawNameTable(i/(32*30), i%32, (i%(32*30))/32)
self.vramWrites[i] = 0
def stepPPU(self):
# each step draws one pix, but updates only on hblank
#self.renderer.Update(self.screen, self.tube_x, self.tube_y)
self.tube_x += 1
if self.tube_x == self.xlines:
self.tube_x = 0
self.tube_y += 1
self.hblank = 1
if self.tube_y < self.ylines:
self.renderer.Update(self.screen, self.tube_y)
if self.tube_y == self.ylines:
self.SetVBlank()
if self.GetNMIMask() == 1:
self.nmi = 1
self.cpu.nmi_flipflop = 1
if self.tube_y == (self.ylines + 21):
self.tube_y = 0
self.ClearVBlank()
self.nmi = 0
self.cpu.nmi_flipflop = 1
def runPPU(self, numCPUCycles):
# we get to run 3 PPU cycles for every 1 CPU cycle
# we step the CPU first, then based on how long the
# instruction took, we step the PPU 3x that number
self.UpdateFrame()
for i in range(0, numCPUCycles*3):
self.stepPPU()
|
|
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs
parse_qs # placate pyflakes
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
import _version
__version__ = _version.__version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
""" Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. """
if not isinstance(s, unicode):
if not isinstance(s, str):
raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError, le:
raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
return s
def to_utf8(s):
return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
if isinstance(s, basestring):
return to_unicode(s)
else:
return s
def to_utf8_if_string(s):
if isinstance(s, basestring):
return to_utf8(s)
else:
return s
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_unicode(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
"""
Raise TypeError if x is a str or if x is an iterable which
contains a str.
"""
if isinstance(x, basestring):
return to_utf8(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_utf8_if_string(e) for e in l ]
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s.encode('utf-8'), safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
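# Hedged example (placeholder credentials) of registering a consumer and using
# it with the Client class defined further below for a two-legged request:
#
#   consumer = Consumer(key='your-app-key', secret='your-app-secret')
#   client = Client(consumer)
#   resp, content = client.request('https://api.example.com/resource', 'GET')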
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
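# Hedged example of the to_string()/from_string() round trip (placeholder
# values); the serialized form is a urlencoded query string:
#
#   token = Token('request-key', 'request-secret')
#   token.set_callback('https://example.com/callback')
#   serialized = token.to_string()
#   restored = Token.from_string(serialized)
#   assert restored.key == token.key and restored.secret == token.secret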
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = OAUTH_VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
body='', is_form_encoded=False):
if url is not None:
self.url = to_unicode(url)
self.method = method
if parameters is not None:
for k, v in parameters.iteritems():
k = to_unicode(k)
v = to_unicode_optional_iterator(v)
self[k] = v
self.body = body
self.is_form_encoded = is_form_encoded
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
d = {}
for k, v in self.iteritems():
d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(d, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url)
try:
query = base_url.query
except AttributeError:
# must be python <2.5
query = base_url[4]
query = parse_qs(query)
for k, v in self.items():
query.setdefault(k, []).append(v)
try:
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
except AttributeError:
# must be python <2.5
scheme = base_url[0]
netloc = base_url[1]
path = base_url[2]
params = base_url[3]
fragment = base_url[5]
url = (scheme, netloc, path, params,
urllib.urlencode(query, True), fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((to_utf8_if_string(key), to_utf8(value)))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
else:
items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
# Include any query string parameters from the provided URL
query = urlparse.urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
items.extend(url_items)
items.sort()
encoded_str = urllib.urlencode(items)
# Encode signature parameters per OAuth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if not self.is_form_encoded:
# according to
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# section 4.1.1 "OAuth Consumers MUST NOT include an
# oauth_body_hash parameter on requests with form-encoded
# request bodies."
self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None,
body='', is_form_encoded=False):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters, body=body,
is_form_encoded=is_form_encoded)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
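# Hedged example of building and signing a Request by hand (placeholder URL
# and credentials); Client.request() below performs these steps automatically:
#
#   consumer = Consumer('key', 'secret')
#   token = Token('tok', 'tok-secret')
#   req = Request.from_consumer_and_token(consumer, token=token,
#       http_method='GET', http_url='https://api.example.com/photos?size=small')
#   req.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)
#   headers = req.to_header(realm='https://api.example.com/')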
class Client(httplib2.Http):
"""OAuthClient is a worker to attempt to execute a request."""
def __init__(self, consumer, token=None, cache=None, timeout=None,
proxy_info=None):
if consumer is not None and not isinstance(consumer, Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, Token):
raise ValueError("Invalid token.")
self.consumer = consumer
self.token = token
self.method = SignatureMethod_HMAC_SHA1()
httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info)
def set_signature_method(self, method):
if not isinstance(method, SignatureMethod):
raise ValueError("Invalid signature method.")
self.method = method
def request(self, uri, method="GET", body='', headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None, realm=None):
DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded'
if not isinstance(headers, dict):
headers = {}
if method == "POST" and 'Content-Type' not in headers:
headers['Content-Type'] = headers.get('Content-Type',
DEFAULT_POST_CONTENT_TYPE)
is_form_encoded = \
headers.get('Content-Type') == 'application/x-www-form-urlencoded'
if is_form_encoded and body:
parameters = parse_qs(body)
else:
parameters = None
req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=method, http_url=uri,
parameters=parameters, body=body, is_form_encoded=is_form_encoded)
req.sign_request(self.method, self.consumer, self.token)
if not realm:
schema, rest = urllib.splittype(uri)
if rest.startswith('//'):
hierpart = '//'
else:
hierpart = ''
host, rest = urllib.splithost(rest)
realm = schema + ':' + hierpart + host
if is_form_encoded:
body = req.to_postdata()
elif method == "GET":
uri = req.to_url()
else:
headers.update(req.to_header(realm=realm))
return httplib2.Http.request(self, uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
class Server(object):
"""A skeletal implementation of a service provider, providing protected
resources to requests from authorized consumers.
This class implements the logic to check requests for authorization. You
can use it with your web server or web framework to protect certain
resources with OAuth.
"""
timestamp_threshold = 300 # In seconds, five minutes.
version = OAUTH_VERSION
signature_methods = None
def __init__(self, signature_methods=None):
self.signature_methods = signature_methods or {}
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.name] = signature_method
return self.signature_methods
def verify_request(self, request, consumer, token):
"""Verifies an api call and checks all the parameters."""
self._check_version(request)
self._check_signature(request, consumer, token)
parameters = request.get_nonoauth_parameters()
return parameters
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _check_version(self, request):
"""Verify the correct version of the request for this server."""
version = self._get_version(request)
if version and version != self.version:
raise Error('OAuth version %s not supported.' % str(version))
def _get_version(self, request):
"""Return the version of the request for this server."""
try:
version = request.get_parameter('oauth_version')
except:
version = OAUTH_VERSION
return version
def _get_signature_method(self, request):
"""Figure out the signature with some defaults."""
try:
signature_method = request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise Error('Signature method %s not supported; try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_verifier(self, request):
return request.get_parameter('oauth_verifier')
def _check_signature(self, request, consumer, token):
timestamp, nonce = request._get_timestamp_nonce()
self._check_timestamp(timestamp)
signature_method = self._get_signature_method(request)
try:
signature = request.get_parameter('oauth_signature')
except:
raise MissingSignature('Missing oauth_signature.')
# Validate the signature.
valid = signature_method.check(request, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(request, consumer, token)
raise Error('Invalid signature. Expected signature base '
'string: %s' % base)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise Error('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' % (timestamp, now,
self.timestamp_threshold))
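# Hedged sketch of provider-side verification with Server; the incoming
# method, URL, headers and the stored consumer/token would come from your
# web framework and datastore, not from this module:
#
#   server = Server()
#   server.add_signature_method(SignatureMethod_HMAC_SHA1())
#   oauth_request = Request.from_request(http_method, http_url,
#       headers=headers, query_string=query_string)
#   params = server.verify_request(oauth_request, consumer, token)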
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if not hasattr(request, 'normalized_url') or request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
|
|
"""
Copyright 2015, Institute e-Austria, Timisoara, Romania
http://www.ieat.ro/
Developers:
* Gabriel Iuhasz, [email protected]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flask import send_file
from flask import request
from flask.ext.restplus import Resource, fields
from flask import stream_with_context, Response, send_from_directory
import os
import jinja2
import sys
import subprocess
import platform
import logging
from logging.handlers import RotatingFileHandler
import glob
import tarfile
from pyUtil import *
from app import *
import tempfile
# directory location
logDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'log')
tmpDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
pidDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'pid')
collectdlog = '/var/log/collectd.log'
collectdpid = os.path.join(pidDir, 'collectd.pid')
lsflog = '/var/log/logstash-fowarder/logstash-fowarder.log'
lsferr = '/var/log/logstash-fowarder/logstash-fowarder.err'
collectdConf = '/etc/collectd/collectd.conf'
lsfConf = '/etc/logstash-forwarder.conf'
lsfList = os.path.join(tmpDir, 'logstashforwarder.list')
lsfGPG = os.path.join(tmpDir, 'GPG-KEY-elasticsearch')
certLoc = '/opt/certs/logstash-forwarder.crt'
stormLogDir = '/home/ubuntu/apache-storm-0.9.5/logs'
# supported aux components
# auxList = ['collectd', 'lsf', 'jmx']
nodeRoles = api.model('query details Model', {
'roles': fields.List(fields.String(required=False, default='hdfs',
description='Roles assigned to this node!'))
})
collectdConfModel = api.model('configuration details Model for collectd', {
'LogstashIP': fields.String(required=True, default='127.0.0.1', description='IP of the Logstash Server'),
'UDPPort': fields.String(required=True, default='25680', description='Port of UDP plugin from Logstash Server'),
'Interval': fields.String(required=False, default='15', description='Polling interval for all resources'),
'Cassandra': fields.Integer(required=False, default=0, description='Configure GenericJMX for cassandra monitoring'),
'MongoDB': fields.Integer(required=False, default=0, description='Configure collectd for MongoDB monitoring'),
'MongoHost': fields.String(required=False, default='127.0.0.1', description='Configure MongoDBHost'),
'MongoDBPort': fields.String(required=False, default='27017', description='Configure MongoDBPort'),
'MongoDBUser': fields.String(required=False, default='', description='Configure MongoDB Username'),
'MongoDBPasswd': fields.String(required=False, default='password', description='Configure MongoDB Password'),
'MongoDBs': fields.String(required=False, default='admin', description='Configure MongoDBs')
})
lsfConfModel = api.model('configuration details Model for LSF', {
'LogstashIP': fields.String(required=True, default='127.0.0.1', description='IP of the Logstash Server'),
'LumberjackPort': fields.String(required=True, default='5000', description='Logstash Lumberjack input port')
})
yarnProperties = api.model('Yarn properties configuration Model', {
'Period': fields.String(required=True, default='10', description='Polling period for all Yarn/HDFS metrics')
})
sparkProperties = api.model('Spark properties configuration Model', {
'LogstashIP': fields.String(required=True, default='109.231.121.210', description='Logstash IP (only Spark)'),
'GraphitePort': fields.String(required=True, default='5002', description='Logstash Graphite input Port (only Spark)'),
'Period': fields.String(required=True, default='5', description='Spark Polling Period')
})
# Instance of AuxComponent Class
aux = AuxComponent(lsfList, lsfGPG)
@agent.route('/v1/node')
class NodeInfo(Resource):
def get(self):
mType = platform.uname()
response = jsonify({'System': mType[0],
'Node': mType[1],
'Release': mType[2],
'Version': mType[3],
'Machine': mType[4],
'Processor': mType[5]})
response.status_code = 200
return response
@agent.route('/v1/deploy')
class NodeDeploy(Resource):
@api.expect(nodeRoles)
def post(self):
rolesList = request.json['roles']
app.logger.info('[%s] : [INFO] Role list received: %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(rolesList))
try:
aComp = aux.install(rolesList)
except Exception as inst:
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
app.logger.error('[%s] : [ERROR] Installing components based on roles with: %s and %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'System Error',
'Message': 'Error while installing components'})
response.status_code = 500
return response
response = jsonify({'Status': 'Done',
'Components': aComp})
app.logger.info('[%s] : [INFO] Installed: %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(aComp))
response.status_code = 201
return response
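# Hedged example request for the deploy endpoint (host, port and route prefix
# are assumptions; the agent below is started on port 5222):
#
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"roles": ["yarn", "spark"]}' \
#        http://localhost:5222/agent/v1/deploy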
@agent.route('/v1/collectd')
class NodeDeployCollectd(Resource):
@api.expect(collectdConfModel)
def post(self):
collectdTemp = os.path.join(tmpDir, 'collectd.tmp')
if not request.json:
response = jsonify({'Status': 'Malformed request, json expected'})
response.status_code = 400
app.logger.warning('[%s] : [WARN] Malformed request, json expected', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
reqKeyList = ['LogstashIP', 'UDPPort', 'Interval', 'Cassandra', 'MongoDB', 'MongoHost', 'MongoDBPort',
'MongoDBUser', 'MongoDBPasswd', 'MongoDBs']
for k in request.json:
app.logger.info('[%s] : [INFO] Key found %s', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), k)
if k not in reqKeyList:
response = jsonify({'Status': 'Unrecognized key %s' %(k)})
response.status_code = 400
app.logger.warning('[%s] : [WARN] Unsupported key %s', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), k)
return response
if 'LogstashIP' not in request.json or 'UDPPort' not in request.json:
response = jsonify({'Status': 'Missing key(s)'})
response.status_code = 400
app.logger.warning('[%s] : [WARN] Missing key(s)', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
if 'Interval' not in request.json:
pollInterval = '10'
else:
pollInterval = request.json['Interval']
if 'Cassandra' not in request.json:
cassandra = 0
else:
cassandra = request.json['Cassandra']
if 'MongoDB' not in request.json:
mongodb = 0
mongohost = 0
mongodbport = 0
mongodbuser = 0
mongodbpasswd = 0
mongodbs = 0
else:
mongodb = request.json['MongoDB']
if 'MongoHost' not in request.json:
mongohost = '127.0.0.1'
else:
mongohost = request.json['MongoHost']
if 'MongoDBPort' not in request.json:
mongodbport = '27017'
else:
mongodbport = request.json['MongoDBPort']
if 'MongoDBUser' not in request.json:
mongodbuser = ' '
else:
mongodbuser = request.json['MongoDBUser']
if 'MongoDBPasswd' not in request.json:
mongodbpasswd = 'password'
else:
mongodbpasswd = request.json['MongoDBPasswd']
if 'MongoDBs' not in request.json:
mongodbs = 'admin'
else:
mongodbs = request.json['MongoDBs']
settingsDict = {'logstash_server_ip': request.json['LogstashIP'],
'logstash_server_port': request.json['UDPPort'],
'collectd_pid_file': '/var/run/collectd.pid',
'poll_interval': pollInterval,
'cassandra': cassandra, 'mongodb': mongodb, 'mongohost': mongohost,
'mongoPort': mongodbport, 'mongouser': mongodbuser,
'mongopassword': mongodbpasswd, 'mongoDBs': mongodbs}
aux.configureComponent(settingsDict, collectdTemp, collectdConf)
aux.controll('collectd', 'restart')
response = jsonify({'Status': 'Done',
'Message': 'Collectd Started'})
app.logger.info('[%s] : [INFO] collectd started with: %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(settingsDict))
response.status_code = 200
return response
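# Hedged example payload for the collectd endpoint, mirroring
# collectdConfModel above (values and the /agent route prefix are assumptions):
#
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"LogstashIP": "127.0.0.1", "UDPPort": "25680", "Interval": "15"}' \
#        http://localhost:5222/agent/v1/collectd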
@agent.route('/v1/lsf')
class NodeDeployLSF(Resource):
@api.expect(lsfConfModel)
def post(self):
lsfTemp = os.path.join(tmpDir, 'logstash-forwarder.tmp')
if not request.json:
response = jsonify({'Status': 'Malformed request, json expected'})
response.status_code = 400
app.logger.warning('[%s] : [WARN] Malformed request, json expected', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return response
reqKeyList = ['LogstashIP', 'LumberjackPort']
for k in request.json:
app.logger.info('[%s] : [INFO] Key found %s', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), k)
if k not in reqKeyList:
response = jsonify({'Status': 'Unrecognized key %s' %(k)})
response.status_code = 400
app.logger.warning('[%s] : [WARN] Unsupported key %s', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), k)
return response
settingsDict = {'ESCoreIP': request.json['LogstashIP'],
'LSLumberPort': request.json['LumberjackPort']}
app.logger.info('[%s] : [INFO] Logstash-Forwarder settings: %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(settingsDict))
if not os.path.isfile(certLoc):
app.logger.warning('[%s] : [WARN] Logstash Server certificate not detected',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'Env Error',
'Message': 'LS Server certificate is missing'})
response.status_code = 404
return response
aux.configureComponent(settingsDict, lsfTemp, lsfConf)
aux.controll('logstash-forwarder', 'restart')
response = jsonify({'Status': 'Done',
'Message': 'LSF Started'})
response.status_code = 200
return response
@agent.route('/v1/start')
class NodeMonitStartAll(Resource):
def post(self):
try:
aux.controll('collectd', 'start')
except Exception as inst:
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
app.logger.error('[%s] : [ERROR] While starting collectd with: %s and %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': type(inst),
'Message': inst.args})
response.status_code = 500
return response
try:
aux.controll('logstash-forwarder', 'start')
except Exception as inst:
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
app.logger.error('[%s] : [ERROR] While starting logstash-forwarder with: %s and %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': type(inst),
'Message': inst.args})
response.status_code = 500
return response
response = jsonify({'Status': 'Started',
'Message': 'Auxiliary components started!'})
response.status_code = 200
return response
@agent.route('/v1/stop')
class NodeMonitStopAll(Resource):
def post(self):
try:
aux.controll('collectd', 'stop')
except Exception as inst:
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
app.logger.error('[%s] : [ERROR] While stopping collectd with: %s and %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': type(inst),
'Message': inst.args})
response.status_code = 500
return response
try:
aux.controll('logstash-forwarder', 'stop')
except Exception as inst:
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
app.logger.error('[%s] : [ERROR] While stopping logstash-forwarder with: %s and %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': type(inst),
'Message': inst.args})
response.status_code = 500
return response
response = jsonify({'Status': 'Stopped',
'Message': 'Auxiliary components stopped!'})
response.status_code = 200
return response
@agent.route('/v1/start/<auxComp>')
@api.doc(params={'auxComp': 'Can be collectd or lsf'})
class NodeMonitStartSelective(Resource):
def post(self, auxComp):
if not aux.check(auxComp):
response = jsonify({'Status': 'Parameter error',
'Message': 'Unsupported Parameter ' + auxComp})
app.logger.warning('[%s] : [WARN] Unsupported parameter: %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(auxComp))
response.status_code = 400
return response
try:
aux.controll(auxComp, 'start')
except Exception as inst:
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
response = jsonify({'Status': type(inst),
'Message': inst.args})
app.logger.error('[%s] : [ERROR] Error starting %s with: %s and %s',
                 datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(auxComp), type(inst), inst.args)
response.status_code = 500
return response
response = jsonify({'Status': 'Done',
'Message': 'Component ' + auxComp + ' started!'})
response.status_code = 200
return response
@agent.route('/v1/stop/<auxComp>')
@api.doc(params={'auxComp': 'Can be collectd or lsf'})
class NodeMonitStopSelective(Resource):
def post(self, auxComp):
if not aux.check(auxComp):
response = jsonify({'Status': 'Parameter error',
'Message': 'Unsupported Parameter ' + auxComp})
app.logger.warning('[%s] : [WARN] Unsupported parameter: %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(auxComp))
response.status_code = 400
return response
try:
aux.controll(auxComp, 'stop')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Error stopping %s with : %s and %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(auxComp), type(inst), inst.args)
response = jsonify({'Status': type(inst),
'Message': inst.args})
response.status_code = 500
return response
response = jsonify({'Status': 'Done',
'Message': 'Component ' + auxComp + ' stopped!'})
response.status_code = 200
return response
@agent.route('/v1/log')
class NodeLog(Resource):
def get(self):
agentlog = os.path.join(logDir, 'dmon-agent.log')
try:
logFile1 = open(agentlog, 'r')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Opening log with %s and %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'File Error',
'Message': 'Cannot open log file'})
response.status_code = 500
return response
app.logger.info('[%s] : [INFO] Agent log file -> %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(agentlog))
return send_file(logFile1, mimetype='text/plain', as_attachment=True)
@agent.route('/v1/log/component/<auxComp>')
@api.doc(params={'auxComp': 'Can be collectd or lsf'})
class NodeMonitLogs(Resource):
def get(self, auxComp):
if not aux.check(auxComp):
response = jsonify({'Status': 'Parameter error',
'Message': 'Unsupported Parameter ' + auxComp})
app.logger.warning('[%s] : [WARN] Unsupported parameter: %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(auxComp))
response.status_code = 400
return response
if auxComp == 'collectd':
try:
clog = open(collectdlog, 'r')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Opening collectd log with %s and %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'File Error',
'Message': 'Cannot open log file'})
response.status_code = 500
return response
return send_file(clog, mimetype='text/plain', as_attachment=True)
if auxComp == 'lsf':
try:
clog = open(lsflog, 'r')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Opening lsf log with %s and %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
response = jsonify({'Status': 'File Error',
'Message': 'Cannot open log file'})
response.status_code = 500
return response
return send_file(clog, mimetype='text/plain', as_attachment=True)
else:
response = jsonify({'Status': 'Unsupported component ' + auxComp})
response.status_code = 400
return response
@agent.route('/v1/conf/<auxComp>')
@api.doc(params={'auxComp': 'Can be collectd or lsf'})
class NodeMonitConf(Resource):
def get(self, auxComp):
if not aux.check(auxComp):
response = jsonify({'Status': 'Parameter error',
'Message': 'Unsupported Parameter ' + auxComp})
app.logger.warning('[%s] : [WARN] Unsupported parameter: %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(auxComp))
response.status_code = 400
return response
if auxComp == 'collectd':
try:
cConf = open(collectdConf, 'r')
except Exception as inst:
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
app.logger.error('[%s] : [ERROR] Opening collectd conf file',
                 datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'File Error',
                    'Message': 'Cannot open collectd configuration file'})
response.status_code = 500
return response
return send_file(cConf, mimetype='text/plain', as_attachment=True)
if auxComp == 'lsf':
try:
lConf = open(lsfConf, 'r')
except Exception as inst:
# print >> sys.stderr, type(inst)
# print >> sys.stderr, inst.args
app.logger.error('[%s] : [ERROR] Opening logstash-forwarder conf file',
                 datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'File Error',
                    'Message': 'Cannot open logstash-forwarder configuration file'})
response.status_code = 500
return response
return send_file(lConf, mimetype='application/json', as_attachment=True)
else:
response = jsonify({'Status': 'Unsupported component ' + auxComp})
response.status_code = 400
return response
@agent.route('/v1/check')
class NodeCheck(Resource): # TODO: implement check functionality
def get(self):
rCollectd = aux.checkAux('collectd')
rLSF = aux.checkAux('logstash-forwarder')
response = jsonify({'Collectd': rCollectd,
'LSF': rLSF})
response.status_code = 200
return response
@agent.route('/v1/bdp/<platform>') #TODO: Needs testing
@api.doc(params={'platform': 'Can be yarn or spark'})
class AgentMetricsSystem(Resource):
@api.expect(sparkProperties)
def post(self, platform):
if not request.json:
response = jsonify({'Status': 'Request Error',
'Message': 'Request body must be JSON'})
app.logger.warning('[%s] : [WARN] Invalid request content-type: %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(request.content_type))
response.status_code = 400
return response
BDService = BDPlatform(tmpDir)
if platform == 'yarn':
if not BDService.checkRole('yarn'):
response = jsonify({'Status': 'Error',
'Message': 'Yarn not detected!'})
app.logger.warning('[%s] : [WARN] No YARN detected',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response.status_code = 404
return response
if 'Period' not in request.json:
response = jsonify({'Status': 'Request Error',
'Message': 'Must contain Period field'})
app.logger.error('[%s] : [ERROR] Period must be specified',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response.status_code = 400
return response
settingsDict = {'metrics2_period': request.json['Period']}
app.logger.info('[%s] : [INFO] Period is set to: %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(request.json['Period']))
BDService.generateYarnConfig(settingsDict)
response = jsonify({'Status': 'Done',
'Message': 'Yarn properties uploaded'})
response.status_code = 200
return response
if platform == 'spark':
if not BDService.checkRole('spark'):
response = jsonify({'Status': 'Error',
'Message': 'Spark not detected!'})
app.logger.warning('[%s] : [WARN] No Spark detected',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response.status_code = 404
return response
if 'Period' not in request.json or 'LogstashIP' not in request.json or 'GraphitePort' not in request.json:
response = jsonify({'Status': 'Request Error',
'Message': 'Must contain Period, Logstash IP and Graphite Port fields'})
app.logger.error('[%s] : [ERROR] No period, Logstash IP or graphite port fields detected',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response.status_code = 400
return response
settingsDict = {'logstashserverip': request.json['LogstashIP'],
'logstashportgraphite': request.json['GraphitePort'],
'period': request.json['Period']}
app.logger.info('[%s] : [INFO] Spark settings: %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(settingsDict))
BDService.generateSparkConfig(settingsDict)
response = jsonify({'Status': 'Done',
'Message': 'Spark properties uploaded'})
response.status_code = 200
return response
else:
response = jsonify({'Status': 'Unsupported',
'Message': 'Platform Unsupported'})
app.logger.error('[%s] : [ERROR] Unsupported platform',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response.status_code = 404
return response
@agent.route('/v1/bdp/storm/logs')
class FetchStormLogs(Resource):
def get(self):
stDir = os.getenv('STORM_LOG', stormLogDir)
app.logger.warning('[%s] : [WARN] Storm log directory set to %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), stDir)
logFile = os.path.join(stDir, 'worker-6700.log')
if not os.path.isfile(logFile):
app.logger.warning('[%s] : [WARN] Storm logfile not found at %s: ',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(logFile))
response = jsonify({'Status': 'Not Found', 'Message': 'No storm logfile found'})
response.status_code = 404
return response
def readFile(lgFile):
with open(lgFile) as f:
    for line in f:
        yield line
return Response(stream_with_context(readFile(logFile)))
@agent.route('/v2/bdp/storm/logs')
class FetchStormLogsSDAll(Resource):
def get(self):
stDir = os.getenv('STORM_LOG', stormLogDir)
app.logger.warning('[%s] : [WARN] Storm log directory set to %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), stDir)
lFile = []
workerFile = 'worker-*.log'
# logFile = os.path.join(stDir, workerFile)
for name in glob.glob(os.path.join(stDir, workerFile)):
lFile.append(name)
if not lFile:
app.logger.warning('[%s] : [WARN] No Storm worker logs found',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'No Storm worker logs found'})
response.status_code = 404
return response
tmpDir = tempfile.gettempdir()
tarlog = os.path.join(tmpDir, 'workerlogs.tar')
if os.path.isfile(tarlog):
os.remove(tarlog)
app.logger.warning('[%s] : [WARN] Old Storm workerlog detected and removed',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
out = tarfile.open(tarlog, mode='w')
try:
for file in lFile:
path, filename = os.path.split(file)
out.add(file, arcname=filename)
finally:
out.close()
app.logger.info('[%s] : [INFO] Storm log tar file created at %s containing %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(tarlog), str(lFile))
if not os.path.isfile(tarlog):
app.logger.warning('[%s] : [WARN] Storm logfile tar not found at %s: ',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(tarlog))
response = jsonify({'Status': 'Not Found', 'Message': 'No storm tar logfile found'})
response.status_code = 404
return response
path, filename = os.path.split(tarlog)
return send_from_directory(tmpDir, filename, as_attachment=True, mimetype='application/tar')
@agent.route('/v3/bdp/storm/logs')
class FetchStormLogsSD(Resource):
def get(self):
stDir = os.getenv('STORM_LOG', stormLogDir)
app.logger.warning('[%s] : [WARN] Storm log directory set to %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), stDir)
lFile = []
workerFile = 'worker-' + ('[0-9]' * 4) + '.log'
# logFile = os.path.join(stDir, workerFile)
for name in glob.glob(os.path.join(stDir, workerFile)):
lFile.append(name)
if len(lFile) > 1:
app.logger.error('[%s] : [ERROR] More than one Storm worker logfile found -> %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(lFile))
response = jsonify({'Status': 'Too many worker logs', 'Logs': lFile})
response.status_code = 500
return response
if not lFile:
app.logger.warning('[%s] : [WARN] No Storm worker logs found',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
response = jsonify({'Status': 'No Storm worker logs found'})
response.status_code = 404
return response
logFile = lFile[0]
if not os.path.isfile(logFile):
app.logger.warning('[%s] : [WARN] Storm logfile not found at %s: ',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(logFile))
response = jsonify({'Status': 'Not Found', 'Message': 'No storm logfile found'})
response.status_code = 404
return response
path, filename = os.path.split(logFile)
return send_from_directory(stDir, filename, as_attachment=True, mimetype='text/plain')
@agent.route('/v1test')
class Test(Resource):
def get(self):
test = {}
test[logDir] = os.path.isdir(logDir)
test[tmpDir] = os.path.isdir(tmpDir)
test[pidDir] = os.path.isdir(pidDir)
test[os.path.join(logDir, 'dmon-agent.log')] = os.path.isfile(os.path.join(logDir, 'dmon-agent.log'))
test[collectdlog] = os.path.isfile(collectdlog)
test[collectdpid] = os.path.isfile(collectdpid)
test[lsflog] = os.path.isfile(lsflog)
test[lsferr] = os.path.isfile(lsferr)
test[collectdConf] = os.path.isfile(collectdConf)
test[lsfConf] = os.path.isfile(lsfConf)
test[lsfList] = os.path.isfile(lsfList)
test[lsfGPG] = os.path.isfile(lsfGPG)
test[certLoc] = os.path.isfile(certLoc)
return test
@agent.route('/v1/shutdown')
class ShutDownAgent(Resource):
def post(self):
if aux.checkAux('collectd'):
try:
aux.controll('collectd', 'stop')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Error stopping %s with : %s and %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), 'collectd', type(inst), inst.args)
response = jsonify({'Status': type(inst),
'Message': inst.args})
response.status_code = 500
return response
collectdStatus = 'Stopped'
else:
collectdStatus = 'Offline'
if aux.checkAux('logstash-forwarder'):
try:
aux.controll('logstash-forwarder', 'stop')
except Exception as inst:
app.logger.error('[%s] : [ERROR] Error stopping %s with : %s and %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), 'logstash-forwarder', type(inst), inst.args)
response = jsonify({'Status': type(inst),
'Message': inst.args})
response.status_code = 500
return response
lsfStatus = 'Stopped'
else:
lsfStatus = 'Offline'
try:
shutdown_agent()
except Exception as inst:
app.logger.error('[%s] : [ERROR] Error while shutting down dmon-agent with: %s and %s',
datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
response = jsonify({'Status': 'Error',
'Message': 'Shutdown failed'})
response.status_code = 500
return response
response = jsonify({'Status': 'Shutting down',
'Collectd': collectdStatus,
'LSF': lsfStatus})
response.status_code = 200
return response
if __name__ == '__main__':
handler = RotatingFileHandler(os.path.join(logDir, 'dmon-agent.log'), maxBytes=100000000, backupCount=5)
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
log = logging.getLogger('werkzeug')
log.setLevel(logging.DEBUG)
log.addHandler(handler)
app.run(host='0.0.0.0', port=5222, debug=True)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PublicIPPrefixesOperations(object):
"""PublicIPPrefixesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified public IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the PublicIpPrefix.
:type public_ip_prefix_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
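    # Hedged usage sketch (not generated code): callers normally reach this
    # operation group through a NetworkManagementClient instance, e.g.
    #
    #   poller = network_client.public_ip_prefixes.begin_delete(
    #       resource_group_name='my-rg', public_ip_prefix_name='my-prefix')
    #   poller.result()   # block until the long-running delete completes
    #
    # `network_client`, 'my-rg' and 'my-prefix' are placeholders.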
def get(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.PublicIPPrefix"
"""Gets the specified public IP prefix in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPPrefix, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.PublicIPPrefix
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
parameters, # type: "_models.PublicIPPrefix"
**kwargs # type: Any
):
# type: (...) -> "_models.PublicIPPrefix"
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PublicIPPrefix')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
parameters, # type: "_models.PublicIPPrefix"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.PublicIPPrefix"]
"""Creates or updates a static or dynamic public IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param parameters: Parameters supplied to the create or update public IP prefix operation.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.PublicIPPrefix
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PublicIPPrefix or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_04_01.models.PublicIPPrefix]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
public_ip_prefix_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.PublicIPPrefix"
"""Updates public IP prefix tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param parameters: Parameters supplied to update public IP prefix tags.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPPrefix, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.PublicIPPrefix
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PublicIPPrefixListResult"]
"""Gets all the public IP prefixes in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.PublicIPPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefixListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPPrefixes'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PublicIPPrefixListResult"]
"""Gets all public IP prefixes in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.PublicIPPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefixListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes'} # type: ignore
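# ---------------------------------------------------------------------------
# Usage sketch: the operations above are normally reached through
# NetworkManagementClient rather than instantiated directly. This block is a
# minimal illustration assuming the usual azure-identity / azure-mgmt-network
# wiring; the subscription id, resource group, prefix name and SKU values are
# placeholders, not values taken from this module.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    credential = DefaultAzureCredential()
    network_client = NetworkManagementClient(credential, "<subscription-id>")

    # begin_create_or_update returns an LROPoller; result() blocks until the
    # public IP prefix has finished provisioning.
    poller = network_client.public_ip_prefixes.begin_create_or_update(
        "example-rg",
        "example-prefix",
        {"location": "westus", "prefix_length": 30, "sku": {"name": "Standard"}},
    )
    prefix = poller.result()

    # get() is a plain synchronous call; list_all() returns an ItemPaged
    # iterator that fetches additional pages lazily as it is consumed.
    prefix = network_client.public_ip_prefixes.get("example-rg", "example-prefix")
    for item in network_client.public_ip_prefixes.list_all():
        print(item.name)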
|
|
"""Test cases for traceback module"""
from collections import namedtuple
from io import StringIO
import linecache
import sys
import unittest
import re
from test import support
from test.support import TESTFN, Error, captured_output, unlink, cpython_only, ALWAYS_EQ
from test.support.script_helper import assert_python_ok
import textwrap
import traceback
test_code = namedtuple('code', ['co_filename', 'co_name'])
test_frame = namedtuple('frame', ['f_code', 'f_globals', 'f_locals'])
test_tb = namedtuple('tb', ['tb_frame', 'tb_lineno', 'tb_next'])
class TracebackCases(unittest.TestCase):
# For now, a very minimal set of tests. I want to be sure that
# formatting of SyntaxErrors works based on changes for 2.1.
def get_exception_format(self, func, exc):
try:
func()
except exc as value:
return traceback.format_exception_only(exc, value)
else:
raise ValueError("call did not raise exception")
def syntax_error_with_caret(self):
compile("def fact(x):\n\treturn x!\n", "?", "exec")
def syntax_error_with_caret_2(self):
compile("1 +\n", "?", "exec")
def syntax_error_bad_indentation(self):
compile("def spam():\n print(1)\n print(2)", "?", "exec")
def syntax_error_with_caret_non_ascii(self):
compile('Python = "\u1e54\xfd\u0163\u0125\xf2\xf1" +', "?", "exec")
def syntax_error_bad_indentation2(self):
compile(" print(2)", "?", "exec")
def test_caret(self):
err = self.get_exception_format(self.syntax_error_with_caret,
SyntaxError)
self.assertEqual(len(err), 4)
self.assertTrue(err[1].strip() == "return x!")
self.assertIn("^", err[2]) # third line has caret
self.assertEqual(err[1].find("!"), err[2].find("^")) # in the right place
err = self.get_exception_format(self.syntax_error_with_caret_2,
SyntaxError)
self.assertIn("^", err[2]) # third line has caret
self.assertEqual(err[2].count('\n'), 1) # and no additional newline
self.assertEqual(err[1].find("+") + 1, err[2].find("^")) # in the right place
err = self.get_exception_format(self.syntax_error_with_caret_non_ascii,
SyntaxError)
self.assertIn("^", err[2]) # third line has caret
self.assertEqual(err[2].count('\n'), 1) # and no additional newline
self.assertEqual(err[1].find("+") + 1, err[2].find("^")) # in the right place
def test_nocaret(self):
exc = SyntaxError("error", ("x.py", 23, None, "bad syntax"))
err = traceback.format_exception_only(SyntaxError, exc)
self.assertEqual(len(err), 3)
self.assertEqual(err[1].strip(), "bad syntax")
def test_bad_indentation(self):
err = self.get_exception_format(self.syntax_error_bad_indentation,
IndentationError)
self.assertEqual(len(err), 4)
self.assertEqual(err[1].strip(), "print(2)")
self.assertIn("^", err[2])
self.assertEqual(err[1].find(")") + 1, err[2].find("^"))
# No caret for "unexpected indent"
err = self.get_exception_format(self.syntax_error_bad_indentation2,
IndentationError)
self.assertEqual(len(err), 3)
self.assertEqual(err[1].strip(), "print(2)")
def test_base_exception(self):
# Test that exceptions derived from BaseException are formatted right
e = KeyboardInterrupt()
lst = traceback.format_exception_only(e.__class__, e)
self.assertEqual(lst, ['KeyboardInterrupt\n'])
def test_format_exception_only_bad__str__(self):
class X(Exception):
def __str__(self):
1/0
err = traceback.format_exception_only(X, X())
self.assertEqual(len(err), 1)
str_value = '<unprintable %s object>' % X.__name__
if X.__module__ in ('__main__', 'builtins'):
str_name = X.__qualname__
else:
str_name = '.'.join([X.__module__, X.__qualname__])
self.assertEqual(err[0], "%s: %s\n" % (str_name, str_value))
def test_encoded_file(self):
# Test that tracebacks are correctly printed for encoded source files:
# - correct line number (Issue2384)
# - respect file encoding (Issue3975)
import sys, subprocess
# The spawned subprocess has its stdout redirected to a PIPE, and its
# encoding may be different from the current interpreter, on Windows
# at least.
process = subprocess.Popen([sys.executable, "-c",
"import sys; print(sys.stdout.encoding)"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
output_encoding = str(stdout, 'ascii').splitlines()[0]
def do_test(firstlines, message, charset, lineno):
# Raise the message in a subprocess, and catch the output
try:
with open(TESTFN, "w", encoding=charset) as output:
output.write("""{0}if 1:
import traceback;
raise RuntimeError('{1}')
""".format(firstlines, message))
process = subprocess.Popen([sys.executable, TESTFN],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
stdout = stdout.decode(output_encoding).splitlines()
finally:
unlink(TESTFN)
# The source lines are encoded with the 'backslashreplace' handler
encoded_message = message.encode(output_encoding,
'backslashreplace')
# and we just decoded them with the output_encoding.
message_ascii = encoded_message.decode(output_encoding)
err_line = "raise RuntimeError('{0}')".format(message_ascii)
err_msg = "RuntimeError: {0}".format(message_ascii)
self.assertIn(("line %s" % lineno), stdout[1],
"Invalid line number: {0!r} instead of {1}".format(
stdout[1], lineno))
self.assertTrue(stdout[2].endswith(err_line),
"Invalid traceback line: {0!r} instead of {1!r}".format(
stdout[2], err_line))
self.assertTrue(stdout[3] == err_msg,
"Invalid error message: {0!r} instead of {1!r}".format(
stdout[3], err_msg))
do_test("", "foo", "ascii", 3)
for charset in ("ascii", "iso-8859-1", "utf-8", "GBK"):
if charset == "ascii":
text = "foo"
elif charset == "GBK":
text = "\u4E02\u5100"
else:
text = "h\xe9 ho"
do_test("# coding: {0}\n".format(charset),
text, charset, 4)
do_test("#!shebang\n# coding: {0}\n".format(charset),
text, charset, 5)
do_test(" \t\f\n# coding: {0}\n".format(charset),
text, charset, 5)
# Issue #18960: coding spec should have no effect
do_test("x=0\n# coding: GBK\n", "h\xe9 ho", 'utf-8', 5)
def test_print_traceback_at_exit(self):
# Issue #22599: Ensure that it is possible to use the traceback module
# to display an exception at Python exit
code = textwrap.dedent("""
import sys
import traceback
class PrintExceptionAtExit(object):
def __init__(self):
try:
x = 1 / 0
except Exception:
self.exc_info = sys.exc_info()
# self.exc_info[1] (traceback) contains frames:
# explicitly clear the reference to self in the current
# frame to break a reference cycle
self = None
def __del__(self):
traceback.print_exception(*self.exc_info)
# Keep a reference in the module namespace to call the destructor
# when the module is unloaded
obj = PrintExceptionAtExit()
""")
rc, stdout, stderr = assert_python_ok('-c', code)
expected = [b'Traceback (most recent call last):',
b' File "<string>", line 8, in __init__',
b'ZeroDivisionError: division by zero']
self.assertEqual(stderr.splitlines(), expected)
def test_print_exception(self):
output = StringIO()
traceback.print_exception(
Exception, Exception("projector"), None, file=output
)
self.assertEqual(output.getvalue(), "Exception: projector\n")
class TracebackFormatTests(unittest.TestCase):
def some_exception(self):
raise KeyError('blah')
@cpython_only
def check_traceback_format(self, cleanup_func=None):
from _testcapi import traceback_print
try:
self.some_exception()
except KeyError:
type_, value, tb = sys.exc_info()
if cleanup_func is not None:
# Clear the inner frames, not this one
cleanup_func(tb.tb_next)
traceback_fmt = 'Traceback (most recent call last):\n' + \
''.join(traceback.format_tb(tb))
file_ = StringIO()
traceback_print(tb, file_)
python_fmt = file_.getvalue()
# Call all _tb and _exc functions
with captured_output("stderr") as tbstderr:
traceback.print_tb(tb)
tbfile = StringIO()
traceback.print_tb(tb, file=tbfile)
with captured_output("stderr") as excstderr:
traceback.print_exc()
excfmt = traceback.format_exc()
excfile = StringIO()
traceback.print_exc(file=excfile)
else:
raise Error("unable to create test traceback string")
# Make sure that Python and the traceback module format the same thing
self.assertEqual(traceback_fmt, python_fmt)
# Now verify the _tb func output
self.assertEqual(tbstderr.getvalue(), tbfile.getvalue())
# Now verify the _exc func output
self.assertEqual(excstderr.getvalue(), excfile.getvalue())
self.assertEqual(excfmt, excfile.getvalue())
# Make sure that the traceback is properly indented.
tb_lines = python_fmt.splitlines()
self.assertEqual(len(tb_lines), 5)
banner = tb_lines[0]
location, source_line = tb_lines[-2:]
self.assertTrue(banner.startswith('Traceback'))
self.assertTrue(location.startswith(' File'))
self.assertTrue(source_line.startswith(' raise'))
def test_traceback_format(self):
self.check_traceback_format()
def test_traceback_format_with_cleared_frames(self):
# Check that traceback formatting also works with a clear()ed frame
def cleanup_tb(tb):
tb.tb_frame.clear()
self.check_traceback_format(cleanup_tb)
def test_stack_format(self):
# Verify _stack functions. Note we have to use _getframe(1) to
# compare them without this frame appearing in the output
with captured_output("stderr") as ststderr:
traceback.print_stack(sys._getframe(1))
stfile = StringIO()
traceback.print_stack(sys._getframe(1), file=stfile)
self.assertEqual(ststderr.getvalue(), stfile.getvalue())
stfmt = traceback.format_stack(sys._getframe(1))
self.assertEqual(ststderr.getvalue(), "".join(stfmt))
def test_print_stack(self):
def prn():
traceback.print_stack()
with captured_output("stderr") as stderr:
prn()
lineno = prn.__code__.co_firstlineno
self.assertEqual(stderr.getvalue().splitlines()[-4:], [
' File "%s", line %d, in test_print_stack' % (__file__, lineno+3),
' prn()',
' File "%s", line %d, in prn' % (__file__, lineno+1),
' traceback.print_stack()',
])
# issue 26823 - Shrink recursive tracebacks
def _check_recursive_traceback_display(self, render_exc):
# Always show full diffs when this test fails
# Note that rearranging things may require adjusting
# the relative line numbers in the expected tracebacks
self.maxDiff = None
# Check hitting the recursion limit
def f():
f()
with captured_output("stderr") as stderr_f:
try:
f()
except RecursionError:
render_exc()
else:
self.fail("no recursion occurred")
lineno_f = f.__code__.co_firstlineno
result_f = (
'Traceback (most recent call last):\n'
f' File "{__file__}", line {lineno_f+5}, in _check_recursive_traceback_display\n'
' f()\n'
f' File "{__file__}", line {lineno_f+1}, in f\n'
' f()\n'
f' File "{__file__}", line {lineno_f+1}, in f\n'
' f()\n'
f' File "{__file__}", line {lineno_f+1}, in f\n'
' f()\n'
# XXX: The following line changes depending on whether the tests
# are run through the interactive interpreter or with -m
# It also varies depending on the platform (stack size)
# Fortunately, we don't care about exactness here, so we use regex
r' \[Previous line repeated (\d+) more times\]' '\n'
'RecursionError: maximum recursion depth exceeded\n'
)
expected = result_f.splitlines()
actual = stderr_f.getvalue().splitlines()
# Check the output text matches expectations
# 2nd last line contains the repetition count
self.assertEqual(actual[:-2], expected[:-2])
self.assertRegex(actual[-2], expected[-2])
# last line can have additional text appended
self.assertIn(expected[-1], actual[-1])
# Check the recursion count is roughly as expected
rec_limit = sys.getrecursionlimit()
self.assertIn(int(re.search(r"\d+", actual[-2]).group()), range(rec_limit-60, rec_limit))
# Check a known (limited) number of recursive invocations
def g(count=10):
if count:
return g(count-1)
raise ValueError
with captured_output("stderr") as stderr_g:
try:
g()
except ValueError:
render_exc()
else:
self.fail("no value error was raised")
lineno_g = g.__code__.co_firstlineno
result_g = (
f' File "{__file__}", line {lineno_g+2}, in g\n'
' return g(count-1)\n'
f' File "{__file__}", line {lineno_g+2}, in g\n'
' return g(count-1)\n'
f' File "{__file__}", line {lineno_g+2}, in g\n'
' return g(count-1)\n'
' [Previous line repeated 7 more times]\n'
f' File "{__file__}", line {lineno_g+3}, in g\n'
' raise ValueError\n'
'ValueError\n'
)
tb_line = (
'Traceback (most recent call last):\n'
f' File "{__file__}", line {lineno_g+7}, in _check_recursive_traceback_display\n'
' g()\n'
)
expected = (tb_line + result_g).splitlines()
actual = stderr_g.getvalue().splitlines()
self.assertEqual(actual, expected)
# Check 2 different repetitive sections
def h(count=10):
if count:
return h(count-1)
g()
with captured_output("stderr") as stderr_h:
try:
h()
except ValueError:
render_exc()
else:
self.fail("no value error was raised")
lineno_h = h.__code__.co_firstlineno
result_h = (
'Traceback (most recent call last):\n'
f' File "{__file__}", line {lineno_h+7}, in _check_recursive_traceback_display\n'
' h()\n'
f' File "{__file__}", line {lineno_h+2}, in h\n'
' return h(count-1)\n'
f' File "{__file__}", line {lineno_h+2}, in h\n'
' return h(count-1)\n'
f' File "{__file__}", line {lineno_h+2}, in h\n'
' return h(count-1)\n'
' [Previous line repeated 7 more times]\n'
f' File "{__file__}", line {lineno_h+3}, in h\n'
' g()\n'
)
expected = (result_h + result_g).splitlines()
actual = stderr_h.getvalue().splitlines()
self.assertEqual(actual, expected)
# Check the boundary conditions. First, test just below the cutoff.
with captured_output("stderr") as stderr_g:
try:
g(traceback._RECURSIVE_CUTOFF)
except ValueError:
render_exc()
else:
self.fail("no error raised")
result_g = (
f' File "{__file__}", line {lineno_g+2}, in g\n'
' return g(count-1)\n'
f' File "{__file__}", line {lineno_g+2}, in g\n'
' return g(count-1)\n'
f' File "{__file__}", line {lineno_g+2}, in g\n'
' return g(count-1)\n'
f' File "{__file__}", line {lineno_g+3}, in g\n'
' raise ValueError\n'
'ValueError\n'
)
tb_line = (
'Traceback (most recent call last):\n'
f' File "{__file__}", line {lineno_g+71}, in _check_recursive_traceback_display\n'
' g(traceback._RECURSIVE_CUTOFF)\n'
)
expected = (tb_line + result_g).splitlines()
actual = stderr_g.getvalue().splitlines()
self.assertEqual(actual, expected)
# Second, test just above the cutoff.
with captured_output("stderr") as stderr_g:
try:
g(traceback._RECURSIVE_CUTOFF + 1)
except ValueError:
render_exc()
else:
self.fail("no error raised")
result_g = (
f' File "{__file__}", line {lineno_g+2}, in g\n'
' return g(count-1)\n'
f' File "{__file__}", line {lineno_g+2}, in g\n'
' return g(count-1)\n'
f' File "{__file__}", line {lineno_g+2}, in g\n'
' return g(count-1)\n'
' [Previous line repeated 1 more time]\n'
f' File "{__file__}", line {lineno_g+3}, in g\n'
' raise ValueError\n'
'ValueError\n'
)
tb_line = (
'Traceback (most recent call last):\n'
f' File "{__file__}", line {lineno_g+99}, in _check_recursive_traceback_display\n'
' g(traceback._RECURSIVE_CUTOFF + 1)\n'
)
expected = (tb_line + result_g).splitlines()
actual = stderr_g.getvalue().splitlines()
self.assertEqual(actual, expected)
def test_recursive_traceback_python(self):
self._check_recursive_traceback_display(traceback.print_exc)
@cpython_only
def test_recursive_traceback_cpython_internal(self):
from _testcapi import exception_print
def render_exc():
exc_type, exc_value, exc_tb = sys.exc_info()
exception_print(exc_value)
self._check_recursive_traceback_display(render_exc)
def test_format_stack(self):
def fmt():
return traceback.format_stack()
result = fmt()
lineno = fmt.__code__.co_firstlineno
self.assertEqual(result[-2:], [
' File "%s", line %d, in test_format_stack\n'
' result = fmt()\n' % (__file__, lineno+2),
' File "%s", line %d, in fmt\n'
' return traceback.format_stack()\n' % (__file__, lineno+1),
])
@cpython_only
def test_unhashable(self):
from _testcapi import exception_print
class UnhashableException(Exception):
def __eq__(self, other):
return True
ex1 = UnhashableException('ex1')
ex2 = UnhashableException('ex2')
try:
raise ex2 from ex1
except UnhashableException:
try:
raise ex1
except UnhashableException:
exc_type, exc_val, exc_tb = sys.exc_info()
with captured_output("stderr") as stderr_f:
exception_print(exc_val)
tb = stderr_f.getvalue().strip().splitlines()
self.assertEqual(11, len(tb))
self.assertEqual(context_message.strip(), tb[5])
self.assertIn('UnhashableException: ex2', tb[3])
self.assertIn('UnhashableException: ex1', tb[10])
cause_message = (
"\nThe above exception was the direct cause "
"of the following exception:\n\n")
context_message = (
"\nDuring handling of the above exception, "
"another exception occurred:\n\n")
boundaries = re.compile(
'(%s|%s)' % (re.escape(cause_message), re.escape(context_message)))
class BaseExceptionReportingTests:
def get_exception(self, exception_or_callable):
if isinstance(exception_or_callable, Exception):
return exception_or_callable
try:
exception_or_callable()
except Exception as e:
return e
def zero_div(self):
1/0 # In zero_div
def check_zero_div(self, msg):
lines = msg.splitlines()
self.assertTrue(lines[-3].startswith(' File'))
self.assertIn('1/0 # In zero_div', lines[-2])
self.assertTrue(lines[-1].startswith('ZeroDivisionError'), lines[-1])
def test_simple(self):
try:
1/0 # Marker
except ZeroDivisionError as _:
e = _
lines = self.get_report(e).splitlines()
self.assertEqual(len(lines), 4)
self.assertTrue(lines[0].startswith('Traceback'))
self.assertTrue(lines[1].startswith(' File'))
self.assertIn('1/0 # Marker', lines[2])
self.assertTrue(lines[3].startswith('ZeroDivisionError'))
def test_cause(self):
def inner_raise():
try:
self.zero_div()
except ZeroDivisionError as e:
raise KeyError from e
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], cause_message)
self.check_zero_div(blocks[0])
self.assertIn('inner_raise() # Marker', blocks[2])
def test_context(self):
def inner_raise():
try:
self.zero_div()
except ZeroDivisionError:
raise KeyError
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], context_message)
self.check_zero_div(blocks[0])
self.assertIn('inner_raise() # Marker', blocks[2])
def test_context_suppression(self):
try:
try:
raise Exception
except:
raise ZeroDivisionError from None
except ZeroDivisionError as _:
e = _
lines = self.get_report(e).splitlines()
self.assertEqual(len(lines), 4)
self.assertTrue(lines[0].startswith('Traceback'))
self.assertTrue(lines[1].startswith(' File'))
self.assertIn('ZeroDivisionError from None', lines[2])
self.assertTrue(lines[3].startswith('ZeroDivisionError'))
def test_cause_and_context(self):
# When both a cause and a context are set, only the cause should be
# displayed and the context should be muted.
def inner_raise():
try:
self.zero_div()
except ZeroDivisionError as _e:
e = _e
try:
xyzzy
except NameError:
raise KeyError from e
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], cause_message)
self.check_zero_div(blocks[0])
self.assertIn('inner_raise() # Marker', blocks[2])
def test_cause_recursive(self):
def inner_raise():
try:
try:
self.zero_div()
except ZeroDivisionError as e:
z = e
raise KeyError from e
except KeyError as e:
raise z from e
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], cause_message)
# The first block is the KeyError raised from the ZeroDivisionError
self.assertIn('raise KeyError from e', blocks[0])
self.assertNotIn('1/0', blocks[0])
# The second block (apart from the boundary) is the ZeroDivisionError
# re-raised from the KeyError
self.assertIn('inner_raise() # Marker', blocks[2])
self.check_zero_div(blocks[2])
@unittest.skipIf(support.use_old_parser(), "Pegen is arguably better here, so no need to fix this")
def test_syntax_error_offset_at_eol(self):
# See #10186.
def e():
raise SyntaxError('', ('', 0, 5, 'hello'))
msg = self.get_report(e).splitlines()
self.assertEqual(msg[-2], " ^")
def e():
exec("x = 5 | 4 |")
msg = self.get_report(e).splitlines()
self.assertEqual(msg[-2], ' ^')
def test_message_none(self):
# A message that looks like "None" should not be treated specially
err = self.get_report(Exception(None))
self.assertIn('Exception: None\n', err)
err = self.get_report(Exception('None'))
self.assertIn('Exception: None\n', err)
err = self.get_report(Exception())
self.assertIn('Exception\n', err)
err = self.get_report(Exception(''))
self.assertIn('Exception\n', err)
def test_syntax_error_various_offsets(self):
for offset in range(-5, 10):
for add in [0, 2]:
text = " "*add + "text%d" % offset
expected = [' File "file.py", line 1']
if offset < 1:
expected.append(" %s" % text.lstrip())
elif offset <= 6:
expected.append(" %s" % text.lstrip())
expected.append(" %s^" % (" "*(offset-1)))
else:
expected.append(" %s" % text.lstrip())
expected.append(" %s^" % (" "*5))
expected.append("SyntaxError: msg")
expected.append("")
err = self.get_report(SyntaxError("msg", ("file.py", 1, offset+add, text)))
exp = "\n".join(expected)
self.assertEqual(exp, err)
class PyExcReportingTests(BaseExceptionReportingTests, unittest.TestCase):
#
# This checks reporting through the 'traceback' module, with both
# format_exception() and print_exception().
#
def get_report(self, e):
e = self.get_exception(e)
s = ''.join(
traceback.format_exception(type(e), e, e.__traceback__))
with captured_output("stderr") as sio:
traceback.print_exception(type(e), e, e.__traceback__)
self.assertEqual(sio.getvalue(), s)
return s
class CExcReportingTests(BaseExceptionReportingTests, unittest.TestCase):
#
# This checks built-in reporting by the interpreter.
#
@cpython_only
def get_report(self, e):
from _testcapi import exception_print
e = self.get_exception(e)
with captured_output("stderr") as s:
exception_print(e)
return s.getvalue()
class LimitTests(unittest.TestCase):
''' Tests for limit argument.
        It's enough to test extract_tb, extract_stack and format_exception '''
def last_raises1(self):
raise Exception('Last raised')
def last_raises2(self):
self.last_raises1()
def last_raises3(self):
self.last_raises2()
def last_raises4(self):
self.last_raises3()
def last_raises5(self):
self.last_raises4()
def last_returns_frame1(self):
return sys._getframe()
def last_returns_frame2(self):
return self.last_returns_frame1()
def last_returns_frame3(self):
return self.last_returns_frame2()
def last_returns_frame4(self):
return self.last_returns_frame3()
def last_returns_frame5(self):
return self.last_returns_frame4()
def test_extract_stack(self):
frame = self.last_returns_frame5()
def extract(**kwargs):
return traceback.extract_stack(frame, **kwargs)
def assertEqualExcept(actual, expected, ignore):
self.assertEqual(actual[:ignore], expected[:ignore])
self.assertEqual(actual[ignore+1:], expected[ignore+1:])
self.assertEqual(len(actual), len(expected))
with support.swap_attr(sys, 'tracebacklimit', 1000):
nolim = extract()
self.assertGreater(len(nolim), 5)
self.assertEqual(extract(limit=2), nolim[-2:])
assertEqualExcept(extract(limit=100), nolim[-100:], -5-1)
self.assertEqual(extract(limit=-2), nolim[:2])
assertEqualExcept(extract(limit=-100), nolim[:100], len(nolim)-5-1)
self.assertEqual(extract(limit=0), [])
del sys.tracebacklimit
assertEqualExcept(extract(), nolim, -5-1)
sys.tracebacklimit = 2
self.assertEqual(extract(), nolim[-2:])
self.assertEqual(extract(limit=3), nolim[-3:])
self.assertEqual(extract(limit=-3), nolim[:3])
sys.tracebacklimit = 0
self.assertEqual(extract(), [])
sys.tracebacklimit = -1
self.assertEqual(extract(), [])
def test_extract_tb(self):
try:
self.last_raises5()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
def extract(**kwargs):
return traceback.extract_tb(tb, **kwargs)
with support.swap_attr(sys, 'tracebacklimit', 1000):
nolim = extract()
self.assertEqual(len(nolim), 5+1)
self.assertEqual(extract(limit=2), nolim[:2])
self.assertEqual(extract(limit=10), nolim)
self.assertEqual(extract(limit=-2), nolim[-2:])
self.assertEqual(extract(limit=-10), nolim)
self.assertEqual(extract(limit=0), [])
del sys.tracebacklimit
self.assertEqual(extract(), nolim)
sys.tracebacklimit = 2
self.assertEqual(extract(), nolim[:2])
self.assertEqual(extract(limit=3), nolim[:3])
self.assertEqual(extract(limit=-3), nolim[-3:])
sys.tracebacklimit = 0
self.assertEqual(extract(), [])
sys.tracebacklimit = -1
self.assertEqual(extract(), [])
def test_format_exception(self):
try:
self.last_raises5()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
# [1:-1] to exclude "Traceback (...)" header and
# exception type and value
def extract(**kwargs):
return traceback.format_exception(exc_type, exc_value, tb, **kwargs)[1:-1]
with support.swap_attr(sys, 'tracebacklimit', 1000):
nolim = extract()
self.assertEqual(len(nolim), 5+1)
self.assertEqual(extract(limit=2), nolim[:2])
self.assertEqual(extract(limit=10), nolim)
self.assertEqual(extract(limit=-2), nolim[-2:])
self.assertEqual(extract(limit=-10), nolim)
self.assertEqual(extract(limit=0), [])
del sys.tracebacklimit
self.assertEqual(extract(), nolim)
sys.tracebacklimit = 2
self.assertEqual(extract(), nolim[:2])
self.assertEqual(extract(limit=3), nolim[:3])
self.assertEqual(extract(limit=-3), nolim[-3:])
sys.tracebacklimit = 0
self.assertEqual(extract(), [])
sys.tracebacklimit = -1
self.assertEqual(extract(), [])
class MiscTracebackCases(unittest.TestCase):
#
# Check non-printing functions in traceback module
#
def test_clear(self):
def outer():
middle()
def middle():
inner()
def inner():
i = 1
1/0
try:
outer()
except:
type_, value, tb = sys.exc_info()
# Initial assertion: there's one local in the inner frame.
inner_frame = tb.tb_next.tb_next.tb_next.tb_frame
self.assertEqual(len(inner_frame.f_locals), 1)
# Clear traceback frames
traceback.clear_frames(tb)
# Local variable dict should now be empty.
self.assertEqual(len(inner_frame.f_locals), 0)
def test_extract_stack(self):
def extract():
return traceback.extract_stack()
result = extract()
lineno = extract.__code__.co_firstlineno
self.assertEqual(result[-2:], [
(__file__, lineno+2, 'test_extract_stack', 'result = extract()'),
(__file__, lineno+1, 'extract', 'return traceback.extract_stack()'),
])
self.assertEqual(len(result[0]), 4)
class TestFrame(unittest.TestCase):
def test_basics(self):
linecache.clearcache()
linecache.lazycache("f", globals())
f = traceback.FrameSummary("f", 1, "dummy")
self.assertEqual(f,
("f", 1, "dummy", '"""Test cases for traceback module"""'))
self.assertEqual(tuple(f),
("f", 1, "dummy", '"""Test cases for traceback module"""'))
self.assertEqual(f, traceback.FrameSummary("f", 1, "dummy"))
self.assertEqual(f, tuple(f))
# Since tuple.__eq__ doesn't support FrameSummary, the equality
        # operator falls back to FrameSummary.__eq__.
self.assertEqual(tuple(f), f)
self.assertIsNone(f.locals)
self.assertNotEqual(f, object())
self.assertEqual(f, ALWAYS_EQ)
def test_lazy_lines(self):
linecache.clearcache()
f = traceback.FrameSummary("f", 1, "dummy", lookup_line=False)
self.assertEqual(None, f._line)
linecache.lazycache("f", globals())
self.assertEqual(
'"""Test cases for traceback module"""',
f.line)
def test_explicit_line(self):
f = traceback.FrameSummary("f", 1, "dummy", line="line")
self.assertEqual("line", f.line)
def test_len(self):
f = traceback.FrameSummary("f", 1, "dummy", line="line")
self.assertEqual(len(f), 4)
class TestStack(unittest.TestCase):
def test_walk_stack(self):
def deeper():
return list(traceback.walk_stack(None))
s1 = list(traceback.walk_stack(None))
s2 = deeper()
self.assertEqual(len(s2) - len(s1), 1)
self.assertEqual(s2[1:], s1)
def test_walk_tb(self):
try:
1/0
except Exception:
_, _, tb = sys.exc_info()
s = list(traceback.walk_tb(tb))
self.assertEqual(len(s), 1)
def test_extract_stack(self):
s = traceback.StackSummary.extract(traceback.walk_stack(None))
self.assertIsInstance(s, traceback.StackSummary)
def test_extract_stack_limit(self):
s = traceback.StackSummary.extract(traceback.walk_stack(None), limit=5)
self.assertEqual(len(s), 5)
def test_extract_stack_lookup_lines(self):
linecache.clearcache()
linecache.updatecache('/foo.py', globals())
c = test_code('/foo.py', 'method')
f = test_frame(c, None, None)
s = traceback.StackSummary.extract(iter([(f, 6)]), lookup_lines=True)
linecache.clearcache()
self.assertEqual(s[0].line, "import sys")
def test_extract_stackup_deferred_lookup_lines(self):
linecache.clearcache()
c = test_code('/foo.py', 'method')
f = test_frame(c, None, None)
s = traceback.StackSummary.extract(iter([(f, 6)]), lookup_lines=False)
self.assertEqual({}, linecache.cache)
linecache.updatecache('/foo.py', globals())
self.assertEqual(s[0].line, "import sys")
def test_from_list(self):
s = traceback.StackSummary.from_list([('foo.py', 1, 'fred', 'line')])
self.assertEqual(
[' File "foo.py", line 1, in fred\n line\n'],
s.format())
def test_from_list_edited_stack(self):
s = traceback.StackSummary.from_list([('foo.py', 1, 'fred', 'line')])
s[0] = ('foo.py', 2, 'fred', 'line')
s2 = traceback.StackSummary.from_list(s)
self.assertEqual(
[' File "foo.py", line 2, in fred\n line\n'],
s2.format())
def test_format_smoke(self):
# For detailed tests see the format_list tests, which consume the same
# code.
s = traceback.StackSummary.from_list([('foo.py', 1, 'fred', 'line')])
self.assertEqual(
[' File "foo.py", line 1, in fred\n line\n'],
s.format())
def test_locals(self):
linecache.updatecache('/foo.py', globals())
c = test_code('/foo.py', 'method')
f = test_frame(c, globals(), {'something': 1})
s = traceback.StackSummary.extract(iter([(f, 6)]), capture_locals=True)
self.assertEqual(s[0].locals, {'something': '1'})
def test_no_locals(self):
linecache.updatecache('/foo.py', globals())
c = test_code('/foo.py', 'method')
f = test_frame(c, globals(), {'something': 1})
s = traceback.StackSummary.extract(iter([(f, 6)]))
self.assertEqual(s[0].locals, None)
def test_format_locals(self):
def some_inner(k, v):
a = 1
b = 2
return traceback.StackSummary.extract(
traceback.walk_stack(None), capture_locals=True, limit=1)
s = some_inner(3, 4)
self.assertEqual(
[' File "%s", line %d, in some_inner\n'
' return traceback.StackSummary.extract(\n'
' a = 1\n'
' b = 2\n'
' k = 3\n'
' v = 4\n' % (__file__, some_inner.__code__.co_firstlineno + 3)
], s.format())
class TestTracebackException(unittest.TestCase):
def test_smoke(self):
try:
1/0
except Exception:
exc_info = sys.exc_info()
exc = traceback.TracebackException(*exc_info)
expected_stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_info[2]))
self.assertEqual(None, exc.__cause__)
self.assertEqual(None, exc.__context__)
self.assertEqual(False, exc.__suppress_context__)
self.assertEqual(expected_stack, exc.stack)
self.assertEqual(exc_info[0], exc.exc_type)
self.assertEqual(str(exc_info[1]), str(exc))
def test_from_exception(self):
# Check all the parameters are accepted.
def foo():
1/0
try:
foo()
except Exception as e:
exc_info = sys.exc_info()
self.expected_stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_info[2]), limit=1, lookup_lines=False,
capture_locals=True)
self.exc = traceback.TracebackException.from_exception(
e, limit=1, lookup_lines=False, capture_locals=True)
expected_stack = self.expected_stack
exc = self.exc
self.assertEqual(None, exc.__cause__)
self.assertEqual(None, exc.__context__)
self.assertEqual(False, exc.__suppress_context__)
self.assertEqual(expected_stack, exc.stack)
self.assertEqual(exc_info[0], exc.exc_type)
self.assertEqual(str(exc_info[1]), str(exc))
def test_cause(self):
try:
try:
1/0
finally:
exc_info_context = sys.exc_info()
exc_context = traceback.TracebackException(*exc_info_context)
cause = Exception("cause")
raise Exception("uh oh") from cause
except Exception:
exc_info = sys.exc_info()
exc = traceback.TracebackException(*exc_info)
expected_stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_info[2]))
exc_cause = traceback.TracebackException(Exception, cause, None)
self.assertEqual(exc_cause, exc.__cause__)
self.assertEqual(exc_context, exc.__context__)
self.assertEqual(True, exc.__suppress_context__)
self.assertEqual(expected_stack, exc.stack)
self.assertEqual(exc_info[0], exc.exc_type)
self.assertEqual(str(exc_info[1]), str(exc))
def test_context(self):
try:
try:
1/0
finally:
exc_info_context = sys.exc_info()
exc_context = traceback.TracebackException(*exc_info_context)
raise Exception("uh oh")
except Exception:
exc_info = sys.exc_info()
exc = traceback.TracebackException(*exc_info)
expected_stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_info[2]))
self.assertEqual(None, exc.__cause__)
self.assertEqual(exc_context, exc.__context__)
self.assertEqual(False, exc.__suppress_context__)
self.assertEqual(expected_stack, exc.stack)
self.assertEqual(exc_info[0], exc.exc_type)
self.assertEqual(str(exc_info[1]), str(exc))
def test_comparison(self):
try:
1/0
except Exception:
exc_info = sys.exc_info()
exc = traceback.TracebackException(*exc_info)
exc2 = traceback.TracebackException(*exc_info)
self.assertIsNot(exc, exc2)
self.assertEqual(exc, exc2)
self.assertNotEqual(exc, object())
self.assertEqual(exc, ALWAYS_EQ)
def test_unhashable(self):
class UnhashableException(Exception):
def __eq__(self, other):
return True
ex1 = UnhashableException('ex1')
ex2 = UnhashableException('ex2')
try:
raise ex2 from ex1
except UnhashableException:
try:
raise ex1
except UnhashableException:
exc_info = sys.exc_info()
exc = traceback.TracebackException(*exc_info)
formatted = list(exc.format())
self.assertIn('UnhashableException: ex2\n', formatted[2])
self.assertIn('UnhashableException: ex1\n', formatted[6])
def test_limit(self):
def recurse(n):
if n:
recurse(n-1)
else:
1/0
try:
recurse(10)
except Exception:
exc_info = sys.exc_info()
exc = traceback.TracebackException(*exc_info, limit=5)
expected_stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_info[2]), limit=5)
self.assertEqual(expected_stack, exc.stack)
def test_lookup_lines(self):
linecache.clearcache()
e = Exception("uh oh")
c = test_code('/foo.py', 'method')
f = test_frame(c, None, None)
tb = test_tb(f, 6, None)
exc = traceback.TracebackException(Exception, e, tb, lookup_lines=False)
self.assertEqual({}, linecache.cache)
linecache.updatecache('/foo.py', globals())
self.assertEqual(exc.stack[0].line, "import sys")
def test_locals(self):
linecache.updatecache('/foo.py', globals())
e = Exception("uh oh")
c = test_code('/foo.py', 'method')
f = test_frame(c, globals(), {'something': 1, 'other': 'string'})
tb = test_tb(f, 6, None)
exc = traceback.TracebackException(
Exception, e, tb, capture_locals=True)
self.assertEqual(
exc.stack[0].locals, {'something': '1', 'other': "'string'"})
def test_no_locals(self):
linecache.updatecache('/foo.py', globals())
e = Exception("uh oh")
c = test_code('/foo.py', 'method')
f = test_frame(c, globals(), {'something': 1})
tb = test_tb(f, 6, None)
exc = traceback.TracebackException(Exception, e, tb)
self.assertEqual(exc.stack[0].locals, None)
def test_traceback_header(self):
# do not print a traceback header if exc_traceback is None
# see issue #24695
exc = traceback.TracebackException(Exception, Exception("haven"), None)
self.assertEqual(list(exc.format()), ["Exception: haven\n"])
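# A minimal illustration (hypothetical helper, not referenced by the tests
# above) of the behaviour TestTracebackException exercises: TracebackException
# snapshots an exception when it is caught, so it can be formatted later
# without keeping the traceback/frame objects alive.
def _example_capture_and_format():
    try:
        1 / 0
    except ZeroDivisionError as exc:
        snapshot = traceback.TracebackException.from_exception(exc)
    # format() yields the same lines print_exception() would have written.
    return "".join(snapshot.format())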
class MiscTest(unittest.TestCase):
def test_all(self):
expected = set()
blacklist = {'print_list'}
for name in dir(traceback):
if name.startswith('_') or name in blacklist:
continue
module_object = getattr(traceback, name)
if getattr(module_object, '__module__', None) == 'traceback':
expected.add(name)
self.assertCountEqual(traceback.__all__, expected)
if __name__ == "__main__":
unittest.main()
|
|
import re
from Queue import Queue
from math import ceil
from types import ListType  # only ListType is used below
WHITESPACE = ['\n', '\t', ' ', '', u'\u3000']
# from helpers.constants import WHITESPACE
def splitKeepWhitespace(string):
"""
Splits the string on whitespace, while keeping the tokens on which the string was split.
Args:
string: The string to split.
Returns:
The split string with the whitespace kept.
"""
return re.split(u'(\u3000|\n| |\t)', string)
# Note: Regex in capture group keeps the delimiter in the resultant list
def countWords(textList): # Ignores WHITESPACE as being 'not words'
"""
Counts the "words" in a list of tokens, where words are anything not in the WHITESPACE global.
Args:
textList: A list of tokens in the text.
Returns:
The number of words in the list.
"""
return len([x for x in textList if x not in WHITESPACE])
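# Illustrative helper (hypothetical, not used elsewhere in this module): shows
# how splitKeepWhitespace keeps the delimiter tokens and how countWords ignores
# them when counting. The expected values assume the WHITESPACE list above.
def _exampleTokenCount():
    tokens = splitKeepWhitespace(u"one two\nthree")
    # tokens -> [u'one', u' ', u'two', u'\n', u'three']
    return countWords(tokens)  # -> 3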
def stripLeadingWhiteSpace(q):
"""
Takes in the queue representation of the text and strips the leading whitespace.
Args:
q: The text in a Queue object.
Returns:
None
"""
if not q.empty():
while q.queue[0] in WHITESPACE:
trash = q.get()
if q.empty():
break
def stripLeadingBlankLines(q):
"""
Takes in the queue representation of the text and strips the leading blank lines.
Args:
q: The text in a Queue object.
Returns:
None
"""
    # Peek at the front of the underlying deque and discard empty-line tokens,
    # mirroring the guard used in stripLeadingWhiteSpace above.
    if not q.empty():
        while q.queue[0] == '':
            trash = q.get()
            if q.empty():
                break
def stripLeadingCharacters(charQueue, numChars):
"""
Takes in the queue representation of the text and strips the leading numChars characters.
Args:
charQueue: The text in a Queue object.
numChars: The number of characters to remove.
Returns:
None
"""
for i in xrange(numChars):
removedChar = charQueue.get()
def stripLeadingWords(wordQueue, numWords):
"""
Takes in the queue representation of the text and strips the leading numWords words.
Args:
wordQueue: The text in a Queue object.
numWords: The number of words to remove.
Returns:
None
"""
for i in xrange(numWords):
stripLeadingWhiteSpace(wordQueue)
removedWord = wordQueue.get()
stripLeadingWhiteSpace(wordQueue)
def stripLeadingLines(lineQueue, numLines):
"""
Takes in the queue representation of the text and strips the leading numLines lines.
Args:
lineQueue: The text in a Queue object.
numLines: The number of lines to remove.
Returns:
None
"""
for i in xrange(numLines):
stripLeadingBlankLines(lineQueue)
removedLine = lineQueue.get()
stripLeadingBlankLines(lineQueue)
def cutByCharacters(text, chunkSize, overlap, lastProp):
"""
Cuts the text into equally sized chunks, where the segment size is measured by counts of characters,
with an option for an amount of overlap between chunks and a minimum proportion threshold for the last chunk.
Args:
text: The string with the contents of the file.
chunkSize: The size of the chunk, in characters.
overlap: The number of characters to overlap between chunks.
lastProp: The minimum proportional size that the last chunk has to be.
Returns:
A list of string that the text has been cut into.
"""
    chunkList = [] # The list of the chunks (i.e. a list of lists of strings)
chunkSoFar = Queue() # The rolling window representing the (potential) chunk
currChunkSize = 0 # Index keeping track of whether or not it's time to make a chunk out of the window
tillNextChunk = chunkSize - overlap # The distance between the starts of chunks
for token in text:
currChunkSize += 1
if currChunkSize > chunkSize:
chunkList.append(list(chunkSoFar.queue))
stripLeadingCharacters(charQueue=chunkSoFar, numChars=tillNextChunk)
currChunkSize -= tillNextChunk
chunkSoFar.put(token)
# Making sure the last chunk is of a sufficient proportion
lastChunk = list(chunkSoFar.queue)
if (float(len(lastChunk)) / chunkSize) < lastProp:
if len(chunkList)==0:
chunkList.extend(lastChunk)
else:
chunkList[-1].extend(lastChunk)
else:
chunkList.append(lastChunk)
# Make the list of lists of strings into a list of strings
countSubList = 0
stringList=[]
for subList in chunkList:
stringList.extend([''.join(subList)])
if type(subList) is ListType:
countSubList+=1
    # If chunkList contains no sublists (the text was too short to fill a single chunk), join its tokens directly
if countSubList==0:
stringList = []
stringList.extend([''.join(chunkList)])
return stringList
def cutByWords(text, chunkSize, overlap, lastProp):
"""
Cuts the text into equally sized chunks, where the segment size is measured by counts of words,
    with an option for an amount of overlap between chunks and a minimum proportion threshold for the last chunk.
Args:
text: The string with the contents of the file.
chunkSize: The size of the chunk, in words.
overlap: The number of words to overlap between chunks.
lastProp: The minimum proportional size that the last chunk has to be.
Returns:
A list of string that the text has been cut into.
"""
    chunkList = [] # The list of the chunks (i.e. a list of lists of strings)
chunkSoFar = Queue() # The rolling window representing the (potential) chunk
currChunkSize = 0 # Index keeping track of whether or not it's time to make a chunk out of the window
tillNextChunk = chunkSize - overlap # The distance between the starts of chunks
splitText = splitKeepWhitespace(text)
# Create list of chunks (chunks are lists of words and whitespace) by using a queue as a rolling window
for token in splitText:
if token in WHITESPACE:
chunkSoFar.put(token)
else:
currChunkSize += 1
if currChunkSize > chunkSize:
chunkList.append(list(chunkSoFar.queue))
stripLeadingWords(wordQueue=chunkSoFar, numWords=tillNextChunk)
currChunkSize -= tillNextChunk
chunkSoFar.put(token)
# Making sure the last chunk is of a sufficient proportion
lastChunk = list(chunkSoFar.queue) # Grab the final (partial) chunk
if (float(countWords(lastChunk)) / chunkSize) < lastProp: # If the proportion of the last chunk is too low
if len(chunkList)==0:
chunkList.extend(lastChunk)
else:
chunkList[-1].extend(lastChunk)
else:
chunkList.append(lastChunk)
# Make the list of lists of strings into a list of strings
countSubList = 0
stringList=[]
for subList in chunkList:
stringList.extend([''.join(subList)])
if type(subList) is ListType:
countSubList+=1
    # If chunkList contains no sublists (the text was too short to fill a single chunk), join its tokens directly
if countSubList==0:
stringList = []
stringList.extend([''.join(chunkList)])
return stringList
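# Illustrative sketch (hypothetical helper, not called anywhere in this
# module): how the rolling-window cutters are typically invoked. The parameter
# values are arbitrary examples; cutByCharacters and cutByLines follow the
# same calling convention with characters and lines as the unit.
def _exampleCutByWords():
    text = u"the quick brown fox jumps over the lazy dog"
    # 4-word chunks, starting every 3 words (1 word of overlap); a trailing
    # chunk shorter than half a chunk is merged into the previous one.
    return cutByWords(text, chunkSize=4, overlap=1, lastProp=0.5)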
def cutByLines(text, chunkSize, overlap, lastProp):
"""
Cuts the text into equally sized chunks, where the segment size is measured by counts of lines,
with an option for an amount of overlap between chunks and a minimum proportion threshold for the last chunk.
Args:
text: The string with the contents of the file.
chunkSize: The size of the chunk, in lines.
overlap: The number of lines to overlap between chunks.
lastProp: The minimum proportional size that the last chunk has to be.
Returns:
        A list of strings that the text has been cut into.
"""
chunkList = [] # The list of the chunks (a.k.a. a list of list of strings)
chunkSoFar = Queue() # The rolling window representing the (potential) chunk
currChunkSize = 0 # Index keeping track of whether or not it's time to make a chunk out of the window
tillNextChunk = chunkSize - overlap # The distance between the starts of chunks
splitText = text.splitlines(True)
    # Create list of chunks (chunks are lists of lines) by using a queue as a rolling window
for token in splitText:
if token == '':
chunkSoFar.put(token)
else:
currChunkSize += 1
if currChunkSize > chunkSize:
chunkList.append(list(chunkSoFar.queue))
stripLeadingLines(lineQueue=chunkSoFar, numLines=tillNextChunk)
currChunkSize -= tillNextChunk
chunkSoFar.put(token)
    # Making sure the last chunk is of a sufficient proportion
    lastChunk = list(chunkSoFar.queue)  # Grab the final (partial) chunk
    # Each token in lastChunk is one line, so its length is the line count
    if (float(len(lastChunk)) / chunkSize) < lastProp:  # If the proportion of the last chunk is too low
        if len(chunkList) == 0:
            chunkList.extend(lastChunk)
        else:
            chunkList[-1].extend(lastChunk)
    else:
        chunkList.append(lastChunk)
    # Make the list of lists of strings into a list of strings
    countSubList = 0
    stringList = []
    for subList in chunkList:
        stringList.append(''.join(subList))
        if isinstance(subList, list):
            countSubList += 1
    # If chunkList contained no sub-lists (the undersized text was folded
    # straight into it above), join everything into a single chunk
    if countSubList == 0:
        stringList = [''.join(chunkList)]
return stringList
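
# Illustrative sketch (not part of the original module) of cutByLines above,
# which works like cutByWords but tokenizes with str.splitlines(True), so the
# chunk size and overlap are counted in lines (assuming stripLeadingLines()
# drops the given number of leading lines from the queue):
#
#     chunks = cutByLines("a\nb\nc\nd\n", chunkSize=2, overlap=0, lastProp=0.5)
#     # something like: ["a\nb\n", "c\nd\n"]
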
def cutByNumber(text, numChunks):
"""
Cuts the text into equally sized chunks, where the size of the chunk is determined by the number of desired chunks.
Args:
text: The string with the contents of the file.
numChunks: The number of chunks to cut the text into.
Returns:
        A list of strings that the text has been cut into.
"""
chunkList = [] # The list of the chunks (a.k.a. a list of list of strings)
chunkSoFar = Queue() # The rolling window representing the (potential) chunk
splitText = splitKeepWhitespace(text)
textLength = countWords(splitText)
chunkSizes = []
for i in xrange(numChunks):
chunkSizes.append(textLength / numChunks)
for i in xrange(textLength % numChunks):
chunkSizes[i] += 1
currChunkSize = 0 # Index keeping track of whether or not it's time to make a chunk out of the window
chunkIndex = 0
chunkSize = chunkSizes[chunkIndex]
# Create list of chunks (chunks are lists of words and whitespace) by using a queue as a rolling window
for token in splitText:
if token in WHITESPACE:
chunkSoFar.put(token)
else:
currChunkSize += 1
if currChunkSize > chunkSize:
chunkList.append(list(chunkSoFar.queue))
chunkSoFar.queue.clear()
currChunkSize = 1
chunkSoFar.put(token)
chunkIndex += 1
chunkSize = chunkSizes[chunkIndex]
else:
chunkSoFar.put(token)
lastChunk = list(chunkSoFar.queue) # Grab the final (partial) chunk
chunkList.append(lastChunk)
# Make the list of lists of strings into a list of strings
stringList = [''.join(subList) for subList in chunkList]
return stringList
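
# Illustrative sketch (not part of the original module) of cutByNumber above:
# the word count is divided as evenly as possible across the requested number
# of chunks, and when it does not divide evenly the earlier chunks receive the
# extra words:
#
#     chunks = cutByNumber("a b c d e", numChunks=2)
#     # something like: ["a b c ", "d e"]
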
def cutByMilestone(text, cuttingValue):
"""
Cuts the file into as many chunks as there are instances of the
substring cuttingValue. Chunk boundaries are made wherever
the string appears.
Args: text -- the text to be chunked as a single string
Returns: A list of strings which are to become the new chunks.
"""
    chunkList = []  # Container for chunks
    cuttingValue = cuttingValue.encode('utf-8')
    lenMS = len(cuttingValue)  # Length of the milestone term, measured after encoding
    if lenMS > 0:
        chunkstop = text.find(cuttingValue)  # First boundary
        while chunkstop == 0:  # Trap for when the text begins with the milestone
            text = text[lenMS:]
            chunkstop = text.find(cuttingValue)
        while chunkstop >= 0:  # While another boundary exists
            nextchunk = text[:chunkstop]  # New chunk = current text up to the boundary index
            text = text[chunkstop + lenMS:]  # Text remaining after the boundary
            chunkstop = text.find(cuttingValue)  # Next boundary
            while chunkstop == 0:  # Trap for consecutive milestones
                text = text[lenMS:]
                chunkstop = text.find(cuttingValue)
            chunkList.append(nextchunk)  # Append this chunk to the chunk list
        if len(text) > 0:
            chunkList.append(text)
    else:
        chunkList.append(text)
return chunkList
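
# Illustrative sketch (not part of the original module) of cutByMilestone
# above: the text is split at every occurrence of the milestone string and the
# milestone itself is discarded, including when the text begins with it:
#
#     chunks = cutByMilestone("CHAPTER one CHAPTER two CHAPTER three", "CHAPTER")
#     # something like: [" one ", " two ", " three"]
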
def cut(text, cuttingValue, cuttingType, overlap, lastProp):
"""
Cuts each text string into various segments according to the options chosen by the user.
Args:
text: A string with the text to be split
cuttingValue: The value by which to cut the texts by.
cuttingType: A string representing which cutting method to use.
overlap: A unicode string representing the number of words to be overlapped between each text segment.
        lastProp: A unicode string representing the minimum proportion (as a percentage) that the last chunk must be in order not to be merged into the previous chunk.
Returns:
A list of strings, each representing a chunk of the original.
"""
cuttingType = str(cuttingType)
    if cuttingType != 'milestone':
cuttingValue = int(cuttingValue)
overlap = int(overlap)
lastProp = float(lastProp.strip('%')) / 100
if cuttingType == 'letters':
stringList = cutByCharacters(text, cuttingValue, overlap, lastProp)
elif cuttingType == 'words':
stringList = cutByWords(text, cuttingValue, overlap, lastProp)
elif cuttingType == 'lines':
stringList = cutByLines(text, cuttingValue, overlap, lastProp)
elif cuttingType == 'milestone':
stringList = cutByMilestone(text, cuttingValue)
else:
stringList = cutByNumber(text, cuttingValue)
return stringList
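
# Illustrative sketch (not part of the original module) of the cut() dispatcher
# above: the numeric options are assumed to arrive as strings (for example from
# a web form) and are converted here before being passed along:
#
#     chunks = cut("one two three four five", cuttingValue=u'2',
#                  cuttingType='words', overlap=u'0', lastProp=u'50%')
#     # something like: ["one two ", "three four ", "five"]
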